Reshape v7: remove (#1379)
authorIlya Lavrenov <ilya.lavrenov@intel.com>
Tue, 15 Sep 2020 12:08:17 +0000 (15:08 +0300)
committerGitHub <noreply@github.com>
Tue, 15 Sep 2020 12:08:17 +0000 (15:08 +0300)
* Removed shape inference for IR v7 and older

* Disabled dynamic batch tests which require reshape

* Fixed tests (part 2)

* Disabled MKLDNN tests with convolution reshape

* Fixed GPU tests

* Disabled VPU tests with batch size > 1 for old IRs

* Removed most of shape infer functions for old representation

* Removed most of CNNLayer validators

* Fixed validators and kept only parseParams

* Removed tests on invalid IR v7

* Disabled more VPU tests

* Removed Bucketize validator

* Disabled one more Myriad test case where reshape for old IRs is needed

* Removed useless reshape

* Need to replace GRUCell with Unique

* Moved shape infer functions for experimental layers to Core IE

* Fixed shape inference functions not to depend on legacy

* Added missing SparseToDense

* Added descriptive error message

* Fixed comments
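
In practical terms, networks read from IR v7 or older can no longer be reshaped through the Inference Engine API; only nGraph-based IR v10 networks keep that ability. A minimal caller-side sketch of the new behavior (file names and dimensions are illustrative):

    #include <inference_engine.hpp>

    int main() {
        InferenceEngine::Core core;
        // IR v10 (nGraph-based): reshape keeps working as before.
        auto network = core.ReadNetwork("model_v10.xml", "model_v10.bin");
        auto shapes = network.getInputShapes();
        shapes.begin()->second[0] = 8;  // e.g. change the batch dimension
        network.reshape(shapes);        // OK for v10
        // For IR v7 and older, a reshape that actually changes dimensions now
        // fails with a descriptive error (see CNNNetworkImpl::reshape below).
        return 0;
    }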

163 files changed:
inference-engine/src/inference_engine/CMakeLists.txt
inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp
inference-engine/src/inference_engine/cnn_network_ngraph_impl.hpp
inference-engine/src/inference_engine/generic_ie.cpp
inference-engine/src/inference_engine/shape_infer/ie_built_in_holder.cpp [new file with mode: 0644]
inference-engine/src/inference_engine/shape_infer/ie_built_in_holder.hpp [moved from inference-engine/src/legacy_api/include/legacy/shape_infer/built-in/ie_built_in_holder.hpp with 86% similarity]
inference-engine/src/inference_engine/shape_infer/ie_built_in_impl.hpp [new file with mode: 0644]
inference-engine/src/inference_engine/shape_infer/ie_detectionoutput_onnx_shape_infer.hpp [moved from inference-engine/src/legacy_api/src/shape_infer/built-in/ie_detectionoutput_onnx_shape_infer.hpp with 67% similarity]
inference-engine/src/inference_engine/shape_infer/ie_ishape_infer_extension.hpp [moved from inference-engine/src/legacy_api/include/legacy/ie_ishape_infer_extension.hpp with 100% similarity]
inference-engine/src/inference_engine/shape_infer/ie_priorgridgenerator_onnx_shape_infer.hpp [moved from inference-engine/src/legacy_api/src/shape_infer/built-in/ie_priorgridgenerator_onnx_shape_infer.hpp with 80% similarity]
inference-engine/src/inference_engine/shape_infer/ie_proposal_onnx_shape_infer.hpp [moved from inference-engine/src/legacy_api/src/shape_infer/built-in/ie_proposal_onnx_shape_infer.hpp with 71% similarity]
inference-engine/src/inference_engine/shape_infer/ie_proposal_shape_infer.hpp [moved from inference-engine/src/legacy_api/src/shape_infer/built-in/ie_proposal_shape_infer.hpp with 64% similarity]
inference-engine/src/inference_engine/shape_infer/ie_rnn_cell_shape_infer.hpp [moved from inference-engine/src/legacy_api/src/shape_infer/built-in/ie_rnn_cell_shape_infer.hpp with 60% similarity]
inference-engine/src/inference_engine/shape_infer/ie_roifeatureextractor_onnx_shape_infer.hpp [moved from inference-engine/src/legacy_api/src/shape_infer/built-in/ie_roifeatureextractor_onnx_shape_infer.hpp with 74% similarity]
inference-engine/src/inference_engine/shape_infer/ie_simpler_nms_shape_infer.hpp [moved from inference-engine/src/legacy_api/src/shape_infer/built-in/ie_simpler_nms_shape_infer.hpp with 71% similarity]
inference-engine/src/inference_engine/shape_infer/ie_sparse_to_dense_shape_infer.hpp [moved from inference-engine/src/legacy_api/src/shape_infer/built-in/ie_sparse_to_dense_shape_infer.hpp with 69% similarity]
inference-engine/src/inference_engine/shape_infer/ie_topkrois_onnx_shape_infer.hpp [moved from inference-engine/src/legacy_api/src/shape_infer/built-in/ie_topkrois_onnx_shape_infer.hpp with 67% similarity]
inference-engine/src/inference_engine/shape_infer/ie_unique_shape_infer.hpp [moved from inference-engine/src/legacy_api/src/shape_infer/built-in/ie_unique_shape_infer.hpp with 73% similarity]
inference-engine/src/legacy_api/include/legacy/cnn_network_impl.hpp
inference-engine/src/legacy_api/src/cnn_network_impl.cpp
inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_argmax_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_bin_conv_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_broadcast_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_bucketize_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_built_in_holder.cpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_built_in_impl.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_concat_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_conv_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_crop_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_ctc_greedy_decoder_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_deconv_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_deformable_conv_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_depth_to_space_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_detection_output_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_eltwise_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_equal_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_erf_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_fill_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_flatten_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_gather_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_gather_tree_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_gemm_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_inner_product_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_interp_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_non_max_suppression_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_one_hot_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_pad_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_permute_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_pool_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_priorbox_clustered_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_priorbox_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_psroi_pooling_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_quantize_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_range_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_reduce_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_region_yolo_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_reorg_yolo_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_resample_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_reshape_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_reverse_sequence_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_rnn_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_roi_pooling_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_scatter_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_select_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_shape_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_shuffle_channels_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_space_to_depth_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_sparse_fill_empty_rows_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_sparse_segment_reduce_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_sparse_weighted_reduce_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_split_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_squeeze_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_strided_slice_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_tensor_iterator_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_tile_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_topk_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_unsqueeze_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/ie_upsampling_shape_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/built-in/impl_register.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/broadcast_offset.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_add_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_broadcast_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_concat_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_const_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_const_infer_holder.cpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_const_infer_holder.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_const_infer_impl.cpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_const_infer_impl.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_convert_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_div_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_eltw_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_fill_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_gather_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_in_place_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_mul_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_onehot_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_permute_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_pow_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_power_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_range_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_reduce_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_reshape_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_shape_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_split_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_strided_slice_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_sub_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_tile_const_infer.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/ie_reshape_io_controllers.cpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/ie_reshape_io_controllers.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/ie_reshape_launcher.cpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/ie_reshape_launcher.hpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/ie_reshaper.cpp [deleted file]
inference-engine/src/legacy_api/src/shape_infer/ie_reshaper.hpp [deleted file]
inference-engine/src/mkldnn_plugin/mkldnn_extension_mngr.h
inference-engine/src/mkldnn_plugin/nodes/mkldnn_generic_node.h
inference-engine/src/plugin_api/generic_ie.hpp
inference-engine/tests/ie_test_utils/unit_test_utils/empty.cpp
inference-engine/tests/ie_test_utils/unit_test_utils/mocks/shape_infer/mock_input_controller.hpp [deleted file]
inference-engine/tests/ie_test_utils/unit_test_utils/mocks/shape_infer/mock_ishape_infer_impl.hpp [deleted file]
inference-engine/tests/ie_test_utils/unit_test_utils/mocks/shape_infer/mock_output_controller.hpp [deleted file]
inference-engine/tests/ie_test_utils/unit_test_utils/mocks/shape_infer/mock_reshaper_launcher.hpp [deleted file]
inference-engine/tests_deprecated/behavior/cldnn/CMakeLists.txt
inference-engine/tests_deprecated/behavior/gna/CMakeLists.txt
inference-engine/tests_deprecated/behavior/mkldnn/CMakeLists.txt
inference-engine/tests_deprecated/behavior/vpu/CMakeLists.txt
inference-engine/tests_deprecated/functional/cldnn/CMakeLists.txt
inference-engine/tests_deprecated/functional/cldnn/shared_tests_instance/common_single_layer_tests/single_layer_tests.cpp
inference-engine/tests_deprecated/functional/gna/CMakeLists.txt
inference-engine/tests_deprecated/functional/mkldnn/CMakeLists.txt
inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/common_single_layer_tests/single_layer_tests.cpp
inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/conv_tests.cpp
inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_seq_test.hpp
inference-engine/tests_deprecated/functional/vpu/CMakeLists.txt
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_concat_test.hpp
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convert_test.hpp
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution_test.cpp
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_exp_test.hpp
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_floor_test.hpp
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_log_test.hpp
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_squeeze_test.cpp
inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_unsqueeze_test.hpp
inference-engine/tests_deprecated/functional/vpu/shared_tests_instance/common_single_layer_tests/single_layer_tests.cpp
inference-engine/tests_deprecated/functional/vpu/shared_tests_instance/io_blob_tests/dims_tests.cpp
inference-engine/tests_deprecated/functional/vpu/shared_tests_instance/io_blob_tests/layout_tests.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/fake_layer.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_activation_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_batchnorm_scaleshift_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_batchnorm_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_concat_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_conv_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_deconv_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_depthwise_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_eltwise_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_fullyconnected_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_gemm_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_lrn_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_pooling_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_power_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_softmax_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_split_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_tile_test.cpp
inference-engine/tests_deprecated/unit/inference_engine_tests/util_const_infer_test.cpp

diff --git a/inference-engine/src/inference_engine/CMakeLists.txt b/inference-engine/src/inference_engine/CMakeLists.txt
index 9ab8889..da7fb12 100644 (file)
@@ -29,6 +29,7 @@ set(IE_BASE_SOURCE_FILES
       ${CMAKE_CURRENT_SOURCE_DIR}/ie_parameter.cpp
       ${CMAKE_CURRENT_SOURCE_DIR}/ie_rtti.cpp
       ${CMAKE_CURRENT_SOURCE_DIR}/precision_utils.cpp
+      ${CMAKE_CURRENT_SOURCE_DIR}/shape_infer/ie_built_in_holder.cpp
       ${CMAKE_CURRENT_SOURCE_DIR}/network_serializer.cpp
       ${CMAKE_CURRENT_SOURCE_DIR}/network_serializer.hpp
       ${CMAKE_CURRENT_SOURCE_DIR}/system_allocator.cpp
@@ -123,6 +124,7 @@ add_library(${TARGET_NAME}_common_obj OBJECT
 
 target_compile_definitions(${TARGET_NAME}_common_obj PRIVATE IMPLEMENT_INFERENCE_ENGINE_API)
 target_include_directories(${TARGET_NAME}_common_obj PRIVATE
+    "${CMAKE_CURRENT_SOURCE_DIR}"
     $<TARGET_PROPERTY:${TARGET_NAME}_transformations,INTERFACE_INCLUDE_DIRECTORIES>
     $<TARGET_PROPERTY:${TARGET_NAME}_plugin_api,INTERFACE_INCLUDE_DIRECTORIES>)
 
diff --git a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp
index 3f3dae7..5d51b3e 100644 (file)
@@ -30,7 +30,7 @@
 #include "ie_itt.hpp"
 #include "network_serializer.hpp"
 #include "generic_ie.hpp"
-#include <legacy/shape_infer/built-in/ie_built_in_holder.hpp>
+#include "shape_infer/ie_built_in_holder.hpp"
 
 using namespace std;
 using namespace InferenceEngine;
diff --git a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.hpp b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.hpp
index 56386a7..fa80f44 100644 (file)
 #include <legacy/cnn_network_impl.hpp>
 
 namespace InferenceEngine {
-namespace ShapeInfer {
-class Reshaper;
-
-using ReshaperPtr = std::shared_ptr<Reshaper>;
-}  // namespace ShapeInfer
-
 namespace details {
 
 /**
diff --git a/inference-engine/src/inference_engine/generic_ie.cpp b/inference-engine/src/inference_engine/generic_ie.cpp
index 313bee4..dd0d22b 100644 (file)
@@ -14,6 +14,7 @@
 #include <vector>
 
 #include "blob_factory.hpp"
+#include "shape_infer/ie_ishape_infer_extension.hpp"
 #include <legacy/ie_ngraph_utils.hpp>
 #include "ngraph/util.hpp"
 #include "ngraph/graph_util.hpp"
diff --git a/inference-engine/src/inference_engine/shape_infer/ie_built_in_holder.cpp b/inference-engine/src/inference_engine/shape_infer/ie_built_in_holder.cpp
new file mode 100644 (file)
index 0000000..2ff5461
--- /dev/null
@@ -0,0 +1,84 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <algorithm>
+#include <memory>
+#include <string>
+
+#include "shape_infer/ie_built_in_holder.hpp"
+#include "shape_infer/ie_detectionoutput_onnx_shape_infer.hpp"
+#include "shape_infer/ie_priorgridgenerator_onnx_shape_infer.hpp"
+#include "shape_infer/ie_proposal_onnx_shape_infer.hpp"
+#include "shape_infer/ie_proposal_shape_infer.hpp"
+#include "shape_infer/ie_rnn_cell_shape_infer.hpp"
+#include "shape_infer/ie_roifeatureextractor_onnx_shape_infer.hpp"
+#include "shape_infer/ie_simpler_nms_shape_infer.hpp"
+#include "shape_infer/ie_sparse_to_dense_shape_infer.hpp"
+#include "shape_infer/ie_topkrois_onnx_shape_infer.hpp"
+#include "shape_infer/ie_unique_shape_infer.hpp"
+#include "shape_infer/ie_sparse_to_dense_shape_infer.hpp"
+
+namespace InferenceEngine {
+namespace ShapeInfer {
+
+BuiltInShapeInferHolder::ImplsHolder::Ptr BuiltInShapeInferHolder::GetImplsHolder() {
+    static ImplsHolder::Ptr localHolder;
+    if (localHolder == nullptr) {
+        localHolder = std::make_shared<ImplsHolder>();
+    }
+    return localHolder;
+}
+
+void BuiltInShapeInferHolder::AddImpl(const std::string& name, const IShapeInferImpl::Ptr& impl) {
+    GetImplsHolder()->list[name] = impl;
+}
+
+StatusCode BuiltInShapeInferHolder::getShapeInferTypes(char**& types, unsigned int& size, ResponseDesc* resp) noexcept {
+    auto& factories = GetImplsHolder()->list;
+    types = new char*[factories.size()];
+    size = 0;
+    for (auto it = factories.begin(); it != factories.end(); it++, size++) {
+        types[size] = new char[it->first.size() + 1];
+        std::copy(it->first.begin(), it->first.end(), types[size]);
+        types[size][it->first.size()] = '\0';
+    }
+    return OK;
+}
+
+StatusCode BuiltInShapeInferHolder::getShapeInferImpl(IShapeInferImpl::Ptr& impl, const char* type,
+                                                      ResponseDesc* resp) noexcept {
+    auto& impls = BuiltInShapeInferHolder::GetImplsHolder()->list;
+    if (impls.find(type) != impls.end()) {
+        impl = impls[type];
+        return OK;
+    }
+    impl.reset();
+    return NOT_FOUND;
+}
+
+template <typename Impl>
+class ImplRegisterBase {
+public:
+    explicit ImplRegisterBase(const std::string& type) {
+        BuiltInShapeInferHolder::AddImpl(type, std::make_shared<Impl>(type));
+    }
+};
+
+#define REG_SHAPE_INFER_FOR_TYPE(__prim, __type) \
+    static ImplRegisterBase<__prim> __bi_reg__##__type(#__type)
+
+REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronDetectionOutputShapeProp, ExperimentalDetectronDetectionOutput);
+REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronPriorGridGeneratorShapeProp, ExperimentalDetectronPriorGridGenerator);
+REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronGenerateProposalsSingleImageShapeProp, ExperimentalDetectronGenerateProposalsSingleImage);
+REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronROIFeatureExtractorShapeProp, ExperimentalDetectronROIFeatureExtractor);
+REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronTopKROIsShapeProp, ExperimentalDetectronTopKROIs);
+REG_SHAPE_INFER_FOR_TYPE(SimplerNMSShapeProp, SimplerNMS);
+REG_SHAPE_INFER_FOR_TYPE(SparseToDenseShapeProp, SparseToDense);
+REG_SHAPE_INFER_FOR_TYPE(ProposalShapeProp, Proposal);
+REG_SHAPE_INFER_FOR_TYPE(RNNCellShapeProp, RNNCell);
+REG_SHAPE_INFER_FOR_TYPE(GRUCellShapeProp, GRUCell);
+REG_SHAPE_INFER_FOR_TYPE(UniqueShapeProp, Unique);
+
+}  // namespace ShapeInfer
+}  // namespace InferenceEngine
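
The holder above uses a classic static-registration idiom: each REG_SHAPE_INFER_FOR_TYPE line defines a file-scope ImplRegisterBase object whose constructor inserts one implementation into the shared map before main() runs. A minimal sketch of adding another implementation the same way (MyLayerShapeProp and the "MyLayer" type are hypothetical):

    // Hypothetical example, inside namespace InferenceEngine::ShapeInfer;
    // BuiltInShapeInferImpl is defined in shape_infer/ie_built_in_impl.hpp (shown below).
    class MyLayerShapeProp : public BuiltInShapeInferImpl {
    public:
        explicit MyLayerShapeProp(const std::string& type) : BuiltInShapeInferImpl(type) {}

        void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs,
                             const std::map<std::string, std::string>& params,
                             const std::map<std::string, Blob::Ptr>& blobs,
                             std::vector<SizeVector>& outShapes) override {
            outShapes.push_back(inShapes[0]);  // identity shape propagation
        }
    };
    // Expands to: static ImplRegisterBase<MyLayerShapeProp> __bi_reg__MyLayer("MyLayer");
    REG_SHAPE_INFER_FOR_TYPE(MyLayerShapeProp, MyLayer);
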
diff --git a/inference-engine/src/legacy_api/include/legacy/shape_infer/built-in/ie_built_in_holder.hpp b/inference-engine/src/inference_engine/shape_infer/ie_built_in_holder.hpp
 #include <description_buffer.hpp>
 #include "caseless.hpp"
 
-#include <legacy/ie_ishape_infer_extension.hpp>
+#include "shape_infer/ie_ishape_infer_extension.hpp"
 
 namespace InferenceEngine {
 namespace ShapeInfer {
 
-IE_SUPPRESS_DEPRECATED_START
-
 /**
  *@brief Holder of shape infer implementations for build-in IE layers, that plugins support out-of-the-box
  */
-class INFERENCE_ENGINE_API_CLASS(BuiltInShapeInferHolder) : public IShapeInferExtension {
+class BuiltInShapeInferHolder : public IShapeInferExtension {
     struct ImplsHolder {
         using Ptr = std::shared_ptr<ImplsHolder>;
         InferenceEngine::details::caseless_map<std::string, IShapeInferImpl::Ptr> list;
@@ -48,7 +46,5 @@ private:
     static ImplsHolder::Ptr GetImplsHolder();
 };
 
-IE_SUPPRESS_DEPRECATED_END
-
 }  // namespace ShapeInfer
 }  // namespace InferenceEngine
diff --git a/inference-engine/src/inference_engine/shape_infer/ie_built_in_impl.hpp b/inference-engine/src/inference_engine/shape_infer/ie_built_in_impl.hpp
new file mode 100644 (file)
index 0000000..ecf8a62
--- /dev/null
@@ -0,0 +1,145 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <ie_iextension.h>
+#include <shape_infer/ie_ishape_infer_extension.hpp>
+#include <description_buffer.hpp>
+
+#include <algorithm>
+#include <cctype>
+#include <list>
+#include <map>
+#include <memory>
+#include <sstream>
+#include <string>
+#include <vector>
+
+namespace InferenceEngine {
+
+inline std::string GetParamAsString(const char* param, const std::map<std::string, std::string> & params) {
+    auto it = params.find(param);
+    if (it == params.end()) {
+        THROW_IE_EXCEPTION << "No such parameter name '" << param << "'";
+    }
+    return (*it).second;
+}
+
+inline int GetParamAsInt(const char* param, const std::map<std::string, std::string> & params) {
+    std::string val = GetParamAsString(param, params);
+    try {
+        return std::stoi(val);
+    } catch (...) {
+        THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer. Value "
+                           << val << " cannot be cast to int.";
+    }
+}
+
+inline bool GetParamAsBool(const char* param, const std::map<std::string, std::string> & params) {
+    std::string val = GetParamAsString(param, params);
+    std::string loweredCaseValue;
+    std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
+        return static_cast<char>(std::tolower(value));
+    });
+
+    bool result = false;
+
+    if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
+        // attempting parse using non alpha bool
+        return (GetParamAsInt(param, params) != 0);
+    }
+
+    return result;
+}
+
+inline std::string GetParamAsString(const char* param, const char* def,
+                             const std::map<std::string, std::string> & params) {
+    auto it = params.find(param);
+    if (it == params.end() || it->second.empty()) {
+        return def;
+    }
+    return (*it).second;
+}
+
+inline int GetParamAsInt(const char* param, int def,
+                  const std::map<std::string, std::string> & params) {
+    std::string val = GetParamAsString(param, std::to_string(def).c_str(), params);
+    try {
+        return std::stoi(val);
+    } catch (...) {
+        THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer. Value "
+                           << val << " cannot be cast to int.";
+    }
+}
+
+inline bool GetParamAsBool(const char* param, bool def,
+                    const std::map<std::string, std::string> & params) {
+    std::string val = GetParamAsString(param, std::to_string(def).c_str(), params);
+    std::string loweredCaseValue;
+    std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
+        return static_cast<char>(std::tolower(value));
+    });
+
+    bool result = false;
+
+    if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
+        // attempting parse using non alpha bool
+        return (GetParamAsInt(param, def, params) != 0);
+    }
+
+    return result;
+}
+
+inline unsigned int GetParamAsUInt(const char* param, const std::map<std::string, std::string> & params) {
+    std::string val = GetParamAsString(param, params);
+        std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer" +
+                          ". Value " + val + " cannot be cast to unsigned int.";
+    try {
+        int value = std::stoi(val);
+        if (value < 0) {
+            THROW_IE_EXCEPTION << message;
+        }
+        return static_cast<unsigned int>(value);
+    } catch (...) {
+        THROW_IE_EXCEPTION << message;
+    }
+}
+
+namespace ShapeInfer {
+
+/**
+ * @brief Base class for all built-in shape infer implementations. Contains common logic with validators and errors
+ * handling
+ */
+class BuiltInShapeInferImpl : public IShapeInferImpl {
+public:
+    explicit BuiltInShapeInferImpl(const std::string& type): _type(type) { }
+
+    virtual void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs,
+                                 const std::map<std::string, std::string>& params,
+                                 const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) = 0;
+
+    StatusCode inferShapes(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
+                           const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes,
+                           ResponseDesc* resp) noexcept override {
+        inShapes.clear();
+        for (const auto& blob : inBlobs) {
+            inShapes.push_back(blob->getTensorDesc().getDims());
+        }
+        outShapes.clear();
+        try {
+            inferShapesImpl(inBlobs, params, blobs, outShapes);
+            return OK;
+        } catch (const std::exception& ex) {
+            return InferenceEngine::DescriptionBuffer(GENERAL_ERROR, resp) << ex.what();
+        } catch (...) {
+            return InferenceEngine::DescriptionBuffer(UNEXPECTED) << "Unknown error";
+        }
+    }
+
+protected:
+    std::string _type;
+    std::vector<SizeVector> inShapes;
+};
+
+}  // namespace ShapeInfer
+}  // namespace InferenceEngine
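
These free GetParamAs* helpers replace the old CNNLayer::GetParamAs* methods, so a shape-infer implementation no longer has to construct a temporary CNNLayer just to parse IR attributes. A short usage sketch (the map contents are illustrative):

    std::map<std::string, std::string> params = {
        {"post_nms_topn", "300"},
        {"flatten", "true"},
    };
    int topn     = InferenceEngine::GetParamAsInt("post_nms_topn", params);          // 300
    bool flatten = InferenceEngine::GetParamAsBool("flatten", /*def=*/true, params); // true
    int pre_topn = InferenceEngine::GetParamAsInt("pre_nms_topn", 6000, params);     // default: 6000
    // A key that is missing and has no default throws:
    // "No such parameter name 'pre_nms_topn'".
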
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_detectionoutput_onnx_shape_infer.hpp b/inference-engine/src/inference_engine/shape_infer/ie_detectionoutput_onnx_shape_infer.hpp
@@ -15,8 +15,8 @@ namespace InferenceEngine {
 namespace ShapeInfer {
 
 /**
-*@brief Implementation of Shape inference for ExperimentalDetectronDetectionOutput layer
-*/
+ * @brief Implementation of Shape inference for ExperimentalDetectronDetectionOutput layer
+ */
 class ExperimentalDetectronDetectionOutputShapeProp : public BuiltInShapeInferImpl {
 protected:
     const int ROIS = 0;
@@ -27,17 +27,12 @@ public:
 
     void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                          const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CNNLayer cnnLayer(lp);
-        cnnLayer.params = params;
-        cnnLayer.type = _type;
-        validate(&cnnLayer, inBlobs, params, blobs);
-
-        auto rois_num = cnnLayer.GetParamAsUInt("max_detections_per_image");
+        auto rois_num = GetParamAsUInt("max_detections_per_image", params);
         outShapes.push_back({rois_num, 4});
 
-        auto num_outputs = cnnLayer.GetParamAsUInt("num_outputs");
-        if (num_outputs > 3) THROW_IE_EXCEPTION << "Incorrect value num_outputs: " << num_outputs;
+        auto num_outputs = GetParamAsUInt("num_outputs", params);
+        if (num_outputs > 3)
+            THROW_IE_EXCEPTION << "Incorrect value num_outputs: " << num_outputs;
         if (num_outputs >= 2) {
             outShapes.push_back({rois_num});
         }
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_priorgridgenerator_onnx_shape_infer.hpp b/inference-engine/src/inference_engine/shape_infer/ie_priorgridgenerator_onnx_shape_infer.hpp
@@ -18,8 +18,8 @@ namespace InferenceEngine {
 namespace ShapeInfer {
 
 /**
-*@brief Implementation of Shape inference for ExperimentalDetectronPriorGridGenerator layer
-*/
+ * @brief Implementation of Shape inference for ExperimentalDetectronPriorGridGenerator layer
+ */
 class ExperimentalDetectronPriorGridGeneratorShapeProp : public BuiltInShapeInferImpl {
 protected:
     const int PRIORS = 0;
@@ -32,19 +32,13 @@ public:
 
     void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                          const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CNNLayer cnnLayer(lp);
-        cnnLayer.params = params;
-        cnnLayer.type = _type;
-        validate(&cnnLayer, inBlobs, params, blobs);
-
         const auto& priors_shape = inShapes.at(PRIORS);
         const auto priors_num = priors_shape.at(0);
         const auto& featmap_shape = inShapes.at(FEATMAP);
         const auto grid_height = featmap_shape.at(H);
         const auto grid_width = featmap_shape.at(W);
 
-        const bool flatten = cnnLayer.GetParamAsBool("flatten", true);
+        const bool flatten = GetParamAsBool("flatten", true, params);
         if (flatten) {
             outShapes.push_back({grid_height * grid_width * priors_num, 4});
         } else {
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_proposal_onnx_shape_infer.hpp b/inference-engine/src/inference_engine/shape_infer/ie_proposal_onnx_shape_infer.hpp
@@ -15,21 +15,15 @@ namespace InferenceEngine {
 namespace ShapeInfer {
 
 /**
-*@brief Implementation of Shape inference for ExperimentalDetectronGenerateProposalsSingleImage layer
-*/
+ * @brief Implementation of Shape inference for ExperimentalDetectronGenerateProposalsSingleImage layer
+ */
 class ExperimentalDetectronGenerateProposalsSingleImageShapeProp : public BuiltInShapeInferImpl {
 public:
     explicit ExperimentalDetectronGenerateProposalsSingleImageShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
 
     void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                          const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CNNLayer cnnLayer(lp);
-        cnnLayer.params = params;
-        cnnLayer.type = _type;
-        validate(&cnnLayer, inBlobs, params, blobs);
-
-        auto post_nms_count = cnnLayer.GetParamAsUInt("post_nms_count");
+        auto post_nms_count = GetParamAsUInt("post_nms_count", params);
         outShapes.push_back({post_nms_count, 4});
         outShapes.push_back({post_nms_count, });
     }
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_proposal_shape_infer.hpp b/inference-engine/src/inference_engine/shape_infer/ie_proposal_shape_infer.hpp
@@ -15,7 +15,7 @@ namespace InferenceEngine {
 namespace ShapeInfer {
 
 /**
- *@brief Implementation of Shape inference for Proposal layer
+ * @brief Implementation of Shape inference for Proposal layer
  */
 class ProposalShapeProp : public BuiltInShapeInferImpl {
 public:
@@ -23,14 +23,12 @@ public:
 
     void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                          const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CNNLayer cnnLayer(lp);
-        cnnLayer.params = params;
-        cnnLayer.type = _type;
-        validate(&cnnLayer, inBlobs, params, blobs);
-        size_t post_nms_topn = static_cast<size_t>(cnnLayer.GetParamAsInt("post_nms_topn"));
-        auto num_outputs = cnnLayer.GetParamAsUInt("num_outputs");
-        if (num_outputs > 2) THROW_IE_EXCEPTION << "Incorrect value num_outputs: " << num_outputs;
+        size_t post_nms_topn = static_cast<size_t>(GetParamAsInt("post_nms_topn", params));
+        auto num_outputs = GetParamAsUInt("num_outputs", params);
+
+        if (num_outputs > 2)
+            THROW_IE_EXCEPTION << "Incorrect value num_outputs: " << num_outputs;
+
         outShapes.push_back({inShapes[0][0] * post_nms_topn, 5});
         if (num_outputs == 2)
             outShapes.push_back({inShapes[0][0] * post_nms_topn});
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_rnn_cell_shape_infer.hpp b/inference-engine/src/inference_engine/shape_infer/ie_rnn_cell_shape_infer.hpp
@@ -16,29 +16,24 @@ namespace InferenceEngine {
 namespace ShapeInfer {
 
 /**
- *@brief Implementation of Shape inference for DetectionOutput layer
+ * @brief Implementation of Shape inference for RNN-cell-based layers (RNNCell, GRUCell, LSTMCell)
  */
-template <class CELL, int S>
+template <int S>
 class RNNBaseCellShapeProp : public BuiltInShapeInferImpl {
 public:
     explicit RNNBaseCellShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
 
     void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                          const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CELL cnnLayer(lp);
-        cnnLayer.params = params;
-        cnnLayer.type = _type;
-        validate(&cnnLayer, inBlobs, params, blobs);
-
         auto state_dims = inShapes[1];
-        for (int i = 0; i < S; i++) outShapes.push_back(state_dims);
+        for (int i = 0; i < S; i++)
+            outShapes.push_back(state_dims);
     }
 };
 
-using RNNCellShapeProp = RNNBaseCellShapeProp<RNNCell, 1>;
-using GRUCellShapeProp = RNNBaseCellShapeProp<GRUCell, 1>;
-using LSTMCellShapeProp = RNNBaseCellShapeProp<LSTMCell, 2>;
+using RNNCellShapeProp = RNNBaseCellShapeProp<1>;
+using GRUCellShapeProp = RNNBaseCellShapeProp<1>;
+using LSTMCellShapeProp = RNNBaseCellShapeProp<2>;
 
 }  // namespace ShapeInfer
 }  // namespace InferenceEngine
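
Dropping the CELL template parameter is safe because the only thing these cells contribute to shape inference is the number of state outputs S: every output simply copies the shape of the second input (the hidden state). For example, LSTMCellShapeProp = RNNBaseCellShapeProp<2> with inShapes = {{N, D}, {N, H}, {N, H}} takes state_dims = {N, H} and produces outShapes = {{N, H}, {N, H}}. Note that RNNCellShapeProp and GRUCellShapeProp are now the same type, RNNBaseCellShapeProp<1>, registered under two layer-type names.
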
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_roifeatureextractor_onnx_shape_infer.hpp b/inference-engine/src/inference_engine/shape_infer/ie_roifeatureextractor_onnx_shape_infer.hpp
@@ -15,8 +15,8 @@ namespace InferenceEngine {
 namespace ShapeInfer {
 
 /**
-*@brief Implementation of Shape inference for ExperimentalDetectronROIFeatureExtractor layer
-*/
+ * @brief Implementation of Shape inference for ExperimentalDetectronROIFeatureExtractor layer
+ */
 class ExperimentalDetectronROIFeatureExtractorShapeProp : public BuiltInShapeInferImpl {
 protected:
     const int ROIS = 0;
@@ -27,18 +27,12 @@ public:
 
     void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                          const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CNNLayer cnnLayer(lp);
-        cnnLayer.params = params;
-        cnnLayer.type = _type;
-        validate(&cnnLayer, inBlobs, params, blobs);
-
         size_t rois_num = inShapes.at(ROIS).at(0);
         size_t channels_num = inShapes.at(FEATMAPS).at(1);
-        size_t output_size = static_cast<size_t>(cnnLayer.GetParamAsInt("output_size"));
+        size_t output_size = static_cast<size_t>(GetParamAsInt("output_size", params));
         outShapes.push_back({rois_num, channels_num, output_size, output_size});
 
-        auto num_outputs = cnnLayer.GetParamAsUInt("num_outputs");
+        auto num_outputs = GetParamAsUInt("num_outputs", params);
         if (num_outputs > 2) THROW_IE_EXCEPTION << "Incorrect value num_outputs: " << num_outputs;
         if (num_outputs == 2) {
             outShapes.push_back({rois_num, 4});
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_simpler_nms_shape_infer.hpp b/inference-engine/src/inference_engine/shape_infer/ie_simpler_nms_shape_infer.hpp
@@ -18,7 +18,7 @@ namespace InferenceEngine {
 namespace ShapeInfer {
 
 /**
- *@brief Implementation of Shape inference for SimplerNMS layer
+ * @brief Implementation of Shape inference for SimplerNMS layer
  */
 class SimplerNMSShapeProp : public BuiltInShapeInferImpl {
 public:
@@ -26,13 +26,7 @@ public:
 
     void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                          const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CNNLayer cnnLayer(lp);
-        cnnLayer.params = params;
-        cnnLayer.type = _type;
-        validate(&cnnLayer, inBlobs, params, blobs);
-
-        size_t post_nms_topn = static_cast<size_t>(cnnLayer.GetParamAsInt("post_nms_topn"));
+        size_t post_nms_topn = static_cast<size_t>(GetParamAsInt("post_nms_topn", params));
         outShapes.push_back({post_nms_topn, 5});
     }
 };
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_sparse_to_dense_shape_infer.hpp b/inference-engine/src/inference_engine/shape_infer/ie_sparse_to_dense_shape_infer.hpp
@@ -14,22 +14,16 @@ namespace InferenceEngine {
 namespace ShapeInfer {
 
 /**
-*@brief Implementation of Shape inference for SparseToDense layer
+* @brief Implementation of Shape inference for SparseToDense layer
 */
 class SparseToDenseShapeProp : public BuiltInShapeInferImpl {
 public:
     explicit SparseToDenseShapeProp(const std::string& type) : BuiltInShapeInferImpl(type) {}
 
     void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs,
-        const std::map<std::string, std::string>& params,
-        const std::map<std::string, Blob::Ptr>& blobs,
-        std::vector<SizeVector>& outShapes) override {
-        LayerParams lp{};
-        SparseToDenseLayer sparse_to_dense_layer(lp);
-        sparse_to_dense_layer.params = params;
-        sparse_to_dense_layer.type = _type;
-        validate(&sparse_to_dense_layer, inBlobs, params, blobs);
-
+                         const std::map<std::string, std::string>& params,
+                         const std::map<std::string, Blob::Ptr>& blobs,
+                         std::vector<SizeVector>& outShapes) override {
         SizeVector shapes;
         if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::I32) {
             auto* buffer = inBlobs[1]->cbuffer().as<int*>();
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_topkrois_onnx_shape_infer.hpp b/inference-engine/src/inference_engine/shape_infer/ie_topkrois_onnx_shape_infer.hpp
@@ -4,35 +4,27 @@
 
 #pragma once
 
-#include <ie_layers.h>
-
 #include <description_buffer.hpp>
 #include <map>
 #include <memory>
 #include <string>
 #include <vector>
 
-#include "ie_built_in_impl.hpp"
+#include "shape_infer/ie_built_in_impl.hpp"
 
 namespace InferenceEngine {
 namespace ShapeInfer {
 
 /**
-*@brief Implementation of Shape inference for ExperimentalDetectronTopKROIs layer
-*/
+ * @brief Implementation of Shape inference for ExperimentalDetectronTopKROIs layer
+ */
 class ExperimentalDetectronTopKROIsShapeProp : public BuiltInShapeInferImpl {
 public:
     explicit ExperimentalDetectronTopKROIsShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
 
     void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                          const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CNNLayer cnnLayer(lp);
-        cnnLayer.params = params;
-        cnnLayer.type = _type;
-        validate(&cnnLayer, inBlobs, params, blobs);
-
-        const auto max_rois = cnnLayer.GetParamAsUInt("max_rois");
+        const auto max_rois = GetParamAsUInt("max_rois", params);
         outShapes.push_back({max_rois, 4});
     }
 };
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_unique_shape_infer.hpp b/inference-engine/src/inference_engine/shape_infer/ie_unique_shape_infer.hpp
@@ -9,13 +9,13 @@
 #include <string>
 #include <vector>
 
-#include "ie_built_in_impl.hpp"
+#include "shape_infer/ie_built_in_impl.hpp"
 
 namespace InferenceEngine {
 namespace ShapeInfer {
 
 /**
- *@brief Implementation of Shape inference for Unique layer
+ * @brief Implementation of Shape inference for Unique layer
  */
 class UniqueShapeProp : public BuiltInShapeInferImpl {
 public:
@@ -23,18 +23,15 @@ public:
 
     void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
                          const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        UniqueLayer unique_layer(lp);
-        unique_layer.params = params;
-        unique_layer.type = _type;
-        validate(&unique_layer, inBlobs, params, blobs);
+        bool return_inverse = GetParamAsBool("return_inverse", params);
+        bool return_counts = GetParamAsBool("return_counts", params);
 
         // compute a number of outputs
         size_t num_outputs = 1;
-        if (unique_layer.return_counts) {
+        if (return_counts) {
             num_outputs++;
         }
-        if (unique_layer.return_inverse) {
+        if (return_inverse) {
             num_outputs++;
         }
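
For Unique, the output count follows directly from the two flags: num_outputs = 1 + return_inverse + return_counts, so e.g. params = {{"return_inverse", "true"}, {"return_counts", "false"}} yields two outputs. Note that GetParamAsBool without a default throws when the attribute is absent, so both attributes must be present in the IR here.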
 
diff --git a/inference-engine/src/legacy_api/include/legacy/cnn_network_impl.hpp b/inference-engine/src/legacy_api/include/legacy/cnn_network_impl.hpp
index c7bb65f..506ce35 100644 (file)
 #include "description_buffer.hpp"
 
 #include <legacy/ie_layers.h>
-#include <legacy/ie_ishape_infer_extension.hpp>
 
 namespace InferenceEngine {
-namespace ShapeInfer {
-class Reshaper;
 
-using ReshaperPtr = std::shared_ptr<Reshaper>;
-}  // namespace ShapeInfer
+class IShapeInferExtension;
+using IShapeInferExtensionPtr = std::shared_ptr<IShapeInferExtension>;
+
 namespace details {
 
 class INFERENCE_ENGINE_API_CLASS(CNNNetworkImpl): public ICNNNetwork {
@@ -126,9 +124,6 @@ public:
     StatusCode reshape(const std::map<std::string, std::vector<size_t>>& inputShapes,
                        ResponseDesc* resp) noexcept override;
 
-    StatusCode AddExtension(const InferenceEngine::IShapeInferExtensionPtr& extension,
-                            InferenceEngine::ResponseDesc* resp) noexcept;
-
     StatusCode serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const
         noexcept override;
 
@@ -139,7 +134,6 @@ protected:
     std::map<std::string, DataPtr> _outputData;
     std::string _name;
     DataPtr _emptyData;
-    ShapeInfer::ReshaperPtr _reshaper;
 };
 
 typedef std::shared_ptr<CNNNetworkImpl> CNNNetworkImplPtr;
diff --git a/inference-engine/src/legacy_api/src/cnn_network_impl.cpp b/inference-engine/src/legacy_api/src/cnn_network_impl.cpp
index 9d93a61..67bf014 100644 (file)
@@ -29,7 +29,6 @@
 #include "legacy/details/ie_cnn_network_tools.h"
 #include <legacy/cnn_network_impl.hpp>
 #include "network_serializer_v7.hpp"
-#include <shape_infer/ie_reshaper.hpp>
 
 using namespace std;
 using namespace InferenceEngine;
@@ -364,31 +363,24 @@ size_t CNNNetworkImpl::getBatchSize() const noexcept {
 
 StatusCode CNNNetworkImpl::reshape(const std::map<std::string, std::vector<size_t>>& inputShapes,
                                    ResponseDesc* responseDesc) noexcept {
-    try {
-        if (!_reshaper) _reshaper = std::make_shared<ShapeInfer::Reshaper>(*this);
-        _reshaper->run(inputShapes);
-    } catch (const InferenceEngineException& e) {
-        return DescriptionBuffer(GENERAL_ERROR, responseDesc) << e.what();
-    } catch (const std::exception& e) {
-        return DescriptionBuffer(UNEXPECTED, responseDesc) << e.what();
-    } catch (...) {
-        return DescriptionBuffer(UNEXPECTED, responseDesc);
+    for (const auto& pair : _inputData) {
+        auto info = pair.second;
+        if (info) {
+            auto data = info->getInputData();
+            auto it = inputShapes.find(pair.first);
+            if (data && it != inputShapes.end()) {
+                auto newDims = it->second;
+                auto currentDims = data->getTensorDesc().getDims();
+                if (newDims != currentDims) {
+                    return DescriptionBuffer(NOT_IMPLEMENTED, responseDesc) <<
+                        "You have called setBatchSize + reshape for a CNNNetwork object. Please either:\n"
+                        "- [SUGGESTED] Regenerate the IR with the current version of Model Optimizer\n"
+                        "- [WORKAROUND] Call only the reshape method with the proper batch size already set\n";
+                }
+            }
+        }
     }
-    return OK;
-}
 
-StatusCode CNNNetworkImpl::AddExtension(const InferenceEngine::IShapeInferExtensionPtr& extension,
-                                        InferenceEngine::ResponseDesc* resp) noexcept {
-    try {
-        if (!_reshaper) _reshaper = std::make_shared<ShapeInfer::Reshaper>(*this);
-        _reshaper->AddExtension(extension);
-    } catch (const InferenceEngineException& e) {
-        return DescriptionBuffer(GENERAL_ERROR, resp) << e.what();
-    } catch (const std::exception& e) {
-        return DescriptionBuffer(UNEXPECTED, resp) << e.what();
-    } catch (...) {
-        return DescriptionBuffer(UNEXPECTED, resp);
-    }
     return OK;
 }
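
With the Reshaper gone, reshape() on a legacy CNNNetworkImpl is effectively a validation pass: it returns OK only when every requested shape already matches the current one. A caller-side sketch ("network" is assumed to be a CNNNetworkImpl pointer for an IR v7 model; the input name and dims are illustrative):

    InferenceEngine::ResponseDesc resp;
    std::map<std::string, std::vector<size_t>> shapes = {{"data", {1, 3, 224, 224}}};
    InferenceEngine::StatusCode sc = network->reshape(shapes, &resp);
    // sc == OK only if "data" already has dims {1, 3, 224, 224}; any actual
    // change returns NOT_IMPLEMENTED with the descriptive message above.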
 
diff --git a/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp b/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp
index f0401fe..dd27d57 100644 (file)
@@ -1105,10 +1105,6 @@ void convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function
         i.second->setLayout(thisInputData.getLayout());
         i.second->getPreProcess() = thisInputData.getPreProcess();
     }
-
-    for (const auto &ext : ::ngraph::op::GenericIE::getExtensions(graph)) {
-        cnnNetworkImpl->AddExtension(ext, nullptr);
-    }
 }
 
 std::shared_ptr<CNNNetworkImpl> convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function> &graph,
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_argmax_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_argmax_shape_infer.hpp
deleted file mode 100644 (file)
index 606a50d..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <legacy/ie_layers.h>
-
-#include <description_buffer.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for ArgMax layer
- */
-class ArgMaxShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit ArgMaxShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CNNLayer cnnLayer(lp);
-        cnnLayer.params = params;
-        cnnLayer.type = _type;
-        validate(&cnnLayer, inBlobs, params, blobs);
-        auto out_max_val = static_cast<size_t>(cnnLayer.GetParamAsInt("out_max_val", 0));
-        auto top_k = static_cast<size_t>(cnnLayer.GetParamAsInt("top_k", 0));
-        int axis = 0;
-        bool isValidAxis = true;
-        try {
-            axis = cnnLayer.GetParamAsInt("axis");
-        } catch (const details::InferenceEngineException& exception) {
-            isValidAxis = false;
-        }
-
-        auto firstInputShape = inShapes[0];
-        size_t num_top_axes = firstInputShape.size();
-        if (num_top_axes < 3) num_top_axes = 3;
-
-        SizeVector outputShape(num_top_axes, 1lu);
-        if (isValidAxis) {
-            if (axis < 0) {
-                axis = static_cast<int>(firstInputShape.size() + axis);
-            }
-            outputShape = firstInputShape;
-            outputShape[axis] = top_k;
-        } else {
-            outputShape[0] = firstInputShape[0];
-            outputShape[2] = top_k;
-            if (out_max_val) {
-                outputShape[1] = 2;
-            }
-        }
-        outShapes.push_back(outputShape);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_bin_conv_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_bin_conv_shape_infer.hpp
deleted file mode 100644 (file)
index 2d6d2b2..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <debug.h>
-
-#include <cmath>
-#include <description_buffer.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for BinaryConvolution layer
- */
-class BinConvShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit BinConvShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        BinaryConvolutionLayer binConvLayer(lp);
-        binConvLayer.params = params;
-        binConvLayer.type = _type;
-        validate(&binConvLayer, inBlobs, params, blobs);
-
-        auto dims = inShapes[0];
-        auto computeSpatialShape = [&](size_t inDim, int axis) {
-            size_t kernel = 0;
-            if (binConvLayer._dilation[axis])
-                kernel = (binConvLayer._kernel[axis] - 1) * binConvLayer._dilation[axis] + 1;
-            else
-                kernel = binConvLayer._kernel[axis];
-            size_t stride = binConvLayer._stride[axis];
-            size_t pad = binConvLayer._padding[axis];
-
-            float outDim;
-            std::string padType = binConvLayer._auto_pad;
-            if (padType == "valid") {
-                outDim = std::ceil((inDim - kernel + 1.f) / stride);
-            } else if (padType == "same_upper") {
-                outDim = std::ceil(1.f * inDim / stride);
-            } else if (padType == "same_lower") {
-                outDim = std::floor(1.f * inDim / stride);
-            } else {
-                int padEnd = binConvLayer._pads_end[axis];
-                outDim = std::floor(1.f * (inDim + pad + padEnd - kernel) / stride) + 1.f;
-            }
-
-            if (outDim < 0)
-                THROW_IE_EXCEPTION << "New shapes " << details::dumpVec(dims) << " make output shape negative";
-
-            return static_cast<size_t>(outDim);
-        };
-
-        size_t inputN = dims[0];
-        size_t OC = binConvLayer._out_depth;
-        SizeVector shapes;
-        shapes.push_back(inputN);
-        shapes.push_back(OC);
-        if (dims.size() == 5) shapes.push_back(computeSpatialShape(dims[dims.size() - 3], Z_AXIS));
-        shapes.push_back(computeSpatialShape(dims[dims.size() - 2], Y_AXIS));
-        shapes.push_back(computeSpatialShape(dims[dims.size() - 1], X_AXIS));
-        outShapes.push_back(shapes);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
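
For reference, the removed BinaryConvolution shape rule (explicit-padding branch) was outDim = floor((inDim + pad + padEnd - kernel) / stride) + 1, with the kernel dilated as (kernel - 1) * dilation + 1 when dilation is set. Worked example: inDim = 224, kernel = 3, stride = 2, pad = padEnd = 1 gives floor(223 / 2) + 1 = 112. For IR v10, the equivalent computation comes from the corresponding nGraph operation instead of these legacy validators.
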
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_broadcast_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_broadcast_shape_infer.hpp
deleted file mode 100644 (file)
index 8014fba..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-#include "precision_utils.h"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Broadcast layer
- */
-class BroadcastShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit BroadcastShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        BroadcastLayer broadcastLayer(lp);
-        broadcastLayer.params = params;
-        broadcastLayer.type = _type;
-        validate(&broadcastLayer, inBlobs, params, blobs);
-
-        SizeVector shapes;
-        if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::I32) {
-            auto* buffer = inBlobs[1]->cbuffer().as<int*>();
-            if (buffer != nullptr) {
-                shapes.assign(buffer, buffer + inBlobs[1]->size());
-            } else {
-                THROW_IE_EXCEPTION << "Second input must have allocated data";
-            }
-        } else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::FP32) {
-            auto* buffer = inBlobs[1]->cbuffer().as<float*>();
-            if (buffer != nullptr) {
-                for (int i = 0; i < inBlobs[1]->size(); i++) {
-                    shapes.push_back(static_cast<int>(buffer[i]));
-                }
-            } else {
-                THROW_IE_EXCEPTION << "Second input must have allocated data";
-            }
-        } else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::FP16) {
-            auto* buffer = inBlobs[1]->cbuffer().as<uint16_t*>();
-            if (buffer != nullptr) {
-                for (int i = 0; i < inBlobs[1]->size(); i++) {
-                    shapes.push_back(static_cast<int>(PrecisionUtils::f16tof32(buffer[i])));
-                }
-            }
-        } else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::I64) {
-            auto* buffer = inBlobs[1]->cbuffer().as<int64_t*>();
-            if (buffer != nullptr) {
-                shapes.assign(buffer, buffer + inBlobs[1]->size());
-            } else {
-                THROW_IE_EXCEPTION << "Second input must have allocated data";
-            }
-        } else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::U64) {
-            auto* buffer = inBlobs[1]->cbuffer().as<uint64_t*>();
-            if (buffer != nullptr) {
-                shapes.assign(buffer, buffer + inBlobs[1]->size());
-            } else {
-                THROW_IE_EXCEPTION << "Second input must have allocated data";
-            }
-        } else {
-            THROW_IE_EXCEPTION << "Second input must have I32 or FP32 or FP16 precision";
-        }
-
-        outShapes = {shapes};
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_bucketize_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_bucketize_shape_infer.hpp
deleted file mode 100644 (file)
index ea3c723..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "ie_built_in_impl.hpp"
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Bucketize layer
- */
-class BucketizeShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit BucketizeShapeProp(const std::string& type) : BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs,
-        const std::map<std::string, std::string>& params,
-        const std::map<std::string, Blob::Ptr>& blobs,
-        std::vector<SizeVector>& outShapes) override {
-        LayerParams lp{};
-        BucketizeLayer bucketize_layer(lp);
-        bucketize_layer.params = params;
-        bucketize_layer.type = _type;
-        validate(&bucketize_layer, inBlobs, params, blobs);
-
-        // Bucketize always produces a single output
-        size_t num_outputs = 1;
-
-        // the output shape matches the shape of the first input
-        outShapes.resize(num_outputs);
-        outShapes[0] = inShapes[0];
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_built_in_holder.cpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_built_in_holder.cpp
deleted file mode 100644 (file)
index 405906c..0000000
+++ /dev/null
@@ -1,264 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <algorithm>
-#include <memory>
-#include <string>
-
-#include "legacy/shape_infer/built-in/ie_built_in_holder.hpp"
-#include "ie_argmax_shape_infer.hpp"
-#include "ie_bin_conv_shape_infer.hpp"
-#include "ie_broadcast_shape_infer.hpp"
-#include "ie_concat_shape_infer.hpp"
-#include "ie_conv_shape_infer.hpp"
-#include "ie_crop_shape_infer.hpp"
-#include "ie_ctc_greedy_decoder_shape_infer.hpp"
-#include "ie_deconv_shape_infer.hpp"
-#include "ie_deformable_conv_shape_infer.hpp"
-#include "ie_depth_to_space_shape_infer.hpp"
-#include "ie_detectionoutput_onnx_shape_infer.hpp"
-#include "ie_detection_output_shape_infer.hpp"
-#include "ie_eltwise_shape_infer.hpp"
-#include "ie_equal_shape_infer.hpp"
-#include "ie_erf_shape_infer.hpp"
-#include "ie_fill_shape_infer.hpp"
-#include "ie_flatten_shape_infer.hpp"
-#include "ie_gather_shape_infer.hpp"
-#include "ie_gather_tree_shape_infer.hpp"
-#include "ie_gemm_shape_infer.hpp"
-#include "ie_inner_product_shape_infer.hpp"
-#include "ie_interp_shape_infer.hpp"
-#include "ie_non_max_suppression_shape_infer.hpp"
-#include "ie_one_hot_shape_infer.hpp"
-#include "ie_pad_shape_infer.hpp"
-#include "ie_permute_shape_infer.hpp"
-#include "ie_pool_shape_infer.hpp"
-#include "ie_priorbox_clustered_shape_infer.hpp"
-#include "ie_priorbox_shape_infer.hpp"
-#include "ie_priorgridgenerator_onnx_shape_infer.hpp"
-#include "ie_proposal_onnx_shape_infer.hpp"
-#include "ie_proposal_shape_infer.hpp"
-#include "ie_psroi_pooling_shape_infer.hpp"
-#include "ie_quantize_shape_infer.hpp"
-#include "ie_range_shape_infer.hpp"
-#include "ie_reduce_shape_infer.hpp"
-#include "ie_region_yolo_shape_infer.hpp"
-#include "ie_reorg_yolo_shape_infer.hpp"
-#include "ie_resample_shape_infer.hpp"
-#include "ie_reshape_shape_infer.hpp"
-#include "ie_reverse_sequence_shape_infer.hpp"
-#include "ie_rnn_cell_shape_infer.hpp"
-#include "ie_rnn_shape_infer.hpp"
-#include "ie_roi_pooling_shape_infer.hpp"
-#include "ie_roifeatureextractor_onnx_shape_infer.hpp"
-#include "ie_scatter_shape_infer.hpp"
-#include "ie_select_shape_infer.hpp"
-#include "ie_shape_shape_infer.hpp"
-#include "ie_shuffle_channels_shape_infer.hpp"
-#include "ie_simpler_nms_shape_infer.hpp"
-#include "ie_space_to_depth_shape_infer.hpp"
-#include "ie_sparse_fill_empty_rows_shape_infer.hpp"
-#include "ie_sparse_segment_reduce_shape_infer.hpp"
-#include "ie_split_shape_infer.hpp"
-#include "ie_sparse_to_dense_shape_infer.hpp"
-#include "ie_bucketize_shape_infer.hpp"
-#include "ie_squeeze_shape_infer.hpp"
-#include "ie_sparse_weighted_reduce_shape_infer.hpp"
-#include "ie_strided_slice_shape_infer.hpp"
-#include "ie_tensor_iterator_shape_infer.hpp"
-#include "ie_tile_shape_infer.hpp"
-#include "ie_topk_shape_infer.hpp"
-#include "ie_topkrois_onnx_shape_infer.hpp"
-#include "ie_unique_shape_infer.hpp"
-#include "ie_unsqueeze_shape_infer.hpp"
-#include "ie_upsampling_shape_infer.hpp"
-#include "impl_register.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-BuiltInShapeInferHolder::ImplsHolder::Ptr BuiltInShapeInferHolder::GetImplsHolder() {
-    static ImplsHolder::Ptr localHolder;
-    if (localHolder == nullptr) {
-        localHolder = std::make_shared<ImplsHolder>();
-    }
-    return localHolder;
-}
-
-IE_SUPPRESS_DEPRECATED_START
-
-void BuiltInShapeInferHolder::AddImpl(const std::string& name, const IShapeInferImpl::Ptr& impl) {
-    GetImplsHolder()->list[name] = impl;
-}
-
-StatusCode BuiltInShapeInferHolder::getShapeInferTypes(char**& types, unsigned int& size, ResponseDesc* resp) noexcept {
-    auto& factories = GetImplsHolder()->list;
-    types = new char*[factories.size()];
-    size = 0;
-    for (auto it = factories.begin(); it != factories.end(); it++, size++) {
-        types[size] = new char[it->first.size() + 1];
-        std::copy(it->first.begin(), it->first.end(), types[size]);
-        types[size][it->first.size()] = '\0';
-    }
-    return OK;
-}
-
-StatusCode BuiltInShapeInferHolder::getShapeInferImpl(IShapeInferImpl::Ptr& impl, const char* type,
-                                                      ResponseDesc* resp) noexcept {
-    auto& impls = BuiltInShapeInferHolder::GetImplsHolder()->list;
-    if (impls.find(type) != impls.end()) {
-        impl = impls[type];
-        return OK;
-    }
-    impl.reset();
-    return NOT_FOUND;
-}
-
-IE_SUPPRESS_DEPRECATED_END
-
-// Registered without an implementation just to prevent custom implementations from being added for these types
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Input);
-REG_SHAPE_INFER_FOR_TYPE(DoNothingShapeProp, Output);
-REG_SHAPE_INFER_FOR_TYPE(MemoryShapeProp, Memory);
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Const);
-
-// Outputs = Inputs
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Activation);
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, ReLU);
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, ReLU6);
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, ELU);
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, TanH);
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Logistic);
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Sigmoid);
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, PReLU);
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, SoftMax);
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, LogSoftMax);
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, LRN);
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Norm);
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Normalize);
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Convert);
-// FIXME: Really Copy??? New MO doesn't generate this layer
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Copy);
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Power);
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, PowerFile);
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, Clamp);
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, ScaleShift);
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, BatchNormalization);
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, GRN);
-REG_SHAPE_INFER_FOR_TYPE(EqualShapeProp, MVN);
-
-REG_SHAPE_INFER_FOR_TYPE(ConvShapeProp, Convolution);
-REG_SHAPE_INFER_FOR_TYPE(DeconvShapeProp, Deconvolution);
-REG_SHAPE_INFER_FOR_TYPE(DeformableConvShapeProp, DeformableConvolution);
-REG_SHAPE_INFER_FOR_TYPE(PoolingShapeProp, Pooling);
-REG_SHAPE_INFER_FOR_TYPE(InnerProductShapeProp, InnerProduct);
-REG_SHAPE_INFER_FOR_TYPE(InnerProductShapeProp, FullyConnected);
-REG_SHAPE_INFER_FOR_TYPE(SplitShapeProp, Split);
-REG_SHAPE_INFER_FOR_TYPE(SplitShapeProp, Slice);
-REG_SHAPE_INFER_FOR_TYPE(PermuteShapeProp, Permute);
-REG_SHAPE_INFER_FOR_TYPE(FlattenShapeProp, Flatten);
-REG_SHAPE_INFER_FOR_TYPE(ReshapeShapeProp, Reshape);
-REG_SHAPE_INFER_FOR_TYPE(DetectionOutputShapeProp, DetectionOutput);
-REG_SHAPE_INFER_FOR_TYPE(PriorBoxClusteredShapeProp, PriorBoxClustered);
-REG_SHAPE_INFER_FOR_TYPE(PriorBoxShapeProp, PriorBox);
-REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronDetectionOutputShapeProp, ExperimentalDetectronDetectionOutput);
-REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronPriorGridGeneratorShapeProp, ExperimentalDetectronPriorGridGenerator);
-REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronGenerateProposalsSingleImageShapeProp, ExperimentalDetectronGenerateProposalsSingleImage);
-REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronROIFeatureExtractorShapeProp, ExperimentalDetectronROIFeatureExtractor);
-REG_SHAPE_INFER_FOR_TYPE(ExperimentalDetectronTopKROIsShapeProp, ExperimentalDetectronTopKROIs);
-REG_SHAPE_INFER_FOR_TYPE(RoiPoolingShapeProp, ROIPooling);
-REG_SHAPE_INFER_FOR_TYPE(PSRoiPoolingShapeProp, PSROIPooling);
-REG_SHAPE_INFER_FOR_TYPE(UpsamplingShapeProp, Upsampling);
-REG_SHAPE_INFER_FOR_TYPE(ResampleShapeProp, Resample);
-REG_SHAPE_INFER_FOR_TYPE(InterpShapeProp, Interp);
-REG_SHAPE_INFER_FOR_TYPE(SimplerNMSShapeProp, SimplerNMS);
-REG_SHAPE_INFER_FOR_TYPE(TileShapeProp, Tile);
-REG_SHAPE_INFER_FOR_TYPE(CropShapeProp, Crop);
-REG_SHAPE_INFER_FOR_TYPE(ConcatShapeProp, Concat);
-REG_SHAPE_INFER_FOR_TYPE(EltWiseShapeProp, Eltwise);
-REG_SHAPE_INFER_FOR_TYPE(EltWiseShapeProp, Mul);
-REG_SHAPE_INFER_FOR_TYPE(EltWiseShapeProp, Add);
-REG_SHAPE_INFER_FOR_TYPE(EltWiseShapeProp, Div);
-REG_SHAPE_INFER_FOR_TYPE(CTCGreedyDecoderShapeProp, CTCGreedyDecoder);
-REG_SHAPE_INFER_FOR_TYPE(ProposalShapeProp, Proposal);
-REG_SHAPE_INFER_FOR_TYPE(ReorgYoloShapeProp, ReorgYolo);
-REG_SHAPE_INFER_FOR_TYPE(RegionYoloShapeProp, RegionYolo);
-REG_SHAPE_INFER_FOR_TYPE(RNNShapeProp, RNNSequence);
-REG_SHAPE_INFER_FOR_TYPE(RNNShapeProp, GRUSequence);
-REG_SHAPE_INFER_FOR_TYPE(RNNShapeProp, LSTMSequence);
-REG_SHAPE_INFER_FOR_TYPE(RNNCellShapeProp, RNNCell);
-REG_SHAPE_INFER_FOR_TYPE(GRUCellShapeProp, GRUCell);
-REG_SHAPE_INFER_FOR_TYPE(LSTMCellShapeProp, LSTMCell);
-REG_SHAPE_INFER_FOR_TYPE(TensorIteratorShapeProp, TensorIterator);
-REG_SHAPE_INFER_FOR_TYPE(ArgMaxShapeProp, ArgMax);
-REG_SHAPE_INFER_FOR_TYPE(GemmShapeProp, Gemm);
-REG_SHAPE_INFER_FOR_TYPE(PadShapeProp, Pad);
-REG_SHAPE_INFER_FOR_TYPE(GatherShapeProp, Gather);
-REG_SHAPE_INFER_FOR_TYPE(StridedSliceShapeProp, StridedSlice);
-REG_SHAPE_INFER_FOR_TYPE(ShuffleChannelsShapeProp, ShuffleChannels);
-REG_SHAPE_INFER_FOR_TYPE(DepthToSpaceShapeProp, DepthToSpace);
-REG_SHAPE_INFER_FOR_TYPE(SpaceToDepthShapeProp, SpaceToDepth);
-REG_SHAPE_INFER_FOR_TYPE(SparseFillEmptyRowsShapeProp, SparseFillEmptyRows);
-REG_SHAPE_INFER_FOR_TYPE(SparseSegmentReduceShapeProp, SparseSegmentMean);
-REG_SHAPE_INFER_FOR_TYPE(SparseSegmentReduceShapeProp, SparseSegmentSqrtN);
-REG_SHAPE_INFER_FOR_TYPE(SparseSegmentReduceShapeProp, SparseSegmentSum);
-REG_SHAPE_INFER_FOR_TYPE(ExperimentalSparseWeightedReduceShapeProp, ExperimentalSparseWeightedSum);
-REG_SHAPE_INFER_FOR_TYPE(SparseToDenseShapeProp, SparseToDense);
-REG_SHAPE_INFER_FOR_TYPE(BucketizeShapeProp, Bucketize);
-REG_SHAPE_INFER_FOR_TYPE(ReverseSequenceShapeProp, ReverseSequence);
-REG_SHAPE_INFER_FOR_TYPE(SelectShapeProp, Select);
-REG_SHAPE_INFER_FOR_TYPE(SqueezeShapeProp, Squeeze);
-REG_SHAPE_INFER_FOR_TYPE(UnsqueezeShapeProp, Unsqueeze);
-REG_SHAPE_INFER_FOR_TYPE(RangeShapeProp, Range);
-REG_SHAPE_INFER_FOR_TYPE(FillShapeProp, Fill);
-REG_SHAPE_INFER_FOR_TYPE(BroadcastShapeProp, Broadcast);
-REG_SHAPE_INFER_FOR_TYPE(ShapeShapeProp, Shape);
-REG_SHAPE_INFER_FOR_TYPE(OneHotShapeProp, OneHot);
-REG_SHAPE_INFER_FOR_TYPE(QuantizeShapeProp, FakeQuantize);
-REG_SHAPE_INFER_FOR_TYPE(BinConvShapeProp, BinaryConvolution);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Abs);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Acos);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Acosh);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Asin);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Asinh);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Atan);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Atanh);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Ceil);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Cos);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Cosh);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Erf);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Floor);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, HardSigmoid);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Log);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Exp);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Neg);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Reciprocal);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Selu);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Sign);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Sin);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Sinh);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Softplus);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Softsign);
-REG_SHAPE_INFER_FOR_TYPE(MathShapeProp, Tan);
-REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceAnd);
-REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceL1);
-REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceL2);
-REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceLogSum);
-REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceLogSumExp);
-REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceMax);
-REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceMean);
-REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceMin);
-REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceOr);
-REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceProd);
-REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceSum);
-REG_SHAPE_INFER_FOR_TYPE(ReduceShapeProp, ReduceSumSquare);
-REG_SHAPE_INFER_FOR_TYPE(GatherTreeShapeProp, GatherTree);
-REG_SHAPE_INFER_FOR_TYPE(TopKShapeProp, TopK);
-REG_SHAPE_INFER_FOR_TYPE(UniqueShapeProp, Unique);
-REG_SHAPE_INFER_FOR_TYPE(NMSShapeProp, NonMaxSuppression);
-REG_SHAPE_INFER_FOR_TYPE(ScatterUpdateShapeProp, ScatterUpdate);
-REG_SHAPE_INFER_FOR_TYPE(ScatterElementsUpdateShapeProp, ScatterElementsUpdate);
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
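
The holder above is, at its core, a process-wide registry mapping a layer-type name to a shared implementation, populated during static initialization by the REG_SHAPE_INFER_FOR_TYPE entries. A minimal sketch of that pattern (illustrative names, not the IE API):

#include <map>
#include <memory>
#include <string>
#include <utility>

struct ShapeInferImpl {
    virtual ~ShapeInferImpl() = default;
};

// A function-local static avoids static-initialization-order issues,
// like the lazily constructed holder in GetImplsHolder() above.
std::map<std::string, std::shared_ptr<ShapeInferImpl>>& registry() {
    static std::map<std::string, std::shared_ptr<ShapeInferImpl>> impls;
    return impls;
}

struct Registrar {
    Registrar(const std::string& type, std::shared_ptr<ShapeInferImpl> impl) {
        registry()[type] = std::move(impl);
    }
};

// A registration macro can then expand to a file-scope Registrar, e.g.:
// #define REG_SHAPE_INFER(Impl, Type) \
//     static Registrar reg_##Type(#Type, std::make_shared<Impl>())
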
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_built_in_impl.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_built_in_impl.hpp
deleted file mode 100644 (file)
index 695bfe8..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_iextension.h>
-#include <legacy/ie_layers.h>
-
-#include <description_buffer.hpp>
-#include <ie_layer_validators.hpp>
-#include <list>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-IE_SUPPRESS_DEPRECATED_START
-
-/**
- *@brief Base class for all built-in shape infer implementations. Contains common logic for validators and error
- *handling
- */
-class BuiltInShapeInferImpl : public IShapeInferImpl {
-public:
-    explicit BuiltInShapeInferImpl(const std::string& type): _type(type) {
-        _validator = details::LayerValidators::getInstance()->getValidator(_type);
-        if (!_validator)
-            THROW_IE_EXCEPTION << "Internal error: failed to find validator for layer with type: " << _type;
-    }
-
-    void validate(CNNLayer* layer, const std::vector<Blob::CPtr>& inBlobs,
-                  const std::map<std::string, std::string>& params, const std::map<std::string, Blob::Ptr>& blobs) {
-        _validator->parseParams(layer);
-    }
-
-    virtual void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs,
-                                 const std::map<std::string, std::string>& params,
-                                 const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) = 0;
-
-    StatusCode inferShapes(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                           const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes,
-                           ResponseDesc* resp) noexcept override {
-        inShapes.clear();
-        for (const auto& blob : inBlobs) {
-            inShapes.push_back(blob->getTensorDesc().getDims());
-        }
-        outShapes.clear();
-        try {
-            inferShapesImpl(inBlobs, params, blobs, outShapes);
-            return OK;
-        } catch (const std::exception& ex) {
-            return InferenceEngine::DescriptionBuffer(GENERAL_ERROR, resp) << ex.what();
-        } catch (...) {
-            return InferenceEngine::DescriptionBuffer(UNEXPECTED) << "Unknown error";
-        }
-    }
-
-protected:
-    std::string _type;
-    details::LayerValidator::Ptr _validator;
-    std::vector<SizeVector> inShapes;
-};
-
-IE_SUPPRESS_DEPRECATED_END
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
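
The pattern worth noting in the removed base class is the noexcept boundary: inferShapesImpl() is allowed to throw, and inferShapes() converts anything thrown into a status code plus response text. A standalone sketch of that boundary (illustrative names and enum values, not the IE API):

#include <exception>
#include <string>

enum StatusCode { OK = 0, GENERAL_ERROR = -1, UNEXPECTED = -2 };

struct Response {
    std::string msg;
};

template <typename Fn>
StatusCode runAsStatusCode(Fn&& fn, Response* resp) noexcept {
    try {
        fn();  // may throw, like inferShapesImpl()
        return OK;
    } catch (const std::exception& ex) {
        if (resp) resp->msg = ex.what();
        return GENERAL_ERROR;
    } catch (...) {
        if (resp) resp->msg = "Unknown error";
        return UNEXPECTED;
    }
}
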
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_concat_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_concat_shape_infer.hpp
deleted file mode 100644 (file)
index c04a6e8..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Concat layer
- */
-class ConcatShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit ConcatShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        ConcatLayer concatLayer(lp);
-        concatLayer.params = params;
-        concatLayer.type = _type;
-        validate(&concatLayer, inBlobs, params, blobs);
-
-        size_t sum(0);
-        size_t axis = concatLayer._axis;
-        outShapes.push_back(inShapes[0]);
-        for (const auto& inShape : inShapes) {
-            if (axis >= inShape.size()) THROW_IE_EXCEPTION << "Axis can't exceed the rank of the input shapes";
-            sum += inShape[axis];
-        }
-        outShapes[0][axis] = sum;
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
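
Restated outside the legacy layer classes, the concat rule above is: the output copies the first input's dimensions, except along the concatenation axis, where extents are summed over all inputs (a plain C++ sketch, not the IE API):

#include <cstddef>
#include <stdexcept>
#include <vector>

using SizeVector = std::vector<std::size_t>;

SizeVector concatShape(const std::vector<SizeVector>& inShapes, std::size_t axis) {
    SizeVector out = inShapes.at(0);
    if (axis >= out.size())
        throw std::runtime_error("Axis can't exceed the rank of the input shapes");
    out[axis] = 0;  // accumulate the concatenated extent from scratch
    for (const auto& s : inShapes) {
        if (axis >= s.size())
            throw std::runtime_error("Axis can't exceed the rank of the input shapes");
        out[axis] += s[axis];
    }
    return out;
}
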
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_conv_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_conv_shape_infer.hpp
deleted file mode 100644 (file)
index 0125ffd..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <cmath>
-#include <description_buffer.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Convolution layer
- */
-class ConvShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit ConvShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        ConvolutionLayer convLayer(lp);
-        convLayer.params = params;
-        convLayer.type = _type;
-        validate(&convLayer, inBlobs, params, blobs);
-
-        auto dims = inShapes[0];
-        auto dims_size = dims.size();
-        auto spacial_d_size = dims.size() - 2;
-        std::vector<float> OD_temp(spacial_d_size);
-        std::vector<size_t> KDims(spacial_d_size);
-        size_t inputN = dims[0];
-        for (int i = 0; i < spacial_d_size; i++) {
-            if (convLayer._dilation[i])
-                KDims[i] = (convLayer._kernel[i] - 1) * convLayer._dilation[i] + 1;
-            else
-                KDims[i] = convLayer._kernel[i];
-        }
-        size_t OC = convLayer._out_depth;
-        std::string padType = convLayer._auto_pad;
-        if (padType == "valid") {
-            for (int i = 0; i < spacial_d_size; i++)
-                OD_temp[i] = std::ceil((dims[dims_size - 1 - i] - KDims[i] + 1.f) / convLayer._stride[i]);
-        } else if (padType == "same_upper") {
-            for (int i = 0; i < spacial_d_size; i++)
-                OD_temp[i] = std::ceil(1.f * dims[dims_size - 1 - i] / convLayer._stride[i]);
-        } else if (padType == "same_lower") {
-            for (int i = 0; i < spacial_d_size; i++)
-                OD_temp[i] = std::floor(1.f * dims[dims_size - 1 - i] / convLayer._stride[i]);
-        } else {
-            for (int i = 0; i < spacial_d_size; i++) {
-                OD_temp[i] =
-                    std::floor(1.f *
-                               (dims[dims_size - 1 - i] + convLayer._padding[i] + convLayer._pads_end[i] - KDims[i]) /
-                               convLayer._stride[i]) +
-                    1.f;
-            }
-        }
-
-        for (int i = 0; i < spacial_d_size; i++)
-            if (OD_temp[i] < 0)
-                THROW_IE_EXCEPTION << "New shapes " << details::dumpVec(dims) << " make output shape negative";
-
-        SizeVector outShape = {inputN, OC};
-        for (int i = spacial_d_size - 1; i >= 0; i--) outShape.push_back(static_cast<size_t>(OD_temp[i]));
-
-        outShapes.push_back(outShape);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
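
The explicit-padding branch above is the standard convolution output-size formula with dilation folded into an effective kernel size. A standalone sketch with a worked example (the caller must ensure the padded input is at least the effective kernel, which the removed code enforced by rejecting negative results):

#include <cstddef>

// out = floor((in + pad_begin + pad_end - k_eff) / stride) + 1,
// where k_eff = (k - 1) * dilation + 1.
// Example: in=224, k=3, dilation=1, pads 1+1, stride=2 -> (224+2-3)/2 + 1 = 112.
std::size_t convOutDim(std::size_t in, std::size_t k, std::size_t stride,
                       std::size_t dilation, std::size_t padBegin, std::size_t padEnd) {
    const std::size_t kEff = (k - 1) * dilation + 1;
    return (in + padBegin + padEnd - kEff) / stride + 1;  // integer division == floor
}
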
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_crop_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_crop_shape_infer.hpp
deleted file mode 100644 (file)
index 0ff6f49..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Crop layer
- */
-class CropShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit CropShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CropLayer cropLayer(lp);
-        cropLayer.params = params;
-        cropLayer.type = _type;
-        validate(&cropLayer, inBlobs, params, blobs);
-
-        outShapes.push_back(inShapes[0]);
-        if (inShapes.size() == 2) {
-            SizeVector cropShapes = inShapes[1];
-            for (int axis : cropLayer.axis) {
-                outShapes[0][axis] = cropShapes[axis];
-            }
-        } else {
-            std::vector<int> crop_end;
-            bool isDim = cropLayer.params.find("dim") != cropLayer.params.end();
-            if (!isDim) crop_end = cropLayer.GetParamAsInts("crop_end");
-            for (size_t i = 0; i < cropLayer.axis.size(); i++) {
-                outShapes[0][cropLayer.axis[i]] =
-                    isDim ? cropLayer.dim[i] : inShapes[0][cropLayer.axis[i]] - cropLayer.offset[i] - crop_end[i];
-            }
-        }
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_ctc_greedy_decoder_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_ctc_greedy_decoder_shape_infer.hpp
deleted file mode 100644 (file)
index 5950e82..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for CTCGreedyDecoder layer
- */
-class CTCGreedyDecoderShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit CTCGreedyDecoderShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        outShapes.clear();
-        LayerParams lp {};
-        CNNLayer cnnLayer(lp);
-        cnnLayer.params = params;
-        cnnLayer.type = _type;
-        validate(&cnnLayer, inBlobs, params, blobs);
-
-        outShapes.push_back({inShapes[0][1], inShapes[0][0], 1, 1});
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_deconv_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_deconv_shape_infer.hpp
deleted file mode 100644 (file)
index 1f7ea56..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <description_buffer.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Deconvolution layer
- */
-class DeconvShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit DeconvShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        DeconvolutionLayer deconvLayer(lp);
-        deconvLayer.params = params;
-        deconvLayer.type = _type;
-        validate(&deconvLayer, inBlobs, params, blobs);
-
-        auto dims = inShapes[0];
-        auto dims_size = dims.size();
-        auto spacial_d_size = dims.size() - 2;
-        std::vector<float> OD_temp(spacial_d_size);
-        std::vector<size_t> KDims(spacial_d_size);
-        size_t inputN = dims[0];
-        for (int i = 0; i < spacial_d_size; i++) {
-            if (deconvLayer._dilation[i])
-                KDims[i] = (deconvLayer._kernel[i] - 1) * deconvLayer._dilation[i] + 1;
-            else
-                KDims[i] = deconvLayer._kernel[i];
-        }
-        size_t OC = deconvLayer._out_depth;
-        std::string padType = deconvLayer._auto_pad;
-        if (padType == "valid") {
-            for (int i = 0; i < spacial_d_size; i++)
-                OD_temp[i] = (dims[dims_size - 1 - i] - 1) * deconvLayer._stride[i] + KDims[i];
-        } else if ((padType == "same_upper") || (padType == "same_lower")) {
-            for (int i = 0; i < spacial_d_size; i++) OD_temp[i] = dims[dims_size - 1 - i] * deconvLayer._stride[i];
-        } else {
-            for (int i = 0; i < spacial_d_size; i++)
-                OD_temp[i] = deconvLayer._stride[i] * (dims[dims_size - 1 - i] - 1) + KDims[i] -
-                             deconvLayer._padding[i] - deconvLayer._pads_end[i];
-        }
-        for (int i = 0; i < spacial_d_size; i++)
-            if (OD_temp[i] < 0)
-                THROW_IE_EXCEPTION << "New shapes " << details::dumpVec(dims) << " make output shape negative";
-
-        SizeVector outShape = {inputN, OC};
-        for (int i = spacial_d_size - 1; i >= 0; i--) outShape.push_back(static_cast<size_t>(OD_temp[i]));
-
-        outShapes.emplace_back(outShape);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
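
The explicit-padding branch above inverts the convolution formula: a deconvolution (transposed convolution) grows the spatial extent instead of shrinking it. A standalone sketch with a worked example:

#include <cstddef>

// out = stride * (in - 1) + k_eff - pad_begin - pad_end,
// where k_eff = (k - 1) * dilation + 1.
// Example: in=112, k=3, dilation=1, pads 1+1, stride=2 -> 2*111 + 3 - 2 = 223.
std::size_t deconvOutDim(std::size_t in, std::size_t k, std::size_t stride,
                         std::size_t dilation, std::size_t padBegin, std::size_t padEnd) {
    const std::size_t kEff = (k - 1) * dilation + 1;
    return stride * (in - 1) + kEff - padBegin - padEnd;
}
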
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_deformable_conv_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_deformable_conv_shape_infer.hpp
deleted file mode 100644 (file)
index a1fafcb..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <cmath>
-#include <description_buffer.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Deformable Convolution layer
- */
-class DeformableConvShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit DeformableConvShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        DeformableConvolutionLayer deformableConvLayer(lp);
-        deformableConvLayer.params = params;
-        deformableConvLayer.type = _type;
-        validate(&deformableConvLayer, inBlobs, params, blobs);
-
-        auto dims = inShapes[0];
-        auto dims_size = dims.size();
-        auto spacial_d_size = dims.size() - 2;
-        std::vector<float> OD_temp(spacial_d_size);
-        std::vector<size_t> KDims(spacial_d_size);
-        size_t inputN = dims[0];
-        for (int i = 0; i < spacial_d_size; i++) {
-            if (deformableConvLayer._dilation[i])
-                KDims[i] = (deformableConvLayer._kernel[i] - 1) * deformableConvLayer._dilation[i] + 1;
-            else
-                KDims[i] = deformableConvLayer._kernel[i];
-        }
-        size_t OC = deformableConvLayer._out_depth;
-        std::string padType = deformableConvLayer._auto_pad;
-        if (padType == "valid") {
-            for (int i = 0; i < spacial_d_size; i++)
-                OD_temp[i] = std::ceil((dims[dims_size - 1 - i] - KDims[i] + 1.f) / deformableConvLayer._stride[i]);
-        } else if (padType == "same_upper") {
-            for (int i = 0; i < spacial_d_size; i++)
-                OD_temp[i] = std::ceil(1.f * dims[dims_size - 1 - i] / deformableConvLayer._stride[i]);
-        } else if (padType == "same_lower") {
-            for (int i = 0; i < spacial_d_size; i++)
-                OD_temp[i] = std::floor(1.f * dims[dims_size - 1 - i] / deformableConvLayer._stride[i]);
-        } else {
-            for (int i = 0; i < spacial_d_size; i++)
-                OD_temp[i] = std::floor(1.f *
-                                        (dims[dims_size - 1 - i] + deformableConvLayer._padding[i] +
-                                         deformableConvLayer._pads_end[i] - KDims[i]) /
-                                        deformableConvLayer._stride[i]) +
-                             1.f;
-        }
-        for (int i = 0; i < spacial_d_size; i++)
-            if (OD_temp[i] < 0)
-                THROW_IE_EXCEPTION << "New shapes " << details::dumpVec(dims) << " make output shape negative";
-
-        SizeVector outShape = {inputN, OC};
-        for (int i = spacial_d_size - 1; i >= 0; i--) outShape.push_back(static_cast<size_t>(OD_temp[i]));
-
-        outShapes.emplace_back(outShape);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_depth_to_space_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_depth_to_space_shape_infer.hpp
deleted file mode 100644 (file)
index 48e3eeb..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for DepthToSpace layer
- */
-class DepthToSpaceShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit DepthToSpaceShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        DepthToSpaceLayer depthToSpaceLayer(lp);
-        depthToSpaceLayer.params = params;
-        depthToSpaceLayer.type = _type;
-        validate(&depthToSpaceLayer, inBlobs, params, blobs);
-
-        unsigned int block_size = depthToSpaceLayer.block_size;
-        outShapes = {inShapes[0]};
-
-        outShapes[0][outShapes[0].size() - 1] = inShapes[0][inShapes[0].size() - 1] * block_size;
-        outShapes[0][outShapes[0].size() - 2] = inShapes[0][inShapes[0].size() - 2] * block_size;
-        outShapes[0][outShapes[0].size() - 3] = inShapes[0][inShapes[0].size() - 3] / block_size / block_size;
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
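
In effect the three assignments above move data from channels into space: H and W are multiplied by block_size while C is divided by block_size squared. A standalone restatement with an example:

#include <cstddef>
#include <vector>

// E.g. {1, 16, 5, 5} with block = 2 -> {1, 4, 10, 10}.
std::vector<std::size_t> depthToSpaceShape(std::vector<std::size_t> s, std::size_t block) {
    const std::size_t n = s.size();
    s[n - 1] *= block;          // W
    s[n - 2] *= block;          // H
    s[n - 3] /= block * block;  // C
    return s;
}
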
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_detection_output_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_detection_output_shape_infer.hpp
deleted file mode 100644 (file)
index 5bb0d0b..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <legacy/ie_layers.h>
-
-#include <description_buffer.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for DetectionOutput layer
- */
-class DetectionOutputShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit DetectionOutputShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CNNLayer cnnLayer(lp);
-        cnnLayer.params = params;
-        cnnLayer.type = _type;
-        validate(&cnnLayer, inBlobs, params, blobs);
-
-        int top_k = cnnLayer.GetParamAsInt("keep_top_k");
-        outShapes.push_back({1, 1, static_cast<size_t>(top_k) * inShapes[0][0], 7});
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_eltwise_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_eltwise_shape_infer.hpp
deleted file mode 100644 (file)
index 7f10818..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <algorithm>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for EltWise layer
- */
-class EltWiseShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit EltWiseShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        EltwiseLayer eltwiseLayer(lp);
-        eltwiseLayer.params = params;
-        eltwiseLayer.type = _type;
-        validate(&eltwiseLayer, inBlobs, params, blobs);
-
-        if (inShapes.size() == 1) {
-            outShapes.push_back(inShapes[0]);
-        } else {
-            SizeVector outShape((std::max)(inShapes[0].size(), inShapes[1].size()));
-            for (size_t ind = 0; ind < outShape.size(); ++ind) {
-                if (ind < inShapes[0].size() && ind < inShapes[1].size()) {
-                    outShape[ind] = (std::max)(inShapes[0][ind], inShapes[1][ind]);
-                } else if (ind >= inShapes[0].size()) {
-                    outShape[ind] = inShapes[1][ind];
-                } else {
-                    outShape[ind] = inShapes[0][ind];
-                }
-            }
-            outShapes.push_back(outShape);
-        }
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
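
For the two-input case above, the output rank is the larger of the two input ranks and each dimension takes the larger extent, with dimensions aligned from the left. A standalone restatement (note this legacy rule differs from NumPy-style broadcasting, which aligns trailing dimensions):

#include <algorithm>
#include <cstddef>
#include <vector>

std::vector<std::size_t> eltwiseShape(const std::vector<std::size_t>& a,
                                      const std::vector<std::size_t>& b) {
    std::vector<std::size_t> out(std::max(a.size(), b.size()));
    for (std::size_t i = 0; i < out.size(); ++i) {
        if (i < a.size() && i < b.size())
            out[i] = std::max(a[i], b[i]);
        else
            out[i] = (i < a.size()) ? a[i] : b[i];
    }
    return out;
}
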
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_equal_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_equal_shape_infer.hpp
deleted file mode 100644 (file)
index 6916318..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <description_buffer.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference that just assigns input shapes to output shapes
- */
-class EqualShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit EqualShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        outShapes = inShapes;
-    }
-};
-
-class DoNothingShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit DoNothingShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {}
-};
-
-class MemoryShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit MemoryShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        int idx = std::stoi(params.at("index"));
-        // Only one of the paired Memory endpoints (index == 1) propagates shapes
-        if (idx == 1) {
-            outShapes = inShapes;
-        }
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_erf_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_erf_shape_infer.hpp
deleted file mode 100644 (file)
index 6521b88..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Math layers
- */
-class MathShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit MathShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        MathLayer mathLayer(lp);
-        mathLayer.params = params;
-        mathLayer.type = _type;
-        validate(&mathLayer, inBlobs, params, blobs);
-
-        outShapes = {inShapes[0]};
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_fill_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_fill_shape_infer.hpp
deleted file mode 100644 (file)
index bc2ad76..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Fill layer
- */
-class FillShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit FillShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        FillLayer fillLayer(lp);
-        fillLayer.params = params;
-        fillLayer.type = _type;
-        validate(&fillLayer, inBlobs, params, blobs);
-
-        auto dimsBlob = *inBlobs.begin();
-        SizeVector shape;
-        SizeVector dims = dimsBlob->getTensorDesc().getDims();
-        auto* buffer = dimsBlob->cbuffer().as<int32_t*>();
-        if (!buffer || dimsBlob->getTensorDesc().getPrecision() != Precision::I32)
-            THROW_IE_EXCEPTION << " Fill dimensions vector should be I32!";
-
-        for (int i = 0; i < dimsBlob->size(); i++) {
-            shape.push_back(buffer[i]);
-        }
-        outShapes = {shape};
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_flatten_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_flatten_shape_infer.hpp
deleted file mode 100644 (file)
index 44cc2f2..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <legacy/ie_layers.h>
-
-#include <description_buffer.hpp>
-#include <functional>
-#include <map>
-#include <memory>
-#include <numeric>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Reshape layer
- */
-class FlattenShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit FlattenShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        ReshapeLayer reshapeLayer(lp);
-        reshapeLayer.params = params;
-        reshapeLayer.type = _type;
-        validate(&reshapeLayer, inBlobs, params, blobs);
-
-        auto inputShape = inShapes[0];
-        size_t inputShapeTotal = std::accumulate(inputShape.begin(), inputShape.end(), 1lu, std::multiplies<size_t>());
-        SizeVector outShape;
-
-        int numAxes = reshapeLayer.num_axes;
-        int axis = reshapeLayer.axis;
-        size_t notFlatten = 1;
-        if (numAxes == -1 && axis == 0) {
-            outShape = {inputShapeTotal};
-        } else {
-            if (axis > 0) {
-                for (int i = 0; i < axis; i++) {
-                    notFlatten *= inputShape[i];
-                    outShape.push_back(inputShape[i]);
-                }
-            }
-            outShape.push_back(1);
-            if (numAxes > 0) {
-                for (int i = numAxes + 1; i < inputShape.size(); i++) {
-                    notFlatten *= inputShape[i];
-                    outShape.push_back(inputShape[i]);
-                }
-            }
-            outShape[axis] = inputShapeTotal / notFlatten;
-        }
-
-        outShapes.emplace_back(outShape);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_gather_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_gather_shape_infer.hpp
deleted file mode 100644 (file)
index b8a3da3..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Gather layer
- */
-class GatherShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit GatherShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        GatherLayer gatherLayer(lp);
-        gatherLayer.params = params;
-        gatherLayer.type = _type;
-        validate(&gatherLayer, inBlobs, params, blobs);
-
-        int axis = gatherLayer.axis;
-        if (axis < 0) axis += inShapes[0].size();
-
-        outShapes.resize(1);
-        outShapes[0].resize(inShapes[0].size() + inShapes[1].size() - 1);
-        for (int i = 0; i < axis; i++) outShapes[0][i] = inShapes[0][i];
-
-        for (size_t i = 0; i < inShapes[1].size(); i++) outShapes[0][i + axis] = inShapes[1][i];
-
-        for (size_t i = axis + 1; i < inShapes[0].size(); i++)
-            outShapes[0][i + inShapes[1].size() - 1] = inShapes[0][i];
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
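
The three loops above implement a single splice: the indices shape replaces the gathered axis, so the output rank is rank(data) + rank(indices) - 1. A standalone restatement:

#include <cstddef>
#include <vector>

// E.g. data {4, 5, 6}, indices {2, 3}, axis 1 -> {4, 2, 3, 6}.
std::vector<std::size_t> gatherShape(const std::vector<std::size_t>& data,
                                     const std::vector<std::size_t>& indices, int axis) {
    if (axis < 0) axis += static_cast<int>(data.size());
    std::vector<std::size_t> out(data.begin(), data.begin() + axis);
    out.insert(out.end(), indices.begin(), indices.end());
    out.insert(out.end(), data.begin() + axis + 1, data.end());
    return out;
}
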
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_gather_tree_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_gather_tree_shape_infer.hpp
deleted file mode 100644 (file)
index 2591fe0..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for GatherTree layer
- */
-class GatherTreeShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit GatherTreeShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        GatherLayer gatherLayer(lp);
-        gatherLayer.params = params;
-        gatherLayer.type = _type;
-        validate(&gatherLayer, inBlobs, params, blobs);
-
-        outShapes.resize(1, inShapes[0]);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_gemm_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_gemm_shape_infer.hpp
deleted file mode 100644 (file)
index 766369a..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <algorithm>
-#include <cmath>
-#include <description_buffer.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Gemm layer
- */
-class GemmShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit GemmShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        // TODO: primitive does not support 5D tensor yet
-        LayerParams lp {};
-        GemmLayer gemmLayer(lp);
-        gemmLayer.params = params;
-        gemmLayer.type = _type;
-        validate(&gemmLayer, inBlobs, params, blobs);
-
-        auto dims0 = inShapes[0];
-        auto dims1 = inShapes[1];
-
-        SizeVector shapes;
-        for (int idx = 0; idx < dims0.size() - 2; idx++) {
-            unsigned long max_dim = dims0[idx] > dims1[idx] ? dims0[idx] : dims1[idx];
-
-            if (inShapes.size() == 3) {
-                auto dims2 = inShapes[2];
-                max_dim = max_dim > dims2[idx] ? max_dim : dims2[idx];
-            }
-
-            shapes.push_back(max_dim);
-        }
-
-        // The output matrix is rows(op(A)) x cols(op(B))
-        unsigned long rowAxis = gemmLayer.transpose_a ? dims0.size() - 1 : dims0.size() - 2;
-        unsigned long colAxis = gemmLayer.transpose_b ? dims1.size() - 2 : dims1.size() - 1;
-
-        shapes.push_back(dims0[rowAxis]);
-        shapes.push_back(dims1[colAxis]);
-        outShapes.push_back(shapes);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_inner_product_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_inner_product_shape_infer.hpp
deleted file mode 100644 (file)
index 0798a19..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <description_buffer.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-class InnerProductShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit InnerProductShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        FullyConnectedLayer fcLayer(lp);
-        fcLayer.params = params;
-        fcLayer.type = _type;
-        validate(&fcLayer, inBlobs, params, blobs);
-        size_t OC, ON;
-        ON = inShapes[0][0];
-        OC = fcLayer._out_num;
-        outShapes.emplace_back(std::initializer_list<size_t> {ON, OC});
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_interp_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_interp_shape_infer.hpp
deleted file mode 100644 (file)
index 21498d5..0000000
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <legacy/ie_layers.h>
-
-#include <description_buffer.hpp>
-#include <limits>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Interp layer
- */
-class InterpShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit InterpShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CNNLayer cnnLayer(lp);
-        cnnLayer.params = params;
-        cnnLayer.type = _type;
-        validate(&cnnLayer, inBlobs, params, blobs);
-        SizeVector outShape;
-        if (inBlobs.size() == 2) {
-            auto* buffer = inBlobs[1]->cbuffer().as<float*>();
-            if (buffer != nullptr) {
-                for (int i = 0; i < inBlobs[1]->size(); i++) {
-                    outShape.push_back(static_cast<unsigned long>(buffer[i]));
-                }
-            } else {
-                THROW_IE_EXCEPTION << "Second input must have allocated data";
-            }
-        } else {
-            auto factor = cnnLayer.GetParamAsFloat("factor", 0);
-            auto shrink_factor = cnnLayer.GetParamAsFloat("shrink_factor", 0);
-            auto zoom_factor = cnnLayer.GetParamAsFloat("zoom_factor", 0);
-            auto height = static_cast<size_t>(cnnLayer.GetParamAsInt("height", 0));
-            auto width = static_cast<size_t>(cnnLayer.GetParamAsInt("width", 0));
-
-            auto IS_ZERO = [](float value) {
-                return std::fabs(value) < std::numeric_limits<float>::epsilon();
-            };
-
-            bool noFactor = IS_ZERO(zoom_factor) && IS_ZERO(shrink_factor) && IS_ZERO(factor);
-
-            size_t N, C, H, W;
-            N = inShapes[0][0];
-            C = inShapes[0][1];
-            H = inShapes[0][2];
-            W = inShapes[0][3];
-
-            auto SETW = [&width, &W](size_t value) {
-                if (width) {
-                    W = width;
-                } else {
-                    W = value;
-                }
-            };
-
-            auto SETH = [&height, &H](size_t value) {
-                if (height) {
-                    H = height;
-                } else {
-                    H = value;
-                }
-            };
-
-            if (noFactor) {
-                SETW(width);
-                SETH(height);
-            } else {
-                float actualFactor = factor;
-                if (!IS_ZERO(shrink_factor) || !IS_ZERO(zoom_factor)) {
-                    if (!IS_ZERO(zoom_factor)) actualFactor = zoom_factor;
-                    if (!IS_ZERO(shrink_factor)) actualFactor /= shrink_factor;
-                }
-                SETW(W * actualFactor);
-                SETH(H * actualFactor);
-            }
-            outShape = {N, C, H, W};
-        }
-        outShapes.push_back(outShape);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
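
The factor resolution in the no-blob path above is easy to misread; isolated, the rule is: zoom_factor (when set) replaces factor, shrink_factor (when set) divides the result, and an explicit non-zero height/width always wins. A standalone restatement of just that resolution:

#include <cmath>
#include <limits>

float resolveInterpFactor(float factor, float shrinkFactor, float zoomFactor) {
    auto isZero = [](float v) { return std::fabs(v) < std::numeric_limits<float>::epsilon(); };
    float actual = factor;
    if (!isZero(shrinkFactor) || !isZero(zoomFactor)) {
        if (!isZero(zoomFactor)) actual = zoomFactor;
        if (!isZero(shrinkFactor)) actual /= shrinkFactor;
    }
    return actual;
}
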
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_non_max_suppression_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_non_max_suppression_shape_infer.hpp
deleted file mode 100644 (file)
index 4f22ad9..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for NonMaxSuppression layer
- */
-class NMSShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit NMSShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        NonMaxSuppressionLayer nmsLayer(lp);
-        nmsLayer.params = params;
-        nmsLayer.type = _type;
-        validate(&nmsLayer, inBlobs, params, blobs);
-
-        outShapes.push_back({inShapes[1][0] * inShapes[1][1] * inShapes[1][2], 3});
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_one_hot_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_one_hot_shape_infer.hpp
deleted file mode 100644 (file)
index 0d0a524..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for the OneHot layer
- */
-class OneHotShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit OneHotShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlob, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        OneHotLayer oneHotLayer(lp);
-        oneHotLayer.params = params;
-        oneHotLayer.type = _type;
-        validate(&oneHotLayer, inBlob, params, blobs);
-        auto& inShape = inShapes[0];
-        SizeVector outShape;
-        auto actual_axis = (oneHotLayer.axis == -1) ? inShape.size() : oneHotLayer.axis;
-        for (std::size_t idx = 0; idx < inShape.size() + 1; ++idx) {
-            if (idx < actual_axis)
-                outShape.push_back(inShape[idx]);
-            else if (idx == actual_axis)
-                outShape.push_back(oneHotLayer.depth);
-            else
-                outShape.push_back(inShape[idx - 1]);
-        }
-        outShapes.push_back(outShape);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
\ No newline at end of file
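The OneHot rule above is equivalent to inserting `depth` into the input shape at `axis`. A standalone sketch (illustrative names, not the IE API):

    #include <cstddef>
    #include <vector>

    // Output rank is input rank + 1: `depth` is inserted at `axis`,
    // with axis == -1 meaning "append at the end".
    std::vector<size_t> oneHotOutShape(const std::vector<size_t>& in, int axis, size_t depth) {
        std::vector<size_t> out(in);
        size_t at = (axis == -1) ? in.size() : static_cast<size_t>(axis);
        out.insert(out.begin() + at, depth);
        return out;
    }

For a {2, 3} input with axis = -1 and depth = 10 this yields {2, 3, 10}.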
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_pad_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_pad_shape_infer.hpp
deleted file mode 100644 (file)
index 778f281..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Pad layer
- */
-class PadShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit PadShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        PadLayer padLayer(lp);
-        padLayer.params = params;
-        padLayer.type = _type;
-        validate(&padLayer, inBlobs, params, blobs);
-
-        outShapes.push_back(inShapes[0]);
-        for (size_t i = 0; i < outShapes[0].size(); i++) {
-            outShapes[0][i] += padLayer.pads_begin[i] + padLayer.pads_end[i];
-        }
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
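The Pad rule above is pure element-wise addition of the paddings. A minimal sketch with plain vectors:

    #include <cstddef>
    #include <vector>

    // Each output dimension grows by its begin + end padding.
    std::vector<size_t> padOutShape(const std::vector<size_t>& in,
                                    const std::vector<size_t>& padsBegin,
                                    const std::vector<size_t>& padsEnd) {
        std::vector<size_t> out(in);
        for (size_t i = 0; i < out.size(); ++i)
            out[i] += padsBegin[i] + padsEnd[i];
        return out;
    }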
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_permute_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_permute_shape_infer.hpp
deleted file mode 100644 (file)
index 7d3737c..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <legacy/ie_layers.h>
-
-#include <description_buffer.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Permute layer
- */
-class PermuteShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit PermuteShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CNNLayer permuteLayer(lp);
-        permuteLayer.params = params;
-        permuteLayer.type = _type;
-        validate(&permuteLayer, inBlobs, params, blobs);
-
-        std::vector<size_t> order;
-        std::vector<int> layerOrder = permuteLayer.GetParamAsInts("order");
-        for (auto ord : layerOrder) order.push_back(static_cast<size_t>(ord));
-
-        SizeVector outShape;
-        for (size_t i = 0; i < inShapes[0].size(); i++) {
-            outShape.push_back(inShapes[0][order[i]]);
-        }
-        outShapes.emplace_back(outShape);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
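The Permute rule above just gathers input dimensions through the `order` attribute. Sketch (illustrative, not the IE API):

    #include <cstddef>
    #include <vector>

    // Dimension i of the output is dimension order[i] of the input.
    std::vector<size_t> permuteOutShape(const std::vector<size_t>& in,
                                        const std::vector<size_t>& order) {
        std::vector<size_t> out;
        out.reserve(in.size());
        for (size_t o : order) out.push_back(in[o]);
        return out;
    }

For example, order = {0, 2, 3, 1} maps an NCHW shape {1, 3, 224, 224} to NHWC {1, 224, 224, 3}.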
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_pool_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_pool_shape_infer.hpp
deleted file mode 100644 (file)
index f313213..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <cmath>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Pooling layer
- */
-class PoolingShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit PoolingShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        PoolingLayer poolLayer(lp);
-        poolLayer.params = params;
-        poolLayer.type = _type;
-        validate(&poolLayer, inBlobs, params, blobs);
-
-        auto dims = inShapes[0];
-        auto dims_size = dims.size();
-        auto spatial_d_size = dims.size() - 2;
-        // a vector (instead of raw new[]) releases its storage even when an exception is thrown below
-        std::vector<float> OD_temp(spatial_d_size, 1.f);
-        size_t inputN = dims[0];
-        size_t IC = dims[1];
-
-        std::string padType = poolLayer._auto_pad;
-        if (padType == "valid") {
-            for (int i = 0; i < spatial_d_size; i++)
-                OD_temp[i] = std::ceil((dims[dims_size - 1 - i] - poolLayer._kernel[i] + 1.f) / poolLayer._stride[i]);
-        } else if (padType == "same_upper") {
-            for (int i = 0; i < spatial_d_size; i++)
-                OD_temp[i] = std::ceil(1.f * dims[dims_size - 1 - i] / poolLayer._stride[i]);
-        } else if (padType == "same_lower") {
-            for (int i = 0; i < spatial_d_size; i++)
-                OD_temp[i] = std::floor(1.f * dims[dims_size - 1 - i] / poolLayer._stride[i]);
-        } else {
-            auto it = std::find_if(poolLayer.params.begin(), poolLayer.params.end(),
-                                   [](decltype(*poolLayer.params.begin())& lhs) {
-                                       return lhs.first == "rounding-type" || lhs.first == "rounding_type";
-                                   });
-            bool isCeil = true;
-            if (it != poolLayer.params.end()) {
-                if (it->second == "floor") isCeil = false;
-            }
-            for (int i = 0; i < spatial_d_size; i++)
-                OD_temp[i] +=
-                    1.f *
-                    (dims[dims_size - 1 - i] + poolLayer._padding[i] + poolLayer._pads_end[i] - poolLayer._kernel[i]) /
-                    poolLayer._stride[i];
-            if (isCeil) {
-                for (int i = 0; i < spatial_d_size; i++) OD_temp[i] = std::ceil(OD_temp[i]);
-            } else {
-                for (int i = 0; i < spatial_d_size; i++) OD_temp[i] = std::floor(OD_temp[i]);
-            }
-            for (int i = 0; i < spatial_d_size; i++)
-                if ((OD_temp[i] - 1) * poolLayer._stride[i] >= dims[dims_size - 1 - i] + poolLayer._padding[i])
-                    --OD_temp[i];
-        }
-        for (int i = 0; i < spatial_d_size; i++)
-            if (OD_temp[i] < 0)
-                THROW_IE_EXCEPTION << "New shapes " << details::dumpVec(dims) << " make output shape negative";
-
-        SizeVector outShape = {inputN, IC};
-        for (int i = spatial_d_size - 1; i >= 0; i--) outShape.push_back(static_cast<size_t>(OD_temp[i]));
-
-        outShapes.emplace_back(outShape);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
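The explicit-padding branch above follows the usual pooling formula. A single-dimension sketch under the same assumptions (float arithmetic, rounding per rounding_type, then clipping windows that start entirely in padding):

    #include <cmath>
    #include <cstddef>

    // out = (in + pad_begin + pad_end - kernel) / stride + 1, rounded up (ceil mode)
    // or down, then reduced by one if the last window would start in the padding.
    size_t pooledDim(size_t in, size_t kernel, size_t stride,
                     size_t padBegin, size_t padEnd, bool ceilMode) {
        float od = 1.f + static_cast<float>(in + padBegin + padEnd - kernel) / stride;
        od = ceilMode ? std::ceil(od) : std::floor(od);
        if ((od - 1) * stride >= in + padBegin)  // last window fully in padding
            od -= 1;
        return static_cast<size_t>(od);
    }

E.g. in = 7, kernel = 3, stride = 2 with pads 1/1 gives 4 in both rounding modes.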
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_priorbox_clustered_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_priorbox_clustered_shape_infer.hpp
deleted file mode 100644 (file)
index 5348f8b..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <legacy/ie_layers.h>
-
-#include <description_buffer.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for PriorBoxClustered layer
- */
-class PriorBoxClusteredShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit PriorBoxClusteredShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CNNLayer cnnLayer(lp);
-        cnnLayer.params = params;
-        cnnLayer.type = _type;
-        validate(&cnnLayer, inBlobs, params, blobs);
-        std::vector<float> widths = cnnLayer.GetParamAsFloats("width", {});
-        size_t res_prod = widths.size() * 4;
-        for (int i = 2; i < inShapes[0].size(); i++) res_prod *= inShapes[0][i];
-        outShapes.push_back({1, 2, res_prod});
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_priorbox_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_priorbox_shape_infer.hpp
deleted file mode 100644 (file)
index 027d9bc..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <legacy/ie_layers.h>
-
-#include <description_buffer.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for PriorBox layer
- */
-class PriorBoxShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit PriorBoxShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CNNLayer cnnLayer(lp);
-        cnnLayer.params = params;
-        cnnLayer.type = _type;
-        validate(&cnnLayer, inBlobs, params, blobs);
-        std::vector<float> min_sizes = cnnLayer.GetParamAsFloats("min_size", {});
-        std::vector<float> max_sizes = cnnLayer.GetParamAsFloats("max_size", {});
-        bool flip = static_cast<bool>(cnnLayer.GetParamAsInt("flip"));
-        const std::vector<float> aspect_ratios = cnnLayer.GetParamAsFloats("aspect_ratio", {});
-        size_t num_priors = 0;
-
-        bool scale_all_sizes = static_cast<bool>(cnnLayer.GetParamAsInt("scale_all_sizes", 1));
-
-        if (scale_all_sizes) {
-            num_priors = ((flip ? 2 : 1) * aspect_ratios.size() + 1) * min_sizes.size() + max_sizes.size();
-        } else {
-            num_priors = (flip ? 2 : 1) * aspect_ratios.size() + min_sizes.size() - 1;
-        }
-
-        size_t res_prod = num_priors * 4;
-        for (int i = 2; i < inShapes[0].size(); i++) res_prod *= inShapes[0][i];
-        outShapes.push_back({1, 2, res_prod});
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
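The prior count computed above drives the {1, 2, num_priors * 4 * H * W} output. A standalone sketch of just that count (names illustrative):

    #include <cstddef>

    // With scale_all_sizes every min_size yields one box per (optionally flipped)
    // aspect ratio plus one square box, and each max_size adds one more box;
    // otherwise the legacy formula applies.
    size_t numPriors(size_t numMin, size_t numMax, size_t numRatios,
                     bool flip, bool scaleAllSizes) {
        size_t perRatio = (flip ? 2 : 1) * numRatios;
        return scaleAllSizes ? (perRatio + 1) * numMin + numMax
                             : perRatio + numMin - 1;
    }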
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_psroi_pooling_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_psroi_pooling_shape_infer.hpp
deleted file mode 100644 (file)
index 5813e69..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <legacy/ie_layers.h>
-
-#include <description_buffer.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for PSRoiPooling layer
- */
-class PSRoiPoolingShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit PSRoiPoolingShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CNNLayer cnnLayer(lp);
-        cnnLayer.params = params;
-        cnnLayer.type = _type;
-        validate(&cnnLayer, inBlobs, params, blobs);
-        size_t output_dim = static_cast<size_t>(cnnLayer.GetParamAsInt("output_dim"));
-        size_t group_size = static_cast<size_t>(cnnLayer.GetParamAsInt("group_size"));
-        outShapes.push_back({inShapes[1][0], output_dim, group_size, group_size});
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_quantize_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_quantize_shape_infer.hpp
deleted file mode 100644 (file)
index 0612ab1..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-
-#include <algorithm>
-#include <cmath>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Quantize layer
- */
-class QuantizeShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit QuantizeShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        QuantizeLayer quantizeLayer(lp);
-        quantizeLayer.params = params;
-        quantizeLayer.type = _type;
-        validate(&quantizeLayer, inBlobs, params, blobs);
-
-        outShapes.push_back(inShapes[0]);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_range_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_range_shape_infer.hpp
deleted file mode 100644 (file)
index b055790..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <cmath>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Range layer
- */
-class RangeShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit RangeShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        RangeLayer rangeLayer(lp);
-        rangeLayer.params = params;
-        rangeLayer.type = _type;
-        validate(&rangeLayer, inBlobs, params, blobs);
-
-        const size_t RANGE_START = 0;
-        const size_t RANGE_LIMIT = 1;
-        const size_t RANGE_DELTA = 2;
-
-        float start = (inBlobs[RANGE_START]->cbuffer().as<float*>() +
-                       inBlobs[RANGE_START]->getTensorDesc().getBlockingDesc().getOffsetPadding())[0];
-        float limit = (inBlobs[RANGE_LIMIT]->cbuffer().as<float*>() +
-                       inBlobs[RANGE_LIMIT]->getTensorDesc().getBlockingDesc().getOffsetPadding())[0];
-        float delta = (inBlobs[RANGE_DELTA]->cbuffer().as<float*>() +
-                       inBlobs[RANGE_DELTA]->getTensorDesc().getBlockingDesc().getOffsetPadding())[0];
-        size_t work_amount_dst = std::floor(std::abs((limit - start) / delta));
-        outShapes = {{work_amount_dst}};
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
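The Range length above comes from the three scalar inputs. A sketch of the same formula; note that the removed code used floor(|(limit - start) / delta|), whereas the more common definition (TensorFlow-style) is ceil((limit - start) / delta) clamped at zero:

    #include <cmath>
    #include <cstddef>

    // Number of elements produced by Range(start, limit, delta), as computed above.
    size_t rangeLength(float start, float limit, float delta) {
        return static_cast<size_t>(std::floor(std::abs((limit - start) / delta)));
    }

For start = 3, limit = 18, delta = 3 this yields 5 (the values 3, 6, 9, 12, 15).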
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_reduce_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_reduce_shape_infer.hpp
deleted file mode 100644 (file)
index 365b270..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <algorithm>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Reduce layer
- */
-class ReduceShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit ReduceShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        ReduceLayer reduceLayer(lp);
-        reduceLayer.params = params;
-        reduceLayer.type = _type;
-        validate(&reduceLayer, inBlobs, params, blobs);
-
-        const size_t REDUCE_DATA = 0;
-        const size_t REDUCE_INDEXES = 1;
-        if (inBlobs.size() < 2) THROW_IE_EXCEPTION << " Incorrect number of inputs";
-
-        SizeVector idx_dims = inBlobs[REDUCE_INDEXES]->getTensorDesc().getDims();
-        if (idx_dims.size() > 1) THROW_IE_EXCEPTION << " Index vector should be 1-dimensional";
-
-        if (inBlobs[REDUCE_INDEXES]->getTensorDesc().getPrecision() != Precision::I32)
-            THROW_IE_EXCEPTION << " Incorrect 'axes_to_reduction' input precision. Only I32 is supported!";
-
-        SizeVector data_dims = inBlobs[REDUCE_DATA]->getTensorDesc().getDims();
-        int32_t* idx_data = inBlobs[REDUCE_INDEXES]->cbuffer().as<int32_t*>() +
-                            inBlobs[REDUCE_INDEXES]->getTensorDesc().getBlockingDesc().getOffsetPadding();
-        SizeVector axes;
-        for (size_t i = 0; i < idx_dims[0]; i++) {
-            int32_t axis = idx_data[i];
-            if (axis < 0) axis += data_dims.size();
-
-            if (axis < 0 || static_cast<size_t>(axis) >= data_dims.size())
-                THROW_IE_EXCEPTION << " Axis to reduce exceeds data tensor rank";
-            axes.push_back(static_cast<size_t>(axis));
-        }
-        bool keep_dims = reduceLayer.keep_dims;
-        SizeVector outShape;
-        SizeVector src_dims = inBlobs[REDUCE_DATA]->getTensorDesc().getDims();
-        for (size_t i = 0; i < src_dims.size(); i++) {
-            bool found = false;
-            for (size_t axis : axes)
-                if (i == axis) found = true;
-
-            if (found) {
-                if (keep_dims) outShape.push_back(1);
-            } else {
-                outShape.push_back(src_dims[i]);
-            }
-        }
-
-        outShapes.push_back(outShape);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
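The Reduce rule above keeps or drops the listed axes depending on keep_dims. A standalone sketch with plain containers (illustrative, not the IE API):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Axes from the second input are collapsed to 1 (keep_dims) or removed.
    std::vector<size_t> reduceOutShape(const std::vector<size_t>& in,
                                       std::vector<int32_t> axes, bool keepDims) {
        for (int32_t& a : axes)
            if (a < 0) a += static_cast<int32_t>(in.size());  // normalize negatives
        std::vector<size_t> out;
        for (size_t i = 0; i < in.size(); ++i) {
            bool reduced = std::find(axes.begin(), axes.end(),
                                     static_cast<int32_t>(i)) != axes.end();
            if (!reduced) out.push_back(in[i]);
            else if (keepDims) out.push_back(1);
        }
        return out;
    }

For a {2, 3, 4} input with axes = {-1}: {2, 3, 1} with keep_dims, {2, 3} without.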
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_region_yolo_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_region_yolo_shape_infer.hpp
deleted file mode 100644 (file)
index 6613721..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <legacy/ie_layers.h>
-
-#include <description_buffer.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for RegionYolo layer
- */
-class RegionYoloShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit RegionYoloShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CNNLayer layer(lp);
-        layer.params = params;
-        int classes;
-        int coords;
-        int num;
-        bool do_softmax;
-        std::vector<int> mask;
-        classes = layer.GetParamAsInt("classes", 1);
-        coords = layer.GetParamAsInt("coords", 1);
-        num = layer.GetParamAsInt("num", 1);
-        do_softmax = static_cast<bool>(layer.GetParamAsInt("do_softmax", 1));
-        mask = layer.GetParamAsInts("mask", {});
-        unsigned int axis = layer.GetParamAsUInt("axis", 1);
-        int end_axis = layer.GetParamAsInt("end_axis", 1);
-        if (end_axis < 0) end_axis += inShapes[0].size();
-
-        SizeVector outShape;
-        if (do_softmax) {
-            size_t flat_dim = 1;
-            for (size_t i = 0; i < axis; i++) {
-                outShape.push_back(inShapes[0][i]);
-            }
-            for (size_t i = axis; i < end_axis + 1; i++) {
-                flat_dim *= inShapes[0][i];
-            }
-            outShape.push_back(flat_dim);
-            for (size_t i = end_axis + 1; i < inShapes[0].size(); i++) {
-                outShape.push_back(inShapes[0][i]);
-            }
-        } else {
-            outShape = {inShapes[0][0], (classes + coords + 1) * mask.size(), inShapes[0][2], inShapes[0][3]};
-        }
-        outShapes.push_back({outShape});
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
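The two RegionYolo branches above either flatten a range of dims or recompute the channel count. Sketch under the same assumptions (NCHW input in the non-softmax branch; names illustrative):

    #include <cstddef>
    #include <vector>

    // do_softmax: dims [axis, end_axis] are flattened into one.
    // Otherwise: channels become (classes + coords + 1) * |mask|.
    std::vector<size_t> regionYoloOutShape(const std::vector<size_t>& in, bool doSoftmax,
                                           size_t axis, size_t endAxis, size_t classes,
                                           size_t coords, size_t maskSize) {
        if (!doSoftmax)
            return {in[0], (classes + coords + 1) * maskSize, in[2], in[3]};
        std::vector<size_t> out(in.begin(), in.begin() + axis);
        size_t flat = 1;
        for (size_t i = axis; i <= endAxis; ++i) flat *= in[i];
        out.push_back(flat);
        out.insert(out.end(), in.begin() + endAxis + 1, in.end());
        return out;
    }

The familiar YOLOv3 case: classes = 80, coords = 4, |mask| = 3 gives 255 channels.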
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_reorg_yolo_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_reorg_yolo_shape_infer.hpp
deleted file mode 100644 (file)
index bc1a50c..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <legacy/ie_layers.h>
-
-#include <description_buffer.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for ReorgYolo layer
- */
-class ReorgYoloShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit ReorgYoloShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CNNLayer cnnLayer(lp);
-        cnnLayer.params = params;
-        cnnLayer.type = _type;
-        validate(&cnnLayer, inBlobs, params, blobs);
-        size_t stride = static_cast<size_t>(cnnLayer.GetParamAsInt("stride"));
-        SizeVector outShape;
-        for (size_t i = 0; i < inShapes[0].size(); i++) {
-            outShape.push_back(inShapes[0][i]);
-            if (i == 1) {
-                outShape[outShape.size() - 1] *= stride * stride;
-            } else if (i > 1) {
-                outShape[outShape.size() - 1] /= stride;
-            }
-        }
-        outShapes.push_back(outShape);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
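The ReorgYolo rule above folds spatial blocks into channels. A minimal sketch:

    #include <cstddef>
    #include <vector>

    // Each stride x stride spatial block moves into channels:
    // C grows by stride^2, every spatial dim shrinks by stride.
    std::vector<size_t> reorgYoloOutShape(const std::vector<size_t>& in, size_t stride) {
        std::vector<size_t> out(in);  // in = {N, C, H, W}
        out[1] *= stride * stride;
        for (size_t i = 2; i < out.size(); ++i) out[i] /= stride;
        return out;
    }

With stride = 2, {1, 64, 26, 26} becomes {1, 256, 13, 13}.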
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_resample_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_resample_shape_infer.hpp
deleted file mode 100644 (file)
index 964701c..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <legacy/ie_layers.h>
-
-#include <description_buffer.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Resample layer
- */
-class ResampleShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit ResampleShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CNNLayer cnnLayer(lp);
-        cnnLayer.params = params;
-        cnnLayer.type = _type;
-        validate(&cnnLayer, inBlobs, params, blobs);
-        SizeVector outShape;
-        if (inBlobs.size() == 2) {
-            switch (inBlobs[1]->getTensorDesc().getPrecision()) {
-            case Precision::FP32: {
-                auto* buffer = inBlobs[1]->cbuffer().as<float*>();
-
-                if (buffer != nullptr) {
-                    for (int i = 0; i < inBlobs[1]->size(); i++) {
-                        outShape.push_back(static_cast<size_t>(buffer[i]));
-                    }
-                } else {
-                    THROW_IE_EXCEPTION << "Second input must have allocated data";
-                }
-                break;
-            }
-            case Precision::I32: {
-                auto* buffer = inBlobs[1]->cbuffer().as<int32_t*>();
-
-                if (buffer != nullptr) {
-                    for (int i = 0; i < inBlobs[1]->size(); i++) {
-                        outShape.push_back(static_cast<size_t>(buffer[i]));
-                    }
-                } else {
-                    THROW_IE_EXCEPTION << "Second input must have allocated data";
-                }
-                break;
-            }
-            default:
-                THROW_IE_EXCEPTION << "Unsupported second input precision";
-            }
-        } else {
-            auto scale = cnnLayer.GetParamAsFloat("factor");
-            outShape = {inShapes[0][0], inShapes[0][1]};
-            for (int i = 2; i < inShapes[0].size(); i++)
-                outShape.push_back(static_cast<size_t>(std::ceil(inShapes[0][i] * scale)));
-        }
-        outShapes.push_back(outShape);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_reshape_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_reshape_shape_infer.hpp
deleted file mode 100644 (file)
index f09b308..0000000
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <legacy/ie_layers.h>
-
-#include <functional>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-#include "precision_utils.h"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Reshape layer
- */
-class ReshapeShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit ReshapeShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        ReshapeLayer reshapeLayer(lp);
-        reshapeLayer.params = params;
-        reshapeLayer.type = _type;
-        validate(&reshapeLayer, inBlobs, params, blobs);
-
-        SizeVector outShape;
-        std::vector<int> reshapeMask;
-        if (inBlobs.size() == 2) {
-            if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::FP32) {
-                auto* buffer = inBlobs[1]->cbuffer().as<float*>();
-                if (buffer != nullptr) {
-                    for (int i = 0; i < inBlobs[1]->size(); i++) {
-                        reshapeMask.push_back(static_cast<int>(buffer[i]));
-                    }
-                } else {
-                    THROW_IE_EXCEPTION << "Second input must have allocated data";
-                }
-            } else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::I32) {
-                auto* buffer = inBlobs[1]->cbuffer().as<int*>();
-                if (buffer != nullptr) {
-                    reshapeMask.assign(buffer, buffer + inBlobs[1]->size());
-                } else {
-                    THROW_IE_EXCEPTION << "Second input must have allocated data";
-                }
-            } else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::I64) {
-                auto* buffer = inBlobs[1]->cbuffer().as<int64_t*>();
-                if (buffer != nullptr) {
-                    reshapeMask.assign(buffer, buffer + inBlobs[1]->size());
-                } else {
-                    THROW_IE_EXCEPTION << "Second input must have allocated data";
-                }
-            } else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::U64) {
-                auto* buffer = inBlobs[1]->cbuffer().as<uint64_t*>();
-                if (buffer != nullptr) {
-                    reshapeMask.assign(buffer, buffer + inBlobs[1]->size());
-                } else {
-                    THROW_IE_EXCEPTION << "Second input must have allocated data";
-                }
-            } else if (inBlobs[1]->getTensorDesc().getPrecision() == Precision::FP16) {
-                auto* buffer = inBlobs[1]->cbuffer().as<uint16_t*>();
-                if (buffer != nullptr) {
-                    for (int i = 0; i < inBlobs[1]->size(); i++) {
-                        reshapeMask.push_back(static_cast<int>(PrecisionUtils::f16tof32(buffer[i])));
-                    }
-                } else {
-                    THROW_IE_EXCEPTION << "Second input must have allocated data";
-                }
-            } else {
-                THROW_IE_EXCEPTION << "Second input has unsupported precision";
-            }
-        } else {
-            reshapeMask = reshapeLayer.shape;
-        }
-        auto inputShape = inShapes[0];
-        size_t inputShapeTotal = std::accumulate(inputShape.begin(), inputShape.end(), 1lu, std::multiplies<size_t>());
-
-        if (reshapeMask.empty()) {
-            outShape = {inputShapeTotal};
-        } else {
-            size_t res = 1;
-            for (int i = 0; i < reshapeMask.size(); i++) {
-                if (reshapeMask[i] == 0) {
-                    res *= inputShape[i];
-                } else if (reshapeMask[i] != -1) {
-                    res *= reshapeMask[i];
-                }
-            }
-            size_t newDim = inputShapeTotal / res;
-            for (int i = 0; i < reshapeMask.size(); i++) {
-                if (reshapeMask[i] == 0) {
-                    outShape.push_back(inputShape[i]);
-                } else if (reshapeMask[i] == -1) {
-                    outShape.push_back(newDim);
-                } else {
-                    outShape.push_back(reshapeMask[i]);
-                }
-            }
-            size_t outputShapeTotal = std::accumulate(outShape.begin(), outShape.end(), 1lu, std::multiplies<size_t>());
-            if (inputShapeTotal != outputShapeTotal) {
-                THROW_IE_EXCEPTION << "Invalid reshape mask (dim attribute): number of elements in input: "
-                                   << details::dumpVec(inputShape) << " and output: " << details::dumpVec(outShape)
-                                   << " mismatch";
-            }
-        }
-        outShapes.emplace_back(outShape);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
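The mask resolution above (0 = copy the input dim, -1 = infer the remainder) can be isolated into a small helper. Sketch with plain vectors (illustrative names):

    #include <cstddef>
    #include <functional>
    #include <numeric>
    #include <vector>

    // Resolve a reshape mask: 0 copies the input dimension at the same position,
    // -1 absorbs whatever element count remains.
    std::vector<size_t> resolveReshapeMask(const std::vector<size_t>& in,
                                           const std::vector<int>& mask) {
        size_t total = std::accumulate(in.begin(), in.end(), size_t{1},
                                       std::multiplies<size_t>());
        size_t known = 1;
        for (size_t i = 0; i < mask.size(); ++i) {
            if (mask[i] == 0) known *= in[i];
            else if (mask[i] != -1) known *= static_cast<size_t>(mask[i]);
        }
        std::vector<size_t> out;
        for (size_t i = 0; i < mask.size(); ++i) {
            if (mask[i] == 0) out.push_back(in[i]);
            else if (mask[i] == -1) out.push_back(total / known);
            else out.push_back(static_cast<size_t>(mask[i]));
        }
        return out;
    }

For input {2, 3, 4} and mask {0, -1} this yields {2, 12}.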
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_reverse_sequence_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_reverse_sequence_shape_infer.hpp
deleted file mode 100644 (file)
index 05d8ce0..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for ReverseSequence layer
- */
-class ReverseSequenceShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit ReverseSequenceShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        ReverseSequenceLayer reverseSequenceLayer(lp);
-        reverseSequenceLayer.params = params;
-        reverseSequenceLayer.type = _type;
-        validate(&reverseSequenceLayer, inBlobs, params, blobs);
-
-        outShapes = {inShapes[0]};
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_rnn_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_rnn_shape_infer.hpp
deleted file mode 100644 (file)
index f7f007d..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <description_buffer.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for RNN sequence layers
- */
-class RNNShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit RNNShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        RNNSequenceLayer rnn(lp);
-        rnn.params = params;
-        rnn.type = _type;
-        IE_SUPPRESS_DEPRECATED_START
-        rnn.precision = Precision::FP32;  // FIXME: No ability to discover current precision. Assume fp32
-        IE_SUPPRESS_DEPRECATED_END
-        validate(&rnn, inBlobs, params, blobs);
-
-        int state_size = rnn.hidden_size;
-        int ns = rnn.cellType == RNNCellBase::LSTM ? 2 : 1;
-
-        auto data_dims = inShapes[0];
-        data_dims[2] = static_cast<size_t>(state_size);
-        outShapes.push_back(data_dims);
-
-        for (int i = 1; i < 1 + ns; i++) {
-            outShapes.push_back(inShapes[i]);
-        }
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_roi_pooling_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_roi_pooling_shape_infer.hpp
deleted file mode 100644 (file)
index 6a648a4..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <legacy/ie_layers.h>
-
-#include <description_buffer.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for RoiPooling layer
- */
-class RoiPoolingShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit RoiPoolingShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CNNLayer cnnLayer(lp);
-        cnnLayer.params = params;
-        cnnLayer.type = _type;
-        validate(&cnnLayer, inBlobs, params, blobs);
-
-        SizeVector out_shapes = {inShapes[1][0], inShapes[0][1]};
-        for (auto attr : {"pooled_d", "pooled_h", "pooled_w"}) {  // desired IR format: pooled="...,d,h,w"
-            int pooled = cnnLayer.GetParamAsInt(attr, -1);
-            if (pooled >= 0) {
-                out_shapes.push_back(static_cast<size_t>(pooled));
-            }
-        }
-        outShapes.push_back(out_shapes);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_scatter_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_scatter_shape_infer.hpp
deleted file mode 100644 (file)
index 12fa9ea..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for ScatterUpdate layer
- */
-class ScatterUpdateShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit ScatterUpdateShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        ScatterUpdateLayer scatterUpdateLayer(lp);
-        scatterUpdateLayer.params = params;
-        scatterUpdateLayer.type = _type;
-        validate(&scatterUpdateLayer, inBlobs, params, blobs);
-
-        outShapes = {inShapes[0]};
-    }
-};
-
-/**
- *@brief Implementation of Shape inference for ScatterElementsUpdate layer
- */
-class ScatterElementsUpdateShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit ScatterElementsUpdateShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        ScatterElementsUpdateLayer scatterElementsUpdateLayer(lp);
-        scatterElementsUpdateLayer.params = params;
-        scatterElementsUpdateLayer.type = _type;
-        validate(&scatterElementsUpdateLayer, inBlobs, params, blobs);
-
-        outShapes = {inShapes[0]};
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_select_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_select_shape_infer.hpp
deleted file mode 100644 (file)
index 4554c61..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Select layer
- */
-class SelectShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit SelectShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        SelectLayer selectLayer(lp);
-        selectLayer.params = params;
-        selectLayer.type = _type;
-        validate(&selectLayer, inBlobs, params, blobs);
-        outShapes.push_back(inShapes[1]);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_shape_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_shape_shape_infer.hpp
deleted file mode 100644 (file)
index 0634a16..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-
-#include <cmath>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Shape layer
- */
-class ShapeShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit ShapeShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        outShapes.push_back({inShapes[0].size()});
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_shuffle_channels_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_shuffle_channels_shape_infer.hpp
deleted file mode 100644 (file)
index f93ecdb..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for ShuffleChannels layer
- */
-class ShuffleChannelsShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit ShuffleChannelsShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        ShuffleChannelsLayer shuffleChannelsLayer(lp);
-        shuffleChannelsLayer.params = params;
-        shuffleChannelsLayer.type = _type;
-        validate(&shuffleChannelsLayer, inBlobs, params, blobs);
-
-        outShapes = {inShapes[0]};
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_space_to_depth_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_space_to_depth_shape_infer.hpp
deleted file mode 100644 (file)
index f4c371a..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for SpaceToDepth layer
- */
-class SpaceToDepthShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit SpaceToDepthShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        SpaceToDepthLayer spaceToDepthLayer(lp);
-        spaceToDepthLayer.params = params;
-        spaceToDepthLayer.type = _type;
-        validate(&spaceToDepthLayer, inBlobs, params, blobs);
-
-        unsigned int block_size = spaceToDepthLayer.block_size;
-        outShapes = {inShapes[0]};
-
-        outShapes[0][outShapes[0].size() - 1] = inShapes[0][inShapes[0].size() - 1] / block_size;
-        outShapes[0][outShapes[0].size() - 2] = inShapes[0][inShapes[0].size() - 2] / block_size;
-        outShapes[0][outShapes[0].size() - 3] = inShapes[0][inShapes[0].size() - 3] * block_size * block_size;
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
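The SpaceToDepth rule above works from the end of the shape, so it assumes at least a 3-D input. Sketch:

    #include <cstddef>
    #include <vector>

    // H and W shrink by block_size; the channel dim grows by block_size^2.
    std::vector<size_t> spaceToDepthOutShape(const std::vector<size_t>& in, size_t block) {
        std::vector<size_t> out(in);  // in = {..., C, H, W}
        size_t r = out.size();
        out[r - 1] /= block;
        out[r - 2] /= block;
        out[r - 3] *= block * block;
        return out;
    }

With block_size = 2, {1, 3, 12, 12} becomes {1, 12, 6, 6}.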
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_sparse_fill_empty_rows_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_sparse_fill_empty_rows_shape_infer.hpp
deleted file mode 100644 (file)
index ce7fa05..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for SparseFillEmptyRows layer
- */
-class SparseFillEmptyRowsShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit SparseFillEmptyRowsShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        THROW_IE_EXCEPTION << "SparseFillEmptyRows is not re-shapeable layer.";
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_sparse_segment_reduce_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_sparse_segment_reduce_shape_infer.hpp
deleted file mode 100644 (file)
index 652244a..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for SparseSegmentReduce layer
- */
-class SparseSegmentReduceShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit SparseSegmentReduceShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        SparseSegmentReduceLayer sparse_segment_reduce_layer(lp);
-        sparse_segment_reduce_layer.params = params;
-        sparse_segment_reduce_layer.type = _type;
-        validate(&sparse_segment_reduce_layer, inBlobs, params, blobs);
-
-        // reshape output
-        auto output_shape = inShapes[0];
-        output_shape[0] = inShapes[1][0];
-        outShapes = {output_shape};
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_sparse_weighted_reduce_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_sparse_weighted_reduce_shape_infer.hpp
deleted file mode 100644 (file)
index bf96fc5..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "ie_built_in_impl.hpp"
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
-*@brief Implementation of Shape inference for ExperimentalSparseWeightedReduce layer
-*/
-class ExperimentalSparseWeightedReduceShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit ExperimentalSparseWeightedReduceShapeProp(const std::string& type) : BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs,
-        const std::map<std::string, std::string>& params,
-        const std::map<std::string, Blob::Ptr>& blobs,
-        std::vector<SizeVector>& outShapes) override {
-        LayerParams lp{};
-        ExperimentalSparseWeightedReduceLayer sparse_weighted_reduce_layer(lp);
-        sparse_weighted_reduce_layer.params = params;
-        sparse_weighted_reduce_layer.type = _type;
-        validate(&sparse_weighted_reduce_layer, inBlobs, params, blobs);
-
-        // the layer produces a single output
-        size_t num_outputs = 1;
-
-        // reshape available outputs
-        outShapes.resize(num_outputs);
-        outShapes[0] = inShapes[3];
-
-        if (inBlobs[2]->getTensorDesc().getPrecision() == Precision::I32) {
-            auto* buffer = inBlobs[2]->cbuffer().as<int*>();
-            if (buffer != nullptr) {
-                outShapes[0][0] = static_cast<size_t>(buffer[0]);
-            } else {
-                THROW_IE_EXCEPTION << "The third input must have allocated data";
-            }
-        } else {
-            THROW_IE_EXCEPTION << "The third must have I32 precision";
-        }
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_split_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_split_shape_infer.hpp
deleted file mode 100644 (file)
index dcd7825..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Split layer
- */
-class SplitShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit SplitShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        SplitLayer splitLayer(lp);
-        splitLayer.params = params;
-        splitLayer.type = _type;
-        validate(&splitLayer, inBlobs, params, blobs);
-
-        std::vector<int> out_sizes = splitLayer.GetParamAsInts("out_sizes", {});
-        if (out_sizes.empty()) THROW_IE_EXCEPTION << "Value of out_sizes attribute is empty";
-
-        size_t sum(0);
-        for (const auto& size : out_sizes) sum += size;
-        if (sum != inShapes[0][splitLayer._axis])
-            THROW_IE_EXCEPTION << "The sum of out_sizes " << details::dumpVec(out_sizes)
-                               << " is not equal to the dimension on axis(" << splitLayer._axis << ")";
-
-        for (const auto& size : out_sizes) {
-            outShapes.push_back(inShapes[0]);
-            outShapes[outShapes.size() - 1][splitLayer._axis] = static_cast<size_t>(size);
-        }
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
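The Split rule above produces one output per out_sizes entry, as the following standalone sketch shows (plain std::vector, illustrative names):

    #include <cstddef>
    #include <stdexcept>
    #include <vector>

    // Each out_sizes entry copies the input shape with the split axis replaced;
    // the entries must sum to the input dimension on that axis.
    std::vector<std::vector<size_t>> splitOutShapes(const std::vector<size_t>& in,
                                                    size_t axis,
                                                    const std::vector<size_t>& outSizes) {
        size_t sum = 0;
        for (size_t s : outSizes) sum += s;
        if (sum != in[axis])
            throw std::runtime_error("out_sizes do not sum to the axis dimension");
        std::vector<std::vector<size_t>> outs;
        for (size_t s : outSizes) {
            outs.push_back(in);
            outs.back()[axis] = s;
        }
        return outs;
    }

Splitting {1, 6, 7} on axis 1 with out_sizes = {2, 4} yields {1, 2, 7} and {1, 4, 7}.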
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_squeeze_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_squeeze_shape_infer.hpp
deleted file mode 100644 (file)
index 472ccf4..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#ifndef NOMINMAX
-#define NOMINMAX
-#endif
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Squeeze layer
- */
-class SqueezeShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit SqueezeShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CNNLayer layer(lp);
-        layer.params = params;
-        layer.type = _type;
-        validate(&layer, inBlobs, params, blobs);
-
-        const size_t SQUEEZE_DATA = 0;
-        const size_t SQUEEZE_INDEXES = 1;
-
-        SizeVector data_dims;
-        SizeVector idx_dims;
-
-        idx_dims = inBlobs[SQUEEZE_INDEXES]->getTensorDesc().getDims();
-        if (idx_dims.size() > 1) THROW_IE_EXCEPTION << " Index vector should be 1-dimensional";
-
-        data_dims = inBlobs[SQUEEZE_DATA]->getTensorDesc().getDims();
-
-        if (data_dims.size() <= idx_dims[0] && !(data_dims.size() == 1 && idx_dims[0] == 1))
-            THROW_IE_EXCEPTION << " Incompatible number of data dimensions and indexes vector length!";
-        SizeVector outShape;
-        switch (inBlobs[SQUEEZE_INDEXES]->getTensorDesc().getPrecision()) {
-        case Precision::FP32: {
-            procIndices<float>(inBlobs, SQUEEZE_INDEXES, data_dims, outShape, idx_dims);
-        } break;
-        case Precision::FP16: {
-            procIndices<ie_fp16>(inBlobs, SQUEEZE_INDEXES, data_dims, outShape, idx_dims);
-        } break;
-        case Precision::I32: {
-            procIndices<int32_t>(inBlobs, SQUEEZE_INDEXES, data_dims, outShape, idx_dims);
-        } break;
-        case Precision::I64: {
-            procIndices<int64_t>(inBlobs, SQUEEZE_INDEXES, data_dims, outShape, idx_dims);
-        } break;
-        case Precision::U64: {
-            procIndices<uint64_t>(inBlobs, SQUEEZE_INDEXES, data_dims, outShape, idx_dims);
-        } break;
-        default:
-            THROW_IE_EXCEPTION
-                << "Incorrect 'indices_to_squeeze' input precision. Only FP32, FP16, I32, I64 and U64 are supported!";
-        }
-        outShapes.push_back(outShape);
-    }
-
-private:
-    template <typename T>
-    void procIndices(const std::vector<Blob::CPtr>& inBlobs, const size_t SQUEEZE_INDEXES, SizeVector& data_dims,
-                     SizeVector& outShape, const SizeVector& idx_dims) {
-        T* idx_data = inBlobs[SQUEEZE_INDEXES]->cbuffer().as<T*>() +
-                      inBlobs[SQUEEZE_INDEXES]->getTensorDesc().getBlockingDesc().getOffsetPadding();
-        for (size_t i = 0; i < idx_dims[0]; i++) {
-            auto axis = castToInt32(idx_data[i]);
-            if (axis < 0) axis += data_dims.size();
-
-            if (axis < 0 || static_cast<size_t>(axis) >= data_dims.size()) {
-                THROW_IE_EXCEPTION << "Index to squeeze exceeds data tensor rank";
-            } else if (data_dims[axis] != 1) {
-                THROW_IE_EXCEPTION << "Index to squeeze of data tensor dimension is not 1";
-            }
-        }
-        for (size_t j = 0; j < data_dims.size(); j++) {
-            bool found = false;
-            for (size_t i = 0; i < inBlobs[SQUEEZE_INDEXES]->size(); i++) {
-                auto axis = castToInt32(idx_data[i]);
-                if (axis < 0) axis += data_dims.size();
-                if (j == static_cast<size_t>(axis)) found = true;
-            }
-            if (!found) outShape.push_back(data_dims[j]);
-        }
-    }
-
-    int32_t castToInt32(ie_fp16 x) {
-        return static_cast<int32_t>(InferenceEngine::PrecisionUtils::f16tof32(x));
-    }
-
-    int32_t castToInt32(uint64_t x) {
-        return static_cast<int32_t>(x);
-    }
-
-    int32_t castToInt32(int64_t x) {
-        return static_cast<int32_t>(x);
-    }
-
-    int32_t castToInt32(int32_t x) {
-        return x;
-    }
-
-    int32_t castToInt32(float x) {
-        return static_cast<int32_t>(x);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
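For reference, the output-shape rule the deleted SqueezeShapeProp implemented is small enough to state standalone. A minimal sketch in plain C++ (illustrative only, no Inference Engine types; squeezeShape is a hypothetical helper name):

    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    // Sketch: Squeeze output shape -- drop the size-1 dimensions named by
    // `axes`; negative axes wrap around, as in the removed implementation.
    std::vector<size_t> squeezeShape(const std::vector<size_t>& dataDims,
                                     std::vector<int32_t> axes) {
        for (auto& axis : axes) {
            if (axis < 0) axis += static_cast<int32_t>(dataDims.size());
            if (axis < 0 || static_cast<size_t>(axis) >= dataDims.size())
                throw std::runtime_error("Index to squeeze exceeds data tensor dimension");
            if (dataDims[axis] != 1)
                throw std::runtime_error("Dimension selected for squeeze is not equal to 1");
        }
        std::vector<size_t> out;
        for (size_t j = 0; j < dataDims.size(); j++) {
            bool squeezed = false;
            for (int32_t axis : axes)
                if (j == static_cast<size_t>(axis)) squeezed = true;
            if (!squeezed) out.push_back(dataDims[j]);
        }
        return out;  // e.g. {1, 3, 1, 5} with axes {0, 2} -> {3, 5}
    }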
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_strided_slice_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_strided_slice_shape_infer.hpp
deleted file mode 100644 (file)
index 55e1dad..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <algorithm>
-#include <map>
-#include <memory>
-#include <shape_infer/const_infer/ie_strided_slice_const_infer.hpp>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for StridedSlice layer
- */
-class StridedSliceShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit StridedSliceShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        StridedSliceHelper helper(inBlobs, params);
-        outShapes.push_back(helper.getOutputShape());
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_tensor_iterator_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_tensor_iterator_shape_infer.hpp
deleted file mode 100644 (file)
index e858221..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <legacy/ie_layers.h>
-
-#include <description_buffer.hpp>
-#include <map>
-#include <memory>
-#include <shape_infer/ie_reshaper.hpp>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for TensorIterator layer
- */
-class TensorIteratorShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit TensorIteratorShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void setOriginalLayer(const CNNLayer* layer) {
-        auto ti = dynamic_cast<const TensorIterator*>(layer);
-        if (!ti) THROW_IE_EXCEPTION << "Error during shape infer. Original layer is not TensorIterator.";
-        _original_ti = ti;
-    }
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        TensorIterator ti(lp);
-        ti.params = params;
-        ti.type = _type;
-        ti.body = _original_ti->body;
-        ti.back_edges = _original_ti->back_edges;
-        ti.input_port_map = _original_ti->input_port_map;
-        ti.output_port_map = _original_ti->output_port_map;
-        validate(&ti, inBlobs, params, blobs);
-
-        // TODO: make a utility function to calculate the number of iterations
-        int num_iteration = 1;
-
-        // Prepare input shapes for internal body
-        std::map<std::string, std::vector<size_t>> newInShapes;
-        for (auto& port_map : ti.input_port_map) {
-            int ext_port = port_map.from;
-            int int_port = port_map.to;
-            auto int_name = ti.body.inputs[int_port]->getName();
-
-            auto shape = inShapes[ext_port];
-            if (port_map.axis != -1) {
-                int size = shape[port_map.axis];
-                int start = port_map.start < 0 ? port_map.start + size + 1 : port_map.start;
-                int end = port_map.end < 0 ? port_map.end + size + 1 : port_map.end;
-
-                num_iteration = std::abs(end - start) / std::abs(port_map.stride);
-
-                // Iterated port: the sliced dimension changes with each iteration
-                shape[port_map.axis] = port_map.part_size;
-            }
-
-            newInShapes[int_name] = shape;
-        }
-
-        // Body shape infer
-        _body_reshaper = std::make_shared<Reshaper>(_original_ti->body.inputs);
-        _body_reshaper->runNoApply(newInShapes);
-
-        outShapes.resize(ti.output_port_map.size());
-        for (auto& port_map : ti.output_port_map) {
-            int ext_port = port_map.from;
-            int int_port = port_map.to;
-            auto& int_out_data = ti.body.outputs[int_port];
-            auto shape = _body_reshaper->getResultShapeFor(int_out_data);
-
-            if (port_map.axis != -1) {
-                // Iterated port: scale the sliced dimension by the iteration count
-                shape[port_map.axis] *= num_iteration;
-            }
-
-            outShapes[ext_port] = shape;
-        }
-    }
-
-    void apply() {
-        if (!_body_reshaper)
-            THROW_IE_EXCEPTION << "Request of apply reshape results while shape infer was not finished";
-        _body_reshaper->apply();
-        _body_reshaper.reset(); // WA: reset _body_reshaper to release ownership for input data
-    }
-
-private:
-    const TensorIterator* _original_ti = nullptr;
-    std::shared_ptr<Reshaper> _body_reshaper;
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
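The TODO above about an iteration-count utility was never addressed before removal; under the start/end normalization used in this file, such a helper could have looked like the sketch below (plain C++; numIterations is a hypothetical name, not an IE API):

    #include <cstdlib>

    // Sketch: iteration count for one iterated TensorIterator port, using the
    // same normalization of negative start/end bounds as the code above.
    int numIterations(int dimSize, int start, int end, int stride) {
        if (start < 0) start += dimSize + 1;  // negative bounds count from the end
        if (end < 0) end += dimSize + 1;
        return std::abs(end - start) / std::abs(stride);
    }
    // Example: numIterations(10, 0, -1, 1) == 10, a full pass over a
    // dimension of size 10 with unit stride.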
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_tile_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_tile_shape_infer.hpp
deleted file mode 100644 (file)
index 947d858..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Tile layer
- */
-class TileShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit TileShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        TileLayer tileLayer(lp);
-        tileLayer.params = params;
-        tileLayer.type = _type;
-        validate(&tileLayer, inBlobs, params, blobs);
-        outShapes.push_back(inShapes[0]);
-        outShapes[0][tileLayer.axis] *= tileLayer.tiles;
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_topk_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_topk_shape_infer.hpp
deleted file mode 100644 (file)
index 28cfa0f..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <algorithm>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for TopK layer
- */
-class TopKShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit TopKShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        TopKLayer topKLayer(lp);
-        topKLayer.params = params;
-        topKLayer.type = _type;
-        validate(&topKLayer, inBlobs, params, blobs);
-
-        const size_t TOPK_DATA = 0;
-        const size_t TOPK_K = 1;
-
-        if (inBlobs[TOPK_DATA]->getTensorDesc().getPrecision() != Precision::FP32)
-            THROW_IE_EXCEPTION << " Incorrect input data tensor precision. Only FP32 is supported!";
-
-        if (inBlobs[TOPK_K]->getTensorDesc().getPrecision() != Precision::I32)
-            THROW_IE_EXCEPTION << " Incorrect input index value precision. Only I32 is supported!";
-
-        if (inBlobs[TOPK_K]->getTensorDesc().getDims().size() > 1)
-            THROW_IE_EXCEPTION << " Index vector should be 1 dimension";
-
-        SizeVector src_dims = inBlobs[TOPK_DATA]->getTensorDesc().getDims();
-        int axis_ = topKLayer.axis;
-        if (axis_ < 0) axis_ += src_dims.size();
-
-        size_t axis = static_cast<size_t>(axis_);
-
-        if (src_dims.size() < (1 + axis))
-            THROW_IE_EXCEPTION << " Incorrect input parameters dimensions and axis number!";
-
-        int* src_k = inBlobs[TOPK_K]->cbuffer().as<int*>();
-        if (src_k == nullptr) THROW_IE_EXCEPTION << " Only const input for 'k' is supported!";
-
-        src_k += inBlobs[TOPK_K]->getTensorDesc().getBlockingDesc().getOffsetPadding();
-
-        outShapes.push_back(inShapes[0]);
-        outShapes.push_back(inShapes[0]);
-        outShapes[0][axis] = static_cast<size_t>(src_k[0]);
-        outShapes[1][axis] = static_cast<size_t>(src_k[0]);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
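The shape rule itself reduces to: both outputs (values and indices) copy the input dims with the axis entry replaced by k. A standalone sketch (plain C++, illustrative only, not IE API):

    #include <stdexcept>
    #include <utility>
    #include <vector>

    // Sketch: TopK output shapes -- the values and indices outputs share the
    // input shape with the axis dimension replaced by k.
    std::pair<std::vector<size_t>, std::vector<size_t>>
    topKShapes(std::vector<size_t> dims, int axis, size_t k) {
        if (axis < 0) axis += static_cast<int>(dims.size());
        if (axis < 0 || static_cast<size_t>(axis) >= dims.size())
            throw std::runtime_error("Incorrect axis for TopK");
        dims[static_cast<size_t>(axis)] = k;
        return {dims, dims};  // e.g. {2, 100}, axis=1, k=5 -> {2, 5} twice
    }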
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_unsqueeze_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_unsqueeze_shape_infer.hpp
deleted file mode 100644 (file)
index 8b522f5..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#ifndef NOMINMAX
-#define NOMINMAX
-#endif
-
-#include <algorithm>
-#include <iostream>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Unsqueeze layer
- */
-class UnsqueezeShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit UnsqueezeShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CNNLayer unsqueezeLayer(lp);
-        unsqueezeLayer.params = params;
-        unsqueezeLayer.type = _type;
-        validate(&unsqueezeLayer, inBlobs, params, blobs);
-
-        const size_t UNSQUEEZE_DATA = 0;
-        const size_t UNSQUEEZE_INDEXES = 1;
-
-        SizeVector idx_dims = inBlobs[UNSQUEEZE_INDEXES]->getTensorDesc().getDims();
-        SizeVector data_dims = inBlobs[UNSQUEEZE_DATA]->getTensorDesc().getDims();
-        SizeVector outShape;
-        if (idx_dims.size() > 1) THROW_IE_EXCEPTION << " Index vector should be 1-dimensional";
-
-        switch (inBlobs[UNSQUEEZE_INDEXES]->getTensorDesc().getPrecision()) {
-        case Precision::FP32: {
-            procIndices<float>(inBlobs, UNSQUEEZE_INDEXES, data_dims, outShape, idx_dims);
-        } break;
-        case Precision::FP16: {
-            procIndices<ie_fp16>(inBlobs, UNSQUEEZE_INDEXES, data_dims, outShape, idx_dims);
-        } break;
-        case Precision::I32: {
-            procIndices<int32_t>(inBlobs, UNSQUEEZE_INDEXES, data_dims, outShape, idx_dims);
-        } break;
-        default:
-            THROW_IE_EXCEPTION << "Incorrect 'indices_to_set' input precision. Only FP32, FP16 and I32 are supported!";
-        }
-        outShapes.push_back(outShape);
-    }
-
-private:
-    template <typename T>
-    void procIndices(const std::vector<Blob::CPtr>& inBlobs, const size_t UNSQUEEZE_INDEXES, SizeVector& data_dims,
-                     SizeVector& outShape, const SizeVector& idx_dims) {
-        T* idx_data = inBlobs[UNSQUEEZE_INDEXES]->cbuffer().as<T*>() +
-                      inBlobs[UNSQUEEZE_INDEXES]->getTensorDesc().getBlockingDesc().getOffsetPadding();
-        if (!idx_data) {
-            outShape = data_dims;
-            return;
-        }
-        size_t max = data_dims.size();
-        for (size_t i = 0; i < idx_dims[0]; i++) {
-            auto axis = static_cast<size_t>(castToInt32(idx_data[i]));
-            max = std::max(max, axis);
-        }
-        max++;
-        if ((idx_dims[0] + data_dims.size()) < max) {
-            THROW_IE_EXCEPTION << "Indices_to_set for unsqueeze layer is out of tensor dimension";
-        }
-        max = inBlobs[UNSQUEEZE_INDEXES]->size() + data_dims.size();
-        for (size_t i = 0, j = 0, k = 0; i < max; i++) {
-            size_t index_to_push = 1;
-
-            if (k < inBlobs[UNSQUEEZE_INDEXES]->size() && i == static_cast<size_t>(castToInt32(idx_data[k]))) {
-                k++;
-            } else {
-                index_to_push = data_dims[j++];
-            }
-
-            outShape.push_back(index_to_push);
-        }
-    }
-
-    int32_t castToInt32(ie_fp16 x) {
-        return static_cast<int32_t>(InferenceEngine::PrecisionUtils::f16tof32(x));
-    }
-
-    int32_t castToInt32(int32_t x) {
-        return x;
-    }
-
-    int32_t castToInt32(float x) {
-        return static_cast<int32_t>(x);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
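As with Squeeze above, the Unsqueeze shape rule can be stated standalone: insert a size-1 dimension at every result position named by the indices. A minimal sketch (plain C++; unlike the removed code it sorts the axes explicitly rather than assuming sorted input):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Sketch: Unsqueeze output shape -- insert size-1 dimensions at the given
    // axis positions of the result, keeping the data dims in order.
    std::vector<size_t> unsqueezeShape(const std::vector<size_t>& dataDims,
                                       std::vector<int32_t> axes) {
        std::sort(axes.begin(), axes.end());
        std::vector<size_t> out;
        size_t j = 0, k = 0;
        const size_t total = dataDims.size() + axes.size();
        for (size_t i = 0; i < total; i++) {
            if (k < axes.size() && i == static_cast<size_t>(axes[k])) {
                out.push_back(1);  // a position named by an index gets a new 1-dim
                k++;
            } else {
                out.push_back(dataDims[j++]);
            }
        }
        return out;  // e.g. {3, 4} with axes {0, 3} -> {1, 3, 4, 1}
    }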
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_upsampling_shape_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/ie_upsampling_shape_infer.hpp
deleted file mode 100644 (file)
index 17c9d43..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <legacy/ie_layers.h>
-
-#include <description_buffer.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_built_in_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Shape inference for Upsampling layer
- */
-class UpsamplingShapeProp : public BuiltInShapeInferImpl {
-public:
-    explicit UpsamplingShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
-
-    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs, const std::map<std::string, std::string>& params,
-                         const std::map<std::string, Blob::Ptr>& blobs, std::vector<SizeVector>& outShapes) override {
-        LayerParams lp {};
-        CNNLayer cnnLayer(lp);
-        cnnLayer.params = params;
-        cnnLayer.type = _type;
-        validate(&cnnLayer, inBlobs, params, blobs);
-        size_t scale = static_cast<size_t>(cnnLayer.GetParamAsInt("scale"));
-        SizeVector out_shapes = {inShapes[0][0], inShapes[0][1]};
-        for (size_t i = 2; i < inShapes[0].size(); i++) {
-            out_shapes.push_back(inShapes[0][i] * scale);
-        }
-        outShapes.push_back(out_shapes);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/built-in/impl_register.hpp b/inference-engine/src/legacy_api/src/shape_infer/built-in/impl_register.hpp
deleted file mode 100644 (file)
index a8cd489..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <memory>
-#include <string>
-
-#include "legacy/shape_infer/built-in/ie_built_in_holder.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-template <typename Impl>
-class ImplRegisterBase {
-public:
-    explicit ImplRegisterBase(const std::string& type) {
-        BuiltInShapeInferHolder::AddImpl(type, std::make_shared<Impl>(type));
-    }
-};
-
-#define REG_SHAPE_INFER_FOR_TYPE(__prim, __type) static ImplRegisterBase<__prim> __bi_reg__##__type(#__type)
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
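For context, this macro was the only registration mechanism for built-in shape-infer implementations: a file-scope static whose constructor ran before main() and populated the holder. A hypothetical registration (MyShapeProp is illustrative, not a real IE class) looked like:

    // Hypothetical usage of the removed macro: the static ImplRegisterBase
    // constructor adds the implementation under the type name "My".
    class MyShapeProp : public BuiltInShapeInferImpl {
    public:
        explicit MyShapeProp(const std::string& type): BuiltInShapeInferImpl(type) {}
        void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs,
                             const std::map<std::string, std::string>& params,
                             const std::map<std::string, Blob::Ptr>& blobs,
                             std::vector<SizeVector>& outShapes) override {
            outShapes.push_back(inShapes[0]);  // identity shape propagation
        }
    };
    REG_SHAPE_INFER_FOR_TYPE(MyShapeProp, My);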
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/broadcast_offset.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/broadcast_offset.hpp
deleted file mode 100644 (file)
index 336af17..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-#include <precision_utils.h>
-
-#include <ie_precision.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-class BroadcastOffset {
-    SizeVector dims;
-    SizeVector offset_v;
-
-    SizeVector getDims(const SizeVector& originDims, const SizeVector& outputDims) {
-        SizeVector d(outputDims.size(), 1);
-        for (size_t i = 0; i < originDims.size(); i++) {
-            d[d.size() - 1 - i] = originDims[originDims.size() - 1 - i];
-        }
-        return d;
-    }
-
-    SizeVector getOffset(const SizeVector& originDims, const SizeVector& outDims) {
-        SizeVector o(originDims.size());
-        if (originDims.size() != outDims.size())
-            THROW_IE_EXCEPTION << "Cannot calculate offsets! Incorrect patameters for eltwise broadcast!";
-        int k = 1;
-        for (int i = originDims.size() - 1; i >= 0; i--) {
-            o[i] = (originDims[i] == outDims[i]) ? k : 0;
-            k *= originDims[i];
-        }
-        return o;
-    }
-
-public:
-    BroadcastOffset(const SizeVector& originDims, const SizeVector& outputDims) {
-        dims = getDims(originDims, outputDims);
-        offset_v = getOffset(dims, outputDims);
-    }
-
-    size_t offset(const SizeVector& v) const {
-        size_t off = 0;
-        if (v.size() != offset_v.size())
-            THROW_IE_EXCEPTION << "Cannot calculate offsets! Incorrect patameters for eltwise broadcast!";
-        for (size_t i = 0; i < v.size(); i++) {
-            off += v[i] * offset_v[i];
-        }
-        return off;
-    }
-
-    SizeVector offset_dims(size_t l) const {
-        size_t n_dims = dims.size();
-        SizeVector pos(n_dims);
-        for (int rd = 1; rd <= n_dims; ++rd) {
-            const size_t d = n_dims - rd;
-            const size_t cur_dim = dims[d];
-            pos[d] = l % cur_dim;
-            l /= cur_dim;
-        }
-        return pos;
-    }
-};
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
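The essential trick in this helper is that broadcast dimensions (size 1) get stride 0, so every output coordinate collapses onto the single source element. A standalone sketch of that mapping (plain C++ reimplementation, not the IE class):

    #include <cstddef>
    #include <vector>

    // Sketch of the BroadcastOffset idea: dimensions of size 1 contribute
    // stride 0, so all output coordinates map onto the one broadcast element.
    size_t broadcastOffset(const std::vector<size_t>& coord,
                           const std::vector<size_t>& srcDims) {
        size_t off = 0, stride = 1;
        for (int i = static_cast<int>(srcDims.size()) - 1; i >= 0; i--) {
            if (srcDims[i] != 1) off += coord[i] * stride;  // stride 0 when dim == 1
            stride *= srcDims[i];
        }
        return off;
    }
    // Example: srcDims {1, 3} broadcast to output {2, 3}: coordinates {0,2}
    // and {1,2} both map to source offset 2, so row 0 is reused for every
    // output row.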
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_add_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_add_const_infer.hpp
deleted file mode 100644 (file)
index 6e08ff4..0000000
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-#include <precision_utils.h>
-
-#include <ie_precision.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "broadcast_offset.hpp"
-#include "ie_const_infer_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Const inference for Add layer
- *
- *   Table of output data types for the given input precisions:
- *
- *
- *              U8       I32        I64        FP16        FP32
- *     =============================================================
- *     U8   ==  U8       I32        I64        FP16        FP32
- *          ==
- *     I32  ==  I32      I32        I64        FP32        FP32
- *          ==
- *     I64  ==  I64      I64        I64        FP32        FP32
- *          ==
- *     FP16 ==  FP16     FP32       FP32       FP16        FP32
- *          ==
- *     FP32 ==  FP32     FP32       FP32       FP32        FP32
- *
- *     There is a special case for FP16 precision: the inputs are converted to FP32 and added, and the
- *     result is converted back to FP16 if both inputs are FP16, or if one is FP16 and the other is U8.
- */
-
-class AddConstInfer : public ConstInferImpl {
-public:
-    explicit AddConstInfer(const std::string& type): ConstInferImpl(type) {}
-
-    struct fp16tofp32 {
-        inline float operator()(ie_fp16 value) {
-            return static_cast<float>(PrecisionUtils::f16tof32(value));
-        }
-    };
-
-    struct fp32tofp16 {
-        inline ie_fp16 operator()(float value) {
-            return PrecisionUtils::f32tof16(value);
-        }
-    };
-
-    template <typename dataType>
-    struct noConversion {
-        inline dataType operator()(dataType value) {
-            return value;
-        }
-    };
-
-    template <typename inDatatype1, typename inDatatype2, typename outDatatype, class ConversionInData1,
-              class ConversionInData2, class ConversionOutData>
-    void add(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-             const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) {
-        auto* firstBlobBuffer = inData[0]->cbuffer().as<inDatatype1*>();
-        auto* secondBlobBuffer = inData[1]->cbuffer().as<inDatatype2*>();
-
-        if (!firstBlobBuffer || !secondBlobBuffer) {
-            THROW_IE_EXCEPTION << "empty input data";
-        }
-
-        auto outBlob = *outData.begin();
-        auto* outBuffer = outBlob->buffer().as<outDatatype*>();
-        if (!outBuffer) THROW_IE_EXCEPTION << "empty output data";
-
-        BroadcastOffset outOff(outBlob->getTensorDesc().getDims(), outBlob->getTensorDesc().getDims());
-        BroadcastOffset inOff1(inData[0]->getTensorDesc().getDims(), outBlob->getTensorDesc().getDims());
-        BroadcastOffset inOff2(inData[1]->getTensorDesc().getDims(), outBlob->getTensorDesc().getDims());
-
-        for (size_t i = 0; i < outBlob->size(); i++) {
-            SizeVector offsetDims = outOff.offset_dims(i);
-            outBuffer[outOff.offset(offsetDims)] =
-                ConversionOutData()(ConversionInData1()(firstBlobBuffer[inOff1.offset(offsetDims)]) +
-                                    ConversionInData2()(secondBlobBuffer[inOff2.offset(offsetDims)]));
-        }
-    }
-
-    void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        size_t numInputs = inData.size();
-        if (numInputs != 2)
-            THROW_IE_EXCEPTION << "Unsupported number of inputs: " << numInputs << ". Only 2 inputs are supported";
-
-        auto compare =
-            getPrecisionMask(inData[0]->getTensorDesc().getPrecision(), inData[1]->getTensorDesc().getPrecision(),
-                             outData[0]->getTensorDesc().getPrecision());
-
-        switch (compare) {
-        case getPrecisionMask(Precision::U8, Precision::U8, Precision::U8):
-            add<uint8_t, uint8_t, uint8_t, noConversion<uint8_t>, noConversion<uint8_t>, noConversion<uint8_t>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::U8, Precision::I32, Precision::I32):
-            add<uint8_t, int, int, noConversion<uint8_t>, noConversion<int>, noConversion<int>>(inData, params, blobs,
-                                                                                                outData);
-            break;
-        case getPrecisionMask(Precision::U8, Precision::I64, Precision::I64):
-            add<uint8_t, long long int, long long int, noConversion<uint8_t>, noConversion<long long int>,
-                noConversion<long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::U8, Precision::U64, Precision::U64):
-            add<uint8_t, unsigned long long int, unsigned long long int, noConversion<uint8_t>,
-                noConversion<unsigned long long int>, noConversion<unsigned long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::U8, Precision::FP16, Precision::FP16):
-            add<uint8_t, ie_fp16, ie_fp16, noConversion<uint8_t>, fp16tofp32, fp32tofp16>(inData, params, blobs,
-                                                                                          outData);
-            break;
-        case getPrecisionMask(Precision::U8, Precision::FP32, Precision::FP32):
-            add<uint8_t, float, float, noConversion<uint8_t>, noConversion<float>, noConversion<float>>(inData, params,
-                                                                                                        blobs, outData);
-            break;
-
-        case getPrecisionMask(Precision::I32, Precision::U8, Precision::I32):
-            add<int, uint8_t, int, noConversion<int>, noConversion<uint8_t>, noConversion<int>>(inData, params, blobs,
-                                                                                                outData);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::I32, Precision::I32):
-            add<int, int, int, noConversion<int>, noConversion<int>, noConversion<int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::I64, Precision::I64):
-            add<int, long long int, long long int, noConversion<int>, noConversion<long long int>,
-                noConversion<long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::U64, Precision::U64):
-            add<int, unsigned long long int, unsigned long long int, noConversion<int>,
-                noConversion<unsigned long long int>, noConversion<unsigned long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::FP16, Precision::FP32):
-            add<int, ie_fp16, float, noConversion<int>, fp16tofp32, noConversion<float>>(inData, params, blobs,
-                                                                                         outData);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::FP32, Precision::FP32):
-            add<int, float, float, noConversion<int>, noConversion<float>, noConversion<float>>(inData, params, blobs,
-                                                                                                outData);
-            break;
-
-        case getPrecisionMask(Precision::I64, Precision::U8, Precision::I64):
-            add<long long int, uint8_t, long long int, noConversion<long long int>, noConversion<uint8_t>,
-                noConversion<long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I64, Precision::I32, Precision::I64):
-            add<long long int, int, long long int, noConversion<long long int>, noConversion<int>,
-                noConversion<long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I64, Precision::I64, Precision::I64):
-            add<long long int, long long int, long long int, noConversion<long long int>, noConversion<long long int>,
-                noConversion<long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I64, Precision::FP16, Precision::FP32):
-            add<long long int, ie_fp16, float, noConversion<long long int>, fp16tofp32, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I64, Precision::FP32, Precision::FP32):
-            add<long long int, float, float, noConversion<long long int>, noConversion<float>, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-
-        case getPrecisionMask(Precision::U64, Precision::U8, Precision::U64):
-            add<unsigned long long int, uint8_t, unsigned long long int, noConversion<unsigned long long int>, noConversion<uint8_t>,
-                noConversion<unsigned long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::U64, Precision::I32, Precision::U64):
-            add<unsigned long long int, int, unsigned long long int, noConversion<unsigned long long int>, noConversion<int>,
-                noConversion<unsigned long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::U64, Precision::U64, Precision::U64):
-            add<unsigned long long int, unsigned long long int, unsigned long long int,
-                noConversion<unsigned long long int>, noConversion<unsigned long long int>,
-                noConversion<unsigned long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::U64, Precision::FP16, Precision::FP32):
-            add<unsigned long long int, ie_fp16, float, noConversion<unsigned long long int>, fp16tofp32, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::U64, Precision::FP32, Precision::FP32):
-            add<unsigned long long int, float, float, noConversion<unsigned long long int>, noConversion<float>, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-
-        case getPrecisionMask(Precision::FP16, Precision::U8, Precision::FP16):
-            add<ie_fp16, uint8_t, ie_fp16, fp16tofp32, noConversion<uint8_t>, fp32tofp16>(inData, params, blobs,
-                                                                                          outData);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::I32, Precision::FP32):
-            add<ie_fp16, int, float, fp16tofp32, noConversion<int>, noConversion<float>>(inData, params, blobs,
-                                                                                         outData);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::I64, Precision::FP32):
-            add<ie_fp16, long long int, float, fp16tofp32, noConversion<long long int>, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::U64, Precision::FP32):
-            add<ie_fp16, unsigned long long int, float, fp16tofp32, noConversion<unsigned long long int>, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::FP16, Precision::FP16):
-            add<ie_fp16, ie_fp16, ie_fp16, fp16tofp32, fp16tofp32, fp32tofp16>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::FP32, Precision::FP16):
-            add<ie_fp16, float, ie_fp16, fp16tofp32, noConversion<float>, fp32tofp16>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::FP32, Precision::FP32):
-            add<ie_fp16, float, float, fp16tofp32, noConversion<float>, noConversion<float>>(inData, params, blobs,
-                                                                                             outData);
-            break;
-
-        case getPrecisionMask(Precision::FP32, Precision::U8, Precision::FP32):
-            add<float, uint8_t, float, noConversion<float>, noConversion<uint8_t>, noConversion<float>>(inData, params,
-                                                                                                        blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::I32, Precision::FP32):
-            add<float, int, float, noConversion<float>, noConversion<int>, noConversion<float>>(inData, params, blobs,
-                                                                                                outData);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::I64, Precision::FP32):
-            add<float, long long int, float, noConversion<float>, noConversion<long long int>, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::U64, Precision::FP32):
-            add<float, unsigned long long int, float, noConversion<float>, noConversion<unsigned long long int>, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::FP16, Precision::FP32):
-            add<float, ie_fp16, float, noConversion<float>, fp16tofp32, noConversion<float>>(inData, params, blobs,
-                                                                                             outData);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::FP16, Precision::FP16):
-            add<float, ie_fp16, ie_fp16, noConversion<float>, fp16tofp32, fp32tofp16>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::FP32, Precision::FP32):
-            add<float, float, float, noConversion<float>, noConversion<float>, noConversion<float>>(inData, params,
-                                                                                                    blobs, outData);
-            break;
-        default:
-            THROW_IE_EXCEPTION << "Unsupported precision!";
-        }
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
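The large switch above relies on getPrecisionMask packing the input and output precisions into one integer so each typed kernel gets its own case label. A toy, self-contained illustration of the same dispatch pattern (the enum and mask function here are stand-ins, not IE's definitions):

    #include <cstdint>
    #include <iostream>

    // Sketch of the dispatch trick used above: pack up to three precision
    // codes into one integer so a single switch selects a typed kernel.
    enum class Prec : uint32_t { U8 = 1, I32 = 2, FP32 = 3 };

    constexpr uint32_t mask(Prec a, Prec b, Prec c) {
        return (static_cast<uint32_t>(a) << 16) |
               (static_cast<uint32_t>(b) << 8) |
                static_cast<uint32_t>(c);
    }

    int main() {
        switch (mask(Prec::I32, Prec::FP32, Prec::FP32)) {
        case mask(Prec::I32, Prec::FP32, Prec::FP32):
            std::cout << "dispatch add<int, float, float>\n";  // typed kernel here
            break;
        default:
            std::cout << "unsupported precision combination\n";
        }
    }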
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_broadcast_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_broadcast_const_infer.hpp
deleted file mode 100644 (file)
index f24e9fd..0000000
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-
-#include <cmath>
-#include <ie_algorithm.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_const_infer_impl.hpp"
-#include "ie_memcpy.h"
-#include "ie_parallel.hpp"
-#include "precision_utils.h"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Const inference for Broadcast layer
- */
-class BroadcastConstInfer : public ConstInferImpl {
-private:
-    const size_t BROADCAST_INPUT = 0;
-    const size_t BROADCAST_SHAPE = 1;
-
-public:
-    explicit BroadcastConstInfer(const std::string& type): ConstInferImpl(type) {}
-
-    void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        LayerParams lp {};
-        CNNLayer layer(lp);
-        layer.params = params;
-
-        if (outData.empty()) THROW_IE_EXCEPTION << "Incorrect number of input/output edges!";
-
-        if (inData.size() != 2) THROW_IE_EXCEPTION << "Incorrect number of input edges!";
-
-        if (inData[BROADCAST_SHAPE]->getTensorDesc().getDims().size() > 1)
-            THROW_IE_EXCEPTION << "Shape vector should be 1 dimension";
-
-        size_t data_size = inData[BROADCAST_INPUT]->getTensorDesc().getPrecision().size();
-        size_t shape_size = (inData[BROADCAST_SHAPE]->getTensorDesc().getDims())[0];
-        SizeVector dst_dims = outData[0]->getTensorDesc().getDims();
-        SizeVector src_dims = inData[BROADCAST_INPUT]->getTensorDesc().getDims();
-
-        if (!src_dims.size()) src_dims = SizeVector(1, 1);
-
-        if (dst_dims.size() != shape_size) {
-            THROW_IE_EXCEPTION << "Output tensor dimension mismatch";
-        }
-
-        if (src_dims.size() > dst_dims.size()) {
-            THROW_IE_EXCEPTION << "Output tensor dimension is smaller then input tensor dimension";
-        }
-
-        InferenceEngine::SizeVector dstStrides = outData[0]->getTensorDesc().getBlockingDesc().getStrides();
-        InferenceEngine::SizeVector srcStrides =
-            inData[BROADCAST_INPUT]->getTensorDesc().getBlockingDesc().getStrides();
-        InferenceEngine::SizeVector src_aligned(dst_dims.size());
-        InferenceEngine::SizeVector srcStrides_aligned(dst_dims.size());
-        if (!srcStrides.size()) srcStrides = SizeVector(1, 1);
-
-        size_t prefix_size = dst_dims.size() - src_dims.size();
-        for (size_t i = 0; i < dst_dims.size(); i++) {
-            if (i < prefix_size) {
-                src_aligned[i] = 1;
-                srcStrides_aligned[i] = srcStrides[0];
-            } else {
-                src_aligned[i] = src_dims[i - prefix_size];
-                srcStrides_aligned[i] = srcStrides[i - prefix_size];
-            }
-        }
-
-        size_t work_amount_dst = dstStrides[0] * dst_dims[0];
-        const uint8_t* src_data = inData[BROADCAST_INPUT]->cbuffer().as<const uint8_t*>() +
-                                  inData[BROADCAST_INPUT]->getTensorDesc().getBlockingDesc().getOffsetPadding();
-        uint8_t* dst_data =
-            outData[0]->buffer().as<uint8_t*>() + outData[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
-
-        parallel_nt(0, [&](const int ithr, const int nthr) {
-            size_t i, src_idx, start = 0, end = 0;
-            SizeVector counters(dst_dims.size(), 0);
-            splitter(work_amount_dst, nthr, ithr, start, end);
-            for (int j = dst_dims.size() - 1, i = start; j >= 0; j--) {
-                counters[j] = i % dst_dims[j];
-                i /= dst_dims[j];
-            }
-            for (size_t iwork = start * data_size; iwork < end * data_size; iwork += data_size) {
-                for (i = 0, src_idx = 0; i < dst_dims.size(); ++i)
-                    src_idx += counters[i] ? ((counters[i] % src_aligned[i]) * srcStrides_aligned[i]) : 0;
-
-                ie_memcpy(&dst_data[iwork], data_size, &src_data[src_idx * data_size], data_size);
-
-                for (int j = dst_dims.size() - 1; j >= 0; j--) {
-                    counters[j] = (counters[j] + 1) % dst_dims[j];
-                    if (counters[j] != 0) break;
-                }
-            }
-        });
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
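The parallel loop above walks output elements with an odometer-style counter vector: increment the last dimension and carry leftwards. A simplified single-threaded sketch of that traversal (plain std C++, forEachIndex is a hypothetical helper):

    #include <cstddef>
    #include <functional>
    #include <vector>

    // Sketch: visit every N-D index of `dims` in row-major order by
    // incrementing a counter vector with carry, as the removed loop did.
    void forEachIndex(const std::vector<size_t>& dims,
                      const std::function<void(const std::vector<size_t>&)>& fn) {
        std::vector<size_t> counters(dims.size(), 0);
        size_t total = 1;
        for (size_t d : dims) total *= d;
        for (size_t i = 0; i < total; i++) {
            fn(counters);
            for (int j = static_cast<int>(dims.size()) - 1; j >= 0; j--) {
                counters[j] = (counters[j] + 1) % dims[j];
                if (counters[j] != 0) break;  // no carry past this dimension
            }
        }
    }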
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_concat_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_concat_const_infer.hpp
deleted file mode 100644 (file)
index 1ac37d3..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-#include <ie_memcpy.h>
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_const_infer_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Const inference for Concat layer
- */
-class ConcatConstInfer : public ConstInferImpl {
-public:
-    explicit ConcatConstInfer(const std::string& type): ConstInferImpl(type) {}
-
-    void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        LayerParams lp {};
-        ConcatLayer layer(lp);
-        layer.params = params;
-        layer.type = _type;
-        _validator->parseParams(&layer);
-
-        auto outBlob = *outData.begin();
-        SizeVector outShape = outBlob->getTensorDesc().getDims();
-        auto* outBuffer = outBlob->buffer().as<int8_t*>();
-
-        size_t outerSize = 1;
-        for (int i = 0; i < layer._axis; i++) outerSize *= outShape[i];
-
-        size_t outIdx = 0;
-        for (size_t osIdx = 0; osIdx < outerSize; osIdx++) {
-            for (auto& inBlob : inData) {
-                if (inBlob->getTensorDesc().getPrecision() != outBlob->getTensorDesc().getPrecision())
-                    THROW_IE_EXCEPTION << "Unsupported concat layer with different precisions! Out precision: " +
-                                              std::string(outBlob->getTensorDesc().getPrecision().name());
-                const auto* inBuffer = inBlob->cbuffer().as<int8_t*>();
-                size_t innerSize = inBlob->size() / outerSize;
-
-                for (size_t j = 0; j < innerSize; j++, outIdx++) {
-                    memcpy(outBuffer + outIdx * outBlob->element_size(),
-                           inBuffer + (osIdx * innerSize + j) * inBlob->element_size(), inBlob->element_size());
-                }
-            }
-        }
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
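The copy loop above treats each input as an [outerSize x innerSize] matrix and interleaves the inner blocks, which is all axis-wise concatenation is. A standalone sketch for float data (illustrative, not the IE class; inputs must share all dims except the concat axis):

    #include <cstddef>
    #include <vector>

    // Sketch: concatenate along `axis` by viewing each tensor as
    // [outerSize x innerSize] and interleaving the inner blocks.
    std::vector<float> concatAlongAxis(const std::vector<std::vector<float>>& inputs,
                                       const std::vector<std::vector<size_t>>& shapes,
                                       size_t axis) {
        size_t outerSize = 1;
        for (size_t i = 0; i < axis; i++) outerSize *= shapes[0][i];

        std::vector<float> out;
        for (size_t os = 0; os < outerSize; os++) {
            for (size_t n = 0; n < inputs.size(); n++) {
                const size_t innerSize = inputs[n].size() / outerSize;
                for (size_t j = 0; j < innerSize; j++)
                    out.push_back(inputs[n][os * innerSize + j]);
            }
        }
        return out;  // e.g. shapes {2,1} and {2,2} on axis=1 -> shape {2,3}
    }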
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_const_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_const_const_infer.hpp
deleted file mode 100644 (file)
index 7301576..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Const inference for Const layer
- */
-class ConstConstInfer : public ConstInferImpl {
-public:
-    explicit ConstConstInfer(const std::string& type): ConstInferImpl(type) {}
-
-    void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        auto it = blobs.find("custom");
-        if (it == blobs.end()) THROW_IE_EXCEPTION << "Missed `custom` blob";
-        // TODO: copy instead of putting pointer?
-        outData[0] = (*it).second;
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_const_infer_holder.cpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_const_infer_holder.cpp
deleted file mode 100644 (file)
index 44f03e0..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#ifdef __INTEL_COMPILER
-#pragma warning disable : 2586
-#endif
-
-#include "ie_const_infer_holder.hpp"
-
-#include <list>
-#include <memory>
-#include <string>
-
-#include "ie_add_const_infer.hpp"
-#include "ie_broadcast_const_infer.hpp"
-#include "ie_concat_const_infer.hpp"
-#include "ie_const_const_infer.hpp"
-#include "ie_convert_const_infer.hpp"
-#include "ie_div_const_infer.hpp"
-#include "ie_eltw_const_infer.hpp"
-#include "ie_fill_const_infer.hpp"
-#include "ie_gather_const_infer.hpp"
-#include "ie_in_place_const_infer.hpp"
-#include "ie_mul_const_infer.hpp"
-#include "ie_onehot_const_infer.hpp"
-#include "ie_permute_const_infer.hpp"
-#include "ie_power_const_infer.hpp"
-#include "ie_range_const_infer.hpp"
-#include "ie_reduce_const_infer.hpp"
-#include "ie_reshape_const_infer.hpp"
-#include "ie_shape_const_infer.hpp"
-#include "ie_split_const_infer.hpp"
-#include "ie_strided_slice_const_infer.hpp"
-#include "ie_tile_const_infer.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-ConstInferHolder::ImplsHolder::Ptr ConstInferHolder::GetImplsHolder() {
-    static ImplsHolder::Ptr localHolder;
-    if (localHolder == nullptr) {
-        localHolder = std::make_shared<ImplsHolder>();
-    }
-    return localHolder;
-}
-
-void ConstInferHolder::AddImpl(const std::string& name, const IConstInferImpl::Ptr& impl) {
-    GetImplsHolder()->list[name] = impl;
-}
-
-std::list<std::string> ConstInferHolder::getConstInferTypes() {
-    std::list<std::string> types;
-    auto& factories = GetImplsHolder()->list;
-    for (const auto& factory : factories) {
-        types.push_back(factory.first);
-    }
-    return types;
-}
-
-IConstInferImpl::Ptr ConstInferHolder::getConstInferImpl(const std::string& type) {
-    auto& impls = ConstInferHolder::GetImplsHolder()->list;
-    if (impls.find(type) != impls.end()) {
-        return impls[type];
-    }
-    return nullptr;
-}
-
-REG_CONST_INFER_FOR_TYPE(MulConstInfer, Mul);
-REG_CONST_INFER_FOR_TYPE(AddConstInfer, Add);
-REG_CONST_INFER_FOR_TYPE(DivConstInfer, Div);
-REG_CONST_INFER_FOR_TYPE(EltwiseConstInfer, Eltwise);
-REG_CONST_INFER_FOR_TYPE(ShapeConstInfer, Shape);
-REG_CONST_INFER_FOR_TYPE(ConstConstInfer, Const);
-REG_CONST_INFER_FOR_TYPE(PowerConstInfer, Power);
-REG_CONST_INFER_FOR_TYPE(TileConstInfer, Tile);
-REG_CONST_INFER_FOR_TYPE(ReshapeConstInfer, Reshape);
-REG_CONST_INFER_FOR_TYPE(GatherConstInfer, Gather);
-REG_CONST_INFER_FOR_TYPE(SplitConstInfer, Split);
-REG_CONST_INFER_FOR_TYPE(ConcatConstInfer, Concat);
-REG_CONST_INFER_FOR_TYPE(InPlaceConstInfer, Unsqueeze);
-REG_CONST_INFER_FOR_TYPE(InPlaceConstInfer, Squeeze);
-REG_CONST_INFER_FOR_TYPE(StridedSliceConstInfer, StridedSlice);
-REG_CONST_INFER_FOR_TYPE(FillConstInfer, Fill);
-REG_CONST_INFER_FOR_TYPE(RangeConstInfer, Range);
-REG_CONST_INFER_FOR_TYPE(BroadcastConstInfer, Broadcast);
-REG_CONST_INFER_FOR_TYPE(OneHotConstInfer, OneHot);
-REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceAnd);
-REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceL1);
-REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceL2);
-REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceLogSum);
-REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceLogSumExp);
-REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceMax);
-REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceMean);
-REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceMin);
-REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceOr);
-REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceProd);
-REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceSum);
-REG_CONST_INFER_FOR_TYPE(ReduceConstInfer, ReduceSumSquare);
-REG_CONST_INFER_FOR_TYPE(PermuteConstInfer, Permute);
-REG_CONST_INFER_FOR_TYPE(ConvertConstInfer, Convert);
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_const_infer_holder.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_const_infer_holder.hpp
deleted file mode 100644 (file)
index 555d128..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_iextension.h>
-
-#include <description_buffer.hpp>
-#include <list>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "caseless.hpp"
-#include "ie_const_infer_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Holder of const infer implementations for built-in IE layers that plugins support out-of-the-box
- */
-class ConstInferHolder {
-    struct ImplsHolder {
-        using Ptr = std::shared_ptr<ImplsHolder>;
-        InferenceEngine::details::caseless_map<std::string, IConstInferImpl::Ptr> list;
-    };
-
-public:
-    std::list<std::string> getConstInferTypes();
-
-    IConstInferImpl::Ptr getConstInferImpl(const std::string& type);
-
-    static void AddImpl(const std::string& name, const IConstInferImpl::Ptr& impl);
-
-private:
-    static ImplsHolder::Ptr GetImplsHolder();
-};
-
-template <typename Impl>
-class ImplRegisterBase {
-public:
-    explicit ImplRegisterBase(const std::string& type) {
-        ConstInferHolder::AddImpl(type, std::make_shared<Impl>(type));
-    }
-};
-
-#define REG_CONST_INFER_FOR_TYPE(__prim, __type) static ImplRegisterBase<__prim> __ci_reg__##__type(#__type)
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_const_infer_impl.cpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_const_infer_impl.cpp
deleted file mode 100644 (file)
index 4107c21..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "ie_const_infer_impl.hpp"
-
-#include <map>
-#include <string>
-#include <vector>
-
-using namespace InferenceEngine;
-using namespace ShapeInfer;
-
-void ConstInferImpl::infer(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                           const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) {
-    std::string errorPrefix = "Ref infer error for Layer with `" + _type + "` type: ";
-    if (outData.empty()) THROW_IE_EXCEPTION << errorPrefix + "output data is empty";
-    for (auto const& data : outData) {
-        if (data->buffer() == nullptr) THROW_IE_EXCEPTION << errorPrefix + "output data is not allocated";
-    }
-    // TODO: check for direct (NCHW, NCH, NC) and FP32
-    inferImpl(inData, params, blobs, outData);
-}
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_const_infer_impl.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_const_infer_impl.hpp
deleted file mode 100644 (file)
index e7c2844..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_layer_validators.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- * @experimental
- * @class IConstInferImpl
- * @brief This class provides an interface for a layer's implementation to propagate constant values
- */
-class IConstInferImpl {
-public:
-    using Ptr = std::shared_ptr<IConstInferImpl>;
-
-    virtual ~IConstInferImpl() = default;
-
-    /**
-     * @brief Runs const inference; all shapes are valid and all blobs are allocated at this point
-     *
-     */
-    virtual void infer(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                       const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) = 0;
-};
-
-class ConstInferImpl : public IConstInferImpl {
-public:
-    explicit ConstInferImpl(const std::string& type): _type(type) {
-        _validator = details::LayerValidators::getInstance()->getValidator(_type);
-        if (!_validator)
-            THROW_IE_EXCEPTION << "Internal error: failed to find validator for layer with type: " << _type;
-    }
-
-    virtual void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                           const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) = 0;
-
-    void infer(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-               const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override;
-
-protected:
-    std::string _type;
-    // used to parse the descendant CNNLayer's params from the map<string, string>
-    details::LayerValidator::Ptr _validator;
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_convert_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_convert_const_infer.hpp
deleted file mode 100644 (file)
index e0eb229..0000000
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-#include <ie_memcpy.h>
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_const_infer_impl.hpp"
-#include "ie_parallel.hpp"
-#include "ie_precision.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Const inference for Convert layer
- */
-class ConvertConstInfer : public ConstInferImpl {
-    template <typename src_d, typename dst_d>
-    void exec_cast(const Blob::CPtr& inData, Blob::Ptr& outData) {
-        const src_d* src_data =
-            inData->cbuffer().as<src_d*>() + inData->getTensorDesc().getBlockingDesc().getOffsetPadding();
-        dst_d* dst_data =
-            outData->buffer().as<dst_d*>() + outData->getTensorDesc().getBlockingDesc().getOffsetPadding();
-        if (inData->size() != outData->size())
-            THROW_IE_EXCEPTION << " Convert constant inference error: Input and output buffers have different sizes! "
-                                  "Input buffer size = `"
-                               << inData->size() << "` output buffer size = `" << outData->size() << "`";
-        parallel_for(inData->size(), [&](size_t i) {
-            dst_data[i] = static_cast<dst_d>(src_data[i]);
-        });
-    }
-
-    template<typename dst_d>
-    void exec_from_fp16_cast(const Blob::CPtr &inData, Blob::Ptr &outData) {
-        const ie_fp16 *src_data =
-                inData->cbuffer().as<ie_fp16 *>() + inData->getTensorDesc().getBlockingDesc().getOffsetPadding();
-        dst_d *dst_data =
-                outData->buffer().as<dst_d *>() + outData->getTensorDesc().getBlockingDesc().getOffsetPadding();
-        if (inData->size() != outData->size())
-            THROW_IE_EXCEPTION << " Convert constant inference error: Input and output buffers have different sizes! "
-                                  "Input buffer size = `"
-                               << inData->size() << "` output buffer size = `" << outData->size() << "`";
-        parallel_for(inData->size(), [&](size_t i) {
-            dst_data[i] = static_cast<dst_d>(PrecisionUtils::f16tof32(src_data[i]));
-        });
-    }
-
-    template<typename src_d>
-    void exec_to_fp16_cast(const Blob::CPtr &inData, Blob::Ptr &outData) {
-        const src_d* src_data =
-                inData->cbuffer().as<src_d*>() + inData->getTensorDesc().getBlockingDesc().getOffsetPadding();
-        ie_fp16* dst_data =
-                outData->buffer().as<ie_fp16*>() + outData->getTensorDesc().getBlockingDesc().getOffsetPadding();
-        if (inData->size() != outData->size())
-            THROW_IE_EXCEPTION << " Convert constant inference error: Input and output buffers have different sizes! "
-                                  "Input buffer size = `"
-                               << inData->size() << "` output buffer size = `" << outData->size() << "`";
-        parallel_for(inData->size(), [&](size_t i) {
-            dst_data[i] = PrecisionUtils::f32tof16(static_cast<float>(src_data[i]));
-        });
-    }
-
-public:
-    explicit ConvertConstInfer(const std::string& type): ConstInferImpl(type) {}
-
-    void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        LayerParams lp {};
-        CNNLayer layer(lp);
-        layer.params = params;
-        _validator->parseParams(&layer);
-        if (inData.size() != 1)
-            THROW_IE_EXCEPTION << " Convert constant inference error: incorrect number of inputs! Expected 1, got "
-                               << inData.size();
-        if (outData.size() != 1)
-            THROW_IE_EXCEPTION << " Convert constant inference error: incorrect number of outputs! Expected 1, got "
-                               << outData.size();
-        if (layer.params["precision"] != outData[0]->getTensorDesc().getPrecision().name())
-            THROW_IE_EXCEPTION << " Convert constant inference error: layer `precision` parameter and actual output "
-                                  "data precision mismatch! "
-                                  "`precision`=\""
-                               << layer.params["precision"] << "\", "
-                               << "`output_data_precision`=\"" << outData[0]->getTensorDesc().getPrecision() << "\"";
-
-        auto compare =
-            getPrecisionMask(inData[0]->getTensorDesc().getPrecision(), outData[0]->getTensorDesc().getPrecision());
-        switch (compare) {
-        case getPrecisionMask(Precision::I32, Precision::I32):
-            exec_cast<PrecisionTrait<Precision::I32>::value_type, PrecisionTrait<Precision::I32>::value_type>(
-                inData[0], outData[0]);
-            break;
-        case getPrecisionMask(Precision::I64, Precision::I64):
-            exec_cast<PrecisionTrait<Precision::I64>::value_type, PrecisionTrait<Precision::I64>::value_type>(
-                inData[0], outData[0]);
-            break;
-        case getPrecisionMask(Precision::U64, Precision::U64):
-            exec_cast<PrecisionTrait<Precision::U64>::value_type, PrecisionTrait<Precision::U64>::value_type>(
-                inData[0], outData[0]);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::FP32):
-            exec_cast<PrecisionTrait<Precision::FP32>::value_type, PrecisionTrait<Precision::FP32>::value_type>(
-                inData[0], outData[0]);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::I64):
-            exec_cast<PrecisionTrait<Precision::I32>::value_type, PrecisionTrait<Precision::I64>::value_type>(
-                inData[0], outData[0]);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::U64):
-            exec_cast<PrecisionTrait<Precision::I32>::value_type, PrecisionTrait<Precision::U64>::value_type>(
-                inData[0], outData[0]);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::FP32):
-            exec_cast<PrecisionTrait<Precision::I32>::value_type, PrecisionTrait<Precision::FP32>::value_type>(
-                inData[0], outData[0]);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::I32):
-            exec_cast<PrecisionTrait<Precision::FP32>::value_type, PrecisionTrait<Precision::I32>::value_type>(
-                inData[0], outData[0]);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::I64):
-            exec_cast<PrecisionTrait<Precision::FP32>::value_type, PrecisionTrait<Precision::I64>::value_type>(
-                inData[0], outData[0]);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::U64):
-            exec_cast<PrecisionTrait<Precision::FP32>::value_type, PrecisionTrait<Precision::U64>::value_type>(
-                inData[0], outData[0]);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::U8):
-            exec_cast<PrecisionTrait<Precision::FP32>::value_type, PrecisionTrait<Precision::U8>::value_type>(
-                    inData[0], outData[0]);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::BOOL):
-            exec_cast<PrecisionTrait<Precision::FP32>::value_type, PrecisionTrait<Precision::BOOL>::value_type>(
-                    inData[0], outData[0]);
-            break;
-        case getPrecisionMask(Precision::BOOL, Precision::BOOL):
-            exec_cast<PrecisionTrait<Precision::BOOL>::value_type, PrecisionTrait<Precision::BOOL>::value_type>(
-                    inData[0], outData[0]);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::FP32):
-            exec_from_fp16_cast<PrecisionTrait<Precision::FP32>::value_type>(inData[0], outData[0]);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::I32):
-            exec_from_fp16_cast<PrecisionTrait<Precision::I32>::value_type>(inData[0], outData[0]);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::I64):
-            exec_from_fp16_cast<PrecisionTrait<Precision::I64>::value_type>(inData[0], outData[0]);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::U64):
-            exec_from_fp16_cast<PrecisionTrait<Precision::U64>::value_type>(inData[0], outData[0]);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::U8):
-            exec_from_fp16_cast<PrecisionTrait<Precision::U8>::value_type>(inData[0], outData[0]);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::BOOL):
-            exec_from_fp16_cast<PrecisionTrait<Precision::BOOL>::value_type>(inData[0], outData[0]);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::FP16):
-            exec_to_fp16_cast<PrecisionTrait<Precision::FP32>::value_type>(inData[0], outData[0]);
-            break;
-        default:
-            THROW_IE_EXCEPTION << " Convert constant inference error: Unsupported precision configuration! "
-                               << " Input precision: " << inData[0]->getTensorDesc().getPrecision()
-                               << ", output precision: " << outData[0]->getTensorDesc().getPrecision();
-        }
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
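
The removed Convert implementation above dispatches on an (input, output) precision pair packed into one switch key by getPrecisionMask. A minimal standalone sketch of that idiom, with a made-up Prec enum and mask() helper standing in for the InferenceEngine definitions:

#include <cstdint>
#include <iostream>

enum class Prec : uint8_t { U8 = 1, I32 = 2, FP16 = 3, FP32 = 4 };

// Pack two precisions into one integer so a flat switch can enumerate pairs.
constexpr uint16_t mask(Prec in, Prec out) {
    return static_cast<uint16_t>(in) << 8 | static_cast<uint16_t>(out);
}

int main() {
    Prec in = Prec::FP16, out = Prec::FP32;
    switch (mask(in, out)) {
    case mask(Prec::FP16, Prec::FP32):  // constexpr call: a compile-time case label
        std::cout << "dispatch fp16 -> fp32 cast\n";
        break;
    default:
        std::cout << "unsupported combination\n";
    }
}

Because mask() is constexpr, every supported pair stays a compile-time case label, which is what let the removed code enumerate all of its conversions in a single flat switch.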
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_div_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_div_const_infer.hpp
deleted file mode 100644 (file)
index 0796486..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Const inference for the Div layer
- */
-class DivConstInfer : public ConstInferImpl {
-public:
-    explicit DivConstInfer(const std::string& type): ConstInferImpl(type) {}
-
-    void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        size_t numInputs = inData.size();
-        if (inData.size() != 2)
-            THROW_IE_EXCEPTION << "Unsupported number of inputs: " << numInputs << ". Only 2 inputs are supported";
-        auto* firstBlobBuffer = inData[0]->cbuffer().as<float*>();
-        auto* secondBlobBuffer = inData[1]->cbuffer().as<float*>();
-
-        if (!firstBlobBuffer || !secondBlobBuffer) {
-            THROW_IE_EXCEPTION << "empty input data";
-        }
-        auto outBlob = *outData.begin();
-        auto* outBuffer = outBlob->buffer().as<float*>();
-        if (!outBuffer) THROW_IE_EXCEPTION << "empty output data";
-        if (inData[0]->size() != inData[1]->size()) {
-            THROW_IE_EXCEPTION << "inputs with different shapes are not supported";
-        }
-        for (size_t i = 0; i < outBlob->size(); i++) {
-            if (secondBlobBuffer[i] == 0) THROW_IE_EXCEPTION << "division by zero";
-            outBuffer[i] = firstBlobBuffer[i] / secondBlobBuffer[i];
-        }
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
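
For reference, the constant folding the removed DivConstInfer performed amounts to an element-wise division with a shape check and a zero check. A self-contained sketch on plain std::vector<float>, standing in for IE blobs:

#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <vector>

// Element-wise Div folding, mirroring the checks above: equal sizes
// required, division by zero rejected.
std::vector<float> divFold(const std::vector<float>& a, const std::vector<float>& b) {
    if (a.size() != b.size()) throw std::runtime_error("inputs with different shapes are not supported");
    std::vector<float> out(a.size());
    for (std::size_t i = 0; i < a.size(); ++i) {
        if (b[i] == 0.0f) throw std::runtime_error("division by zero");
        out[i] = a[i] / b[i];
    }
    return out;
}

int main() {
    for (float v : divFold({6.f, 9.f}, {2.f, 3.f})) std::cout << v << ' ';  // prints: 3 3
}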
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_eltw_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_eltw_const_infer.hpp
deleted file mode 100644 (file)
index a067719..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_add_const_infer.hpp"
-#include "ie_div_const_infer.hpp"
-#include "ie_mul_const_infer.hpp"
-#include "ie_pow_const_infer.hpp"
-#include "ie_sub_const_infer.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- * @brief Eltwise wrapper on top of Mul/Add/Div operation
- */
-class EltwiseConstInfer : public ConstInferImpl {
-public:
-    explicit EltwiseConstInfer(const std::string& type): ConstInferImpl(type) {
-        _sum = std::shared_ptr<ConstInferImpl>(new AddConstInfer(_type));
-        _sub = std::shared_ptr<ConstInferImpl>(new SubConstInfer(_type));
-        _mul = std::shared_ptr<ConstInferImpl>(new MulConstInfer(_type));
-        _div = std::shared_ptr<ConstInferImpl>(new DivConstInfer(_type));
-        _pow = std::shared_ptr<ConstInferImpl>(new PowConstInfer(_type));
-    }
-
-    void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        auto found = params.find("operation");
-        IE_ASSERT(found != params.end()) << "Eltwise layer has no attribute operation.";
-        std::string operation = found->second;
-
-        std::shared_ptr<ConstInferImpl> actual;
-        if (operation == "sum")
-            actual = _sum;
-        else if (operation == "sub")
-            actual = _sub;
-        else if (operation == "mul")
-            actual = _mul;
-        else if (operation == "div")
-            actual = _div;
-        else if (operation == "pow")
-            actual = _pow;
-        else
-            THROW_IE_EXCEPTION << "Unsupported eltwise operation type " << operation
-                               << ". "
-                                  "IE cannot propagate constants through this layer.";
-
-        actual->inferImpl(inData, params, blobs, outData);
-    }
-
-private:
-    std::shared_ptr<ConstInferImpl> _mul, _div, _sum, _sub, _pow;
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
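
The removed wrapper selects a delegate by the `operation` string. The same dispatch can be sketched with a lookup table; the lambdas below are illustrative handlers, not the real ConstInferImpl delegates:

#include <functional>
#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

using Handler = std::function<float(float, float)>;

// Dispatch by operation name, as EltwiseConstInfer did with its if/else chain.
float applyEltwise(const std::string& operation, float a, float b) {
    static const std::map<std::string, Handler> table = {
        {"sum", [](float x, float y) { return x + y; }},
        {"sub", [](float x, float y) { return x - y; }},
        {"mul", [](float x, float y) { return x * y; }},
        {"div", [](float x, float y) { return x / y; }},
    };
    auto found = table.find(operation);
    if (found == table.end()) throw std::runtime_error("Unsupported eltwise operation type " + operation);
    return found->second(a, b);
}

int main() { std::cout << applyEltwise("mul", 3.f, 4.f) << '\n'; }  // 12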
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_fill_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_fill_const_infer.hpp
deleted file mode 100644 (file)
index 0c95c7e..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-#include <ie_memcpy.h>
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_const_infer_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Const inference for Fill layer
- */
-class FillConstInfer : public ConstInferImpl {
-public:
-    explicit FillConstInfer(const std::string& type): ConstInferImpl(type) {}
-
-    void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        const size_t FILL_DIMS = 0;
-        const size_t FILL_VALUE = 1;
-        if (inData.empty() || outData.empty()) THROW_IE_EXCEPTION << " Incorrect number of input/output edges!";
-
-        if (inData.size() != 2) THROW_IE_EXCEPTION << " Incorrect number of input edges!";
-
-        SizeVector dims = inData[FILL_DIMS]->getTensorDesc().getDims();
-        if (dims.size() > 1) THROW_IE_EXCEPTION << " Fill dimensions vector should have 1 dimension";
-
-        if (inData[FILL_DIMS]->getTensorDesc().getPrecision() != Precision::I32)
-            THROW_IE_EXCEPTION << " Fill dimensions vector should be I32!";
-
-        SizeVector value_dims = inData[FILL_VALUE]->getTensorDesc().getDims();
-        if (value_dims.size() > 1) THROW_IE_EXCEPTION << " Value scalar should have 1 dimension";
-
-        if (!(inData[FILL_VALUE]->getTensorDesc().getPrecision() == Precision::I32 &&
-              outData[0]->getTensorDesc().getPrecision() == Precision::I32) &&
-            !(inData[FILL_VALUE]->getTensorDesc().getPrecision() == Precision::FP32 &&
-              outData[0]->getTensorDesc().getPrecision() == Precision::FP32)) {
-            THROW_IE_EXCEPTION << " 'Value' input scalars and output tensor should have same precision and only FP32 "
-                                  "and I32 are supported!";
-        }
-
-        int32_t* fill_dims = inData[FILL_DIMS]->cbuffer().as<int32_t*>() +
-                             inData[FILL_DIMS]->getTensorDesc().getBlockingDesc().getOffsetPadding();
-        size_t fill_size = inData[FILL_DIMS]->getTensorDesc().getDims()[0];
-        SizeVector dst_dims = outData[0]->getTensorDesc().getDims();
-
-        if (dst_dims.size() != fill_size) {
-            THROW_IE_EXCEPTION << "Output tensor dimension mismatch";
-        }
-
-        size_t work_amount_dst = 1;
-        for (size_t i = 0; i < dst_dims.size(); i++) {
-            work_amount_dst *= fill_dims[i];
-            if (static_cast<int>(dst_dims[i]) != fill_dims[i]) {
-                THROW_IE_EXCEPTION << "Output tensor dimension size mismatch";
-            }
-        }
-
-        switch (outData[0]->getTensorDesc().getPrecision()) {
-        case Precision::FP32: {
-            float* dst_data =
-                outData[0]->buffer().as<float*>() + outData[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
-            float value = (inData[FILL_VALUE]->cbuffer().as<float*>() +
-                           inData[FILL_VALUE]->getTensorDesc().getBlockingDesc().getOffsetPadding())[0];
-
-            parallel_nt(0, [&](const int ithr, const int nthr) {
-                size_t start = 0, end = 0;
-                splitter(work_amount_dst, nthr, ithr, start, end);
-                std::fill_n(dst_data + start, end - start, value);
-            });
-        } break;
-        case Precision::I32: {
-            int32_t* dst_data =
-                outData[0]->buffer().as<int32_t*>() + outData[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
-            int32_t value = (inData[FILL_VALUE]->cbuffer().as<int32_t*>() +
-                             inData[FILL_VALUE]->getTensorDesc().getBlockingDesc().getOffsetPadding())[0];
-
-            parallel_nt(0, [&](const int ithr, const int nthr) {
-                size_t start = 0, end = 0;
-                splitter(work_amount_dst, nthr, ithr, start, end);
-                std::fill_n(dst_data + start, end - start, value);
-            });
-        } break;
-        default:
-            THROW_IE_EXCEPTION << "Incorrect output precision. Only FP32 and I32 are supported!";
-        }
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
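
The removed Fill implementation splits work_amount_dst across threads with the ie_parallel splitter. A sketch of that partitioning arithmetic using std::thread; splitRange reimplements the chunking and is not the actual splitter:

#include <algorithm>
#include <cstddef>
#include <thread>
#include <vector>

// Thread ithr of nthr gets a contiguous [start, end) chunk; the first
// (total % nthr) threads get one extra element.
void splitRange(std::size_t total, int nthr, int ithr, std::size_t& start, std::size_t& end) {
    std::size_t chunk = total / nthr, rem = total % nthr;
    start = ithr * chunk + std::min<std::size_t>(ithr, rem);
    end = start + chunk + (static_cast<std::size_t>(ithr) < rem ? 1 : 0);
}

int main() {
    std::vector<float> dst(1000);
    const int nthr = 4;
    std::vector<std::thread> pool;
    for (int ithr = 0; ithr < nthr; ++ithr)
        pool.emplace_back([&, ithr] {
            std::size_t start, end;
            splitRange(dst.size(), nthr, ithr, start, end);
            std::fill(dst.begin() + start, dst.begin() + end, 42.0f);  // disjoint chunks, no race
        });
    for (auto& t : pool) t.join();
}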
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_gather_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_gather_const_infer.hpp
deleted file mode 100644 (file)
index 7767bbb..0000000
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-
-#include <cmath>
-#include <ie_algorithm.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_const_infer_impl.hpp"
-#include "ie_parallel.hpp"
-#include "precision_utils.h"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-struct GatherParams {
-    size_t dataLength = 1;
-    int axis = 0;
-    size_t indexRange = 0;
-    size_t numDictionaries = 1;
-};
-
-/**
- *@brief Implementation of Const inference for Gather layer
- */
-class GatherConstInfer : public ConstInferImpl {
-public:
-    explicit GatherConstInfer(const std::string& type): ConstInferImpl(type) {}
-
-    struct f32toUi32 {
-        inline unsigned int operator()(const float value) {
-            return static_cast<unsigned int>(value);
-        }
-    };
-
-    struct f16toUi32 {
-        inline unsigned int operator()(const ie_fp16 value) {
-            return static_cast<unsigned int>(PrecisionUtils::f16tof32(value));
-        }
-    };
-
-    struct i32toUi32 {
-        inline unsigned int operator()(const int32_t value) {
-            return static_cast<unsigned int>(value);
-        }
-    };
-
-    template <typename index_t, class Conversion>
-    void gather(const Blob::CPtr& indexes, const Blob::CPtr& dictionary, Blob::Ptr output, const GatherParams& p) {
-        size_t src_indexSize = indexes->size();
-        const index_t* src_index =
-            indexes->cbuffer().as<const index_t*>() + indexes->getTensorDesc().getBlockingDesc().getOffsetPadding();
-        const uint8_t* src_dataDict = dictionary->cbuffer().as<const uint8_t*>() +
-                                      dictionary->getTensorDesc().getBlockingDesc().getOffsetPadding();
-        uint8_t* dst_data =
-            output->buffer().as<uint8_t*>() + output->getTensorDesc().getBlockingDesc().getOffsetPadding();
-
-        parallel_for(src_indexSize, [&](size_t i) {
-            unsigned int idx = Conversion()(src_index[i]);
-
-            //  Index clipping
-            if (idx < p.indexRange) {
-                //  Copying data to destination from Dictionary
-                for (size_t j = 0; j < p.numDictionaries; j++) {
-                    ie_memcpy(&dst_data[p.dataLength * (i + j * src_indexSize)],
-                              output->byteSize() - (p.dataLength * (i + j * src_indexSize)),
-                              &src_dataDict[p.dataLength * (idx + j * p.indexRange)], p.dataLength);
-                }
-            } else {
-                for (size_t j = 0; j < p.numDictionaries; j++) {
-                    memset(&dst_data[p.dataLength * (i + j * src_indexSize)], 0, p.dataLength);
-                }
-            }
-        });
-    }
-
-    void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        LayerParams lp {};
-        CNNLayer layer(lp);
-        layer.params = params;
-
-        const size_t GATHER_DICTIONARY = 0;
-        const size_t GATHER_INDEXES = 1;
-
-        if (inData.size() != 2 || outData.empty()) THROW_IE_EXCEPTION << " Incorrect number of input/output edges!";
-
-        Precision inIdxPrecision = inData[GATHER_INDEXES]->getTensorDesc().getPrecision();
-        if (inIdxPrecision != Precision::FP32 && inIdxPrecision != Precision::FP16 && inIdxPrecision != Precision::I32)
-            THROW_IE_EXCEPTION << " Incorrect input precision. Only FP32|FP16|I32 are supported!";
-
-        Precision inDataPrecision = inData[GATHER_DICTIONARY]->getTensorDesc().getPrecision();
-        if (inDataPrecision != Precision::FP32 && inDataPrecision != Precision::FP16 &&
-            inDataPrecision != Precision::I32)
-            THROW_IE_EXCEPTION << " Incorrect input precision. Only FP32|FP16|I32 are supported!";
-
-        //  Remove redundant dimensions
-        const SizeVector& dictionary_dims = inData[GATHER_DICTIONARY]->getTensorDesc().getDims();
-        if (dictionary_dims.size() == 0) THROW_IE_EXCEPTION << " Incorrect input parameters dimension!";
-
-        GatherParams p;
-        p.axis = static_cast<int>(layer.GetParamAsInt("axis"));
-        // Dictionary must be at least rank axis + 1
-        if (!(-static_cast<int>(dictionary_dims.size()) <= p.axis && p.axis < static_cast<int>(dictionary_dims.size())))
-            THROW_IE_EXCEPTION << " Incorrect input parameters dimensions and axis number!";
-
-        if (p.axis < 0) p.axis += dictionary_dims.size();
-
-        //  Find number of dictionaries, index range and data length
-        for (size_t i = 0; i < p.axis; i++) p.numDictionaries *= dictionary_dims[i];
-        p.indexRange = dictionary_dims[p.axis];
-        for (size_t i = p.axis + 1; i < dictionary_dims.size(); i++) p.dataLength *= dictionary_dims[i];
-
-        if (p.dataLength == 0) THROW_IE_EXCEPTION << " Incorrect input parameters dimension!";
-
-        p.dataLength *= inData[GATHER_DICTIONARY]->getTensorDesc().getPrecision().size();
-
-        switch (inData[GATHER_INDEXES]->getTensorDesc().getPrecision()) {
-        case Precision::FP32:
-            gather<float, f32toUi32>(inData[GATHER_INDEXES], inData[GATHER_DICTIONARY], outData[0], p);
-            break;
-        case Precision::FP16:
-            gather<ie_fp16, f16toUi32>(inData[GATHER_INDEXES], inData[GATHER_DICTIONARY], outData[0], p);
-            break;
-        case Precision::I32:
-            gather<int32_t, i32toUi32>(inData[GATHER_INDEXES], inData[GATHER_DICTIONARY], outData[0], p);
-            break;
-        default:
-            THROW_IE_EXCEPTION << " Unsupported precision!";
-        }
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
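
The removed gather() treats the dictionary as [numDictionaries][indexRange][dataLength] bytes and copies one dataLength-sized slice per index, zero-filling when the index is out of range. A scalar sketch of the same addressing on floats:

#include <cstddef>
#include <cstring>
#include <iostream>
#include <vector>

void gatherRows(const std::vector<float>& dict, const std::vector<int>& idx,
                std::vector<float>& out, std::size_t numDictionaries,
                std::size_t indexRange, std::size_t dataLength) {
    for (std::size_t i = 0; i < idx.size(); ++i) {
        for (std::size_t j = 0; j < numDictionaries; ++j) {
            float* dst = &out[dataLength * (i + j * idx.size())];
            if (idx[i] >= 0 && static_cast<std::size_t>(idx[i]) < indexRange)
                std::memcpy(dst, &dict[dataLength * (idx[i] + j * indexRange)],
                            dataLength * sizeof(float));
            else
                std::memset(dst, 0, dataLength * sizeof(float));  // index clipping
        }
    }
}

int main() {
    std::vector<float> dict = {1, 2, 3, 4, 5, 6};  // 3 rows x 2 cols, gather on axis 0
    std::vector<int> idx = {2, 0};
    std::vector<float> out(idx.size() * 2);
    gatherRows(dict, idx, out, 1, 3, 2);
    for (float v : out) std::cout << v << ' ';     // 5 6 1 2
}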
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_in_place_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_in_place_const_infer.hpp
deleted file mode 100644 (file)
index 51067a6..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Const inference for in-place (pass-through) layers
- */
-class InPlaceConstInfer : public ConstInferImpl {
-public:
-    explicit InPlaceConstInfer(const std::string& type): ConstInferImpl(type) {}
-
-    void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        auto inBlob = inData[0];
-        auto outBlob = outData[0];
-        auto* inBuffer = inBlob->cbuffer().as<uint8_t*>();
-        auto* outBuffer = outBlob->buffer().as<uint8_t*>();
-        ie_memcpy(outBuffer, outData[0]->byteSize(), inBuffer, inBlob->byteSize());
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_mul_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_mul_const_infer.hpp
deleted file mode 100644 (file)
index 7664a98..0000000
+++ /dev/null
@@ -1,257 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-#include <precision_utils.h>
-
-#include <ie_precision.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "broadcast_offset.hpp"
-#include "ie_const_infer_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Const inference for the Mul layer
- *
- * Table of output data type value with given input parameters
- *
- *
- *              U8       I32        I64        FP16        FP32
- *     =============================================================
- *     U8   ==  U8       I32        I64        FP16        FP32
- *          ==
- *     I32  ==  I32      I32        I64        FP32        FP32
- *          ==
- *     I64  ==  I64      I64        I64        FP32        FP32
- *          ==
- *     FP16 ==  FP16     FP32       FP32       FP16        FP32
- *          ==
- *     FP32 ==  FP32     FP32       FP32       FP32        FP32
- *
- *     There is a special case for FP16 precision: input data is converted to FP32 and multiplied, and the
- *     result is converted back to FP16 if both inputs are FP16, or if one is FP16 and the other is U8.
- */
-
-class MulConstInfer : public ConstInferImpl {
-public:
-    explicit MulConstInfer(const std::string& type): ConstInferImpl(type) {}
-
-    struct fp16tofp32 {
-        inline float operator()(ie_fp16 value) {
-            return static_cast<float>(PrecisionUtils::f16tof32(value));
-        }
-    };
-
-    struct fp32tofp16 {
-        inline ie_fp16 operator()(float value) {
-            return PrecisionUtils::f32tof16(value);
-        }
-    };
-
-    template <typename dataType>
-    struct noConversion {
-        inline dataType operator()(dataType value) {
-            return value;
-        }
-    };
-
-    template <typename inDatatype1, typename inDatatype2, typename outDatatype, class ConversionInData1,
-              class ConversionInData2, class ConversionOutData>
-    void mul(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-             const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) {
-        auto* firstBlobBuffer = inData[0]->cbuffer().as<inDatatype1*>();
-        auto* secondBlobBuffer = inData[1]->cbuffer().as<inDatatype2*>();
-        if (!firstBlobBuffer || !secondBlobBuffer) {
-            THROW_IE_EXCEPTION << "empty input data";
-        }
-
-        auto outBlob = *outData.begin();
-        auto* outBuffer = outBlob->buffer().as<outDatatype*>();
-        if (!outBuffer) THROW_IE_EXCEPTION << "empty output data";
-
-        BroadcastOffset outOff(outBlob->getTensorDesc().getDims(), outBlob->getTensorDesc().getDims());
-        BroadcastOffset inOff1(inData[0]->getTensorDesc().getDims(), outBlob->getTensorDesc().getDims());
-        BroadcastOffset inOff2(inData[1]->getTensorDesc().getDims(), outBlob->getTensorDesc().getDims());
-
-        for (size_t i = 0; i < outBlob->size(); i++) {
-            SizeVector offsetDims = outOff.offset_dims(i);
-            outBuffer[outOff.offset(offsetDims)] =
-                ConversionOutData()(ConversionInData1()(firstBlobBuffer[inOff1.offset(offsetDims)]) *
-                                    ConversionInData2()(secondBlobBuffer[inOff2.offset(offsetDims)]));
-        }
-    }
-
-    void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        size_t numInputs = inData.size();
-        if (inData.size() != 2)
-            THROW_IE_EXCEPTION << "Unsupported number of inputs: " << numInputs << ". 2 inputs is supported";
-
-        auto compare =
-            getPrecisionMask(inData[0]->getTensorDesc().getPrecision(), inData[1]->getTensorDesc().getPrecision(),
-                             outData[0]->getTensorDesc().getPrecision());
-
-        switch (compare) {
-        case getPrecisionMask(Precision::U8, Precision::U8, Precision::U8):
-            mul<uint8_t, uint8_t, uint8_t, noConversion<uint8_t>, noConversion<uint8_t>, noConversion<uint8_t>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::U8, Precision::I32, Precision::I32):
-            mul<uint8_t, int, int, noConversion<uint8_t>, noConversion<int>, noConversion<int>>(inData, params, blobs,
-                                                                                                outData);
-            break;
-        case getPrecisionMask(Precision::U8, Precision::I64, Precision::I64):
-            mul<uint8_t, long long int, long long int, noConversion<uint8_t>, noConversion<long long int>,
-                noConversion<long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::U8, Precision::U64, Precision::U64):
-            mul<uint8_t, unsigned long long int, unsigned long long int, noConversion<uint8_t>,
-                noConversion<unsigned long long int>, noConversion<unsigned long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::U8, Precision::FP16, Precision::FP16):
-            mul<uint8_t, ie_fp16, ie_fp16, noConversion<uint8_t>, fp16tofp32, fp32tofp16>(inData, params, blobs,
-                                                                                          outData);
-            break;
-        case getPrecisionMask(Precision::U8, Precision::FP32, Precision::FP32):
-            mul<uint8_t, float, float, noConversion<uint8_t>, noConversion<float>, noConversion<float>>(inData, params,
-                                                                                                        blobs, outData);
-            break;
-
-        case getPrecisionMask(Precision::I32, Precision::U8, Precision::I32):
-            mul<int, uint8_t, int, noConversion<int>, noConversion<uint8_t>, noConversion<int>>(inData, params, blobs,
-                                                                                                outData);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::I32, Precision::I32):
-            mul<int, int, int, noConversion<int>, noConversion<int>, noConversion<int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::I64, Precision::I64):
-            mul<int, long long int, long long int, noConversion<int>, noConversion<long long int>,
-                noConversion<long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::U64, Precision::U64):
-            mul<int, unsigned long long int, unsigned long long int, noConversion<int>,
-                noConversion<unsigned long long int>, noConversion<unsigned long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::FP16, Precision::FP32):
-            mul<int, ie_fp16, float, noConversion<int>, fp16tofp32, noConversion<float>>(inData, params, blobs,
-                                                                                         outData);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::FP32, Precision::FP32):
-            mul<int, float, float, noConversion<int>, noConversion<float>, noConversion<float>>(inData, params, blobs,
-                                                                                                outData);
-            break;
-
-        case getPrecisionMask(Precision::I64, Precision::U8, Precision::I64):
-            mul<long long int, uint8_t, long long int, noConversion<long long int>, noConversion<uint8_t>,
-                noConversion<long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I64, Precision::I32, Precision::I64):
-            mul<long long int, int, long long int, noConversion<long long int>, noConversion<int>,
-                noConversion<long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I64, Precision::I64, Precision::I64):
-            mul<long long int, long long int, long long int, noConversion<long long int>, noConversion<long long int>,
-                noConversion<long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I64, Precision::FP16, Precision::FP32):
-            mul<long long int, ie_fp16, float, noConversion<long long int>, fp16tofp32, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I64, Precision::FP32, Precision::FP32):
-            mul<long long int, float, float, noConversion<long long int>, noConversion<float>, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-
-        case getPrecisionMask(Precision::U64, Precision::U8, Precision::U64):
-            mul<unsigned long long int, uint8_t, unsigned long long int, noConversion<unsigned long long int>,
-                noConversion<uint8_t>, noConversion<unsigned long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::U64, Precision::I32, Precision::U64):
-            mul<unsigned long long int, int, unsigned long long int, noConversion<unsigned long long int>,
-                noConversion<int>, noConversion<unsigned long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::U64, Precision::U64, Precision::U64):
-            mul<unsigned long long int, unsigned long long int, unsigned long long int,
-                noConversion<unsigned long long int>, noConversion<unsigned long long int>,
-                noConversion<unsigned long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::U64, Precision::FP16, Precision::FP32):
-            mul<unsigned long long int, ie_fp16, float, noConversion<unsigned long long int>, fp16tofp32, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::U64, Precision::FP32, Precision::FP32):
-            mul<unsigned long long int, float, float, noConversion<unsigned long long int>, noConversion<float>, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-
-        case getPrecisionMask(Precision::FP16, Precision::U8, Precision::FP16):
-            mul<ie_fp16, uint8_t, ie_fp16, fp16tofp32, noConversion<uint8_t>, fp32tofp16>(inData, params, blobs,
-                                                                                          outData);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::I32, Precision::FP32):
-            mul<ie_fp16, int, float, fp16tofp32, noConversion<int>, noConversion<float>>(inData, params, blobs,
-                                                                                         outData);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::I64, Precision::FP32):
-            mul<ie_fp16, long long int, float, fp16tofp32, noConversion<long long int>, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::U64, Precision::FP32):
-            mul<ie_fp16, unsigned long long int, float, fp16tofp32, noConversion<unsigned long long int>, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::FP16, Precision::FP16):
-            mul<ie_fp16, ie_fp16, ie_fp16, fp16tofp32, fp16tofp32, fp32tofp16>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::FP32, Precision::FP32):
-            mul<ie_fp16, float, float, fp16tofp32, noConversion<float>, noConversion<float>>(inData, params, blobs,
-                                                                                             outData);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::FP32, Precision::FP16):
-            mul<ie_fp16, float, ie_fp16, fp16tofp32, noConversion<float>, fp32tofp16>(inData, params, blobs, outData);
-            break;
-
-        case getPrecisionMask(Precision::FP32, Precision::U8, Precision::FP32):
-            mul<float, uint8_t, float, noConversion<float>, noConversion<uint8_t>, noConversion<float>>(inData, params,
-                                                                                                        blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::I32, Precision::FP32):
-            mul<float, int, float, noConversion<float>, noConversion<int>, noConversion<float>>(inData, params, blobs,
-                                                                                                outData);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::I64, Precision::FP32):
-            mul<float, long long int, float, noConversion<float>, noConversion<long long int>, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::U64, Precision::FP32):
-            mul<float, unsigned long long int, float, noConversion<float>, noConversion<unsigned long long int>, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::FP16, Precision::FP32):
-            mul<float, ie_fp16, float, noConversion<float>, fp16tofp32, noConversion<float>>(inData, params, blobs,
-                                                                                             outData);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::FP16, Precision::FP16):
-            mul<float, ie_fp16, ie_fp16, noConversion<float>, fp16tofp32, fp32tofp16>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::FP32, Precision::FP32):
-            mul<float, float, float, noConversion<float>, noConversion<float>, noConversion<float>>(inData, params,
-                                                                                                    blobs, outData);
-            break;
-        default:
-            THROW_IE_EXCEPTION << "Unsupported precision!";
-        }
-    }
-};
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
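
The promotion table in the removed header comment can be restated as a small pure function. The encoding below is illustrative only: the removed code enumerates every pair explicitly in its switch, and it additionally supports U64 (which the table omits, behaving like I64):

#include <iostream>

enum class P { U8, I32, I64, FP16, FP32 };

// Output precision for a given input pair, per the table above.
P promote(P a, P b) {
    if (a == P::FP32 || b == P::FP32) return P::FP32;
    if (a == P::FP16 || b == P::FP16)  // FP16 survives only next to U8 or another FP16
        return (a == P::U8 || b == P::U8 || a == b) ? P::FP16 : P::FP32;
    if (a == P::I64 || b == P::I64) return P::I64;
    if (a == P::I32 || b == P::I32) return P::I32;
    return P::U8;
}

int main() {
    std::cout << (promote(P::FP16, P::I32) == P::FP32) << '\n';  // 1: FP16 x I32 -> FP32
}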
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_onehot_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_onehot_const_infer.hpp
deleted file mode 100644 (file)
index 16f7421..0000000
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_const_infer_impl.hpp"
-#include "precision_utils.h"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- * @brief Implementation of Const inference for OneHot layer
- */
-class OneHotConstInfer : public ConstInferImpl {
-public:
-    explicit OneHotConstInfer(const std::string& type): ConstInferImpl(type) {}
-
-    template <typename T>
-    void inferImplBody(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                       std::vector<Blob::Ptr>& outData) {
-        OneHotLayer layer(LayerParams {});
-        layer.params = params;
-        layer.type = _type;
-        _validator->parseParams(&layer);
-        auto src_dims = inData[0]->getTensorDesc().getDims();
-
-        const auto* src_data = inData[0]->cbuffer().as<const T*>();
-        auto* dst_data = outData[0]->buffer().as<T*>();
-        std::size_t prefix_size = 1;
-        auto input_dims = inData[0]->getTensorDesc().getDims();
-
-        std::size_t actual_axis = (layer.axis == -1) ? src_dims.size() : layer.axis;
-        for (size_t i = 0; i < actual_axis; ++i) prefix_size *= input_dims[i];
-
-        std::size_t suffix_size = inData[0]->size() / prefix_size;
-
-        std::size_t dst_offset = 0;
-        for (std::size_t prefix_idx = 0; prefix_idx < prefix_size; ++prefix_idx) {
-            for (std::size_t depth_idx = 0; depth_idx < layer.depth; ++depth_idx) {
-                for (std::size_t suffix_idx = 0; suffix_idx < suffix_size; suffix_idx++) {
-                    auto src_index = prefix_idx * suffix_size + suffix_idx;
-                    auto v = static_cast<std::size_t>(src_data[src_index]);
-                    dst_data[dst_offset++] = (v == depth_idx) ? layer.on_value : layer.off_value;
-                }
-            }
-        }
-    }
-
-    void inferImplBody_fp16(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                            std::vector<Blob::Ptr>& outData) {
-        OneHotLayer layer(LayerParams {});
-        layer.params = params;
-        layer.type = _type;
-        _validator->parseParams(&layer);
-        auto src_dims = inData[0]->getTensorDesc().getDims();
-
-        const auto* src_data = inData[0]->cbuffer().as<const int16_t*>();
-        auto* dst_data = outData[0]->buffer().as<int16_t*>();
-        std::size_t prefix_size = 1;
-        auto input_dims = inData[0]->getTensorDesc().getDims();
-
-        std::size_t actual_axis = (layer.axis == -1) ? src_dims.size() : layer.axis;
-        for (size_t i = 0; i < actual_axis; ++i) prefix_size *= input_dims[i];
-
-        std::size_t suffix_size = inData[0]->size() / prefix_size;
-
-        int16_t val_on = PrecisionUtils::f32tof16(layer.on_value);
-        int16_t val_off = PrecisionUtils::f32tof16(layer.off_value);
-
-        std::size_t dst_offset = 0;
-        for (std::size_t prefix_idx = 0; prefix_idx < prefix_size; ++prefix_idx) {
-            for (std::size_t depth_idx = 0; depth_idx < layer.depth; ++depth_idx) {
-                for (std::size_t suffix_idx = 0; suffix_idx < suffix_size; suffix_idx++) {
-                    auto src_index = prefix_idx * suffix_size + suffix_idx;
-                    auto v = static_cast<std::size_t>(src_data[src_index]);
-                    dst_data[dst_offset++] = (v == depth_idx) ? val_on : val_off;
-                }
-            }
-        }
-    }
-
-    void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        // For IR v7, OneHot is specified by the precision of the output tensor.
-        // The V10 OneHot spec uses on/off values as input tensors, so there
-        // the total layer precision will match the precision of the "on_value"
-        // input.
-        auto precision = outData[0]->getTensorDesc().getPrecision();
-        switch (precision) {
-        case Precision::FP32:
-            inferImplBody<PrecisionTrait<Precision::FP32>::value_type>(inData, params, outData);
-            break;
-        case Precision::FP16:
-            inferImplBody_fp16(inData, params, outData);
-            break;
-        case Precision::Q78:
-            inferImplBody<PrecisionTrait<Precision::Q78>::value_type>(inData, params, outData);
-            break;
-        case Precision::I16:
-            inferImplBody<PrecisionTrait<Precision::I16>::value_type>(inData, params, outData);
-            break;
-        case Precision::U8:
-            inferImplBody<PrecisionTrait<Precision::U8>::value_type>(inData, params, outData);
-            break;
-        case Precision::I8:
-            inferImplBody<PrecisionTrait<Precision::I8>::value_type>(inData, params, outData);
-            break;
-        case Precision::U16:
-            inferImplBody<PrecisionTrait<Precision::U16>::value_type>(inData, params, outData);
-            break;
-        case Precision::I32:
-            inferImplBody<PrecisionTrait<Precision::I32>::value_type>(inData, params, outData);
-            break;
-        case Precision::I64:
-            inferImplBody<PrecisionTrait<Precision::I64>::value_type>(inData, params, outData);
-            break;
-        case Precision::U64:
-            inferImplBody<PrecisionTrait<Precision::U64>::value_type>(inData, params, outData);
-            break;
-        case Precision::BOOL:
-            inferImplBody<PrecisionTrait<Precision::BOOL>::value_type>(inData, params, outData);
-            break;
-        default:
-            THROW_IE_EXCEPTION << "OneHot const inference: Unsupported precision " << precision.name();
-        }
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
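
The removed inferImplBody walks the input as [prefix][suffix] and inserts a depth axis between the two, writing on_value where the stored index equals the depth position. A sketch of that decomposition:

#include <cstddef>
#include <iostream>
#include <vector>

std::vector<float> oneHot(const std::vector<int>& src, std::size_t prefix,
                          std::size_t depth, std::size_t suffix,
                          float on = 1.f, float off = 0.f) {
    std::vector<float> dst(prefix * depth * suffix);
    std::size_t pos = 0;
    for (std::size_t p = 0; p < prefix; ++p)
        for (std::size_t d = 0; d < depth; ++d)
            for (std::size_t s = 0; s < suffix; ++s) {
                auto v = static_cast<std::size_t>(src[p * suffix + s]);
                dst[pos++] = (v == d) ? on : off;
            }
    return dst;
}

int main() {
    // src = [1, 0], depth 3, axis appended last: rows [0 1 0] and [1 0 0]
    for (float v : oneHot({1, 0}, 2, 3, 1)) std::cout << v << ' ';
}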
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_permute_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_permute_const_infer.hpp
deleted file mode 100644 (file)
index 5304358..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-
-#include <cmath>
-#include <ie_algorithm.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "precision_utils.h"
-#include "ie_const_infer_impl.hpp"
-#include "ie_parallel.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Const inference for the Permute layer
- */
-class PermuteConstInfer : public ConstInferImpl {
-public:
-    explicit PermuteConstInfer(const std::string& type): ConstInferImpl(type) {}
-
-    void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        LayerParams lp {};
-        CNNLayer layer(lp);
-        layer.params = params;
-
-        if (outData.empty()) THROW_IE_EXCEPTION << "Incorrect number of input/output edges!";
-
-        if (inData.size() != 1) THROW_IE_EXCEPTION << "Incorrect number of input edges!";
-
-        if (inData[0]->getTensorDesc().getPrecision() != outData[0]->getTensorDesc().getPrecision()) {
-            THROW_IE_EXCEPTION << "Input and output tensors should have same precision!";
-        }
-
-        std::vector<size_t> order;
-        std::vector<int> layerOrder = layer.GetParamAsInts("order");
-        for (auto ord : layerOrder) order.push_back(static_cast<size_t>(ord));
-
-        TensorDesc srcDesc = inData[0]->getTensorDesc();
-
-        SizeVector& dims = srcDesc.getDims();
-        InferenceEngine::SizeVector orderedDims;
-        for (auto ord : order) {
-            orderedDims.push_back(dims[ord]);
-        }
-        TensorDesc dstDesc(InferenceEngine::Precision::FP32, dims, {orderedDims, order});
-
-        size_t dataSize = inData[0]->size();
-        const auto* src_data = inData[0]->cbuffer().as<const uint8_t*>();
-        auto* dst_data = outData[0]->buffer().as<uint8_t*>();
-
-        parallel_for(dataSize, [&](size_t i) {
-            memcpy(dst_data + dstDesc.offset(i) * outData[0]->element_size(),
-                   src_data + srcDesc.offset(i) * inData[0]->element_size(), inData[0]->element_size());
-        });
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
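
The removed implementation relies on TensorDesc::offset() to map between layouts. A sketch of the underlying index arithmetic, computing the source offset for each output element from the `order` attribute (row-major strides computed by hand):

#include <cstddef>
#include <iostream>
#include <vector>

std::vector<float> permute(const std::vector<float>& src,
                           const std::vector<std::size_t>& dims,
                           const std::vector<std::size_t>& order) {
    std::vector<std::size_t> outDims(dims.size()), srcStrides(dims.size(), 1);
    for (std::size_t i = 0; i < dims.size(); ++i) outDims[i] = dims[order[i]];
    for (std::size_t i = dims.size() - 1; i > 0; --i)
        srcStrides[i - 1] = srcStrides[i] * dims[i];
    std::vector<float> dst(src.size());
    for (std::size_t i = 0; i < dst.size(); ++i) {
        std::size_t rem = i, srcOff = 0;
        for (std::size_t d = 0; d < outDims.size(); ++d) {
            std::size_t stride = 1;
            for (std::size_t k = d + 1; k < outDims.size(); ++k) stride *= outDims[k];
            srcOff += (rem / stride) * srcStrides[order[d]];  // output coord -> source stride
            rem %= stride;
        }
        dst[i] = src[srcOff];
    }
    return dst;
}

int main() {
    // 2x3 -> 3x2 transpose
    for (float v : permute({1, 2, 3, 4, 5, 6}, {2, 3}, {1, 0})) std::cout << v << ' ';  // 1 4 2 5 3 6
}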
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_pow_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_pow_const_infer.hpp
deleted file mode 100644 (file)
index 97f74ae..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-#include <precision_utils.h>
-
-#include <cmath>
-#include <ie_precision.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "broadcast_offset.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-class PowConstInfer : public ConstInferImpl {
-public:
-    explicit PowConstInfer(const std::string& type): ConstInferImpl(type) {}
-
-    struct fp16tofp32 {
-        inline float operator()(ie_fp16 value) {
-            return static_cast<float>(PrecisionUtils::f16tof32(value));
-        }
-    };
-
-    struct fp32tofp16 {
-        inline ie_fp16 operator()(float value) {
-            return PrecisionUtils::f32tof16(value);
-        }
-    };
-
-    template <typename dataType>
-    struct noConversion {
-        inline dataType operator()(dataType value) {
-            return value;
-        }
-    };
-
-    template <typename inDatatype1, typename inDatatype2, typename outDatatype, class ConversionInData1,
-              class ConversionInData2, class ConversionOutData>
-    void pow(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-             const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) {
-        auto* firstBlobBuffer = inData[0]->cbuffer().as<inDatatype1*>();
-        auto* secondBlobBuffer = inData[1]->cbuffer().as<inDatatype2*>();
-        if (!firstBlobBuffer || !secondBlobBuffer) {
-            THROW_IE_EXCEPTION << "empty input data";
-        }
-
-        auto outBlob = *outData.begin();
-        auto* outBuffer = outBlob->buffer().as<outDatatype*>();
-        if (!outBuffer) THROW_IE_EXCEPTION << "empty output data";
-
-        BroadcastOffset outOff(outBlob->getTensorDesc().getDims(), outBlob->getTensorDesc().getDims());
-        BroadcastOffset inOff1(inData[0]->getTensorDesc().getDims(), outBlob->getTensorDesc().getDims());
-        BroadcastOffset inOff2(inData[1]->getTensorDesc().getDims(), outBlob->getTensorDesc().getDims());
-        for (size_t i = 0; i < outBlob->size(); i++) {
-            SizeVector offsetDims = outOff.offset_dims(i);
-            outBuffer[outOff.offset(offsetDims)] =
-                ConversionOutData()(std::pow(ConversionInData1()(firstBlobBuffer[inOff1.offset(offsetDims)]),
-                                             ConversionInData2()(secondBlobBuffer[inOff2.offset(offsetDims)])));
-        }
-    }
-
-    void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        size_t numInputs = inData.size();
-        if (inData.size() != 2)
-            THROW_IE_EXCEPTION << "Unsupported number of inputs: " << numInputs << ". Only 2 inputs are supported";
-
-        auto compare =
-            getPrecisionMask(inData[0]->getTensorDesc().getPrecision(), inData[1]->getTensorDesc().getPrecision(),
-                             outData[0]->getTensorDesc().getPrecision());
-        switch (compare) {
-        case getPrecisionMask(Precision::FP32, Precision::FP32, Precision::FP32):
-            pow<float, float, float, noConversion<float>, noConversion<float>, noConversion<float>>(inData, params,
-                                                                                                    blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::I32, Precision::FP32):
-            pow<int32_t, int32_t, float, noConversion<int32_t>, noConversion<int32_t>, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::FP16, Precision::FP16):
-            pow<ie_fp16, ie_fp16, ie_fp16, noConversion<ie_fp16>, noConversion<ie_fp16>, noConversion<ie_fp16>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::I32, Precision::FP16):
-            pow<int32_t, int32_t, float, noConversion<int32_t>, noConversion<int32_t>, fp32tofp16>(inData, params,
-                                                                                                   blobs, outData);
-            break;
-        default:
-            THROW_IE_EXCEPTION << "Not supported data type in port 0";
-        }
-    }
-};
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
\ No newline at end of file
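
The BroadcastOffset helper used by the removed Pow (and Mul) maps each output coordinate back to inputs whose dimensions may be 1. A one-axis sketch of that mapping, raising each row element to a per-column exponent:

#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
    std::vector<float> base = {2, 3, 2, 3, 2, 3};  // shape {2, 3}
    std::vector<float> expo = {1, 2, 3};           // shape {1, 3}, broadcast over rows
    std::vector<float> out(base.size());
    const std::size_t rows = 2, cols = 3;
    for (std::size_t r = 0; r < rows; ++r)
        for (std::size_t c = 0; c < cols; ++c)
            // the exponent input has dim 1 on the row axis, so its offset ignores r
            out[r * cols + c] = std::pow(base[r * cols + c], expo[c]);
    for (float v : out) std::cout << v << ' ';  // 2 9 8 2 9 8
}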
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_power_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_power_const_infer.hpp
deleted file mode 100644 (file)
index 7a8a0d2..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-
-#include <cmath>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_const_infer_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Const inference for the Power layer
- */
-class PowerConstInfer : public ConstInferImpl {
-public:
-    explicit PowerConstInfer(const std::string& type): ConstInferImpl(type) {}
-
-    void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        LayerParams lp {};
-        PowerLayer layer(lp);
-        layer.params = params;
-        layer.type = _type;
-        _validator->parseParams(&layer);
-
-        float scale = layer.scale;
-        float power = layer.power;
-        float shift = layer.offset;
-
-        // TODO: check for access and sizes
-        auto* input = inData[0]->cbuffer().as<float*>();
-        auto* output = outData[0]->buffer().as<float*>();
-        size_t dataSize = inData[0]->size();
-
-        if (power == 1.0f) {
-            for (size_t i = 0; i < dataSize; i++) {
-                output[i] = input[i] * scale + shift;
-            }
-        } else {
-            for (size_t i = 0; i < dataSize; i++) {
-                output[i] = std::pow(input[i] * scale + shift, power);
-            }
-        }
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
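
The removed Power implementation computes y = (x * scale + shift) ^ power with a fast path for power == 1. A standalone sketch:

#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

void powerLayer(const std::vector<float>& in, std::vector<float>& out,
                float scale, float shift, float power) {
    out.resize(in.size());
    if (power == 1.0f) {
        // power == 1 fast path: plain scale-and-shift, no std::pow call
        for (std::size_t i = 0; i < in.size(); ++i) out[i] = in[i] * scale + shift;
    } else {
        for (std::size_t i = 0; i < in.size(); ++i) out[i] = std::pow(in[i] * scale + shift, power);
    }
}

int main() {
    std::vector<float> out;
    powerLayer({1, 2, 3}, out, 2.f, 1.f, 2.f);
    for (float v : out) std::cout << v << ' ';  // 9 25 49
}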
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_range_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_range_const_infer.hpp
deleted file mode 100644 (file)
index 447205a..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-#include <ie_memcpy.h>
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_const_infer_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Const inference for the Range layer
- */
-class RangeConstInfer : public ConstInferImpl {
-public:
-    explicit RangeConstInfer(const std::string& type): ConstInferImpl(type) {}
-
-    template <typename data_t>
-    void range(data_t start, data_t limit, data_t delta, const Blob::Ptr& output) {
-        size_t dst_size = (output->getTensorDesc().getDims())[0];
-        data_t* dst_data =
-            output->buffer().as<data_t*>() + output->getTensorDesc().getBlockingDesc().getOffsetPadding();
-        size_t work_amount_dst = std::floor(std::abs((limit - start) / delta));
-        if (work_amount_dst != dst_size) THROW_IE_EXCEPTION << "Number of Range values does not match the output tensor dimension";
-
-        parallel_nt(0, [&](const int ithr, const int nthr) {
-            size_t iwork = 0, end = 0;
-            splitter(work_amount_dst, nthr, ithr, iwork, end);
-            data_t dst_value = start + iwork * delta;
-
-            for (; iwork < end; ++iwork, dst_value += delta) {
-                dst_data[iwork] = dst_value;
-            }
-        });
-    }
-
-    void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        const size_t RANGE_START = 0;
-        const size_t RANGE_LIMIT = 1;
-        const size_t RANGE_DELTA = 2;
-        if (inData.empty() || outData.empty()) THROW_IE_EXCEPTION << " Incorrect number of input/output edges!";
-
-        if (inData.size() != 3) THROW_IE_EXCEPTION << " Incorrect number of input edges!";
-
-        SizeVector start_dims = inData[RANGE_START]->getTensorDesc().getDims();
-        if (start_dims.size() > 1) THROW_IE_EXCEPTION << " Start scalar should have 1 dimension";
-
-        SizeVector limit_dims = inData[RANGE_LIMIT]->getTensorDesc().getDims();
-        if (limit_dims.size() > 1) THROW_IE_EXCEPTION << " Limit scalar should have 1 dimension";
-
-        SizeVector delta_dims = inData[RANGE_DELTA]->getTensorDesc().getDims();
-        if (delta_dims.size() > 1) THROW_IE_EXCEPTION << " Delta scalar should have 1 dimension";
-
-        SizeVector dst_dims = outData[0]->getTensorDesc().getDims();
-        if (dst_dims.size() > 1) THROW_IE_EXCEPTION << " Output vector should have 1 dimension";
-
-        if (!(inData[RANGE_START]->getTensorDesc().getPrecision() == Precision::I32 &&
-              inData[RANGE_LIMIT]->getTensorDesc().getPrecision() == Precision::I32 &&
-              inData[RANGE_DELTA]->getTensorDesc().getPrecision() == Precision::I32 &&
-              outData[0]->getTensorDesc().getPrecision() == Precision::I32) &&
-            !(inData[RANGE_START]->getTensorDesc().getPrecision() == Precision::FP32 &&
-              inData[RANGE_LIMIT]->getTensorDesc().getPrecision() == Precision::FP32 &&
-              inData[RANGE_DELTA]->getTensorDesc().getPrecision() == Precision::FP32 &&
-              outData[0]->getTensorDesc().getPrecision() == Precision::FP32)) {
-            THROW_IE_EXCEPTION
-                << " 'Start', 'Limit', 'Delta' input scalars and output tensor should have same precision"
-                << "and only FP32 and I32 are supported!";
-        }
-
-        switch (outData[0]->getTensorDesc().getPrecision()) {
-        case Precision::FP32: {
-            range((inData[RANGE_START]->cbuffer().as<float*>() +
-                   inData[RANGE_START]->getTensorDesc().getBlockingDesc().getOffsetPadding())[0],
-                  (inData[RANGE_LIMIT]->cbuffer().as<float*>() +
-                   inData[RANGE_LIMIT]->getTensorDesc().getBlockingDesc().getOffsetPadding())[0],
-                  (inData[RANGE_DELTA]->cbuffer().as<float*>() +
-                   inData[RANGE_DELTA]->getTensorDesc().getBlockingDesc().getOffsetPadding())[0],
-                  outData[0]);
-        } break;
-        case Precision::I32: {
-            range((inData[RANGE_START]->cbuffer().as<int32_t*>() +
-                   inData[RANGE_START]->getTensorDesc().getBlockingDesc().getOffsetPadding())[0],
-                  (inData[RANGE_LIMIT]->cbuffer().as<int32_t*>() +
-                   inData[RANGE_LIMIT]->getTensorDesc().getBlockingDesc().getOffsetPadding())[0],
-                  (inData[RANGE_DELTA]->cbuffer().as<int32_t*>() +
-                   inData[RANGE_DELTA]->getTensorDesc().getBlockingDesc().getOffsetPadding())[0],
-                  outData[0]);
-        } break;
-        default:
-            THROW_IE_EXCEPTION << "Incorrect output precision. Only FP32 and I32 are supported!";
-        }
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
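For reference, a minimal standalone sketch of the rule the deleted Range const-infer enforced: the output length must equal floor(|(limit - start) / delta|), and values are filled as start + i * delta. Plain C++ with a hypothetical free function, not the Inference Engine API:

// Sketch of the deleted Range const-infer, minus Blob plumbing and threading.
#include <cmath>
#include <cstddef>
#include <vector>

template <typename T>
std::vector<T> make_range(T start, T limit, T delta) {
    // Same length rule the deleted code checked against the output blob:
    // floor(|(limit - start) / delta|).
    const size_t n = static_cast<size_t>(std::floor(std::abs((limit - start) / delta)));
    std::vector<T> out(n);
    T value = start;
    for (size_t i = 0; i < n; ++i, value += delta) out[i] = value;
    return out;
}

// make_range<float>(0.f, 6.f, 2.f) -> {0, 2, 4}; the deleted kernel split the
// same loop across threads, seeding each chunk with start + iwork * delta.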
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_reduce_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_reduce_const_infer.hpp
deleted file mode 100644 (file)
index 5a2795b..0000000
+++ /dev/null
@@ -1,416 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-
-#include <cfloat>
-#include <cmath>
-#include <ie_algorithm.hpp>
-#include <limits>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_const_infer_impl.hpp"
-#include "ie_parallel.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Const inference for Reduce layer
- */
-class ReduceConstInfer : public ConstInferImpl {
-private:
-    const size_t REDUCE_DATA = 0;
-    const size_t REDUCE_INDEXES = 1;
-
-    template <typename src_t, typename dst_t>
-    void reduce(SizeVector src_dims, SizeVector srcStrides, const src_t* src_data, dst_t* dst_data,
-                size_t work_amount_dst, size_t reduced_dims_work_amount, SizeVector axes_for_reduction,
-                SizeVector dst_dims, dst_t init_value, std::string reduceType) {
-        // func1 accumulates source values into per-thread partial results;
-        // func2 merges those partials across threads (e.g. ReduceL1 applies
-        // abs() only in the first pass, so the merge is a plain sum).
-        std::function<dst_t(dst_t, src_t)> func1;
-        std::function<dst_t(dst_t, src_t)> func2;
-        if (reduceType == "ReduceAnd") {
-            func1 = [](dst_t x, src_t y) -> dst_t {
-                return x && y;
-            };
-            func2 = [](dst_t x, src_t y) -> dst_t {
-                return x && y;
-            };
-        } else if (reduceType == "ReduceL1") {
-            func1 = [](dst_t x, src_t y) -> dst_t {
-                return x + (std::abs)(y);
-            };
-            func2 = [](dst_t x, src_t y) -> dst_t {
-                return x + y;
-            };
-        } else if (reduceType == "ReduceL2") {
-            func1 = [](dst_t x, src_t y) -> dst_t {
-                return x + y * y;
-            };
-            func2 = [](dst_t x, src_t y) -> dst_t {
-                return x + y;
-            };
-        } else if (reduceType == "ReduceLogSum") {
-            func1 = [](dst_t x, src_t y) -> dst_t {
-                return x + y;
-            };
-            func2 = [](dst_t x, src_t y) -> dst_t {
-                return x + y;
-            };
-        } else if (reduceType == "ReduceLogSumExp") {
-            func1 = [](dst_t x, src_t y) -> dst_t {
-                return x + expf(y);
-            };
-            func2 = [](dst_t x, src_t y) -> dst_t {
-                return x + y;
-            };
-        } else if (reduceType == "ReduceMax") {
-            func1 = [](dst_t x, src_t y) -> dst_t {
-                return x > y ? x : y;
-            };
-            func2 = [](dst_t x, src_t y) -> dst_t {
-                return x > y ? x : y;
-            };
-        } else if (reduceType == "ReduceMean") {
-            func1 = [](dst_t x, src_t y) -> dst_t {
-                return (x + y);
-            };
-            func2 = [](dst_t x, src_t y) -> dst_t {
-                return (x + y);
-            };
-        } else if (reduceType == "ReduceMin") {
-            func1 = [](dst_t x, src_t y) -> dst_t {
-                return x < y ? x : y;
-            };
-            func2 = [](dst_t x, src_t y) -> dst_t {
-                return x < y ? x : y;
-            };
-        } else if (reduceType == "ReduceOr") {
-            func1 = [](dst_t x, src_t y) -> dst_t {
-                return x || y;
-            };
-            func2 = [](dst_t x, src_t y) -> dst_t {
-                return x || y;
-            };
-        } else if (reduceType == "ReduceProd") {
-            func1 = [](dst_t x, src_t y) -> dst_t {
-                return x * y;
-            };
-            func2 = [](dst_t x, src_t y) -> dst_t {
-                return x * y;
-            };
-        } else if (reduceType == "ReduceSum") {
-            func1 = [](dst_t x, src_t y) -> dst_t {
-                return x + y;
-            };
-            func2 = [](dst_t x, src_t y) -> dst_t {
-                return x + y;
-            };
-        } else if (reduceType == "ReduceSumSquare") {
-            func1 = [](dst_t x, src_t y) -> dst_t {
-                return x + y * y;
-            };
-            func2 = [](dst_t x, src_t y) -> dst_t {
-                return x + y;
-            };
-        }
-
-        unsigned int nthr = parallel_get_max_threads();
-        if ((work_amount_dst + 1) >= nthr) {
-            parallel_nt(0, [&](const int ithr, const int nthr) {
-                int j;
-                size_t i, start = 0, end = 0;
-                SizeVector dst_counters(dst_dims.size(), 0);
-                splitter(work_amount_dst, nthr, ithr, start, end);
-                for (j = dst_dims.size() - 1, i = start; j >= 0; j--) {
-                    dst_counters[j] = i % dst_dims[j];
-                    i /= dst_dims[j];
-                }
-                for (size_t src_idx, dst_idx = start; dst_idx < end; ++dst_idx) {
-                    dst_t reduce_prod = init_value;
-                    bool update_idx = true;
-                    SizeVector src_counters = dst_counters;
-                    for (i = 0; i < reduced_dims_work_amount; ++i) {
-                        if (update_idx) {
-                            src_idx = 0;
-                            for (j = 0; j < static_cast<int>(src_dims.size()); ++j)
-                                src_idx += (src_counters[j] % src_dims[j]) * srcStrides[j];
-                            update_idx = false;
-                        }
-                        reduce_prod = func1(reduce_prod, src_data[src_idx]);
-                        for (j = axes_for_reduction.size() - 1; j >= 0; j--) {
-                            src_counters[axes_for_reduction[j]]++;
-                            if (src_counters[axes_for_reduction[j]] < src_dims[axes_for_reduction[j]]) {
-                                src_idx += srcStrides[axes_for_reduction[j]];
-                                break;
-                            } else {
-                                src_counters[axes_for_reduction[j]] = 0;
-                                update_idx = true;
-                            }
-                        }
-                    }
-                    dst_data[dst_idx] = reduce_prod;
-                    for (j = dst_dims.size() - 1; j >= 0; j--) {
-                        dst_counters[j]++;
-                        if (dst_counters[j] < dst_dims[j])
-                            break;
-                        else
-                            dst_counters[j] = 0;
-                    }
-                }
-            });
-        } else {
-            std::vector<dst_t> reduce_prod((nthr * work_amount_dst), init_value);
-            if (work_amount_dst == 1) {
-                parallel_nt(nthr, [&](const int ithr, const int nthr) {
-                    size_t i, start = 0, end = 0;
-                    splitter((srcStrides[0] * src_dims[0]), nthr, ithr, start, end);
-                    for (i = start; i < end; ++i) reduce_prod[ithr] = func1(reduce_prod[ithr], src_data[i]);
-                });
-            } else {
-                SizeVector dstStrides(dst_dims.size(), 1);
-                for (int j = dst_dims.size() - 1; j >= 1; --j) dstStrides[j - 1] = dstStrides[j] * dst_dims[j];
-                parallel_nt(nthr, [&](const int ithr, const int nthr) {
-                    int j;
-                    bool update_idx = true;
-                    size_t i, src_idx, dst_idx = 0, start = 0, end = 0;
-                    splitter((srcStrides[0] * src_dims[0]), nthr, ithr, start, end);
-                    SizeVector src_counters(src_dims.size(), 0);
-                    for (j = src_dims.size() - 1, src_idx = start; j >= 0; j--) {
-                        src_counters[j] = src_idx % src_dims[j];
-                        src_idx /= src_dims[j];
-                    }
-                    for (src_idx = start; src_idx < end; ++src_idx) {
-                        if (update_idx) {
-                            for (i = 0, dst_idx = 0; i < dst_dims.size(); ++i)
-                                dst_idx += (src_counters[i] % dst_dims[i]) * dstStrides[i];
-                            update_idx = false;
-                        }
-                        reduce_prod[ithr * work_amount_dst + dst_idx] =
-                            func1(reduce_prod[ithr * work_amount_dst + dst_idx], src_data[src_idx]);
-                        for (j = src_dims.size() - 1; j >= 0; j--) {
-                            src_counters[j]++;
-                            if (src_counters[j] < src_dims[j]) {
-                                if (dst_dims[j] > 1) dst_idx += dstStrides[j];
-                                break;
-                            } else {
-                                src_counters[j] = 0;
-                                update_idx = true;
-                            }
-                        }
-                    }
-                });
-            }
-            for (size_t dst_idx = 0; dst_idx < work_amount_dst; dst_idx++) {
-                for (size_t ithr = work_amount_dst; ithr < (nthr * work_amount_dst); ithr += work_amount_dst)
-                    reduce_prod[dst_idx] = func2(reduce_prod[dst_idx], reduce_prod[dst_idx + ithr]);
-                dst_data[dst_idx] = reduce_prod[dst_idx];
-            }
-        }
-    }
-
-    template <typename src_d, typename dst_d>
-    void exec_reduce(const std::vector<Blob::CPtr>& insData, std::vector<Blob::Ptr>& outData, std::string reduce_mode,
-                     SizeVector src_dims, SizeVector srcStrides, size_t work_amount_dst,
-                     size_t reduced_dims_work_amount, SizeVector axes_for_reduction, SizeVector our_dims, dst_d min_val,
-                     dst_d max_val) {
-        const src_d* src_data = insData[REDUCE_DATA]->cbuffer().as<src_d*>() +
-                                insData[REDUCE_DATA]->getTensorDesc().getBlockingDesc().getOffsetPadding();
-        dst_d* dst_data =
-            outData[0]->cbuffer().as<dst_d*>() + outData[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
-        if (reduce_mode == "ReduceAnd") {
-            reduce<src_d, dst_d>(src_dims, srcStrides, src_data, dst_data, work_amount_dst, reduced_dims_work_amount,
-                                 axes_for_reduction, our_dims, 1, reduce_mode);
-        } else if (reduce_mode == "ReduceL1") {
-            reduce<src_d, dst_d>(src_dims, srcStrides, src_data, dst_data, work_amount_dst, reduced_dims_work_amount,
-                                 axes_for_reduction, our_dims, 0, reduce_mode);
-        } else if (reduce_mode == "ReduceL2") {
-            reduce<src_d, dst_d>(src_dims, srcStrides, src_data, dst_data, work_amount_dst, reduced_dims_work_amount,
-                                 axes_for_reduction, our_dims, 0, reduce_mode);
-
-            parallel_for(work_amount_dst, [&](size_t i) {
-                dst_data[i] = sqrt(dst_data[i]);
-            });
-        } else if (reduce_mode == "ReduceLogSum") {
-            reduce<src_d, dst_d>(src_dims, srcStrides, src_data, dst_data, work_amount_dst, reduced_dims_work_amount,
-                                 axes_for_reduction, our_dims, 0, reduce_mode);
-
-            parallel_for(work_amount_dst, [&](size_t i) {
-                dst_data[i] = logf(dst_data[i]);
-            });
-        } else if (reduce_mode == "ReduceLogSumExp") {
-            reduce<src_d, dst_d>(src_dims, srcStrides, src_data, dst_data, work_amount_dst, reduced_dims_work_amount,
-                                 axes_for_reduction, our_dims, 0, reduce_mode);
-
-            parallel_for(work_amount_dst, [&](size_t i) {
-                dst_data[i] = logf(dst_data[i]);
-            });
-        } else if (reduce_mode == "ReduceMax") {
-            reduce<src_d, dst_d>(src_dims, srcStrides, src_data, dst_data, work_amount_dst, reduced_dims_work_amount,
-                                 axes_for_reduction, our_dims, min_val, reduce_mode);
-        } else if (reduce_mode == "ReduceMean") {
-            reduce<src_d, dst_d>(src_dims, srcStrides, src_data, dst_data, work_amount_dst, reduced_dims_work_amount,
-                                 axes_for_reduction, our_dims, 0, reduce_mode);
-
-            parallel_for(work_amount_dst, [&](size_t i) {
-                dst_data[i] /= static_cast<float>(reduced_dims_work_amount);
-            });
-        } else if (reduce_mode == "ReduceMin") {
-            reduce<src_d, dst_d>(src_dims, srcStrides, src_data, dst_data, work_amount_dst, reduced_dims_work_amount,
-                                 axes_for_reduction, our_dims, max_val, reduce_mode);
-        } else if (reduce_mode == "ReduceOr") {
-            reduce<src_d, dst_d>(src_dims, srcStrides, src_data, dst_data, work_amount_dst, reduced_dims_work_amount,
-                                 axes_for_reduction, our_dims, 0, reduce_mode);
-        } else if (reduce_mode == "ReduceProd") {
-            reduce<src_d, dst_d>(src_dims, srcStrides, src_data, dst_data, work_amount_dst, reduced_dims_work_amount,
-                                 axes_for_reduction, our_dims, 1, reduce_mode);
-        } else if (reduce_mode == "ReduceSum") {
-            reduce<src_d, dst_d>(src_dims, srcStrides, src_data, dst_data, work_amount_dst, reduced_dims_work_amount,
-                                 axes_for_reduction, our_dims, 0, reduce_mode);
-        } else if (reduce_mode == "ReduceSumSquare") {
-            reduce<src_d, dst_d>(src_dims, srcStrides, src_data, dst_data, work_amount_dst, reduced_dims_work_amount,
-                                 axes_for_reduction, our_dims, 0, reduce_mode);
-        } else {
-            THROW_IE_EXCEPTION << " Incorrect Reduce layer type!";
-        }
-    }
-
-public:
-    explicit ReduceConstInfer(const std::string& type): ConstInferImpl(type) {}
-
-    void inferImpl(const std::vector<Blob::CPtr>& insData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        LayerParams lp {"", _type, Precision::UNSPECIFIED};
-        CNNLayer layer(lp);
-        layer.params = params;
-
-        if (insData.empty() || outData.empty())
-            THROW_IE_EXCEPTION << " Reduce constant inference error: empty input or output data!";
-
-        if (insData.size() != 2)
-            THROW_IE_EXCEPTION
-                << " Reduce constant inference error: Incorrect number of input edges! Should be 2 edges, got "
-                << insData.size();
-
-        SizeVector idx_dims = insData[REDUCE_INDEXES]->getTensorDesc().getDims();
-        if (idx_dims.size() > 1)
-            THROW_IE_EXCEPTION << " Reduce constant inference error: Index vector should be 1 dimension, got "
-                               << idx_dims.size() << " dimensions";
-
-        if (insData[REDUCE_INDEXES]->getTensorDesc().getPrecision() != Precision::I32)
-            THROW_IE_EXCEPTION << " Reduce constant inference error: Incorrect 'axes_to_reduction' input precision. "
-                                  "Only I32 is supported! Current precision: "
-                               << insData[REDUCE_INDEXES]->getTensorDesc().getPrecision();
-
-        SizeVector data_dims = insData[REDUCE_DATA]->getTensorDesc().getDims();
-        SizeVector dst_dims = outData[0]->getTensorDesc().getDims();
-
-        bool keep_dims = layer.GetParamAsBool("keep_dims", true);
-        if (keep_dims) {
-            if (data_dims.size() != dst_dims.size())
-                THROW_IE_EXCEPTION << " Reduce constant inference error: Incorrect number of input/output dimensions!";
-        } else {
-            if (data_dims.size() <= dst_dims.size())
-                THROW_IE_EXCEPTION << " Reduce constant inference error: Incorrect number of input/output dimensions!";
-        }
-
-        SizeVector src_dims = insData[REDUCE_DATA]->getTensorDesc().getDims();
-        SizeVector srcStrides = insData[REDUCE_DATA]->getTensorDesc().getBlockingDesc().getStrides();
-
-        int32_t* idx_data = insData[REDUCE_INDEXES]->cbuffer().as<int32_t*>() +
-                            insData[REDUCE_INDEXES]->getTensorDesc().getBlockingDesc().getOffsetPadding();
-        SizeVector axes;
-        for (size_t i = 0; i < idx_dims[0]; i++) {
-            int32_t axis = idx_data[i];
-            if (axis < 0) axis += data_dims.size();
-
-            if (static_cast<size_t>(axis) >= data_dims.size())
-                THROW_IE_EXCEPTION << " Reduce constant inference error: Index to reduce exceeds data tensor dimension";
-            axes.push_back(static_cast<size_t>(axis));
-        }
-
-        size_t reduced_dims_work_amount = 1;
-        InferenceEngine::SizeVector our_dims, out_dims, axes_for_reduction;
-        for (size_t i = 0; i < src_dims.size(); i++) {
-            bool found = false;
-            for (size_t axis : axes)
-                if (i == axis) found = true;
-
-            if (found) {
-                axes_for_reduction.push_back(i);
-                reduced_dims_work_amount *= src_dims[i];
-                if (keep_dims) out_dims.push_back(1);
-                our_dims.push_back(1);
-            } else {
-                out_dims.push_back(src_dims[i]);
-                our_dims.push_back(src_dims[i]);
-            }
-        }
-
-        if (!our_dims.size()) our_dims = SizeVector(1, 1);
-
-        for (size_t i = 0; i < (std::min)(out_dims.size(), dst_dims.size()); i++)
-            if (out_dims[i] != dst_dims[i])
-                THROW_IE_EXCEPTION << " Reduce constant inference error: Incorrect number of output dimensions!";
-
-        size_t work_amount_dst;
-        if (!dst_dims.size())
-            work_amount_dst = 1;
-        else
-            work_amount_dst = outData[0]->getTensorDesc().getBlockingDesc().getStrides()[0] * dst_dims[0];
-
-        std::string reduce_mode = layer.type;
-
-        auto compare = getPrecisionMask(insData[REDUCE_DATA]->getTensorDesc().getPrecision(),
-                                        outData[0]->getTensorDesc().getPrecision());
-        switch (compare) {
-        case getPrecisionMask(Precision::FP32, Precision::FP32):
-            exec_reduce<PrecisionTrait<Precision::FP32>::value_type, PrecisionTrait<Precision::FP32>::value_type>(
-                insData, outData, reduce_mode, src_dims, srcStrides, work_amount_dst, reduced_dims_work_amount,
-                axes_for_reduction, dst_dims, (std::numeric_limits<PrecisionTrait<Precision::FP32>::value_type>::min)(),
-                (std::numeric_limits<PrecisionTrait<Precision::FP32>::value_type>::max)());
-            break;
-
-        case getPrecisionMask(Precision::I32, Precision::I64):
-            exec_reduce<PrecisionTrait<Precision::I32>::value_type, PrecisionTrait<Precision::I64>::value_type>(
-                insData, outData, reduce_mode, src_dims, srcStrides, work_amount_dst, reduced_dims_work_amount,
-                axes_for_reduction, dst_dims, (std::numeric_limits<PrecisionTrait<Precision::I64>::value_type>::min)(),
-                (std::numeric_limits<PrecisionTrait<Precision::I64>::value_type>::max)());
-            break;
-        case getPrecisionMask(Precision::I32, Precision::U64):
-            exec_reduce<PrecisionTrait<Precision::I32>::value_type, PrecisionTrait<Precision::U64>::value_type>(
-                insData, outData, reduce_mode, src_dims, srcStrides, work_amount_dst, reduced_dims_work_amount,
-                axes_for_reduction, dst_dims, (std::numeric_limits<PrecisionTrait<Precision::U64>::value_type>::min)(),
-                (std::numeric_limits<PrecisionTrait<Precision::U64>::value_type>::max)());
-            break;
-        case getPrecisionMask(Precision::I32, Precision::FP32):
-            exec_reduce<PrecisionTrait<Precision::I32>::value_type, PrecisionTrait<Precision::FP32>::value_type>(
-                insData, outData, reduce_mode, src_dims, srcStrides, work_amount_dst, reduced_dims_work_amount,
-                axes_for_reduction, dst_dims, (std::numeric_limits<PrecisionTrait<Precision::FP32>::value_type>::min)(),
-                (std::numeric_limits<PrecisionTrait<Precision::FP32>::value_type>::max)());
-            break;
-        case getPrecisionMask(Precision::I32, Precision::I32):
-            exec_reduce<PrecisionTrait<Precision::I32>::value_type, PrecisionTrait<Precision::I32>::value_type>(
-                insData, outData, reduce_mode, src_dims, srcStrides, work_amount_dst, reduced_dims_work_amount,
-                axes_for_reduction, dst_dims, (std::numeric_limits<PrecisionTrait<Precision::I32>::value_type>::min)(),
-                (std::numeric_limits<PrecisionTrait<Precision::I32>::value_type>::max)());
-            break;
-        default:
-            THROW_IE_EXCEPTION
-                << "Reduce constant inference error: Incorrect data tensor precisions. REDUCE_DATA precision: "
-                << insData[REDUCE_DATA]->getTensorDesc().getPrecision()
-                << " Output precision: " << outData[0]->getTensorDesc().getPrecision();
-        }
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
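The core of the deleted implementation is the counter walk: iterate the source tensor in row-major order, map each source coordinate to an output index by collapsing the reduced axes to 0, and accumulate. A sequential ReduceSum sketch of that mapping (plain C++, hypothetical signature; assumes src.size() equals the product of src_dims and keep_dims semantics, so reduced axes stay as size-1 dims):

// Sketch: sequential ReduceSum mirroring the counter walk in ReduceConstInfer.
#include <cstddef>
#include <vector>

std::vector<float> reduce_sum(const std::vector<float>& src,
                              const std::vector<size_t>& src_dims,
                              const std::vector<bool>& is_reduced /* per axis */) {
    const size_t rank = src_dims.size();
    // Row-major strides for src and for the output (reduced dims become 1).
    std::vector<size_t> src_strides(rank, 1), dst_dims(rank), dst_strides(rank, 1);
    for (size_t i = 0; i < rank; ++i) dst_dims[i] = is_reduced[i] ? 1 : src_dims[i];
    for (size_t j = rank - 1; j > 0; --j) {
        src_strides[j - 1] = src_strides[j] * src_dims[j];
        dst_strides[j - 1] = dst_strides[j] * dst_dims[j];
    }
    size_t dst_size = 1;
    for (size_t d : dst_dims) dst_size *= d;
    std::vector<float> dst(dst_size, 0.f);

    std::vector<size_t> cnt(rank, 0);  // multi-dimensional source counter
    for (size_t src_idx = 0; src_idx < src.size(); ++src_idx) {
        size_t dst_idx = 0;
        for (size_t i = 0; i < rank; ++i)
            dst_idx += (cnt[i] % dst_dims[i]) * dst_strides[i];  // reduced axes map to 0
        dst[dst_idx] += src[src_idx];
        for (size_t j = rank; j-- > 0;) {  // increment counters, last axis fastest
            if (++cnt[j] < src_dims[j]) break;
            cnt[j] = 0;
        }
    }
    return dst;
}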
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_reshape_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_reshape_const_infer.hpp
deleted file mode 100644 (file)
index b9f4703..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-#include <ie_memcpy.h>
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_const_infer_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Const inference for Reshape layer
- */
-class ReshapeConstInfer : public ConstInferImpl {
-public:
-    explicit ReshapeConstInfer(const std::string& type): ConstInferImpl(type) {}
-
-    void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        auto inBlob = *inData.begin();
-        const auto* inBuffer = inBlob->cbuffer().as<uint8_t*>();
-        auto outBlob = *outData.begin();
-        auto* outBuffer = outBlob->buffer().as<uint8_t*>();
-        if (outBlob->byteSize() != inBlob->byteSize())
-            THROW_IE_EXCEPTION << "ReshapeConstInfer: input/output tensor size mismatch";
-
-        ie_memcpy(outBuffer, outBlob->byteSize(), inBuffer, inBlob->byteSize());
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
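Constant folding for Reshape reduces to a raw byte copy once the output shape is fixed, which is all the deleted class did. A plain C++ stand-in (hypothetical function, no Blob types):

// Sketch: Reshape const-fold as a guarded byte copy (element order unchanged).
#include <cstdint>
#include <cstring>
#include <stdexcept>
#include <vector>

void reshape_const_fold(const std::vector<uint8_t>& in, std::vector<uint8_t>& out) {
    if (in.size() != out.size())
        throw std::runtime_error("Reshape const infer: input/output byte size mismatch");
    std::memcpy(out.data(), in.data(), in.size());
}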
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_shape_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_shape_const_infer.hpp
deleted file mode 100644 (file)
index d8025d7..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "precision_utils.h"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Const inference for Shape layer
- */
-class ShapeConstInfer : public ConstInferImpl {
-public:
-    explicit ShapeConstInfer(const std::string& type): ConstInferImpl(type) {}
-
-    void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        SizeVector inShape = (*inData.begin())->getTensorDesc().getDims();
-        auto outBlob = *outData.begin();
-        if (inShape.size() != outBlob->size()) THROW_IE_EXCEPTION << "Number of input dimensions doesn't match the output size";
-
-        if (outBlob->getTensorDesc().getPrecision() == Precision::FP16) {
-            auto* outBuffer = outBlob->buffer().as<ie_fp16*>();
-            for (int i = 0; i < outBlob->size(); i++) {
-                outBuffer[i] = PrecisionUtils::f32tof16(static_cast<float>(inShape[i]));
-            }
-        } else if (outBlob->getTensorDesc().getPrecision() == Precision::I32) {
-            auto* outBuffer = outBlob->buffer().as<int32_t*>();
-            for (int i = 0; i < outBlob->size(); i++) {
-                outBuffer[i] = static_cast<int32_t>(inShape[i]);
-            }
-        } else if (outBlob->getTensorDesc().getPrecision() == Precision::I64) {
-            auto* outBuffer = outBlob->buffer().as<int64_t*>();
-            for (int i = 0; i < outBlob->size(); i++) {
-                outBuffer[i] = static_cast<int64_t>(inShape[i]);
-            }
-        } else if (outBlob->getTensorDesc().getPrecision() == Precision::U64) {
-            auto* outBuffer = outBlob->buffer().as<uint64_t*>();
-            for (int i = 0; i < outBlob->size(); i++) {
-                outBuffer[i] = static_cast<uint64_t>(inShape[i]);
-            }
-        } else {
-            auto* outBuffer = outBlob->buffer().as<float*>();
-            for (int i = 0; i < outBlob->size(); i++) {
-                outBuffer[i] = inShape[i];
-            }
-        }
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
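The Shape const-infer just materializes the input dimensions into the output buffer, converting to the requested precision. A generic stand-in for the per-precision branches above (plain C++, hypothetical template; the FP16 branch would additionally need the f32tof16 conversion):

// Sketch: write a tensor's dims into a typed output buffer.
#include <cstddef>
#include <vector>

template <typename T>
void shape_const_fold(const std::vector<size_t>& in_dims, T* out, size_t out_size) {
    // The deleted code verified out_size == in_dims.size() before writing.
    for (size_t i = 0; i < out_size && i < in_dims.size(); ++i)
        out[i] = static_cast<T>(in_dims[i]);
}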
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_split_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_split_const_infer.hpp
deleted file mode 100644 (file)
index 402ec46..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-#include <ie_memcpy.h>
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_const_infer_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- *@brief Implementation of Const inference for Split layer
- */
-class SplitConstInfer : public ConstInferImpl {
-public:
-    explicit SplitConstInfer(const std::string& type): ConstInferImpl(type) {}
-
-    void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        LayerParams lp {};
-        SplitLayer layer(lp);
-        layer.params = params;
-        layer.type = _type;
-        _validator->parseParams(&layer);
-
-        auto inputBlob = inData.front();
-        Precision precision = inputBlob->getTensorDesc().getPrecision();
-
-        switch (precision.size()) {
-        case 4:
-            split_copy_impl<int32_t>(layer._axis, inputBlob, outData);
-            break;
-        case 2:
-            split_copy_impl<int16_t>(layer._axis, inputBlob, outData);
-            break;
-        case 1:
-            split_copy_impl<int8_t>(layer._axis, inputBlob, outData);
-            break;
-        default:
-            THROW_IE_EXCEPTION << "unsupported precision";
-        }
-    }
-
-    template <typename data_t>
-    static void split_copy_impl(size_t axis, const Blob::CPtr& inBlob, const std::vector<Blob::Ptr>& outData) {
-        SizeVector inShape = inBlob->getTensorDesc().getDims();
-        const auto* inBuffer = inBlob->cbuffer().as<data_t*>();
-
-        size_t outerSize = 1;
-        for (int i = 0; i < axis; i++) outerSize *= inShape[i];
-
-        for (size_t osIdx = 0; osIdx < outerSize; osIdx++) {
-            for (auto& outBlob : outData) {
-                auto* outBuffer = outBlob->buffer().as<data_t*>();
-                size_t innerSize = outBlob->size() / outerSize;
-
-                for (size_t j = 0; j < innerSize; j++, inBuffer++) {
-                    outBuffer[osIdx * innerSize + j] = *inBuffer;
-                }
-            }
-        }
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
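Note that split_copy_impl works on byte-width buckets rather than real element types: any 4-byte precision goes through int32_t, and so on, because the copy never interprets the values. A sequential sketch of the same outer/inner walk (plain C++, hypothetical signature; assumes the output sizes sum to the input size):

// Sketch: split along `axis` as nested outer/inner copies, as in split_copy_impl.
#include <cstddef>
#include <vector>

template <typename T>
void split_copy(const std::vector<T>& in, const std::vector<size_t>& in_dims,
                size_t axis, std::vector<std::vector<T>>& outs) {
    size_t outer = 1;
    for (size_t i = 0; i < axis; ++i) outer *= in_dims[i];
    const T* src = in.data();  // input is consumed strictly in order
    for (size_t o = 0; o < outer; ++o) {
        for (auto& out : outs) {
            const size_t inner = out.size() / outer;  // this output's chunk per slice
            for (size_t j = 0; j < inner; ++j, ++src)
                out[o * inner + j] = *src;
        }
    }
}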
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_strided_slice_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_strided_slice_const_infer.hpp
deleted file mode 100644 (file)
index f2f855c..0000000
+++ /dev/null
@@ -1,437 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-#include <ie_memcpy.h>
-
-#include <algorithm>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_const_infer_impl.hpp"
-#include "ie_parallel.hpp"
-#include "ie_precision.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-class StridedSliceHelper {
-public:
-    StridedSliceHelper(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params) {
-        LayerParams lp {};
-        CNNLayer layer(lp);
-        layer.params = params;
-
-        if (inData.size() > 4)
-            THROW_IE_EXCEPTION << "StridedSlice constant inference error: Incorrect number of input edges!";
-
-        src_dims = inData[STRIDEDSLICE_DATA]->getTensorDesc().getDims();
-
-        bounds_size = 0;
-        if (inData.size() > 1) {
-            begin_dims = inData[STRIDEDSLICE_BEGIN]->getTensorDesc().getDims();
-            if (inData[STRIDEDSLICE_BEGIN]->getTensorDesc().getPrecision() != Precision::I32)
-                THROW_IE_EXCEPTION << "StridedSlice constant inference error: Incorrect 'begin' input precision. Only "
-                                      "I32 is supported! Current precision: "
-                                   << inData[STRIDEDSLICE_BEGIN]->getTensorDesc().getPrecision();
-            if (begin_dims.size() > 1)
-                THROW_IE_EXCEPTION << "StridedSlice constant inference error: Begin vector should be 1 dimension, got: "
-                                   << begin_dims.size() << " dimensions";
-            bounds_size = begin_dims[0];
-        }
-
-        if (inData.size() > 2) {
-            end_dims = inData[STRIDEDSLICE_END]->getTensorDesc().getDims();
-            if (inData[STRIDEDSLICE_END]->getTensorDesc().getPrecision() != Precision::I32)
-                THROW_IE_EXCEPTION << "StridedSlice constant inference error: Incorrect 'end' input precision. Only "
-                                      "I32 is supported! Current precision: "
-                                   << inData[STRIDEDSLICE_END]->getTensorDesc().getPrecision();
-            if (end_dims.size() > 1)
-                THROW_IE_EXCEPTION << "StridedSlice constant inference error: End vector should be 1 dimension, got: "
-                                   << end_dims.size() << " dimensions";
-            if (begin_dims[0] != end_dims[0])
-                THROW_IE_EXCEPTION
-                    << "StridedSlice constant inference error: Begin vector size should be equal end vector size";
-        }
-
-        if (inData.size() > 3) {
-            stride_dims = inData[STRIDEDSLICE_STRIDE]->getTensorDesc().getDims();
-            if (inData[STRIDEDSLICE_STRIDE]->getTensorDesc().getPrecision() != Precision::I32)
-                THROW_IE_EXCEPTION << "StridedSlice constant inference error: Incorrect 'strides' input precision. "
-                                      "Only I32 is supported! Current precision: "
-                                   << inData[STRIDEDSLICE_STRIDE]->getTensorDesc().getPrecision();
-            if (stride_dims.size() > 1)
-                THROW_IE_EXCEPTION << "StridedSlice constant inference error: End vector should be 1 dimension, got: "
-                                   << stride_dims.size() << " dimensions";
-            if (begin_dims[0] != stride_dims[0])
-                THROW_IE_EXCEPTION
-                    << "StridedSlice constant inference error: Stride vector size should be equal begin vector size";
-        }
-
-        std::string::size_type i;
-        std::string begin_mask_str = layer.GetParamAsString("begin_mask", "");
-        for (i = 0; i < begin_mask_str.size(); ++i) {
-            if (begin_mask_str[i] == '1')
-                begin_mask.push_back(1);
-            else if (begin_mask_str[i] == '0')
-                begin_mask.push_back(0);
-        }
-        for (; i < src_dims.size(); ++i) begin_mask.push_back(1);
-
-        std::string end_mask_str = layer.GetParamAsString("end_mask", "");
-        for (i = 0; i < end_mask_str.size(); ++i) {
-            if (end_mask_str[i] == '1')
-                end_mask.push_back(1);
-            else if (end_mask_str[i] == '0')
-                end_mask.push_back(0);
-        }
-        for (; i < src_dims.size(); ++i) end_mask.push_back(1);
-
-        std::string ellipsis_mask_str = layer.GetParamAsString("ellipsis_mask", "");
-        size_t ellipsis_mask_counter = 0;
-        for (i = 0; i < ellipsis_mask_str.size(); ++i) {
-            if (ellipsis_mask_str[i] == '1') {
-                ellipsis_mask_counter++;
-                ellipsis_mask.push_back(1);
-            } else if (ellipsis_mask_str[i] == '0') {
-                ellipsis_mask.push_back(0);
-            }
-        }
-        if (ellipsis_mask_counter > 1)
-            THROW_IE_EXCEPTION << " 'ellipsis_mask' must contain at most one ellipsis!";
-        for (; i < src_dims.size(); ++i) ellipsis_mask.push_back(0);
-
-        std::string new_axis_mask_str = layer.GetParamAsString("new_axis_mask", "");
-        for (i = 0; i < new_axis_mask_str.size(); ++i) {
-            if (new_axis_mask_str[i] == '1')
-                new_axis_mask.push_back(1);
-            else if (new_axis_mask_str[i] == '0')
-                new_axis_mask.push_back(0);
-        }
-        for (; i < src_dims.size(); ++i) new_axis_mask.push_back(0);
-
-        std::string shrink_axis_mask_str = layer.GetParamAsString("shrink_axis_mask", "");
-        for (i = 0; i < shrink_axis_mask_str.size(); ++i) {
-            if (shrink_axis_mask_str[i] == '1')
-                shrink_axis_mask.push_back(1);
-            else if (shrink_axis_mask_str[i] == '0')
-                shrink_axis_mask.push_back(0);
-        }
-        for (; i < src_dims.size(); ++i) shrink_axis_mask.push_back(0);
-
-        int new_axis = 0;
-        for (auto& na : new_axis_mask) new_axis += na;
-
-        shrink_axis = 0;
-        for (auto& sa : shrink_axis_mask) shrink_axis += sa;
-        max_dims = src_dims.size() + new_axis;
-
-        //  ellipsis_mask contains at most one ellipsis, so take its first (and only) position
-        ellipsis_pos1 = ellipsis_pos2 = max_dims;
-        for (i = 0; i < ellipsis_mask.size(); i++) {
-            if (ellipsis_mask[i] > 0) {
-                ellipsis_pos1 = i;
-                break;
-            }
-        }
-        bounds_size -= ellipsis_pos1;
-        if (bounds_size > 0 && (max_dims - bounds_size) > ellipsis_pos1) ellipsis_pos2 = max_dims - bounds_size;
-
-        begin_dms.assign(max_dims, 0);
-        end_dms.assign(max_dims, -1);
-        stride_dms.assign(max_dims, 1);
-
-        srcStrides = inData[STRIDEDSLICE_DATA]->getTensorDesc().getBlockingDesc().getStrides();
-
-        int *begin = nullptr, *end = nullptr, *stride = nullptr;
-        if (begin_dims.size())
-            begin = inData[STRIDEDSLICE_BEGIN]->cbuffer().as<int*>() +
-                    inData[STRIDEDSLICE_BEGIN]->getTensorDesc().getBlockingDesc().getOffsetPadding();
-        if (end_dims.size())
-            end = inData[STRIDEDSLICE_END]->cbuffer().as<int*>() +
-                  inData[STRIDEDSLICE_END]->getTensorDesc().getBlockingDesc().getOffsetPadding();
-        if (stride_dims.size())
-            stride = inData[STRIDEDSLICE_STRIDE]->cbuffer().as<int*>() +
-                     inData[STRIDEDSLICE_STRIDE]->getTensorDesc().getBlockingDesc().getOffsetPadding();
-
-        int j, k, bj, ej, sj;
-        for (i = 0, j = 0, k = 0, bj = 0, ej = 0, sj = 0; i < max_dims; i++) {
-            if (i >= ellipsis_pos1 && i < ellipsis_pos2) {
-                if (new_axis_mask.size() > i && new_axis_mask[i] == 1)
-                    end_dms[i] = 0;
-                else
-                    end_dms[i] = end_dms[i] >= 0 ? end_dms[i] : src_dims[j++] + end_dms[i];
-
-                out_dims.push_back(static_cast<int>(ceil(static_cast<float>(abs(end_dms[i] - begin_dms[i]) + 1) /
-                                                         static_cast<float>(abs(stride_dms[i])))));
-                our_dims.push_back(static_cast<int>(ceil(static_cast<float>(abs(end_dms[i] - begin_dms[i]) + 1) /
-                                                         static_cast<float>(abs(stride_dms[i])))));
-                k = ellipsis_pos1;
-            } else {
-                stride_dms[i] = (stride != nullptr && stride_dims[0] > sj && stride[sj] != 0) ? stride[sj++] : 1;
-
-                if (begin_mask.size() > j && begin_mask[j] == 0)
-                    begin_dms[i] = stride_dms[i] > 0 ? 0 : -1;
-                else
-                    begin_dms[i] = (begin != nullptr && begin_dims[0] > bj) ? begin[bj] : (stride_dms[i] > 0 ? 0 : -1);
-                bj++;
-                begin_dms[i] = begin_dms[i] >= 0 ? begin_dms[i] : src_dims[j] + begin_dms[i];
-                //  Clipping 'begin'
-                details::clipping(&begin_dms[i], 0, src_dims[j]);
-
-                if (end_mask.size() > j && end_mask[j] == 0) {
-                    end_dms[i] = stride_dms[i] > 0 ? -1 : 0;
-                } else {
-                    int end_dms_tmp = (end != nullptr && end_dims[0] > ej)
-                                          ? (stride_dms[i] > 0 ? end[ej] - 1 : end[ej] + 1)
-                                          : end_dms[i];
-                    end_dms[i] = (end != nullptr && end_dims[0] > ej) ? end_dms_tmp : (stride_dms[i] > 0 ? -1 : 0);
-                }
-                ej++;
-                end_dms[i] = end_dms[i] >= 0 ? end_dms[i] : src_dims[j] + end_dms[i];
-                //  Clipping 'end'
-                details::clipping(&end_dms[i], 0, src_dims[j]);
-
-                if (new_axis_mask.size() > i && new_axis_mask[i] == 1)
-                    end_dms[i] = 0;
-                else
-                    j++;
-
-                if (shrink_axis_mask.size() > k && shrink_axis_mask[k] == 1) {
-                    end_dms[i] = begin_dms[i];
-                    if (max_dims == 1) {
-                        out_dims.push_back(
-                            static_cast<int>(ceil(static_cast<float>(abs(end_dms[i] - begin_dms[i]) + 1) /
-                                                  static_cast<float>(abs(stride_dms[i])))));
-                    }
-                } else {
-                    out_dims.push_back(static_cast<int>(ceil(static_cast<float>(abs(end_dms[i] - begin_dms[i]) + 1) /
-                                                             static_cast<float>(abs(stride_dms[i])))));
-                }
-
-                our_dims.push_back(static_cast<int>(ceil(static_cast<float>(abs(end_dms[i] - begin_dms[i]) + 1) /
-                                                         static_cast<float>(abs(stride_dms[i])))));
-                k++;
-            }
-        }
-    }
-
-    SizeVector getOutputShape() {
-        return out_dims;
-    }
-
-    template <class src_t, class dst_t>
-    void exec_strided_slice(const std::vector<Blob::CPtr>& inData, std::vector<Blob::Ptr>& outData) {
-        const src_t* src_data = inData[STRIDEDSLICE_DATA]->cbuffer().as<const src_t*>() +
-                                inData[STRIDEDSLICE_DATA]->getTensorDesc().getBlockingDesc().getOffsetPadding();
-
-        dst_t* dst_data =
-            outData[0]->cbuffer().as<dst_t*>() + outData[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
-
-        if (src_dims.size() == max_dims && shrink_axis == 0 && stride_dms[stride_dms.size() - 1] == 1 &&
-            stride_dms.size() > 1)
-            strided_slice_vp(src_data, dst_data);
-        else if (src_dims.size() == max_dims && shrink_axis == 0)
-            strided_slice_p(src_data, dst_data);
-        else
-            strided_slice(src_data, dst_data, our_dims);
-    }
-
-    void infer(const std::vector<Blob::CPtr>& inData, std::vector<Blob::Ptr>& outData) {
-        if (outData.size() != 1)
-            THROW_IE_EXCEPTION << "StridedSlice constant inference error: Incorrect number of output edges!";
-        dst_dims = outData[0]->getTensorDesc().getDims();
-        size_t range = out_dims.size() < dst_dims.size() ? out_dims.size() : dst_dims.size();
-        for (size_t i = 0; i < range; i++) {
-            if (out_dims[i] != dst_dims[i])
-                THROW_IE_EXCEPTION << "StridedSlice constant inference error: inferred and provided output shapes mismatch";
-        }
-        dstStrides = outData[0]->getTensorDesc().getBlockingDesc().getStrides();
-        if (dst_dims.size() == 1 && dst_dims[0] == 1) dstStrides.push_back(1);
-
-        auto compare =
-            getPrecisionMask(inData[0]->getTensorDesc().getPrecision(), outData[0]->getTensorDesc().getPrecision());
-        switch (compare) {
-        case getPrecisionMask(Precision::FP32, Precision::FP32):
-            exec_strided_slice<PrecisionTrait<Precision::FP32>::value_type,
-                               PrecisionTrait<Precision::FP32>::value_type>(inData, outData);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::I32):
-            exec_strided_slice<PrecisionTrait<Precision::I32>::value_type, PrecisionTrait<Precision::I32>::value_type>(
-                inData, outData);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::I64):
-            exec_strided_slice<PrecisionTrait<Precision::I32>::value_type, PrecisionTrait<Precision::I64>::value_type>(
-                inData, outData);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::U64):
-            exec_strided_slice<PrecisionTrait<Precision::I32>::value_type, PrecisionTrait<Precision::U64>::value_type>(
-                inData, outData);
-            break;
-        default:
-            THROW_IE_EXCEPTION << "StridedSlice constant inference error: Unsupported precision configuration:"
-                               << " input precision: " << inData[0]->getTensorDesc().getPrecision()
-                               << " output precision: " << outData[0]->getTensorDesc().getPrecision();
-        }
-    }
-
-private:
-    template <class src_t, class dst_t>
-    void strided_slice(const src_t* src_data, dst_t* dst_data, std::vector<size_t>& dims) {
-        size_t i;
-        int j;
-        size_t work_amount_dst = (dstStrides.empty() && dst_dims.empty()) ? 1 : dstStrides[0] * dst_dims[0];
-        SizeVector counters(max_dims, 0);
-
-        for (size_t iwork = 0; iwork < work_amount_dst; ++iwork) {
-            int src_idx = 0;
-            for (i = 0, j = 0; i < max_dims; ++i) {
-                src_idx += (begin_dms[i] + counters[i] * stride_dms[i]) * srcStrides[j];
-                if (!(new_axis_mask.size() > i && new_axis_mask[i] == 1)) j++;
-            }
-
-            dst_data[iwork] = src_data[src_idx];
-
-            for (j = max_dims - 1; j >= 0; j--) {
-                counters[j]++;
-                if (counters[j] < dims[j])
-                    break;
-                else
-                    counters[j] = 0;
-            }
-        }
-    }
-
-    template <class src_t, class dst_t>
-    void strided_slice_vp(const src_t* src_data, dst_t* dst_data) {
-        //  Vectorized copy
-        size_t dims_size_1 = dst_dims.size() - 1;
-        size_t dataLength = dst_dims[dims_size_1];
-        size_t work_amount_dst = dstStrides[0] * dst_dims[0] / dst_dims[dims_size_1];
-
-        parallel_nt(0, [&](const int ithr, const int nthr) {
-            size_t start = 0, end = 0;
-            SizeVector counters(dims_size_1, 0);
-            splitter(work_amount_dst, nthr, ithr, start, end);
-            int src_idx = begin_dms[dims_size_1];
-            for (int j = dims_size_1 - 1, i = start; j >= 0; j--) {
-                counters[j] = i % dst_dims[j];
-                src_idx += (begin_dms[j] + counters[j] * stride_dms[j]) * srcStrides[j];
-                i /= dst_dims[j];
-            }
-
-            for (size_t iwork = start, dst_idx = start * dataLength, i = 1; iwork < end;
-                 ++iwork, dst_idx += dataLength) {
-                //  Raw byte copy; assumes src_t and dst_t have the same element size
-                memcpy(&dst_data[dst_idx], &src_data[src_idx], sizeof(src_t) * dataLength);
-                for (int j = dims_size_1 - 1; j >= 0; j--) {
-                    counters[j]++;
-                    if (counters[j] < dst_dims[j]) {
-                        src_idx += stride_dms[j] * srcStrides[j];
-                        break;
-                    } else {
-                        counters[j] = i = 0;
-                    }
-                }
-                if (!i) {
-                    for (src_idx = begin_dms[dims_size_1]; i < dims_size_1; ++i)
-                        src_idx += (begin_dms[i] + counters[i] * stride_dms[i]) * srcStrides[i];
-                }
-            }
-        });
-    }
-
-    template <class src_t, class dst_t>
-    void strided_slice_p(const src_t* src_data, dst_t* dst_data) {
-        size_t dims_size = dst_dims.size();
-        size_t work_amount_dst = dstStrides[0] * dst_dims[0];
-
-        parallel_nt(0, [&](const int ithr, const int nthr) {
-            size_t start = 0, end = 0;
-            SizeVector counters(dims_size, 0);
-            splitter(work_amount_dst, nthr, ithr, start, end);
-            int src_idx = 0;
-            for (int j = dims_size - 1, i = start; j >= 0; j--) {
-                counters[j] = i % dst_dims[j];
-                src_idx += (begin_dms[j] + counters[j] * stride_dms[j]) * srcStrides[j];
-                i /= dst_dims[j];
-            }
-
-            for (size_t iwork = start, dst_idx = start, i = 1; iwork < end; ++iwork, dst_idx++) {
-                dst_data[dst_idx] = src_data[src_idx];
-                for (int j = dims_size - 1; j >= 0; j--) {
-                    counters[j]++;
-                    if (counters[j] < dst_dims[j]) {
-                        src_idx += stride_dms[j] * srcStrides[j];
-                        break;
-                    } else {
-                        counters[j] = i = 0;
-                    }
-                }
-                if (!i) {
-                    for (src_idx = 0; i < dims_size; ++i)
-                        src_idx += (begin_dms[i] + counters[i] * stride_dms[i]) * srcStrides[i];
-                }
-            }
-        });
-    }
-
-private:
-    const size_t STRIDEDSLICE_DATA = 0;
-    const size_t STRIDEDSLICE_BEGIN = 1;
-    const size_t STRIDEDSLICE_END = 2;
-    const size_t STRIDEDSLICE_STRIDE = 3;
-
-    SizeVector begin_dims;
-    SizeVector end_dims;
-    SizeVector stride_dims;
-
-    SizeVector begin_mask;
-    SizeVector end_mask;
-    SizeVector ellipsis_mask;
-    SizeVector new_axis_mask;
-    SizeVector shrink_axis_mask;
-    int shrink_axis;
-
-    SizeVector src_dims;
-    SizeVector dst_dims;
-    std::vector<int> begin_dms;
-    std::vector<int> end_dms;
-    std::vector<int> stride_dms;
-    SizeVector srcStrides;
-    SizeVector dstStrides;
-    size_t bounds_size;
-    size_t max_dims;
-    size_t ellipsis_pos1, ellipsis_pos2;
-
-    InferenceEngine::SizeVector out_dims;
-    InferenceEngine::SizeVector our_dims;
-};
-
-/**
- *@brief Implementation of Const inference for StridedSlice layer
- */
-class StridedSliceConstInfer : public ConstInferImpl {
-public:
-    explicit StridedSliceConstInfer(const std::string& type): ConstInferImpl(type) {}
-
-    void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        LayerParams lp {};
-        StridedSliceLayer layer(lp);
-        layer.params = params;
-        layer.type = _type;
-        _validator->parseParams(&layer);
-
-        StridedSliceHelper helper(inData, params);
-        helper.infer(inData, outData);
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
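All three copy paths above share one gather rule: the source offset for an output coordinate is the sum over axes of (begin[i] + counter[i] * stride[i]) * src_strides[i]. A minimal sequential sketch of the generic fallback (plain C++, hypothetical signature; masks already resolved into begin/stride/out_dims, and offsets assumed in bounds):

// Sketch: generic strided-slice gather with a row-major output counter.
#include <cstddef>
#include <vector>

template <typename T>
std::vector<T> strided_slice(const std::vector<T>& src,
                             const std::vector<size_t>& src_strides,
                             const std::vector<int>& begin,
                             const std::vector<int>& stride,
                             const std::vector<size_t>& out_dims) {
    size_t total = 1;
    for (size_t d : out_dims) total *= d;
    std::vector<T> dst(total);
    std::vector<size_t> cnt(out_dims.size(), 0);
    for (size_t iwork = 0; iwork < total; ++iwork) {
        long long src_idx = 0;  // signed: stride may be negative
        for (size_t i = 0; i < out_dims.size(); ++i)
            src_idx += (begin[i] + static_cast<long long>(cnt[i]) * stride[i]) *
                       static_cast<long long>(src_strides[i]);
        dst[iwork] = src[static_cast<size_t>(src_idx)];
        for (size_t j = out_dims.size(); j-- > 0;) {  // increment, last axis fastest
            if (++cnt[j] < out_dims[j]) break;
            cnt[j] = 0;
        }
    }
    return dst;
}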
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_sub_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_sub_const_infer.hpp
deleted file mode 100644 (file)
index 5a7b046..0000000
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-#include <precision_utils.h>
-
-#include <ie_precision.hpp>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_const_infer_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-class SubConstInfer : public ConstInferImpl {
-public:
-    explicit SubConstInfer(const std::string& type): ConstInferImpl(type) {}
-
-    struct fp16tofp32 {
-        inline float operator()(ie_fp16 value) {
-            return PrecisionUtils::f16tof32(value);
-        }
-    };
-
-    struct fp32tofp16 {
-        inline ie_fp16 operator()(float value) {
-            return PrecisionUtils::f32tof16(value);
-        }
-    };
-
-    template <typename dataType>
-    struct noConversion {
-        inline dataType operator()(dataType value) {
-            return value;
-        }
-    };
-
-    template <typename inDatatype1, typename inDatatype2, typename outDatatype, class ConversionInData1,
-              class ConversionInData2, class ConversionOutData>
-    void sub(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-             const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) {
-        auto* firstBlobBuffer = inData[0]->cbuffer().as<inDatatype1*>();
-        auto* secondBlobBuffer = inData[1]->cbuffer().as<inDatatype2*>();
-
-        if (!firstBlobBuffer || !secondBlobBuffer) {
-            THROW_IE_EXCEPTION << "empty input data";
-        }
-
-        auto outBlob = *outData.begin();
-        auto* outBuffer = outBlob->buffer().as<outDatatype*>();
-        if (!outBuffer) THROW_IE_EXCEPTION << "empty output data";
-
-        BroadcastOffset outOff(outBlob->getTensorDesc().getDims(), outBlob->getTensorDesc().getDims());
-        BroadcastOffset inOff1(inData[0]->getTensorDesc().getDims(), outBlob->getTensorDesc().getDims());
-        BroadcastOffset inOff2(inData[1]->getTensorDesc().getDims(), outBlob->getTensorDesc().getDims());
-
-        for (size_t i = 0; i < outBlob->size(); i++) {
-            SizeVector offsetDims = outOff.offset_dims(i);
-            outBuffer[outOff.offset(offsetDims)] =
-                ConversionOutData()(ConversionInData1()(firstBlobBuffer[inOff1.offset(offsetDims)]) -
-                                    ConversionInData2()(secondBlobBuffer[inOff2.offset(offsetDims)]));
-        }
-    }
-
-    void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        size_t numInputs = inData.size();
-        if (inData.size() != 2)
-            THROW_IE_EXCEPTION << "Unsupported number of inputs: " << numInputs << ". 2 inputs is supported";
-
-        auto compare =
-            getPrecisionMask(inData[0]->getTensorDesc().getPrecision(), inData[1]->getTensorDesc().getPrecision(),
-                             outData[0]->getTensorDesc().getPrecision());
-
-        switch (compare) {
-        case getPrecisionMask(Precision::U8, Precision::U8, Precision::U8):
-            sub<uint8_t, uint8_t, uint8_t, noConversion<uint8_t>, noConversion<uint8_t>, noConversion<uint8_t>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::U8, Precision::I32, Precision::I32):
-            sub<uint8_t, int, int, noConversion<uint8_t>, noConversion<int>, noConversion<int>>(inData, params, blobs,
-                                                                                                outData);
-            break;
-        case getPrecisionMask(Precision::U8, Precision::I64, Precision::I64):
-            sub<uint8_t, long long int, long long int, noConversion<uint8_t>, noConversion<long long int>,
-                noConversion<long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::U8, Precision::U64, Precision::U64):
-            sub<uint8_t, unsigned long long int, unsigned long long int, noConversion<uint8_t>,
-                noConversion<unsigned long long int>, noConversion<unsigned long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::U8, Precision::FP16, Precision::FP16):
-            sub<uint8_t, ie_fp16, ie_fp16, noConversion<uint8_t>, fp16tofp32, fp32tofp16>(inData, params, blobs,
-                                                                                          outData);
-            break;
-        case getPrecisionMask(Precision::U8, Precision::FP32, Precision::FP32):
-            sub<uint8_t, float, float, noConversion<uint8_t>, noConversion<float>, noConversion<float>>(inData, params,
-                                                                                                        blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I8, Precision::I8, Precision::FP32):
-            sub<int8_t, int8_t, float, noConversion<int8_t>, noConversion<int8_t>, noConversion<float>>(inData, params,
-                                                                                                        blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::U8, Precision::I32):
-            sub<int, uint8_t, int, noConversion<int>, noConversion<uint8_t>, noConversion<int>>(inData, params, blobs,
-                                                                                                outData);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::I32, Precision::I32):
-            sub<int, int, int, noConversion<int>, noConversion<int>, noConversion<int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::I64, Precision::I64):
-            sub<int, long long int, long long int, noConversion<int>, noConversion<long long int>,
-                noConversion<long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::U64, Precision::U64):
-            sub<int, unsigned long long int, unsigned long long int, noConversion<int>,
-                noConversion<unsigned long long int>, noConversion<unsigned long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::FP16, Precision::FP32):
-            sub<int, ie_fp16, float, noConversion<int>, fp16tofp32, noConversion<float>>(inData, params, blobs,
-                                                                                         outData);
-            break;
-        case getPrecisionMask(Precision::I32, Precision::FP32, Precision::FP32):
-            sub<int, float, float, noConversion<int>, noConversion<float>, noConversion<float>>(inData, params, blobs,
-                                                                                                outData);
-            break;
-
-        case getPrecisionMask(Precision::I64, Precision::U8, Precision::I64):
-            sub<long long int, uint8_t, long long int, noConversion<long long int>, noConversion<uint8_t>,
-                noConversion<long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I64, Precision::I32, Precision::I64):
-            sub<long long int, int, long long int, noConversion<long long int>, noConversion<int>,
-                noConversion<long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I64, Precision::I64, Precision::I64):
-            sub<long long int, long long int, long long int, noConversion<long long int>, noConversion<long long int>,
-                noConversion<long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I64, Precision::FP16, Precision::FP32):
-            sub<long long int, ie_fp16, float, noConversion<long long int>, fp16tofp32, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::I64, Precision::FP32, Precision::FP32):
-            sub<long long int, float, float, noConversion<long long int>, noConversion<float>, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-
-        case getPrecisionMask(Precision::U64, Precision::U8, Precision::U64):
-            sub<unsigned long long int, uint8_t, unsigned long long int, noConversion<unsigned long long int>,
-                noConversion<uint8_t>, noConversion<unsigned long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::U64, Precision::I32, Precision::U64):
-            sub<unsigned long long int, int, unsigned long long int, noConversion<unsigned long long int>, noConversion<int>,
-                noConversion<unsigned long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::U64, Precision::U64, Precision::U64):
-            sub<unsigned long long int, unsigned long long int, unsigned long long int,
-                noConversion<unsigned long long int>, noConversion<unsigned long long int>,
-                noConversion<unsigned long long int>>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::U64, Precision::FP16, Precision::FP32):
-            sub<unsigned long long int, ie_fp16, float, noConversion<unsigned long long int>, fp16tofp32, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::U64, Precision::FP32, Precision::FP32):
-            sub<unsigned long long int, float, float, noConversion<unsigned long long int>, noConversion<float>, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-
-        case getPrecisionMask(Precision::FP16, Precision::U8, Precision::FP16):
-            sub<ie_fp16, uint8_t, ie_fp16, fp16tofp32, noConversion<uint8_t>, fp32tofp16>(inData, params, blobs,
-                                                                                          outData);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::I32, Precision::FP32):
-            sub<ie_fp16, int, float, fp16tofp32, noConversion<int>, noConversion<float>>(inData, params, blobs,
-                                                                                         outData);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::I64, Precision::FP32):
-            sub<ie_fp16, long long int, float, fp16tofp32, noConversion<long long int>, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::U64, Precision::FP32):
-            sub<ie_fp16, unsigned long long int, float, fp16tofp32, noConversion<unsigned long long int>, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::FP16, Precision::FP16):
-            sub<ie_fp16, ie_fp16, ie_fp16, fp16tofp32, fp16tofp32, fp32tofp16>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::FP32, Precision::FP16):
-            sub<ie_fp16, float, ie_fp16, fp16tofp32, noConversion<float>, fp32tofp16>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP16, Precision::FP32, Precision::FP32):
-            sub<ie_fp16, float, float, fp16tofp32, noConversion<float>, noConversion<float>>(inData, params, blobs,
-                                                                                             outData);
-            break;
-
-        case getPrecisionMask(Precision::FP32, Precision::U8, Precision::FP32):
-            sub<float, uint8_t, float, noConversion<float>, noConversion<uint8_t>, noConversion<float>>(inData, params,
-                                                                                                        blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::I32, Precision::FP32):
-            sub<float, int, float, noConversion<float>, noConversion<int>, noConversion<float>>(inData, params, blobs,
-                                                                                                outData);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::I64, Precision::FP32):
-            sub<float, long long int, float, noConversion<float>, noConversion<long long int>, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::U64, Precision::FP32):
-            sub<float, unsigned long long int, float, noConversion<float>, noConversion<unsigned long long int>, noConversion<float>>(
-                inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::FP16, Precision::FP32):
-            sub<float, ie_fp16, float, noConversion<float>, fp16tofp32, noConversion<float>>(inData, params, blobs,
-                                                                                             outData);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::FP16, Precision::FP16):
-            sub<float, ie_fp16, ie_fp16, noConversion<float>, fp16tofp32, fp32tofp16>(inData, params, blobs, outData);
-            break;
-        case getPrecisionMask(Precision::FP32, Precision::FP32, Precision::FP32):
-            sub<float, float, float, noConversion<float>, noConversion<float>, noConversion<float>>(inData, params,
-                                                                                                    blobs, outData);
-            break;
-        default:
-            THROW_IE_EXCEPTION << "Unsupported precision!";
-        }
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
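
The switch above is a precision-dispatch table: getPrecisionMask packs the two input precisions and the output precision into a single integer key, and each case instantiates the templated sub kernel with matching value types and conversion functors. Below is a minimal standalone sketch of that dispatch idea; the names precisionMask and subtract are illustrative stand-ins, not the actual InferenceEngine helpers.

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    enum class Precision : uint8_t { U8 = 1, I32 = 2, FP32 = 3 };

    // Pack three precision codes into one integer so a single switch can
    // dispatch on the (input1, input2, output) combination.
    constexpr uint32_t precisionMask(Precision a, Precision b, Precision c) {
        return (static_cast<uint32_t>(a) << 16) |
               (static_cast<uint32_t>(b) << 8) |
                static_cast<uint32_t>(c);
    }

    // Element-wise subtraction; the value types are fixed per switch case.
    template <typename In1, typename In2, typename Out>
    void subtract(const std::vector<In1>& a, const std::vector<In2>& b,
                  std::vector<Out>& out) {
        for (size_t i = 0; i < out.size(); ++i)
            out[i] = static_cast<Out>(a[i]) - static_cast<Out>(b[i]);
    }

    int main() {
        std::vector<uint8_t> a = {10, 20};
        std::vector<int32_t> b = {3, 5};
        std::vector<float> out(2);
        switch (precisionMask(Precision::U8, Precision::I32, Precision::FP32)) {
        case precisionMask(Precision::U8, Precision::I32, Precision::FP32):
            subtract<uint8_t, int32_t, float>(a, b, out);
            break;
        default:
            break;  // unsupported precision combination
        }
        std::cout << out[0] << " " << out[1] << "\n";  // prints: 7 15
    }
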
diff --git a/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_tile_const_infer.hpp b/inference-engine/src/legacy_api/src/shape_infer/const_infer/ie_tile_const_infer.hpp
deleted file mode 100644 (file)
index a3296ca..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <ie_blob.h>
-#include <legacy/ie_layers.h>
-#include <ie_memcpy.h>
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "ie_const_infer_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-/**
- * @brief Implementation of Const inference for Tile layer
- */
-class TileConstInfer : public ConstInferImpl {
-public:
-    explicit TileConstInfer(const std::string& type): ConstInferImpl(type) {}
-
-    void inferImpl(const std::vector<Blob::CPtr>& inData, const std::map<std::string, std::string>& params,
-                   const std::map<std::string, Blob::Ptr>& blobs, std::vector<Blob::Ptr>& outData) override {
-        LayerParams lp {};
-        TileLayer layer(lp);
-        layer.params = params;
-        layer.type = _type;
-        _validator->parseParams(&layer);
-
-        auto inBlob = *inData.begin();
-        auto inBlobDataSize = inBlob.get()->getTensorDesc().getPrecision().size();
-        SizeVector inShape = inBlob->getTensorDesc().getDims();
-        const auto* inBuffer = inBlob->cbuffer().as<uint8_t*>();
-
-        auto outBlob = *outData.begin();
-        auto outBlobDataSize = outBlob.get()->getTensorDesc().getPrecision().size();
-        auto* outBuffer = outBlob->buffer().as<uint8_t*>();
-
-        int m_outer_dim = 1;
-        int m_inner_dim = 1;
-
-        for (int i = 0; i < layer.axis; i++) m_outer_dim *= inShape[i];
-        for (int i = layer.axis; i < inShape.size(); i++) m_inner_dim *= inShape[i];
-
-        for (int i = 0; i < m_outer_dim; ++i) {
-            for (int t = 0; t < layer.tiles; ++t) {
-                ie_memcpy(outBuffer, outBlob->byteSize(), inBuffer, m_inner_dim * inBlobDataSize);
-                outBuffer += m_inner_dim * outBlobDataSize;
-            }
-            inBuffer += m_inner_dim * inBlobDataSize;
-        }
-    }
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
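
TileConstInfer above reduces the N-dimensional tile to two numbers: everything before the tile axis collapses into an outer loop count, and everything from the axis onward becomes one contiguous inner block that is copied `tiles` times per outer step. A standalone sketch of that loop, assuming plain float data (the function name and signature are illustrative):

    #include <cstddef>
    #include <cstring>
    #include <vector>

    // View the input as [outer, inner] around `axis` and replicate each
    // contiguous inner block `tiles` times into the output buffer.
    std::vector<float> tile(const std::vector<float>& in,
                            const std::vector<size_t>& shape,
                            size_t axis, size_t tiles) {
        size_t outer = 1, inner = 1;
        for (size_t i = 0; i < axis; ++i) outer *= shape[i];
        for (size_t i = axis; i < shape.size(); ++i) inner *= shape[i];
        std::vector<float> out(outer * tiles * inner);
        const float* src = in.data();
        float* dst = out.data();
        for (size_t o = 0; o < outer; ++o) {
            for (size_t t = 0; t < tiles; ++t) {
                std::memcpy(dst, src, inner * sizeof(float));
                dst += inner;
            }
            src += inner;  // move to the next inner block of the input
        }
        return out;
    }
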
diff --git a/inference-engine/src/legacy_api/src/shape_infer/ie_reshape_io_controllers.cpp b/inference-engine/src/legacy_api/src/shape_infer/ie_reshape_io_controllers.cpp
deleted file mode 100644 (file)
index 41ea15c..0000000
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <set>
-#include <string>
-#include <vector>
-
-#include <blob_factory.hpp>
-
-#include <legacy/ie_layers.h>
-#include <ie_layer_validators.hpp>
-#include "shape_infer/ie_reshape_io_controllers.hpp"
-
-using namespace InferenceEngine;
-using namespace ShapeInfer;
-
-void DefaultChecker::run(const std::vector<DataPtr>& dataVec, const std::string& layerName) {
-    std::string errorBase = "Failed to init controller for reshaping layer `" + layerName + "`";
-    if (dataVec.empty()) THROW_IE_EXCEPTION << errorBase + ": vector of data is empty";
-    for (const auto& data : dataVec) {
-        if (!data) THROW_IE_EXCEPTION << errorBase + ": pointer to the data is null";
-    }
-}
-
-InputController::InputController(const std::vector<DataPtr>& dataVec, const std::string& layerName,
-                                 const DefaultChecker::Ptr& checker)
-    : _dataVec(dataVec), _layerName(layerName) {
-    checker->run(_dataVec, layerName);
-    for (const auto& data : _dataVec) {
-        if (data) {
-            _dataNames.push_back(data->getName());
-            SizeVector dims = data->getTensorDesc().getDims();
-            _irShapes.push_back(dims);
-            // TODO: blobs should probably be created with dimensions here, not at the getBlobs stage
-            _inferedData.push_back(nullptr);
-        }
-    }
-    _shapes = _irShapes;
-}
-
-void InputController::setShapeByName(const SizeVector& shape, const std::string& dataName) {
-    long pos = getPositionByName(dataName);
-    _shapes[pos] = shape;
-}
-
-SizeVector InputController::getShapeByName(const std::string& dataName) {
-    long pos = getPositionByName(dataName);
-    return _shapes[pos];
-}
-
-std::vector<SizeVector> InputController::getShapes(bool check) {
-    if (check) checkCorrespondence();
-    return _shapes;
-}
-
-void InputController::applyChanges() {
-    checkCorrespondence();
-    for (int i = 0; i < _dataVec.size(); i++) {
-        auto data = _dataVec[i];
-        if (data) data->setDims(_shapes[i]);
-    }
-}
-
-void InputController::checkCorrespondence() {
-    if (_shapes.size() != _dataVec.size()) {
-        THROW_IE_EXCEPTION << "ReshapeLauncher: Number of data(" << _dataVec.size()
-                           << ") doesn't match with number of shapes(" << _shapes.size() << ") for layer '"
-                           << _layerName << "'!";
-    }
-    // TODO: iterate and check for emptiness and size matching
-}
-
-void InputController::reset() {
-    _shapes = _irShapes;
-}
-
-std::vector<SizeVector> InputController::getIRShapes() {
-    return _irShapes;
-}
-
-SizeVector InputController::getIRShapeByName(const std::string& dataName) {
-    long pos = getPositionByName(dataName);
-    return _irShapes[pos];
-}
-
-long InputController::getPositionByName(const std::string& dataName) {
-    auto pos = std::distance(_dataNames.begin(), std::find(_dataNames.begin(), _dataNames.end(), dataName));
-    if (pos < 0 || pos >= _dataNames.size()) {
-        THROW_IE_EXCEPTION << "Failed to find shape that corresponds Data name=" << dataName;
-    }
-    return pos;
-}
-
-void InputController::setShapeByIndex(const SizeVector& shape, size_t index) {
-    size_t numShapes = _shapes.size();
-    if (index >= numShapes) {
-        THROW_IE_EXCEPTION << "Failed to set shape for index(" << index
-                           << ") that is more than number of shapes: " << numShapes;
-    }
-    _shapes[index] = shape;
-}
-
-bool InputController::isDataAvailable() {
-    if (_inferedData.empty()) return false;
-    for (const auto& data : _inferedData) {
-        if (!data)
-            return false;
-        else if (data->cbuffer() == nullptr)
-            return false;
-    }
-    return true;
-}
-
-std::vector<Blob::CPtr> InputController::getBlobs(bool check) {
-    if (check) checkCorrespondence();
-    for (int i = 0; i < _dataVec.size(); i++) {
-        if (_inferedData[i] == nullptr || _inferedData[i]->cbuffer() == nullptr) {
-            TensorDesc desc = _dataVec[i]->getTensorDesc();
-            desc.setDims(_shapes[i]);
-            // special case of Shape layer: no input data, but the blob carries info about dimensions, layout, etc.
-            auto blob = make_blob_with_precision(desc);
-            _inferedData[i] = blob;
-        }
-    }
-    return _inferedData;
-}
-
-void InputController::setBlobByName(const Blob::CPtr& blob, const std::string& dataName) {
-    long pos = getPositionByName(dataName);
-    _inferedData[pos] = blob;
-}
-
-OutputController::OutputController(const std::vector<DataPtr>& data, const std::string& layerName,
-                                   const DefaultChecker::Ptr& checker)
-    : InputController(data, layerName, checker) {}
-
-void OutputController::propagateShapes(const std::set<ReshapeLauncher::Ptr>& launchers) {
-    checkCorrespondence();
-    unsigned idx = 0;
-    for (auto const& outData : _dataVec) {
-        for (auto const& inputTo : getInputTo(outData)) {
-            CNNLayerPtr layer = inputTo.second;
-            if (layer == nullptr) {
-                THROW_IE_EXCEPTION << "Failed to propagate shapes for layer (" << inputTo.first
-                                   << "): connected layer is null";
-            }
-            auto layerName = layer->name;
-            auto foundLauncher =
-                std::find_if(launchers.begin(), launchers.end(), [&layerName](const ReshapeLauncher::Ptr& launcher) {
-                    return launcher->getLayerName() == layerName;
-                });
-            if (foundLauncher == launchers.end())
-                THROW_IE_EXCEPTION << "Failed to find ReshapeLauncher for layer: '" << layerName << "'";
-            (*foundLauncher)->setShapeByName(_shapes[idx], outData->getName());
-        }
-        idx++;
-    }
-}
-
-// TODO: combine with propagateShapes
-void OutputController::propagateBlobs(const std::set<ReshapeLauncher::Ptr>& launchers) {
-    unsigned idx = 0;
-    for (auto const& outData : _dataVec) {
-        for (auto const& inputTo : getInputTo(outData)) {
-            CNNLayerPtr layer = inputTo.second;
-            if (layer == nullptr) {
-                THROW_IE_EXCEPTION << "Failed to propagate shapes for layer (" << inputTo.first
-                                   << "): connected layer is null";
-            }
-            auto layerName = layer->name;
-            auto foundLauncher =
-                std::find_if(launchers.begin(), launchers.end(), [&layerName](const ReshapeLauncher::Ptr& launcher) {
-                    return launcher->getLayerName() == layerName;
-                });
-            if (foundLauncher == launchers.end())
-                THROW_IE_EXCEPTION << "Failed to find ReshapeLauncher for layer: '" << layerName << "'";
-            (*foundLauncher)->setBlobByName(_inferedData[idx], outData->getName());
-        }
-        idx++;
-    }
-}
-
-void OutputController::setShapes(const std::vector<SizeVector>& shapes) {
-    _shapes = shapes;
-}
-
-void OutputController::setBlobs(const std::vector<Blob::Ptr>& blobs) {
-    _inferedData.clear();
-    for (const auto& blob : blobs) {
-        _inferedData.push_back(blob);
-    }
-}
-
-std::vector<Blob::Ptr> OutputController::createBlobs() {
-    std::vector<Blob::Ptr> blobs;
-    for (int i = 0; i < _dataVec.size(); i++) {
-        TensorDesc desc = _dataVec[i]->getTensorDesc();
-        desc.setDims(_shapes[i]);
-        auto blob = make_blob_with_precision(desc);
-        blob->allocate();
-        blobs.push_back(blob);
-    }
-    return blobs;
-}
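
OutputController::propagateShapes above is where shape inference becomes a graph traversal: for every output Data, each consuming layer's launcher is looked up by name and handed the freshly computed shape as its input. A reduced, self-contained sketch of that name-based hand-off; Launcher here is an illustrative stand-in for ReshapeLauncher:

    #include <map>
    #include <memory>
    #include <set>
    #include <stdexcept>
    #include <string>
    #include <vector>

    struct Launcher {
        std::string name;
        std::map<std::string, std::vector<size_t>> inputShapes;
        // Receives the shape computed for one of this layer's input Data objects.
        void setShapeByName(const std::vector<size_t>& shape, const std::string& dataName) {
            inputShapes[dataName] = shape;
        }
    };

    using LauncherPtr = std::shared_ptr<Launcher>;

    // Hand the shape computed for `dataName` to the consumer layer's launcher.
    void propagateTo(const std::string& consumerLayer, const std::string& dataName,
                     const std::vector<size_t>& shape, const std::set<LauncherPtr>& launchers) {
        for (const auto& l : launchers) {
            if (l->name == consumerLayer) {
                l->setShapeByName(shape, dataName);
                return;
            }
        }
        throw std::runtime_error("Failed to find ReshapeLauncher for layer: " + consumerLayer);
    }
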
diff --git a/inference-engine/src/legacy_api/src/shape_infer/ie_reshape_io_controllers.hpp b/inference-engine/src/legacy_api/src/shape_infer/ie_reshape_io_controllers.hpp
deleted file mode 100644 (file)
index 138501d..0000000
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <list>
-#include <map>
-#include <memory>
-#include <set>
-#include <string>
-#include <vector>
-
-#include "shape_infer/ie_reshape_launcher.hpp"
-
-#include "legacy/shape_infer/built-in/ie_built_in_holder.hpp"
-#include <legacy/ie_layers.h>
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-struct ShapeDesc {
-    std::string dataName;
-    SizeVector dims;
-};
-
-class DefaultChecker {
-public:
-    using Ptr = std::shared_ptr<DefaultChecker>;
-
-    virtual void run(const std::vector<DataPtr>& inData, const std::string& layerName);
-
-    virtual ~DefaultChecker() = default;
-};
-
-class EmptyChecker : public DefaultChecker {
-public:
-    void run(const std::vector<DataPtr>& inData, const std::string& layerName) override {};
-};
-
-class InputController {
-public:
-    InputController(const std::vector<DataPtr>& dataVec, const std::string& layerName,
-                    const DefaultChecker::Ptr& checker = std::make_shared<DefaultChecker>());
-
-    virtual ~InputController() = default;
-
-    /**
-     * @brief Set shape for current reshape launcher by corresponding Data name.
-     * @param shape - shape to be set
-     * @param dataName - Data's name
-     */
-    virtual void setShapeByName(const SizeVector& shape, const std::string& dataName);
-
-    /**
-     * @brief Return calculated shape for name.
-     */
-    virtual SizeVector getShapeByName(const std::string& dataName);
-
-    /**
-     * @brief Set shape for current reshape launcher by corresponding index.
-     * @param shape - shape to be set
-     * @param index - shape's index
-     */
-    virtual void setShapeByIndex(const SizeVector& shape, size_t index);
-
-    /**
-     * @brief Returns shapes that are supposed to be set by reshape algorithm.
-     * @note Shapes are in topological order.
-     * @param check - whether a correspondence check between input data and shapes is required
-     * @return shapes
-     */
-    virtual std::vector<SizeVector> getShapes(bool check);
-
-    /**
-     * @brief Returns shapes from IR. If the Controller was initialized with irShapesOnInit=false, it accesses the
-     * Data object of the Layer; otherwise, all shapes from IR are collected at the Controller's construction.
-     * @note Shapes are in topological order.
-     * @return shapes from IR
-     */
-    virtual std::vector<SizeVector> getIRShapes();
-
-    /**
-     * @brief Returns shape from IR by corresponding Data's name
-     * @param dataName - name of Data object that holds requested shape
-     * @return shape from IR
-     */
-    virtual SizeVector getIRShapeByName(const std::string& dataName);
-
-    /**
-     * @brief Applies calculated shapes to the Data of the Layer
-     */
-    virtual void applyChanges();
-
-    /**
-     * @brief Reset vector of input shapes.
-     */
-    virtual void reset();
-
-    virtual void checkCorrespondence();
-
-    virtual bool isDataAvailable();
-
-    virtual std::vector<Blob::CPtr> getBlobs(bool check);
-
-    virtual void setBlobByName(const Blob::CPtr& blob, const std::string& name);
-
-private:
-    long getPositionByName(const std::string& dataName);
-
-protected:
-    std::vector<DataPtr> _dataVec;
-    std::vector<SizeVector> _shapes;
-    std::vector<SizeVector> _irShapes;
-    std::vector<std::string> _dataNames;
-    std::string _layerName;
-    std::vector<Blob::CPtr> _inferedData;
-};
-
-/**
- * @brief Keeps calculated output shapes, distribute (propagate) them to the connected layers, applies output shapes to
- * the Data object
- */
-class OutputController : public InputController {
-public:
-    OutputController(const std::vector<DataPtr>& inData, const std::string& layerName,
-                     const DefaultChecker::Ptr& checker = std::make_shared<DefaultChecker>());
-
-    /**
-     * @brief Set calculated output shapes as inputs for next layers
-     * @param launchers - Map of layer names to reshape launchers for that layer
-     */
-    virtual void propagateShapes(const std::set<ReshapeLauncher::Ptr>& launchers);
-
-    virtual void setShapes(const std::vector<SizeVector>& shapes);
-
-    virtual void setBlobs(const std::vector<Blob::Ptr>& blobs);
-
-    std::vector<Blob::Ptr> createBlobs();
-
-    void propagateBlobs(const std::set<ReshapeLauncher::Ptr>& set);
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
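
The controller interface above amounts to a stage/validate/commit/reset lifecycle over per-Data shapes: new shapes are staged through the setters, checkCorrespondence() validates them, applyChanges() writes them to the Data objects, and reset() rolls back to the IR shapes. A self-contained miniature of that lifecycle; MiniController is an illustrative stand-in, not the real class:

    #include <cassert>
    #include <cstddef>
    #include <utility>
    #include <vector>

    using Shape = std::vector<size_t>;

    class MiniController {
    public:
        explicit MiniController(std::vector<Shape> irShapes)
            : _ir(irShapes), _staged(std::move(irShapes)) {}

        // Stage a new shape; nothing is applied yet.
        void setShapeByIndex(const Shape& s, size_t i) { _staged.at(i) = s; }

        // Validate and commit the staged shapes (the applyChanges() step).
        void applyChanges() {
            assert(_staged.size() == _ir.size());  // checkCorrespondence()
            _ir = _staged;
        }

        // Drop staged shapes and return to the IR state (the reset() step).
        void reset() { _staged = _ir; }

    private:
        std::vector<Shape> _ir;      // shapes currently applied (from IR)
        std::vector<Shape> _staged;  // shapes computed by the reshape pass
    };

    int main() {
        MiniController c({{1, 3, 224, 224}});
        c.setShapeByIndex({2, 3, 224, 224}, 0);  // stage a new batch size
        c.applyChanges();                        // or c.reset() to roll back
    }
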
diff --git a/inference-engine/src/legacy_api/src/shape_infer/ie_reshape_launcher.cpp b/inference-engine/src/legacy_api/src/shape_infer/ie_reshape_launcher.cpp
deleted file mode 100644 (file)
index 5823988..0000000
+++ /dev/null
@@ -1,336 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <map>
-#include <memory>
-#include <set>
-#include <string>
-#include <vector>
-
-#include <debug.h>
-#include <details/ie_exception.hpp>
-
-#include <legacy/ie_layers.h>
-#include "shape_infer/ie_reshape_io_controllers.hpp"
-#include <shape_infer/const_infer/ie_const_infer_holder.hpp>
-#include "shape_infer/built-in/ie_tensor_iterator_shape_infer.hpp"
-#include "ie_layer_validators.hpp"
-#include "shape_infer/ie_reshape_launcher.hpp"
-#include "ie_reshape_launcher.hpp"
-
-using namespace InferenceEngine;
-using namespace ShapeInfer;
-
-IE_SUPPRESS_DEPRECATED_START
-
-void DefaultInitializer::check(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl) {
-    std::string errorBase = "Failed to init reshape launcher: ";
-    if (!layer) THROW_IE_EXCEPTION << errorBase + " pointer to the layer is null";
-    if (!impl) THROW_IE_EXCEPTION << errorBase + " shape infer implementation is null";
-}
-
-InputController* DefaultInitializer::createInputController(const CNNLayer* layer) {
-    std::vector<DataPtr> data;
-    for (auto const& insData : layer->insData) {
-        data.push_back(insData.lock());
-    }
-    return new InputController(data, layer->name);
-}
-
-OutputController* DefaultInitializer::createOutputController(const CNNLayer* layer) {
-    return new OutputController(layer->outData, layer->name);
-}
-
-ReshapeLauncher::ReshapeLauncher(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl,
-                                 const DefaultInitializer::Ptr& initializer)
-    : _layer(layer), _reshapeImpl(impl) {
-    initializer->check(layer, impl);
-    ConstInferHolder holder;
-    if (layer) _inferImpl = holder.getConstInferImpl(layer->type);
-    try {
-        _iController = initializer->createInputController(layer);
-        _oController = initializer->createOutputController(layer);
-    } catch (...) {
-        auto exception = std::current_exception();
-        delete _iController;
-        delete _oController;
-        std::rethrow_exception(exception);
-    }
-}
-
-ReshapeLauncher::~ReshapeLauncher() {
-    delete _iController;
-    delete _oController;
-    _iController = nullptr;
-    _oController = nullptr;
-}
-
-void ReshapeLauncher::setShapeByName(const SizeVector& shape, const std::string& dataName) {
-    _iController->setShapeByName(shape, dataName);
-}
-
-void ReshapeLauncher::setBlobByName(const Blob::CPtr& blob, const std::string& dataName) {
-    _iController->setBlobByName(blob, dataName);
-}
-
-SizeVector ReshapeLauncher::getShapeByName(const std::string& dataName) {
-    return _oController->getShapeByName(dataName);
-}
-
-void ReshapeLauncher::reshape(const std::set<ReshapeLauncher::Ptr>& launchers) {
-    ResponseDesc resp;
-    std::vector<SizeVector> outShapes;
-
-    // TODO: TensorIterator strongly requires the original layer instance because the body is not present
-    //       in the params map. The original subnetwork body is required for internal shape infer
-    TensorIteratorShapeProp* TI_shaper = dynamic_cast<TensorIteratorShapeProp*>(_reshapeImpl.get());
-    if (TI_shaper) {
-        TI_shaper->setOriginalLayer(_layer);
-    }
-
-    auto sts = _reshapeImpl->inferShapes(_iController->getBlobs(true), _layer->params, _layer->blobs, outShapes, &resp);
-    _oController->setShapes(outShapes);
-    if (sts != OK)
-        THROW_IE_EXCEPTION << "Failed to infer shapes for " + _layer->type + " layer (" + _layer->name +
-                                  ") with error: " + resp.msg;
-    _oController->propagateShapes(launchers);
-}
-
-void ReshapeLauncher::applyChanges(CNNLayer* layer) {
-    checkLayer(layer);
-    _iController->applyChanges();
-    _oController->applyChanges();
-
-    // TODO: Need to finalize result of internal body shape infer and apply
-    //       new shapes to body subnetwork
-    TensorIteratorShapeProp* TI_shaper = dynamic_cast<TensorIteratorShapeProp*>(_reshapeImpl.get());
-    if (TI_shaper) TI_shaper->apply();
-}
-
-void ReshapeLauncher::constInfer(const std::set<ReshapeLauncher::Ptr>& launchers) {
-    if ((_iController->isDataAvailable() && _layer->type != "Quantize" && _layer->type != "FakeQuantize") ||
-        _layer->type == "Const" || _layer->type == "Shape") {
-        auto outBlobs = _oController->createBlobs();
-        _oController->setBlobs(outBlobs);
-        if (!_inferImpl)
-            THROW_IE_EXCEPTION << "Failed to find reference implementation for `" + _layer->name + "` Layer with `" +
-                                      _layer->type + "` Type on constant propagation";
-        _inferImpl->infer(_iController->getBlobs(false), _layer->params, _layer->blobs, outBlobs);
-        _oController->propagateBlobs(launchers);
-    }
-}
-
-void ReshapeLauncher::reset() {
-    _iController->reset();
-    _oController->reset();
-}
-
-std::string ReshapeLauncher::getLayerName() const {
-    return _layer->name;
-}
-
-std::string ReshapeLauncher::getLayerType() const {
-    return _layer->type;
-}
-
-void ReshapeLauncher::checkLayer(CNNLayer* layer) {
-    if ((nullptr == _layer || layer == nullptr)) {
-        THROW_IE_EXCEPTION << "Can't apply changes for empty layer";
-    }
-    auto oldParams = _layer->params;
-    auto newParams = layer->params;
-    if ((!oldParams.empty() && !newParams.empty() &&
-         !std::equal(oldParams.begin(), oldParams.end(), newParams.begin())) ||
-        (_layer->name != layer->name) || (_layer->type != layer->type) || oldParams.size() != newParams.size()) {
-        THROW_IE_EXCEPTION << "Can't apply changes for layer with another params";
-    }
-}
-
-void ReshapeLauncher::setIRShapeByName(const std::string& dataName) {
-    SizeVector foundShape = _iController->getIRShapeByName(dataName);
-    _iController->setShapeByName(foundShape, dataName);
-}
-
-void ReshapeLauncher::setShapeInferImpl(const IShapeInferImpl::Ptr& impl) {
-    _reshapeImpl = impl;
-}
-
-const CNNLayer* ReshapeLauncher::getLayer() const {
-    return _layer;
-}
-
-InputController* FakeInitializer::createInputController(const CNNLayer* layer) {
-    std::vector<DataPtr> outData;
-    for (auto const& insData : layer->insData) {
-        outData.push_back(insData.lock());
-    }
-    return new InputController(outData, layer->name);
-}
-
-void FakeInitializer::check(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl) {
-    std::string errorBase = "Failed to init reshape launcher: ";
-    if (!layer) THROW_IE_EXCEPTION << errorBase + " pointer to the layer is null";
-}
-
-OutputController* FakeInitializer::createOutputController(const CNNLayer* layer) {
-    return new OutputController(layer->outData, layer->name);
-}
-
-FakeReshapeLauncher::FakeReshapeLauncher(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl)
-    : ReshapeLauncher(layer, impl, std::make_shared<FakeInitializer>()) {}
-
-void FakeReshapeLauncher::reshape(const std::set<ReshapeLauncher::Ptr>& launchers) {
-    auto iShapesIR = _iController->getIRShapes();
-    auto oShapesIR = _oController->getIRShapes();
-    auto iShapes = _iController->getShapes(true);
-
-    for (int i = 0; i < iShapes.size(); i++) {
-        auto newInShape = iShapes[i];
-        auto irInShape = iShapesIR[i];
-        bool equal = std::equal(newInShape.begin(), newInShape.end(), irInShape.begin());
-        if (!equal) {
-            THROW_IE_EXCEPTION << "Failed to infer shapes for layer with type: " << _layer->type
-                               << ". Use @IShapeInferExtension class to register shape infer function for this layer";
-        }
-    }
-
-    _oController->setShapes(oShapesIR);
-    _oController->propagateShapes(launchers);
-}
-
-void OutputOnlyInitializer::check(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl) {
-    std::string errorBase = "Failed to init reshape launcher: ";
-    if (!layer) THROW_IE_EXCEPTION << errorBase + " pointer to the layer is null";
-    if (!layer->insData.empty())
-        THROW_IE_EXCEPTION << "Failed to init reshape launcher: "
-                           << "layer type (`" + layer->type + "`) is supposed to not have inputs, but actually it has";
-}
-
-InputController* OutputOnlyInitializer::createInputController(const CNNLayer* layer) {
-    return nullptr;
-}
-
-OutputController* OutputOnlyInitializer::createOutputController(const CNNLayer* layer) {
-    return new OutputController(layer->outData, layer->name);
-}
-
-OutputOnlyReshapeLauncher::OutputOnlyReshapeLauncher(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl,
-                                                     const OutputOnlyInitializer::Ptr& initializer)
-    : ReshapeLauncher(layer, impl, initializer) {}
-
-void OutputOnlyReshapeLauncher::setShapeByName(const SizeVector& shape, const std::string& dataName) {
-    _oController->setShapeByName(shape, dataName);
-}
-
-void OutputOnlyReshapeLauncher::setBlobByName(const Blob::CPtr& blob, const std::string& dataName) {
-    _oController->setBlobByName(blob, dataName);
-}
-
-void OutputOnlyReshapeLauncher::setIRShapeByName(const std::string& dataName) {
-    SizeVector foundShape = _oController->getIRShapeByName(dataName);
-    _oController->setShapeByName(foundShape, dataName);
-}
-
-void OutputOnlyReshapeLauncher::applyChanges(CNNLayer* layer) {
-    checkLayer(layer);
-    _oController->applyChanges();
-}
-
-void OutputOnlyReshapeLauncher::reset() {
-    _oController->reset();
-}
-
-void OutputOnlyReshapeLauncher::constInfer(const std::set<ReshapeLauncher::Ptr>& launchers) {
-    if (_layer->type == "Const") {
-        auto outBlobs = _oController->createBlobs();
-        _oController->setBlobs(outBlobs);
-        if (!_inferImpl)
-            THROW_IE_EXCEPTION << "Failed to find reference implementation for `" + _layer->name + "` Layer with `" +
-                                      _layer->type + "` Type on constant propagation";
-        _inferImpl->infer({}, _layer->params, _layer->blobs, outBlobs);
-        auto shapes = _oController->getShapes(true);
-        for (int i = 0; i < outBlobs.size(); i++) {
-            outBlobs[i]->getTensorDesc().reshape(shapes[i], TensorDesc::getLayoutByDims(shapes[i]));
-        }
-        _oController->setBlobs(outBlobs);
-        _oController->propagateBlobs(launchers);
-    }
-}
-
-void InputInitializer::check(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl) {
-    OutputOnlyInitializer::check(layer, impl);
-    std::string errorBase = "Failed to init reshape launcher: layer type (`" + layer->type + "`) is not";
-    if (details::equal(layer->type, "memory")) {
-        if (!layer->GetParamAsInt("index")) THROW_IE_EXCEPTION << errorBase << " `Memory`(as input)";
-    } else if (!::details::equal(layer->type, "input")) {
-        THROW_IE_EXCEPTION << errorBase << " `Input`";
-    }
-}
-
-InputReshapeLauncher::InputReshapeLauncher(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl,
-                                           const DefaultInitializer::Ptr& initializer)
-    : OutputOnlyReshapeLauncher(layer, impl, initializer) {}
-
-void InputReshapeLauncher::reshape(const std::set<ReshapeLauncher::Ptr>& launchers) {
-    auto oShapes = _oController->getShapes(false);
-    auto oIRShapes = _oController->getIRShapes();
-    for (size_t i = 0; i < oShapes.size(); i++) {
-        if (oShapes[i].empty()) {
-            _oController->setShapeByIndex(oIRShapes[i], i);
-        }
-    }
-    _oController->propagateShapes(launchers);
-}
-
-void ConstInitializer::check(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl) {
-    OutputOnlyInitializer::check(layer, impl);
-    if (!::details::equal(layer->type, "const"))
-        THROW_IE_EXCEPTION << "Failed to init reshape launcher: layer type (`" + layer->type + "`) is not `Const`";
-}
-
-ConstReshapeLauncher::ConstReshapeLauncher(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl)
-    : OutputOnlyReshapeLauncher(layer, impl, std::make_shared<ConstInitializer>()) {}
-
-void ConstReshapeLauncher::reshape(const std::set<ReshapeLauncher::Ptr>& launchers) {
-    auto oShapesIR = _oController->getIRShapes();
-    auto oShapes = _oController->getShapes(false);
-
-    if (oShapes.empty()) {
-        _oController->setShapes(oShapesIR);
-    }
-    if (oShapes != oShapesIR) {
-        THROW_IE_EXCEPTION << "Failed to set different shapes for Const layer,"
-                           << " original shapes:" << details::dumpVec(oShapesIR)
-                           << " new shapes:" << details::dumpVec(oShapes);
-    }
-    _oController->propagateShapes(launchers);
-}
-
-void OutMemoryInitializer::check(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl) {
-    std::string errorBase = "Failed to init reshape launcher: ";
-    if (!layer) THROW_IE_EXCEPTION << errorBase + " pointer to the layer is null";
-    int index = layer->GetParamAsInt("index");
-    if (!::details::equal(layer->type, "memory") && index)
-        THROW_IE_EXCEPTION << "Failed to init reshape launcher: layer type (`" + layer->type +
-                                  "`) is not `Memory` as output";
-    if (!layer->outData.empty())
-        THROW_IE_EXCEPTION << "Failed to init reshape launcher: "
-                           << "layer type (`" + layer->type + "`) is supposed to not have outputs, but actually it has";
-}
-
-OutputController* OutMemoryInitializer::createOutputController(const CNNLayer* layer) {
-    return nullptr;
-}
-
-OutMemoryReshapeLauncher::OutMemoryReshapeLauncher(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl)
-    : ReshapeLauncher(layer, impl, std::make_shared<OutMemoryInitializer>()) {}
-
-void OutMemoryReshapeLauncher::applyChanges(CNNLayer* layer) {
-    checkLayer(layer);
-    _iController->applyChanges();
-}
-
-void OutMemoryReshapeLauncher::reset() {
-    _iController->reset();
-}
\ No newline at end of file
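
Note the manual lifetime management in ReshapeLauncher above: the constructor builds two raw controller pointers inside a try block so it can delete them and rethrow on failure, and the destructor deletes them again. A hedged sketch of the same structure with std::unique_ptr, which makes both the cleanup and the rethrow automatic; the controller types here are empty stand-ins, not the real classes:

    #include <memory>

    struct InputController {};   // stand-ins for the real controllers
    struct OutputController {};

    class Launcher {
    public:
        Launcher()
            : _in(std::make_unique<InputController>()),
              _out(std::make_unique<OutputController>()) {
            // If any later initialization throws, already-constructed members
            // are destroyed automatically; no try/catch/delete is needed.
        }
        // No user-defined destructor: unique_ptr releases both controllers.

    private:
        std::unique_ptr<InputController> _in;
        std::unique_ptr<OutputController> _out;
    };
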
diff --git a/inference-engine/src/legacy_api/src/shape_infer/ie_reshape_launcher.hpp b/inference-engine/src/legacy_api/src/shape_infer/ie_reshape_launcher.hpp
deleted file mode 100644 (file)
index 681435c..0000000
+++ /dev/null
@@ -1,284 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <list>
-#include <map>
-#include <memory>
-#include <set>
-#include <string>
-#include <vector>
-
-#include <legacy/ie_layers.h>
-#include "legacy/shape_infer/built-in/ie_built_in_holder.hpp"
-#include "shape_infer/const_infer/ie_const_infer_impl.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-class InputController;
-
-class OutputController;
-
-class DefaultInitializer {
-public:
-    using Ptr = std::shared_ptr<DefaultInitializer>;
-
-    IE_SUPPRESS_DEPRECATED_START
-    virtual void check(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl);
-    IE_SUPPRESS_DEPRECATED_END
-
-    virtual InputController* createInputController(const CNNLayer* layer);
-
-    virtual OutputController* createOutputController(const CNNLayer* layer);
-
-    virtual ~DefaultInitializer() = default;
-};
-
-/**
- * @class ReshapeLauncher
- * @brief Helper class to infer shapes for the given CNNLayer by using specified implementation.
- * Encapsulate input and output shapes, before applying it to the real CNNLayer and Data.
- */
-class ReshapeLauncher {
-public:
-    using Ptr = std::shared_ptr<ReshapeLauncher>;
-
-    IE_SUPPRESS_DEPRECATED_START
-
-    /**
-     * @brief constructor
-     * @param layer - const pointer to the layer for performing shape inference.
-     * It is used to obtain parameters, input/output shapes.
-     * @param impl - implementation of shape inference for the given layer
-     */
-    ReshapeLauncher(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl,
-                    const DefaultInitializer::Ptr& initializer = std::make_shared<DefaultInitializer>());
-
-    virtual void setShapeInferImpl(const IShapeInferImpl::Ptr& impl);
-
-    IE_SUPPRESS_DEPRECATED_END
-
-    virtual ~ReshapeLauncher();
-
-    /**
-     * @brief Set input shape for current reshape launcher.
-     * @param shape - input shape to be set
-     */
-    virtual void setShapeByName(const SizeVector& shape, const std::string& dataName);
-
-    virtual void setBlobByName(const Blob::CPtr& blob, const std::string& dataName);
-
-    /**
-     * @brief Return calculated shape for data with requested name.
-     * @return Result shape
-     */
-    virtual SizeVector getShapeByName(const std::string& dataName);
-
-    /**
-     * @brief Set input shape from IR by Data name. If there is no Data with the given name, it throws an exception.
-     * @param dataName - name of the corresponding Data.
-     */
-    virtual void setIRShapeByName(const std::string& dataName);
-
-    /**
-     * @brief Calculates output shapes and changed layer params using the input shapes that were set.
-     * Throws an exception with a description of the error if shape inference fails for any layer;
-     * on success, the computed shapes are propagated to the consuming layers' launchers.
-     * @param launchers - set of reshape launchers for the network's layers
-     */
-    virtual void reshape(const std::set<ReshapeLauncher::Ptr>& launchers);
-
-    virtual void constInfer(const std::set<ReshapeLauncher::Ptr>& launchers);
-
-    /**
-     * @brief Apply new input shapes, calculated output shapes and changed layer's params to CNNLayer and Data.
-     * @param layer - pointer to the layer for setting changes in layer's params
-     */
-    virtual void applyChanges(CNNLayer* layer);
-
-    /**
-     * @brief Reset all stored state to the initial one: input/output shapes and layer's params.
-     * Only the launcher's controllers are affected; the CNNLayer itself is not modified.
-     */
-    virtual void reset();
-
-    virtual std::string getLayerName() const;
-
-    virtual std::string getLayerType() const;
-
-    virtual const CNNLayer* getLayer() const;
-
-protected:
-    InputController* _iController = nullptr;
-    OutputController* _oController = nullptr;
-    const CNNLayer* _layer;
-
-    IE_SUPPRESS_DEPRECATED_START
-    IShapeInferImpl::Ptr _reshapeImpl;
-    IConstInferImpl::Ptr _inferImpl;
-    IE_SUPPRESS_DEPRECATED_END
-
-protected:
-    /**
-     * @brief Check that all shape infer operations were done with specified layer.
-     * @param layer - pointer to the layer to compare with
-     */
-    void checkLayer(CNNLayer* layer);
-};
-
-class FakeInitializer : public DefaultInitializer {
-public:
-    IE_SUPPRESS_DEPRECATED_START
-    void check(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl) override;
-    IE_SUPPRESS_DEPRECATED_END
-
-    InputController* createInputController(const CNNLayer* layer) override;
-
-    OutputController* createOutputController(const CNNLayer* layer) override;
-};
-
-/**
- * @class FakeReshapeLauncher
- * @brief Helper class to infer shapes for layers without registered shape infer functions.
- * Encapsulates input and output shapes before applying them to the real CNNLayer and Data.
- * If the input shape is the same as in IR, it takes the output shape from IR as is.
- * It sets the batch size as the first dimension of all outputs if:
- *      1) the first dimension of all inputs is the same (assumed to be the batch size);
- *      2) the calculated input shape of the unsupported layer differs from the original input
- * shape in IR only in the first dimension.
- */
-class FakeReshapeLauncher : public ReshapeLauncher {
-public:
-    using Ptr = std::shared_ptr<FakeReshapeLauncher>;
-
-    IE_SUPPRESS_DEPRECATED_START
-    FakeReshapeLauncher(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl);
-    IE_SUPPRESS_DEPRECATED_END
-
-    void reshape(const std::set<ReshapeLauncher::Ptr>& launchers) override;
-
-    void constInfer(const std::set<ReshapeLauncher::Ptr>& launchers) override {}
-};
-
-class OutputOnlyInitializer : public DefaultInitializer {
-public:
-    IE_SUPPRESS_DEPRECATED_START
-    void check(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl) override;
-    IE_SUPPRESS_DEPRECATED_END
-
-    InputController* createInputController(const CNNLayer* layer) override;
-
-    OutputController* createOutputController(const CNNLayer* layer) override;
-};
-
-/**
- * @class OutputOnlyReshapeLauncher
- * @brief Helper class to infer shapes for layers without inputs. It creates the output controller only; the input one is null.
- */
-class OutputOnlyReshapeLauncher : public ReshapeLauncher {
-public:
-    using Ptr = std::shared_ptr<OutputOnlyReshapeLauncher>;
-
-    IE_SUPPRESS_DEPRECATED_START
-    OutputOnlyReshapeLauncher(
-        const CNNLayer* layer, const IShapeInferImpl::Ptr& impl,
-        const OutputOnlyInitializer::Ptr& initializer = std::make_shared<OutputOnlyInitializer>());
-    IE_SUPPRESS_DEPRECATED_END
-
-    void setShapeByName(const SizeVector& shape, const std::string& dataName) override;
-
-    void setIRShapeByName(const std::string& dataName) override;
-
-    void applyChanges(CNNLayer* layer) override;
-
-    void reset() override;
-
-    void setBlobByName(const Blob::CPtr& blob, const std::string& dataName) override;
-
-    void constInfer(const std::set<ReshapeLauncher::Ptr>& launchers) override;
-};
-
-class InputInitializer : public OutputOnlyInitializer {
-public:
-    IE_SUPPRESS_DEPRECATED_START
-    void check(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl) override;
-    IE_SUPPRESS_DEPRECATED_END
-};
-
-/**
- * @class InputReshapeLauncher
- * @brief Helper class to infer shapes for input layers. Supported layer types: `Input` or `Memory` (as input only,
- * if index=1). It takes the newly given input shape and propagates it to connected layers. If no shape is set, it
- * takes shapes from IR.
- */
-class InputReshapeLauncher : public OutputOnlyReshapeLauncher {
-public:
-    using Ptr = std::shared_ptr<InputReshapeLauncher>;
-
-    IE_SUPPRESS_DEPRECATED_START
-    InputReshapeLauncher(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl,
-                         const DefaultInitializer::Ptr& initializer = std::make_shared<InputInitializer>());
-    IE_SUPPRESS_DEPRECATED_END
-
-    void reshape(const std::set<ReshapeLauncher::Ptr>& launchers) override;
-};
-
-class ConstInitializer : public OutputOnlyInitializer {
-public:
-    IE_SUPPRESS_DEPRECATED_START
-    void check(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl) override;
-    IE_SUPPRESS_DEPRECATED_END
-};
-
-/**
- * @class ConstReshapeLauncher
- * @brief Helper class to infer shapes for layers with Const type.
- * It checks that the newly given shape is the same as in IR: the launcher fails if not, and propagates the shape
- * to connected layers otherwise. If no shape is set, it propagates shapes from IR.
- */
-class ConstReshapeLauncher : public OutputOnlyReshapeLauncher {
-public:
-    using Ptr = std::shared_ptr<ConstReshapeLauncher>;
-
-    IE_SUPPRESS_DEPRECATED_START
-    ConstReshapeLauncher(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl);
-    IE_SUPPRESS_DEPRECATED_END
-
-    void reshape(const std::set<ReshapeLauncher::Ptr>& launchers) override;
-};
-
-class OutMemoryInitializer : public DefaultInitializer {
-    IE_SUPPRESS_DEPRECATED_START
-    void check(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl) override;
-    IE_SUPPRESS_DEPRECATED_END
-
-    OutputController* createOutputController(const CNNLayer* layer) override;
-};
-
-/**
- * @class OutMemoryReshapeLauncher
- * @brief Helper class to infer shapes for layers with Memory type (as outputs only, if index=0).
- * It sets new input shapes and doesn't propagate them, as this layer has no children.
- */
-class OutMemoryReshapeLauncher : public ReshapeLauncher {
-public:
-    using Ptr = std::shared_ptr<OutMemoryReshapeLauncher>;
-
-    IE_SUPPRESS_DEPRECATED_START
-    OutMemoryReshapeLauncher(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl);
-    IE_SUPPRESS_DEPRECATED_END
-
-    void reshape(const std::set<ReshapeLauncher::Ptr>& launchers) override {}
-
-    void applyChanges(CNNLayer* layer) override;
-
-    void reset() override;
-
-    void constInfer(const std::set<ReshapeLauncher::Ptr>& launchers) override {}
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/legacy_api/src/shape_infer/ie_reshaper.cpp b/inference-engine/src/legacy_api/src/shape_infer/ie_reshaper.cpp
deleted file mode 100644 (file)
index 96d6e92..0000000
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <functional>
-#include <map>
-#include <memory>
-#include <mutex>
-#include <string>
-#include <tuple>
-#include <vector>
-
-#include <debug.h>
-#include <blob_factory.hpp>
-
-#include <legacy/graph_tools.hpp>
-#include <legacy/ie_layers.h>
-#include "legacy/details/ie_cnn_network_tools.h"
-#include "legacy/shape_infer/built-in/ie_built_in_holder.hpp"
-#include "shape_infer/ie_reshaper.hpp"
-
-using namespace InferenceEngine;
-using namespace InferenceEngine::details;
-using namespace ShapeInfer;
-
-inline static std::vector<CNNLayerPtr> SortTopologicallyStartsFrom(const std::vector<DataPtr>& inputs) {
-    std::vector<CNNLayerPtr> all_layers;
-    CNNNetForestDFS(
-        inputs,
-        [&](CNNLayerPtr current) {
-            all_layers.push_back(current);
-        },
-        false);
-    std::reverse(all_layers.begin(), all_layers.end());
-    return all_layers;
-}
-
-Reshaper::Reshaper(std::vector<DataPtr> insDatas, const LauncherCreator::Ptr& launcherCreator) {
-    auto builtIn = std::make_shared<BuiltInShapeInferHolder>();
-    _allTypes = getTypeNamesFromExtension(builtIn);
-    _extensions.push_back(builtIn);
-
-    _allSortedLayers = SortTopologicallyStartsFrom(insDatas);
-    for (auto& in_data : insDatas) {
-        for (auto layer : getInputTo(in_data)) {
-            _inputLayers.insert(layer.second);
-        }
-    }
-
-    if (_inputLayers.empty() || _allSortedLayers.empty())
-        THROW_IE_EXCEPTION << "Unsupported model for shape inference: failed to collect inputs and layers";
-
-    for (auto const& currentLayer : _allSortedLayers) {
-        auto createdLauncher = launcherCreator->createNotInputLauncher(currentLayer.get(), _extensions);
-        _launchers.insert(createdLauncher);
-    }
-}
-
-Reshaper::Reshaper(ICNNNetwork& network, const LauncherCreator::Ptr& launcherCreator) {
-    auto builtIn = std::make_shared<BuiltInShapeInferHolder>();
-    _allTypes = getTypeNamesFromExtension(builtIn);
-    _extensions.push_back(builtIn);
-
-    auto inputLayers = CNNNetGetAllInputLayers(network);
-    for (const auto& layer : inputLayers) {
-        _inputLayers.insert(layer);
-    }
-
-    _allSortedLayers = CNNNetSortTopologically(network);
-    if (_inputLayers.empty() || _allSortedLayers.empty())
-        THROW_IE_EXCEPTION << "Unsupported model for shape inference: failed to collect inputs and layers";
-    for (auto const& currentLayer : _allSortedLayers) {
-        auto foundInput =
-            std::find_if(_inputLayers.begin(), _inputLayers.end(), [&currentLayer](const CNNLayerPtr& inputLayer) {
-                return currentLayer->name == inputLayer->name;
-            });
-        ReshapeLauncher::Ptr createdLauncher;
-        if (foundInput == _inputLayers.end()) {
-            createdLauncher = launcherCreator->createNotInputLauncher(currentLayer.get(), _extensions);
-        } else {
-            createdLauncher = launcherCreator->createInputLauncher(currentLayer.get(), _extensions);
-        }
-        _launchers.insert(createdLauncher);
-    }
-}
-
-void Reshaper::AddExtension(const IShapeInferExtensionPtr& extension) {
-    if (!extension)
-        THROW_IE_EXCEPTION << "Failed to add empty shape infer extension";
-
-    auto newLayerTypes = getTypeNamesFromExtension(extension);
-    std::string badLayerTypes;
-    for (const auto& type : newLayerTypes) {
-        auto ret = _allTypes.insert(type);
-        if (!ret.second) {
-            if (!badLayerTypes.empty()) badLayerTypes += ", ";
-            badLayerTypes += type;
-        }
-    }
-    if (!badLayerTypes.empty())
-        THROW_IE_EXCEPTION << "Failed to add extension with already registered types:" << badLayerTypes;
-
-    for (auto const& layerType : newLayerTypes) {
-        auto foundLauncher = _launchers.begin();
-        // find all layers with given type
-        std::vector<ReshapeLauncher::Ptr> launchersToInsert;
-        while (foundLauncher != _launchers.end()) {
-            foundLauncher =
-                std::find_if(foundLauncher, _launchers.end(), [&layerType](const ReshapeLauncher::Ptr& launcher) {
-                    return layerType == launcher->getLayerType();
-                });
-            if (foundLauncher != _launchers.end()) {
-                IE_SUPPRESS_DEPRECATED_START
-                IShapeInferImpl::Ptr impl;
-                StatusCode sts = extension->getShapeInferImpl(impl, layerType.c_str(), nullptr);
-                IE_SUPPRESS_DEPRECATED_END
-                if (sts == OK && impl != nullptr) {
-                    auto newLauncher = std::make_shared<ReshapeLauncher>((*foundLauncher)->getLayer(), impl);
-                    newLauncher->setShapeInferImpl(impl);
-                    launchersToInsert.push_back(newLauncher);
-                    foundLauncher = _launchers.erase(foundLauncher);
-                } else {
-                    THROW_IE_EXCEPTION << "Failed to get registered Shape Infer Implementation for type: " << layerType;
-                }
-            }
-        }
-        for (const auto& launcher : launchersToInsert) {
-            _launchers.insert(launcher);
-        }
-    }
-    _extensions.push_back(extension);
-}
-
-ReshapeLauncher::Ptr Reshaper::getLauncherByLayerName(const std::string& layerName) const {
-    auto foundLauncher =
-        std::find_if(_launchers.begin(), _launchers.end(), [&layerName](const ReshapeLauncher::Ptr& launcher) {
-            return launcher->getLayerName() == layerName;
-        });
-    if (foundLauncher == _launchers.end())
-        THROW_IE_EXCEPTION << "Failed to reshape layer ('" << layerName << "'): can't find the corresponding launcher";
-    return *foundLauncher;
-}
-
-StatusCode Reshaper::run(const std::map<std::string, SizeVector>& inputShapes, ResponseDesc* resp) {
-    // WA: otherwise the registration logic of shape implementations would have to be changed
-    static std::mutex reshapeMutex;
-    {
-        std::lock_guard<std::mutex> lock(reshapeMutex);
-        // Reset all shapes from previous run
-        for (const auto& launcher : _launchers) {
-            launcher->reset();
-        }
-
-        // Set new input shapes
-        for (auto const& input : _inputLayers) {
-            std::string layerName = input->name;
-            for (auto const& outData : input->outData) {
-                std::string dataName = outData->getName();
-                auto foundShapeIt = inputShapes.find(dataName);
-                auto foundLauncher = getLauncherByLayerName(layerName);
-                if (foundShapeIt != inputShapes.end()) {
-                    foundLauncher->setShapeByName(foundShapeIt->second, dataName);
-                } else {
-                    foundLauncher->setIRShapeByName(dataName);
-                }
-            }
-        }
-
-        // do reshape
-        for (auto& layer : _allSortedLayers) {
-            auto foundLauncher = getLauncherByLayerName(layer->name);
-            foundLauncher->reshape(_launchers);
-            foundLauncher->constInfer(_launchers);
-        }
-
-        // apply changes
-        for (auto& layer : _allSortedLayers) {
-            auto foundLauncher = getLauncherByLayerName(layer->name);
-            foundLauncher->applyChanges(layer.get());
-        }
-        return OK;
-    }
-}
-
-StatusCode Reshaper::runNoApply(const std::map<std::string, SizeVector>& inputShapes, ResponseDesc* resp) {
-    // Reset all shapes from previous run
-    for (const auto& launcher : _launchers) {
-        launcher->reset();
-    }
-
-    // Set new input shapes
-    for (auto const& input : _inputLayers) {
-        std::string layerName = input->name;
-        for (auto const& inData_w : input->insData) {
-            auto inData = inData_w.lock();
-            auto dataName = inData->getName();
-            auto foundShapeIt = inputShapes.find(dataName);
-            auto foundLauncher = getLauncherByLayerName(layerName);
-            if (foundShapeIt != inputShapes.end()) {
-                foundLauncher->setShapeByName(foundShapeIt->second, dataName);
-            } else {
-                foundLauncher->setIRShapeByName(dataName);
-            }
-        }
-    }
-
-    // do reshape
-    for (auto& layer : _allSortedLayers) {
-        auto foundLauncher = getLauncherByLayerName(layer->name);
-        foundLauncher->reshape(_launchers);
-    }
-    return OK;
-}
-
-StatusCode Reshaper::apply(ResponseDesc* resp) {
-    // apply changes
-    for (auto& layer : _allSortedLayers) {
-        auto foundLauncher = getLauncherByLayerName(layer->name);
-        foundLauncher->applyChanges(layer.get());
-    }
-    return OK;
-}
-
-SizeVector Reshaper::getResultShapeFor(DataPtr& data, ResponseDesc* resp) {
-    auto creator_layer = getCreatorLayer(data).lock();
-    std::string creator_layer_name;
-    if (creator_layer) {
-        creator_layer_name = creator_layer->name;
-    }
-    auto foundLauncher = getLauncherByLayerName(creator_layer_name);
-    return foundLauncher->getShapeByName(data->getName());
-}
-
-caseless_set<std::string> Reshaper::getTypeNamesFromExtension(const IShapeInferExtensionPtr& extension) {
-    char** types = nullptr;
-    unsigned int size = 0;
-    ResponseDesc resp;
-    IE_SUPPRESS_DEPRECATED_START
-    StatusCode sts = extension->getShapeInferTypes(types, size, &resp);
-    IE_SUPPRESS_DEPRECATED_END
-    if (sts != OK) THROW_IE_EXCEPTION << "Failed to get types from extension: " << resp.msg;
-    caseless_set<std::string> typesSet;
-    for (int i = 0; i < size; i++) {
-        std::string type(types[i], strlen(types[i]));
-        delete[] types[i];
-        typesSet.insert(type);
-    }
-    delete[] types;
-    return typesSet;
-}
-
-ReshapeLauncher::Ptr LauncherCreator::createNotInputLauncher(const CNNLayer* layer,
-                                                             const std::vector<IShapeInferExtensionPtr>& extensions) {
-    auto layerType = layer->type;
-    if ((::details::equal(layerType, "memory") && layer->GetParamAsInt("index")) ||
-        ::details::equal(layerType, "const") || ::details::equal(layerType, "input")) {
-        THROW_IE_EXCEPTION << "Failed to reshape: Layer with type `" << layerType
-                           << "` can't be intermediate layer in network";
-    }
-
-    for (const auto& extension : extensions) {
-        IE_SUPPRESS_DEPRECATED_START
-        IShapeInferImpl::Ptr impl = nullptr;
-        StatusCode sts = extension->getShapeInferImpl(impl, layerType.c_str(), nullptr);
-        IE_SUPPRESS_DEPRECATED_END
-        if (sts == OK && impl != nullptr) {
-            if (::details::equal(layerType, "memory") && !layer->GetParamAsInt("index")) {
-                return std::make_shared<OutMemoryReshapeLauncher>(layer, nullptr);
-            }
-            return std::make_shared<ReshapeLauncher>(layer, impl);
-        }
-    }
-    return std::make_shared<FakeReshapeLauncher>(layer, nullptr);
-}
-
-ReshapeLauncher::Ptr LauncherCreator::createInputLauncher(const CNNLayer* layer,
-                                                          const std::vector<IShapeInferExtensionPtr>& extensions) {
-    auto layerType = layer->type;
-    if (::details::equal(layerType, "memory") && layer->GetParamAsInt("index")) {
-        return std::make_shared<InputReshapeLauncher>(layer, nullptr);
-    } else if (::details::equal(layerType, "const")) {
-        return std::make_shared<ConstReshapeLauncher>(layer, nullptr);
-    } else if (::details::equal(layerType, "input")) {
-        return std::make_shared<InputReshapeLauncher>(layer, nullptr);
-    }
-    THROW_IE_EXCEPTION << "Failed to reshape: Layer with type `" << layerType
-                       << "` can't be input. Supported input types: Input, Const and Memory(with index=1)";
-}
diff --git a/inference-engine/src/legacy_api/src/shape_infer/ie_reshaper.hpp b/inference-engine/src/legacy_api/src/shape_infer/ie_reshaper.hpp
deleted file mode 100644 (file)
index 32787ef..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <list>
-#include <map>
-#include <memory>
-#include <set>
-#include <string>
-#include <vector>
-
-#include "caseless.hpp"
-#include "ie_icnn_network.hpp"
-
-#include <legacy/ie_ishape_infer_extension.hpp>
-#include <legacy/ie_layers.h>
-#include "ie_reshape_launcher.hpp"
-
-namespace InferenceEngine {
-namespace ShapeInfer {
-
-class INFERENCE_ENGINE_API_CLASS(LauncherCreator) {
-public:
-    using Ptr = std::shared_ptr<LauncherCreator>;
-
-    /**
-     * @brief Creates a reshape launcher for the given intermediate layer using the first registered
-     * implementation. Built-in implementations are checked first, then custom ones.
-     * Throws an exception if it fails to find an implementation for the given layer.
-     * @param layer - const pointer to the CNNLayer for which shape infer is needed
-     * @param extensions - all registered extensions
-     * @return - shared_ptr to the corresponding launcher.
-     */
-    virtual ReshapeLauncher::Ptr createNotInputLauncher(const CNNLayer* layer,
-                                                        const std::vector<IShapeInferExtensionPtr>& extensions);
-
-    /**
-     * @brief Creates a reshape launcher for the given input layer. Supported types: Input, Const, Memory (as input).
-     * @param layer - const pointer to the CNNLayer for which shape infer is needed
-     * @param extensions - all registered extensions
-     * @return - shared_ptr to the corresponding launcher.
-     */
-    virtual ReshapeLauncher::Ptr createInputLauncher(const CNNLayer* layer,
-                                                     const std::vector<IShapeInferExtensionPtr>& extensions);
-
-    virtual ~LauncherCreator() = default;
-};
-
-/**
- * @class Reshaper
- * @brief Helper class to infer shapes for the given ICNNNetwork.
- * It delegates shape inference to the corresponding ReshapeLauncher.
- */
-class INFERENCE_ENGINE_API_CLASS(Reshaper) {
-public:
-    /**
-     * @brief Constructor
-     * @param network - reference to the ICNNNetwork on which shape inference is performed
-     */
-    explicit Reshaper(ICNNNetwork& network, const LauncherCreator::Ptr& creator = std::make_shared<LauncherCreator>());
-
-    explicit Reshaper(std::vector<DataPtr> inputs,
-                      const LauncherCreator::Ptr& launcherCreator = std::make_shared<LauncherCreator>());
-
-    virtual ~Reshaper() = default;
-
-    /**
-     * @brief Adds shape infer extension to provide implementations of shape infer functions
-     * @param extension - pointer to the shape infer extension
-     */
-    void AddExtension(const IShapeInferExtensionPtr& extension);
-
-    /**
-     * @brief Launches shape inference for the given ICNNNetwork and input shapes.
-     * Throws if shape inference fails; the original shapes are left intact.
-     * @param inputShapes - Map of input names (data) to their input shapes.
-     */
-    StatusCode run(const std::map<std::string, SizeVector>& inputShapes, ResponseDesc* resp = nullptr);
-
-    /**
-     * @brief Performs shape inference for the given input shapes but does not apply the result.
-     * On success, call the apply() method to commit the new shapes.
-     * @param inputShapes - Map of input names (data) to their input shapes.
-     * @throws an exception if shape inference fails; the original shapes are left intact
-     */
-    StatusCode runNoApply(const std::map<std::string, SizeVector>& inputShapes, ResponseDesc* resp = nullptr);
-
-    /**
-     * @brief Applies the shapes precalculated by the runNoApply() method.
-     */
-    StatusCode apply(ResponseDesc* resp = nullptr);
-
-    /**
-     * @brief Returns the newly calculated shape for the provided data.
-     */
-    SizeVector getResultShapeFor(DataPtr& data, ResponseDesc* resp = nullptr);
-
-private:
-    ReshapeLauncher::Ptr getLauncherByLayerName(const std::string& layerName) const;
-
-    InferenceEngine::details::caseless_set<std::string> getTypeNamesFromExtension(
-        const IShapeInferExtensionPtr& extension);
-
-    std::vector<IShapeInferExtensionPtr> _extensions;
-    std::set<ReshapeLauncher::Ptr> _launchers;
-    std::vector<CNNLayerPtr> _allSortedLayers {};
-    std::set<CNNLayerPtr> _inputLayers {};
-    InferenceEngine::details::caseless_set<std::string> _allTypes;
-};
-
-}  // namespace ShapeInfer
-}  // namespace InferenceEngine
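
For reference, the removed API split shape inference into a dry-run step and an apply step, while run() performed inference, constant propagation, and apply in a single call. A minimal sketch of how callers drove the two-step flow, assuming a legacy ICNNNetwork `net` and an input named "data" (both illustrative):

    InferenceEngine::ShapeInfer::Reshaper reshaper(net);
    std::map<std::string, InferenceEngine::SizeVector> shapes{{"data", {4, 3, 224, 224}}};
    if (reshaper.runNoApply(shapes) == InferenceEngine::OK) {
        reshaper.apply();  // commit the precalculated shapes
    }
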
index 46d236c..10d8c7d 100644 (file)
@@ -9,7 +9,6 @@
 #include <memory>
 #include <ie_iextension.h>
 #include <legacy/ie_layers.h>
-#include <legacy/ie_ishape_infer_extension.hpp>
 #include "nodes/list.hpp"
 
 namespace MKLDNNPlugin {
index 2ed9432..cf585ba 100644 (file)
@@ -11,7 +11,6 @@
 #include <vector>
 #include <memory>
 #include <map>
-#include <legacy/ie_ishape_infer_extension.hpp>
 
 namespace MKLDNNPlugin {
 
index 078b38b..d3c0725 100644 (file)
@@ -9,7 +9,6 @@
 #include <string>
 #include <map>
 
-#include <legacy/ie_ishape_infer_extension.hpp>
 #include <ie_parameter.hpp>
 #include <ie_precision.hpp>
 
 #include <ngraph/op/tensor_iterator.hpp>
 #include <ngraph/graph_util.hpp>
 
+namespace InferenceEngine {
+
+class IShapeInferExtension;
+using IShapeInferExtensionPtr = std::shared_ptr<IShapeInferExtension>;
+
+}
+
 namespace ngraph {
 namespace op {
 
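
The forward declaration added above compiles because std::shared_ptr may be declared with an incomplete element type; only code that creates or dereferences the object needs the full class definition. A minimal standalone sketch of the pattern (Widget is a made-up name):

    #include <memory>

    class Widget;                               // incomplete type is sufficient here
    using WidgetPtr = std::shared_ptr<Widget>;  // alias compiles without the definition

    void enqueue(const WidgetPtr& p);           // declarations can traffic in WidgetPtr;
                                                // Widget's body is only needed where
                                                // std::make_shared<Widget>() or -> is used
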
index f1c54ae..00686cf 100644 (file)
@@ -25,8 +25,3 @@
 #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iinfer_request_internal.hpp"
 #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_imemory_state_internal.hpp"
 #include "unit_test_utils/mocks/cpp_interfaces/interface/mock_iinference_plugin.hpp"
-
-#include "unit_test_utils/mocks/shape_infer/mock_input_controller.hpp"
-#include "unit_test_utils/mocks/shape_infer/mock_ishape_infer_impl.hpp"
-#include "unit_test_utils/mocks/shape_infer/mock_output_controller.hpp"
-#include "unit_test_utils/mocks/shape_infer/mock_reshaper_launcher.hpp"
diff --git a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/shape_infer/mock_input_controller.hpp b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/shape_infer/mock_input_controller.hpp
deleted file mode 100644 (file)
index 0daea6a..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include <gmock/gmock.h>
-#include <ie_api.h>
-
-IE_SUPPRESS_DEPRECATED_START
-
-#include <shape_infer/ie_reshape_io_controllers.hpp>
-
-using namespace InferenceEngine;
-using namespace ShapeInfer;
-
-
-class MockInputController : public InputController {
-public:
-    explicit MockInputController(const std::vector<DataPtr>& dataVec) : InputController(dataVec, {}, std::make_shared<EmptyChecker>()) {}
-
-    MOCK_METHOD2(setShapeByName, void(
-            const SizeVector&, const std::string&));
-
-    MOCK_METHOD2(setShapeByIndex, void(
-            const SizeVector&, size_t index));
-
-    MOCK_METHOD1(getShapes, std::vector<SizeVector>(bool));
-
-    MOCK_METHOD1(getBlobs, std::vector<Blob::CPtr>(bool));
-
-    MOCK_METHOD0(getIRShapes, std::vector<SizeVector>());
-
-    MOCK_METHOD1(getIRShapeByName, SizeVector(
-            const std::string&));
-
-    MOCK_METHOD0(applyChanges, void());
-
-    MOCK_METHOD0(reset, void());
-
-    SizeVector realGetIRShapeByName(const std::string& name) {
-        return InputController::getIRShapeByName(name);
-    }
-};
-
-IE_SUPPRESS_DEPRECATED_END
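
These controller mocks were exercised through the standard gmock expectation flow. A minimal sketch, assuming a std::vector<DataPtr> named `data` prepared by the test (the name "data" below is illustrative):

    MockInputController controller(data);
    EXPECT_CALL(controller, setShapeByName(InferenceEngine::SizeVector{1, 3, 224, 224}, "data"))
        .Times(1);
    controller.setShapeByName({1, 3, 224, 224}, "data");  // satisfies the expectation
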
diff --git a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/shape_infer/mock_ishape_infer_impl.hpp b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/shape_infer/mock_ishape_infer_impl.hpp
deleted file mode 100644 (file)
index 7a12bd1..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-#include <gmock/gmock.h>
-
-#include <ie_api.h>
-
-IE_SUPPRESS_DEPRECATED_START
-
-#include <ie_iextension.h>
-
-using namespace InferenceEngine;
-
-class MockIShapeInferImpl : public IShapeInferImpl {
-public:
-    using Ptr = std::shared_ptr<MockIShapeInferImpl>;
-
-    MOCK_QUALIFIED_METHOD5(inferShapes, noexcept, StatusCode(
-            const std::vector<Blob::CPtr> &,
-            const std::map<std::string, std::string>&,
-            const std::map<std::string, Blob::Ptr>&,
-            std::vector<SizeVector> &,
-            ResponseDesc *));
-};
-
-IE_SUPPRESS_DEPRECATED_END
diff --git a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/shape_infer/mock_output_controller.hpp b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/shape_infer/mock_output_controller.hpp
deleted file mode 100644 (file)
index e0e555f..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-#include <memory>
-#include <string>
-#include <vector>
-#include <set>
-#include <gmock/gmock.h>
-
-#include <ie_api.h>
-
-IE_SUPPRESS_DEPRECATED_START
-
-#include <shape_infer/ie_reshape_io_controllers.hpp>
-
-using namespace InferenceEngine;
-
-class MockOutputController : public OutputController {
-public:
-    explicit MockOutputController(const std::vector<DataPtr>& dataVec) : OutputController(dataVec, {}, std::make_shared<EmptyChecker>()) {}
-
-    MOCK_METHOD2(setShapeByName, void(const SizeVector&, const std::string&));
-
-    MOCK_METHOD2(setShapeByIndex, void(const SizeVector&, size_t index));
-
-    MOCK_METHOD1(getIRShapeByName, SizeVector(const std::string&));
-
-    MOCK_METHOD1(getShapes, std::vector<SizeVector>(bool));
-
-    MOCK_METHOD0(getIRShapes, std::vector<SizeVector>());
-
-    MOCK_METHOD0(applyChanges, void());
-
-    MOCK_METHOD0(reset, void());
-
-    MOCK_METHOD1(propagateShapes, void(const std::set<ReshapeLauncher::Ptr>&));
-
-    MOCK_METHOD1(setShapes, void(const std::vector<SizeVector>&));
-
-    std::vector<SizeVector> realGetShapes() {
-        return OutputController::getShapes(false);
-    }
-};
-
-IE_SUPPRESS_DEPRECATED_END
diff --git a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/shape_infer/mock_reshaper_launcher.hpp b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/shape_infer/mock_reshaper_launcher.hpp
deleted file mode 100644 (file)
index 9bdb593..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-#include <memory>
-#include <string>
-#include <vector>
-#include <set>
-#include <gmock/gmock.h>
-
-#include <ie_api.h>
-
-#include "unit_test_utils/mocks/shape_infer/mock_ishape_infer_impl.hpp"
-#include "unit_test_utils/mocks/shape_infer/mock_input_controller.hpp"
-#include "unit_test_utils/mocks/shape_infer/mock_output_controller.hpp"
-
-IE_SUPPRESS_DEPRECATED_START
-
-#include <shape_infer/ie_reshape_launcher.hpp>
-#include <shape_infer/ie_reshape_io_controllers.hpp>
-
-using namespace InferenceEngine;
-using namespace ShapeInfer;
-
-class MockReshapeLauncher : public ReshapeLauncher {
-public:
-    using Ptr = std::shared_ptr<MockReshapeLauncher>;
-    class TestLauncherInitializer : public DefaultInitializer {
-    public:
-        void check(const CNNLayer* layer, const IShapeInferImpl::Ptr& impl) override {}
-
-        InputController* createInputController(const CNNLayer* layer) override {
-            if (!_iController) {
-                std::vector<DataPtr> data;
-                if (layer) {
-                    for (auto const& insData : layer->insData) {
-                        data.push_back(insData.lock());
-                    }
-                }
-                _iController = new MockInputController(data);
-            }
-            return _iController;
-        }
-
-        OutputController* createOutputController(const CNNLayer* layer) override {
-            if (!_oController) {
-                std::vector<DataPtr> data;
-                if (layer) data = layer->outData;
-                _oController = new MockOutputController(data);
-            }
-            return _oController;
-        }
-
-        MockInputController* getInputController() {
-            return _iController;
-        }
-
-        MockOutputController* getOutputController() {
-            return _oController;
-        }
-
-    private:
-        MockInputController* _iController;
-        MockOutputController* _oController;
-    };
-
-    MockReshapeLauncher(const DefaultInitializer::Ptr& initializer = std::make_shared<TestLauncherInitializer>(),
-                        const CNNLayer* layer = nullptr,
-                        const IShapeInferImpl::Ptr& impl = std::make_shared<MockIShapeInferImpl>())
-            : ReshapeLauncher(layer, impl, initializer) {}
-
-    MOCK_METHOD2(setShapeByName, void(const SizeVector&, const std::string&));
-
-    MOCK_METHOD1(reshape, void(const std::set<ReshapeLauncher::Ptr>&));
-
-    MOCK_METHOD1(applyChanges, void(CNNLayer*));
-
-    MOCK_METHOD0(reset, void());
-
-    MOCK_QUALIFIED_METHOD0(getLayerName, const, std::string());
-
-    MOCK_METHOD1(setShapeInferImpl, void(const IShapeInferImpl::Ptr&));
-
-    void realReset() {
-        ReshapeLauncher::reset();
-    }
-
-    void realReshape() {
-        ReshapeLauncher::reshape({});
-    }
-
-    std::string realGetLayerName() {
-        return ReshapeLauncher::getLayerName();
-    }
-};
-
-IE_SUPPRESS_DEPRECATED_END
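
The launcher mock wires its controllers in through the DefaultInitializer seam shown above, so a test can keep a handle to the injected mocks and set expectations on them. A minimal sketch, assuming a CNNLayer* `layer` supplied by the fixture and assuming (as the real ReshapeLauncher is expected to) that reset() delegates to the controllers:

    auto initializer = std::make_shared<MockReshapeLauncher::TestLauncherInitializer>();
    MockReshapeLauncher launcher(initializer, layer);
    MockInputController* input = initializer->getInputController();
    EXPECT_CALL(*input, reset());  // expectation lives on the injected mock
    launcher.realReset();          // drives the real ReshapeLauncher::reset() path
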
index fb98a55..f2d14c3 100644 (file)
@@ -22,9 +22,6 @@ add_executable(${TARGET_NAME}
         ${TEST_SRC}
         ${TEST_INCLUDE})
 
-target_compile_definitions(${TARGET_NAME} PRIVATE
-        INSTANTIATE_TESTS=1)
-
 target_link_libraries(${TARGET_NAME} PRIVATE IEBehaviorSharedTests)
 
 add_test(NAME ${TARGET_NAME}
index 38f5d2f..a064a79 100644 (file)
@@ -22,9 +22,6 @@ add_executable(${TARGET_NAME}
         ${TEST_SRC}
         ${TEST_INCLUDE})
 
-target_compile_definitions(${TARGET_NAME} PRIVATE
-        INSTANTIATE_TESTS=1)
-
 target_link_libraries(${TARGET_NAME} PRIVATE IEBehaviorSharedTests)
 
 add_test(NAME ${TARGET_NAME}
index 3015a30..d60d94d 100644 (file)
@@ -22,9 +22,6 @@ add_executable(${TARGET_NAME}
         ${TEST_SRC}
         ${TEST_INCLUDE})
 
-target_compile_definitions(${TARGET_NAME} PRIVATE
-        INSTANTIATE_TESTS=1)
-
 target_link_libraries(${TARGET_NAME} PRIVATE IEBehaviorSharedTests)
 
 target_include_directories(${TARGET_NAME} PRIVATE $<TARGET_PROPERTY:inference_engine_preproc,INTERFACE_INCLUDE_DIRECTORIES> ${IE_MAIN_SOURCE_DIR}/samples)
index 217a2aa..9f74985 100644 (file)
@@ -41,7 +41,6 @@ function(enable_vpu TARGET_NAME FLAG_NAME PLUGIN_NAME)
             ${TEST_INCLUDE})
 
     target_compile_definitions(${TARGET_NAME} PRIVATE
-            INSTANTIATE_TESTS=1
             ${FLAG_NAME}=1)
 
     target_link_libraries(${TARGET_NAME} PRIVATE
index fa8a348..b8c7453 100644 (file)
@@ -57,8 +57,6 @@ add_executable(${TARGET_NAME}
         ${TEST_INCLUDE})
 
 target_compile_definitions(${TARGET_NAME}
-        PRIVATE
-        INSTANTIATE_TESTS=1
         PUBLIC ${ARGV}
         DATA_PATH=\"${DATA_PATH}\"
         MODELS_PATH=\"${MODELS_PATH}\")
index 9477b8c..1923c1a 100644 (file)
@@ -101,10 +101,9 @@ getTestCaseName(testing::TestParamInfo<std::tuple<InitialShapes, NewShapes, Plug
     return "CLDNN" + helper->getType();
 }
 
-#if (defined INSTANTIATE_TESTS)
-
 INSTANTIATE_TEST_CASE_P(
-        Conv_nightly, CommonSingleLayerTest,
+        // TODO: rewrite to ngraph to have reshape functionality
+        DISABLED_Conv_nightly, CommonSingleLayerTest,
         ::testing::Combine(
         ::testing::Values(InitialShapes({
                                                 {{1, 2, 16, 16}},           // input
@@ -120,7 +119,8 @@ INSTANTIATE_TEST_CASE_P(
 );
 
 INSTANTIATE_TEST_CASE_P(
-        Deconv_nightly, CommonSingleLayerTest,
+        // TODO: rewrite to ngraph to have reshape functionality
+        DISABLED_Deconv_nightly, CommonSingleLayerTest,
         ::testing::Combine(
         ::testing::Values(InitialShapes({
                                                 {{1, 2, 8,  8}},             // input
@@ -136,7 +136,8 @@ INSTANTIATE_TEST_CASE_P(
 );
 
 INSTANTIATE_TEST_CASE_P(
-        Pool_nightly, CommonSingleLayerTest,
+        // TODO: rewrite to ngraph to have reshape functionality
+        DISABLED_Pool_nightly, CommonSingleLayerTest,
         ::testing::Combine(
         ::testing::Values(InitialShapes({
                                                 {{1, 2, 16, 16}},           // input
@@ -230,5 +231,3 @@ INSTANTIATE_TEST_CASE_P(
                 ::testing::Values(Helper(std::make_shared<DeformableConvolutionTestHelper>(defConvParamsHeavy, 4)))
         ), getTestCaseName
 );
-
-#endif
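
The DISABLED_ renamings here and throughout the rest of this patch rely on stock googletest behavior: a test or instantiation whose name begins with DISABLED_ is still compiled and reported, but skipped at run time unless explicitly requested. A minimal standalone sketch (the suite name is made up):

    #include <gtest/gtest.h>

    TEST(ReshapeSuite, DISABLED_example) {  // the name prefix alone disables the test
        FAIL() << "only runs with --gtest_also_run_disabled_tests";
    }

Keeping the disabled cases compiling means they stay easy to revive once the ngraph-based reshape rewrite lands.
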
index 35a90e8..665637e 100644 (file)
@@ -34,7 +34,6 @@ target_compile_definitions(${TARGET_NAME}
         PRIVATE
             USE_GNA=ON
             GNA_LIB_VER=${GNA_LIBRARY_VERSION_NUMBER}
-            INSTANTIATE_TESTS=1
         PUBLIC ${ARGV}
             DATA_PATH=\"${DATA_PATH}\"
             MODELS_PATH=\"${MODELS_PATH}\")
index aff270b..09fe185 100644 (file)
@@ -47,9 +47,6 @@ add_executable(${TARGET_NAME}
                ${TEST_INCLUDE})
 
 target_compile_definitions(${TARGET_NAME}
-        PRIVATE
-        USE_MKL_DNN=ON
-        INSTANTIATE_TESTS=1
         PUBLIC ${ARGV}
         DATA_PATH=\"${DATA_PATH}\"
         MODELS_PATH=\"${MODELS_PATH}\" PARENT_SCOPE)
index c593a44..d360fd3 100644 (file)
@@ -101,10 +101,9 @@ getTestCaseName(testing::TestParamInfo<std::tuple<InitialShapes, NewShapes, Plug
     return "MKLDNN" + helper->getType();
 }
 
-#if (defined INSTANTIATE_TESTS)
-
 INSTANTIATE_TEST_CASE_P(
-        Conv_smoke, CommonSingleLayerTest,
+        // TODO: rewrite to ngraph to have reshape functionality
+        DISABLED_Conv_smoke, CommonSingleLayerTest,
         ::testing::Combine(
         ::testing::Values(InitialShapes({
                                                 {{1, 2, 16, 16}},           // input
@@ -120,7 +119,8 @@ INSTANTIATE_TEST_CASE_P(
 );
 
 INSTANTIATE_TEST_CASE_P(
-        Deconv_smoke, CommonSingleLayerTest,
+        // TODO: rewrite to ngraph to have reshape functionality
+        DISABLED_Deconv_smoke, CommonSingleLayerTest,
         ::testing::Combine(
         ::testing::Values(InitialShapes({
                                                 {{1, 2, 8,  8}},             // input
@@ -136,7 +136,8 @@ INSTANTIATE_TEST_CASE_P(
 );
 
 INSTANTIATE_TEST_CASE_P(
-        Pool_smoke, CommonSingleLayerTest,
+        // TODO: rewrite to ngraph to have reshape functionality
+        DISABLED_Pool_smoke, CommonSingleLayerTest,
         ::testing::Combine(
         ::testing::Values(InitialShapes({
                                                 {{1, 2, 16, 16}},           // input
@@ -230,5 +231,3 @@ INSTANTIATE_TEST_CASE_P(
                 ::testing::Values(Helper(std::make_shared<DeformableConvolutionTestHelper>(defConvParamsHeavy, 4)))
         ), getTestCaseName
 );
-
-#endif
index 811b789..3573968 100644 (file)
@@ -378,7 +378,8 @@ protected:
 // NOTE: always auto_pad = same_upper. IR with zero_pads, pad from params is used for ref_conv after reshape
 #define case_si_1 conv_base_params({{1lu, 144lu, 75lu, 75lu}, {3lu, 3lu}, {2lu, 2lu}, {1lu, 1lu}, {0lu, 0lu}, {1lu, 1lu}, 144lu, 144lu, {1lu, 1lu}})
 
-TEST_P(smoke_ConvolutionReshapeTest, TestsReshapeConvolution) {
+// TODO: rewrite to ngraph to have reshape functionality
+TEST_P(smoke_ConvolutionReshapeTest, DISABLED_TestsReshapeConvolution) {
 }
 
 std::string getTestCaseName(testing::TestParamInfo<conv_test_params> obj) {
index dd4bfd2..92464f1 100644 (file)
@@ -168,7 +168,7 @@ const auto workload = Combine(
         Values(2),                  // Batch
         Values(3),                  // Sequence size
         Values(0, 1),               // Axis of sequence
-        Values(RESH_NO, RESH_B)     // Reshape mode for batch, sequence or both
+        Values(RESH_NO /*, RESH_B TODO: migrate to ngraph reshape */)  // Reshape mode for batch, sequence or both
 );
 #endif
 
@@ -181,5 +181,5 @@ const auto dyn_seq_workload = Combine(
         Values(1, 8),               // Batch
         Values(3, 100),             // Sequence size
         Values(0, 1),               // Axis of sequence
-        Values(RESH_NO, RESH_B)     // Reshape mode for batch, sequence or both
+        Values(RESH_NO /*, RESH_B TODO: migrate to ngraph reshape */)     // Reshape mode for batch, sequence or both
 );
index e8fe9b1..b85dd5d 100644 (file)
@@ -22,7 +22,6 @@ addIeTarget(
         LINK_LIBRARIES
         ieTestHelpers
         DEFINES
-        INSTANTIATE_TESTS=1
         PUBLIC
         DATA_PATH=\"${DATA_PATH}\"
         MODELS_PATH=\"${MODELS_PATH}\"
@@ -43,8 +42,6 @@ addIeTarget(
         IESharedTests
         vpu_graph_transformer
         vpu_custom_kernels
-    DEFINES
-        INSTANTIATE_TESTS=1
     EXPORT_DEPENDENCIES
         vpu_custom_kernels
 )
index 36c110b..01d2b31 100644 (file)
@@ -140,7 +140,7 @@ static  std::vector<int32_t> s_dimension = {
 };
 
 static  std::vector<int32_t> s_batch = {
-    1, 8
+    1 /*, 8 TODO: rewrite to ngraph to have reshape functionality */
 };
 
 static std::vector<InferenceEngine::SizeVector> s_concatCores = {
index 239b933..fdadfbf 100644 (file)
@@ -110,16 +110,18 @@ TEST_P(myriadLayersTestsConvertWithFP16_smoke, TestsConvertWithFP16)
 }
 
 std::vector<SizeVector> inputsDims = {
-    {       224, 224 },
-    {    3, 224, 224 },
+    // TODO: rewrite to ngraph to have reshape functionality
+    // {       224, 224 },
+    // {    3, 224, 224 },
     { 1, 1, 224, 224 },
     { 1, 1, 416, 416 },
     { 1, 1,  62,  62 },
     { 1, 1, 227, 227 },
     { 1, 3, 224, 224 },
 
+    // TODO: rewrite to ngraph to have reshape functionality
     // 5D case
-    { 2, 2, 3, 224, 224 },
+    // { 2, 2, 3, 224, 224 },
 };
 
 std::vector<SizeVector> inputsDims4D = {
index f4cfb44..e5816b5 100644 (file)
@@ -668,7 +668,8 @@ INSTANTIATE_TEST_CASE_P(accuracy_small_input_3, myriadLayerConvolution_smoke,
            )
  );
 
-TEST_F(myriadLayersTests_nightly, tests125) {
+// TODO: rewrite to ngraph to have reshape functionality
+TEST_F(myriadLayersTests_nightly, DISABLED_tests125) {
     std::string outName1 = "SecondStageFeatureExtractor/InceptionV2/Mixed_5a/Branch_1/Conv2d_0b_3x3/Conv2D";
     std::string outName2 = "SecondStageFeatureExtractor/InceptionV2/Mixed_5a/Branch_0/Conv2d_1a_3x3/Relu";
     InferenceEngine::TBlob<uint8_t>::Ptr weights(GenWeights(1697280 / sizeof(ie_fp16)));
index c8ea03e..03f33d8 100644 (file)
@@ -41,6 +41,7 @@ static std::vector<Dims> s_expParams = {
     {{1, 1, 224, 235}},
     {{1, 2, 224, 235}},
     {{1, 3, 224, 235}},
-    {{10, 17191, 1, 1}},
+    // TODO: rewrite to ngraph to have reshape functionality
+    // {{10, 17191, 1, 1}},
     {{1, 1, 10, 17191}}
 };
index 8b52f4c..479ee3a 100644 (file)
@@ -57,6 +57,7 @@ static std::vector<Dims> s_FloorParams = {
         {{1, 1, 224, 235}},
         {{1, 2, 224, 235}},
         {{1, 3, 224, 235}},
-        {{10, 17191, 1, 1}},
+        // TODO: rewrite to ngraph to have reshape functionality
+        // {{10, 17191, 1, 1}},
         {{1, 1, 10, 17191}}
 };
index 7a6abd7..1c0801b 100644 (file)
@@ -44,6 +44,7 @@ static std::vector<Dims> s_logParams = {
     {{1, 1, 224, 235}},
     {{1, 2, 224, 235}},
     {{1, 3, 224, 235}},
-    {{10, 17191, 1, 1}},
+    // TODO: rewrite to ngraph to have reshape functionality
+    // {{10, 17191, 1, 1}},
     {{1, 1, 10, 17191}}
 };
index 3ab61df..f00894a 100644 (file)
@@ -22,7 +22,8 @@ INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsSqueezeTC2_smoke,
     )
 );
 
-INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsSqueezeTC3_smoke,
+// TODO: rewrite to ngraph to have reshape functionality
+INSTANTIATE_TEST_CASE_P(DISABLED_accuracy, myriadLayersTestsSqueezeTC3_smoke,
     ::testing::Combine(
         ::testing::ValuesIn(s_squeezeTensorsTC3),
         ::testing::ValuesIn(s_squeezeIndicesTC3),
@@ -31,7 +32,8 @@ INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsSqueezeTC3_smoke,
     )
 );
 
-INSTANTIATE_TEST_CASE_P(accuracy, myriadLayersTestsSqueezeTC4_smoke,
+// TODO: rewrite to ngraph to have reshape functionality
+INSTANTIATE_TEST_CASE_P(DISABLED_accuracy, myriadLayersTestsSqueezeTC4_smoke,
         ::testing::Combine(
         ::testing::ValuesIn(s_squeezeTensorsTC4),
         ::testing::ValuesIn(s_squeezeIndicesTC4),
index 71604b1..e28e473 100644 (file)
@@ -180,7 +180,13 @@ R"V0G0N(
 }
 
 static std::vector<InferenceEngine::SizeVector> s_squeezeTensors = {
-        {{3}, {1}, {1, 3}, {3, 1}}
+        {
+            // TODO: rewrite to ngraph to have reshape functionality
+            // {3, 1},
+            // {3},
+            {1},
+            {1, 3},
+        }
 };
 
 static std::vector<IndicesVector> s_squeezeIndices = {
index 164f6b6..553dc2e 100644 (file)
@@ -44,10 +44,9 @@ getTestCaseName(testing::TestParamInfo<std::tuple<InitialShapes, NewShapes, Plug
     return device2FilterName[pluginParams.deviceName] + helper->getType();
 }
 
-#if (defined INSTANTIATE_TESTS)
-
 INSTANTIATE_TEST_CASE_P(
-        Conv_nightly, CommonSingleLayerTest,
+        // TODO: rewrite to ngraph to have reshape functionality
+        DISABLED_Conv_nightly, CommonSingleLayerTest,
         ::testing::Combine(
         ::testing::Values(InitialShapes({
                                                 {{1, 2, 16, 16}},           // input
@@ -63,7 +62,8 @@ INSTANTIATE_TEST_CASE_P(
 );
 
 INSTANTIATE_TEST_CASE_P(
-        Deconv_nightly, CommonSingleLayerTest,
+        // TODO: rewrite to ngraph to have reshape functionality
+        DISABLED_Deconv_nightly, CommonSingleLayerTest,
         ::testing::Combine(
         ::testing::Values(InitialShapes({
                                                 {{1, 2, 8,  8}},             // input
@@ -79,7 +79,8 @@ INSTANTIATE_TEST_CASE_P(
 );
 
 INSTANTIATE_TEST_CASE_P(
-        Pool_nightly, CommonSingleLayerTest,
+        // TODO: rewrite to ngraph to have reshape functionality
+        DISABLED_Pool_nightly, CommonSingleLayerTest,
         ::testing::Combine(
         ::testing::Values(InitialShapes({
                                                 {{1, 2, 16, 16}},           // input
@@ -93,5 +94,3 @@ INSTANTIATE_TEST_CASE_P(
         ::testing::Values(Helper(std::make_shared<PoolingTestHelper>(poolParams)))
 ), getTestCaseName
 );
-
-#endif
index 46ddf97..4f59e63 100644 (file)
@@ -5,4 +5,5 @@
 #include "dims_tests.hpp"
 #include "vpu_tests_config.hpp"
 
-VPU_PLUGING_CASE_WITH_SUFFIX(_nightly, IO_BlobTest, params_myriad);
+// TODO: rewrite to ngraph to have reshape functionality
+// VPU_PLUGING_CASE_WITH_SUFFIX(_nightly, IO_BlobTest, params_myriad);
index acc9e21..d3af081 100644 (file)
@@ -13,4 +13,5 @@ static auto params_myriad = ::testing::Combine(
         ::testing::Values(Precision::FP32, Precision::U8)  // TODO: What about U16/I8/FP16?
 );
 
-VPU_PLUGING_CASE_WITH_SUFFIX(_nightly, LayoutTTTest, params_myriad);
+// TODO: rewrite to ngraph to have reshape functionality
+// VPU_PLUGING_CASE_WITH_SUFFIX(_nightly, LayoutTTTest, params_myriad);
index a9c1db7..c1da6fc 100644 (file)
@@ -386,30 +386,31 @@ INSTANTIATE_TEST_CASE_P(
         ::testing::Values(
                 activation_test_params{eltwise_relu, 0.0f, 0.0f, {2, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit},
                 activation_test_params{eltwise_relu, 0.0f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::jit},
-                activation_test_params{eltwise_relu, 0.5f, 0.5f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit},
                 activation_test_params{eltwise_relu, 0.5f, 0.5f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::jit},
-                activation_test_params{eltwise_elu, 0.5f, 0.5f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit},
                 activation_test_params{eltwise_elu, 0.5f, 0.5f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::jit},
-                activation_test_params{eltwise_elu, 1.0f, 1.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit},
                 activation_test_params{eltwise_elu, 1.0f, 1.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::jit},
-                activation_test_params{eltwise_logistic, 0.0f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit},
                 activation_test_params{eltwise_logistic, 0.0f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::jit},
-                activation_test_params{eltwise_bounded_relu, 6.0f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit},
                 activation_test_params{eltwise_bounded_relu, 6.0f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::jit},
-                activation_test_params{eltwise_bounded_relu, 0.1f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit},
                 activation_test_params{eltwise_bounded_relu, 0.1f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::jit},
-                activation_test_params{eltwise_relu, 0.0f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
                 activation_test_params{eltwise_relu, 0.0f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                activation_test_params{eltwise_relu, 0.5f, 0.5f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
                 activation_test_params{eltwise_relu, 0.5f, 0.5f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                activation_test_params{eltwise_elu, 0.5f, 0.5f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
                 activation_test_params{eltwise_elu, 0.5f, 0.5f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                activation_test_params{eltwise_elu, 1.0f, 1.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
                 activation_test_params{eltwise_elu, 1.0f, 1.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                activation_test_params{eltwise_logistic, 0.0f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
                 activation_test_params{eltwise_logistic, 0.0f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                activation_test_params{eltwise_bounded_relu, 6.0f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
                 activation_test_params{eltwise_bounded_relu, 6.0f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                activation_test_params{eltwise_bounded_relu, 0.1f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
                 activation_test_params{eltwise_bounded_relu, 0.1f, 0.0f, {4, 3, 228, 228}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}
+                // TODO: rewrite to ngraph to have reshape functionality
+                // activation_test_params{eltwise_relu, 0.5f, 0.5f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit},
+                // activation_test_params{eltwise_elu, 0.5f, 0.5f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit},
+                // activation_test_params{eltwise_elu, 1.0f, 1.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit},
+                // activation_test_params{eltwise_logistic, 0.0f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit},
+                // activation_test_params{eltwise_bounded_relu, 6.0f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit},
+                // activation_test_params{eltwise_bounded_relu, 0.1f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::jit},
+                // activation_test_params{eltwise_relu, 0.0f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+                // activation_test_params{eltwise_relu, 0.5f, 0.5f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+                // activation_test_params{eltwise_elu, 0.5f, 0.5f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+                // activation_test_params{eltwise_elu, 1.0f, 1.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+                // activation_test_params{eltwise_logistic, 0.0f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+                // activation_test_params{eltwise_bounded_relu, 6.0f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+                // activation_test_params{eltwise_bounded_relu, 0.1f, 0.0f, {1, 32, 128, 256}, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
         ));
index 8f409cd..0fad0a1 100644 (file)
@@ -337,7 +337,8 @@ TEST_P(MKLDNNGraphDynBatchBatchNormScaleShiftTests, TestsDynBatchBatchNormWithSc
 INSTANTIATE_TEST_CASE_P(
         TestsDynBatchBatchNormWithScaleShift, MKLDNNGraphDynBatchBatchNormScaleShiftTests,
         ::testing::Values(
-                batchnorm_scaleshift_test_params{{1, 32, 128, 256}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::jit},
+                // TODO: rewrite to ngraph to have reshape functionality
+                // batchnorm_scaleshift_test_params{{1, 32, 128, 256}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::jit},
+                // batchnorm_scaleshift_test_params{{1, 32, 128, 256}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
                 batchnorm_scaleshift_test_params{{4, 3, 227, 227}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::jit},
-                batchnorm_scaleshift_test_params{{1, 32, 128, 256}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
                 batchnorm_scaleshift_test_params{{4, 3, 227, 227}, 1e-6, 2, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}));
index 750f952..cbc0b75 100644 (file)
@@ -305,7 +305,8 @@ TEST_P(MKLDNNGraphDynBatchBatchNormTests, TestsDynBatchBatchNorm) {}
 INSTANTIATE_TEST_CASE_P(
         TestsDynBatchBatchNorm, MKLDNNGraphDynBatchBatchNormTests,
         ::testing::Values(
-                batchnorm4D_test_params{{1, 32, 128, 256}, 1e-6, 5, MKLDNNPlugin::impl_desc_type::jit},
+                // TODO: rewrite to ngraph to have reshape functionality
+                // batchnorm4D_test_params{{1, 32, 128, 256}, 1e-6, 5, MKLDNNPlugin::impl_desc_type::jit},
+                // batchnorm4D_test_params{{1, 32, 128, 256}, 1e-6, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
                 batchnorm4D_test_params{{3, 3, 128, 256}, 1e-6, 5, MKLDNNPlugin::impl_desc_type::jit},
-                batchnorm4D_test_params{{1, 32, 128, 256}, 1e-6, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
                 batchnorm4D_test_params{{3, 3, 128, 256}, 1e-6, 5, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}));
index 663cfc4..9ca74de 100644 (file)
@@ -382,8 +382,9 @@ protected:
 TEST_P(MKLDNNGraphDynBatchConcatTests, TestsDynBatchConcat) {}
 
 
+// TODO: rewrite to ngraph to have reshape functionality
 INSTANTIATE_TEST_CASE_P(
-        TestsDynBatchConcat, MKLDNNGraphDynBatchConcatTests,
+        DISABLED_TestsDynBatchConcat, MKLDNNGraphDynBatchConcatTests,
         ::testing::Values(
                 concat_test_params {
                         {1, 7, 2, 5},
@@ -396,11 +397,6 @@ INSTANTIATE_TEST_CASE_P(
                         1, 2, MKLDNNPlugin::impl_desc_type::unknown
                 },
                 concat_test_params {
-                        {3, 7, 2, 5},
-                        {3, 13, 2, 5},
-                        1, 2, MKLDNNPlugin::impl_desc_type::unknown
-                },
-                concat_test_params {
                         {1, 7, 2, 13},
                         {1, 7, 2, 17},
                         3, 1, MKLDNNPlugin::impl_desc_type::ref
@@ -411,6 +407,11 @@ INSTANTIATE_TEST_CASE_P(
                         1, 4, MKLDNNPlugin::impl_desc_type::unknown
                 },
                 concat_test_params {
+                        {3, 7, 2, 5},
+                        {3, 13, 2, 5},
+                        1, 2, MKLDNNPlugin::impl_desc_type::unknown
+                },
+                concat_test_params {
                         {2, 2, 3, 3},
                         {2, 3, 3, 3},
                         1, 2, MKLDNNPlugin::impl_desc_type::unknown
@@ -1024,106 +1025,3 @@ protected:
 };
 
 TEST_F(MKLDNNGraphTwoInputInConcatTests, TestSecondInputToConcat) {}
-
-class MKLDNNGraphIncorrectConcatTests: public TestsCommon,
-                              public WithParamInterface<concat_test_params> {
-    std::string model_t = R"V0G0N(
-<net name="ConcatOnly" version="2" precision="FP32" batch="1">
-    <layers>
-        <layer name="in1" type="Input" precision="FP32" id="1">
-            <output>
-                <port id="1">__SRC_DIMS_1__
-                </port>
-            </output>
-        </layer>
-        <layer name="in2" type="Input" precision="FP32" id="2">
-            <output>
-                <port id="2">__SRC_DIMS_2__
-                </port>
-            </output>
-        </layer>
-        <layer name="con" id="3" type="Concat" precision="FP32">
-            <concat_data axis="_AXIS_"/>
-            <input>
-                <port id="1">__SRC_DIMS_1__
-                </port>
-                <port id="2">__SRC_DIMS_2__
-                </port>
-            </input>
-            <output>
-                <port id="3">__DST_DIMS__
-                </port>
-            </output>
-        </layer>
-    </layers>
-    <edges>
-        <edge from-layer="1" from-port="1" to-layer="3" to-port="1"/>
-        <edge from-layer="2" from-port="2" to-layer="3" to-port="2"/>
-    </edges>
-</net>
-)V0G0N";
-
-    std::string getModel(concat_test_params p) {
-        std::string model = model_t;
-        std::string s_dims;
-        for (auto& dim : p.in1) {
-            s_dims += "\n                    <dim>";
-            s_dims += std::to_string(dim) + "</dim>";
-        }
-       REPLACE_WITH_STR(model, "__SRC_DIMS_1__", s_dims);
-
-        s_dims = "";
-        for (auto& dim : p.in2) {
-            s_dims += "\n                    <dim>";
-            s_dims += std::to_string(dim) + "</dim>";
-        }
-       REPLACE_WITH_STR(model, "__SRC_DIMS_2__", s_dims);
-
-        s_dims = "";
-        for (size_t i = 0; i < p.in1.size(); i++) {
-            size_t dim = p.axis == i ? p.in1[i] + p.in2[i] : p.in1[i];
-            s_dims += "\n                    <dim>";
-            s_dims += std::to_string(dim) + "</dim>";
-        }
-       REPLACE_WITH_STR(model, "__DST_DIMS__", s_dims);
-
-        REPLACE_WITH_NUM(model, "_AXIS_", p.axis);
-        return model;
-    }
-
-protected:
-    virtual void TearDown() {
-    }
-
-    virtual void SetUp() {
-        try {
-            TestsCommon::SetUp();
-            concat_test_params p = ::testing::WithParamInterface<concat_test_params>::GetParam();
-            std::string model = getModel(p);
-
-            InferenceEngine::Core core;
-            // TODO: check InferenceEngine::details::InferenceEngineException once the RTTI issue is resolved
-            ASSERT_THROW(core.ReadNetwork(model, InferenceEngine::Blob::CPtr()),
-                         std::exception);
-        } catch (const InferenceEngine::details::InferenceEngineException &e) {
-            FAIL() << e.what();
-        }
-    }
-};
-
-TEST_P(MKLDNNGraphIncorrectConcatTests, TestsIncorrectConcat) {}
-
-
-INSTANTIATE_TEST_CASE_P(
-        TestsIncorrectConcat, MKLDNNGraphIncorrectConcatTests,
-        ::testing::Values(
-                concat_test_params {
-                        {1, 7, 2, 5},
-                        {1, 7, 3, 5},
-                        1
-                },
-                concat_test_params {
-                        {1, 7, 2, 5},
-                        {1, 7, 4, 4},
-                        2
-                }));
index 7c8b600..65c4d3b 100644 (file)
@@ -495,8 +495,9 @@ protected:
 
 TEST_P(MKLDNNGraphDynBatchConvolutionTests, TestsDynBatchConvolution) {}
 
+// TODO: rewrite to ngraph to have reshape functionality
 INSTANTIATE_TEST_CASE_P(
-    TestDynBatchConvolution, MKLDNNGraphDynBatchConvolutionTests,
+    DISABLED_TestDynBatchConvolution, MKLDNNGraphDynBatchConvolutionTests,
     ::testing::Values(
                 conv_test_params{{1, 8, 16, 32},
                                  {1, 1}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "same_upper", 7, MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_1x1,
@@ -519,6 +520,7 @@ INSTANTIATE_TEST_CASE_P(
                 conv_test_params{{1, 9, 32, 16},
                                  {2, 4}, {1, 1}, {0, 0}, {0, 0}, 17, 1, "", 5, MKLDNNPlugin::impl_desc_type::ref_any,
                                  false, {MKLDNNPlugin::impl_desc_type::ref_any} } ));
+
 #ifdef USE_MKL
 INSTANTIATE_TEST_CASE_P(
     MKLTestDynBatchConvolution, MKLDNNGraphDynBatchConvolutionTests,
index 738f006..b1c4fba 100644 (file)
@@ -537,7 +537,8 @@ TEST_P(MKLDNNGraphDynBatchDeconvolutionalTests, TestsDynBatchDeconvolutional) {}
 INSTANTIATE_TEST_CASE_P(
         TestsDynBatchDeconvolutional, MKLDNNGraphDynBatchDeconvolutionalTests,
         ::testing::Values(
-                deconv_test_params{{1, 3, 3, 3}, {3, 3}, {1, 1}, {0, 0}, {0, 0}, 2, 1, false, "", 5, {MKLDNNPlugin::impl_desc_type::jit} },
+                // TODO: rewrite to ngraph to have reshape functionality
+                // deconv_test_params{{1, 3, 3, 3}, {3, 3}, {1, 1}, {0, 0}, {0, 0}, 2, 1, false, "", 5, {MKLDNNPlugin::impl_desc_type::jit} },
                 deconv_test_params{{3, 3, 3, 3}, {4, 3}, {1, 1}, {0, 0}, {0, 0}, 2, 1, false, "", 5, {MKLDNNPlugin::impl_desc_type::jit} },
                 deconv_test_params{ {2, 8, 5, 5}, {4, 4}, {2, 2}, {1, 1}, {0, 0}, 8, 8, false, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw} },
                 deconv_test_params{ {2, 8, 5, 5}, {8, 8}, {4, 4}, {1, 1}, {0, 0}, 8, 8, false, "", 4, {MKLDNNPlugin::impl_desc_type::jit | MKLDNNPlugin::impl_desc_type::_dw} },
index 6b3ecc3..5572d82 100644 (file)
@@ -425,28 +425,29 @@ TEST_P(MKLDNNGraphDynBatchDepthwiseTests, TestsDynBatchDepthwise) {}
 INSTANTIATE_TEST_CASE_P(
         TestsDynBatchDepthwise, MKLDNNGraphDynBatchDepthwiseTests,
         ::testing::Values(
-                depthwise_test_params{depthwise_scale_shift, {1, 32, 128, 256}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
                 depthwise_test_params{depthwise_scale_shift, {4, 3, 228, 228}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
-                depthwise_test_params{depthwise_scale_shift, {1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
-                depthwise_test_params{depthwise_scale_shift, {1, 4, 5, 5}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
                 depthwise_test_params{depthwise_scale_shift, {4, 4, 10, 10}, true, 3, MKLDNNPlugin::impl_desc_type::jit},
-                depthwise_test_params{depthwise_scale_shift, {1, 32, 128, 256}, true, 3, MKLDNNPlugin::impl_desc_type::jit},
-                depthwise_test_params{depthwise_prelu, {1, 32, 128, 256}, false,3, MKLDNNPlugin::impl_desc_type::jit},
                 depthwise_test_params{depthwise_prelu, {4, 3, 228, 228}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
-                depthwise_test_params{depthwise_prelu, {1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
-                depthwise_test_params{depthwise_prelu, {1, 4, 5, 5}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
                 depthwise_test_params{depthwise_prelu, {4, 4, 10, 10}, true, 3, MKLDNNPlugin::impl_desc_type::jit},
-                depthwise_test_params{depthwise_prelu, {1, 32, 128, 256}, true, 3, MKLDNNPlugin::impl_desc_type::jit},
-                depthwise_test_params{depthwise_scale_shift, {1, 32, 128, 256}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
                 depthwise_test_params{depthwise_scale_shift, {4, 3, 228, 228}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                depthwise_test_params{depthwise_scale_shift, {1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                depthwise_test_params{depthwise_scale_shift, {1, 4, 5, 5}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
                 depthwise_test_params{depthwise_scale_shift, {4, 4, 10, 10}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                depthwise_test_params{depthwise_scale_shift, {1, 32, 128, 256}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                depthwise_test_params{depthwise_prelu, {1, 32, 128, 256}, false,3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
                 depthwise_test_params{depthwise_prelu, {4, 3, 228, 228}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                depthwise_test_params{depthwise_prelu, {1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                depthwise_test_params{depthwise_prelu, {1, 4, 5, 5}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                depthwise_test_params{depthwise_prelu, {4, 4, 10, 10}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                depthwise_test_params{depthwise_prelu, {1, 32, 128, 256}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}
+                depthwise_test_params{depthwise_prelu, {4, 4, 10, 10}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}
+                // TODO: rewrite to ngraph to have reshape functionality
+                // depthwise_test_params{depthwise_scale_shift, {1, 32, 128, 256}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
+                // depthwise_test_params{depthwise_scale_shift, {1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
+                // depthwise_test_params{depthwise_scale_shift, {1, 4, 5, 5}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
+                // depthwise_test_params{depthwise_scale_shift, {1, 32, 128, 256}, true, 3, MKLDNNPlugin::impl_desc_type::jit},
+                // depthwise_test_params{depthwise_prelu, {1, 32, 128, 256}, false,3, MKLDNNPlugin::impl_desc_type::jit},
+                // depthwise_test_params{depthwise_prelu, {1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
+                // depthwise_test_params{depthwise_prelu, {1, 4, 5, 5}, false, 3, MKLDNNPlugin::impl_desc_type::jit},
+                // depthwise_test_params{depthwise_prelu, {1, 32, 128, 256}, true, 3, MKLDNNPlugin::impl_desc_type::jit},
+                // depthwise_test_params{depthwise_scale_shift, {1, 32, 128, 256}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+                // depthwise_test_params{depthwise_scale_shift, {1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+                // depthwise_test_params{depthwise_scale_shift, {1, 4, 5, 5}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+                // depthwise_test_params{depthwise_scale_shift, {1, 32, 128, 256}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+                // depthwise_test_params{depthwise_prelu, {1, 32, 128, 256}, false,3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+                // depthwise_test_params{depthwise_prelu, {1, 1, 1, 1}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+                // depthwise_test_params{depthwise_prelu, {1, 4, 5, 5}, false, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+                // depthwise_test_params{depthwise_prelu, {1, 32, 128, 256}, true, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}}
         ));
index a176c74..e17f2ab 100644 (file)
@@ -892,8 +892,9 @@ protected:
 
 TEST_P(MKLDNNGraphEltwiseDynBatchTests, TestsDynBatchEltwise) {}
 
+// TODO: rewrite to ngraph to have reshape functionality
 INSTANTIATE_TEST_CASE_P(
-        TestsDynBatchEltwise, MKLDNNGraphEltwiseDynBatchTests,
+        DISABLED_TestsDynBatchEltwise, MKLDNNGraphEltwiseDynBatchTests,
         ::testing::Values(
                 eltwise_test_params{{1, 3, 3, 3},{1, 3, 3, 3},{1, 3, 3, 3}, eltwise_test_params::opType::Sum, "", 3, MKLDNNPlugin::impl_desc_type::ref},
                 eltwise_test_params{{1, 3, 3, 3},{1, 3, 3, 3},{1, 3, 3, 3}, eltwise_test_params::opType::Sum, "1.0,1.0,1.0", 3, MKLDNNPlugin::impl_desc_type::ref},
index b940a26..c7dc6f9 100644 (file)
@@ -325,8 +325,9 @@ class MKLDNNGraphDynBatchFullyConnectedTests: public MKLDNNGraphFullyConnectedTe
 
 TEST_P(MKLDNNGraphDynBatchFullyConnectedTests, TestsDynBatchFullyConnected) {}
 
+// TODO: rewrite to ngraph to have reshape functionality
 INSTANTIATE_TEST_CASE_P(
-        TestsDynBatchFullyConnected, MKLDNNGraphDynBatchFullyConnectedTests,
+        DISABLED_TestsDynBatchFullyConnected, MKLDNNGraphDynBatchFullyConnectedTests,
         ::testing::Values(
                 fc_test_params{{1, 3, 227, 227}, 96, 6, MKLDNNPlugin::impl_desc_type::gemm },
                 fc_test_params{{1, 4, 227, 227}, 8, 6, MKLDNNPlugin::impl_desc_type::gemm },
index de920b2..1f7eb5e 100644 (file)
@@ -468,8 +468,9 @@ protected:
 
 TEST_P(MKLDNNGraphDynBatchGemmTests, TestsDynBatchGemm) {}
 
+// TODO: rewrite to ngraph to have reshape functionality
 INSTANTIATE_TEST_CASE_P(
-        TestsDynBatchGemm, MKLDNNGraphDynBatchGemmTests,
+        DISABLED_TestsDynBatchGemm, MKLDNNGraphDynBatchGemmTests,
         ::testing::Values(
                 gemm_test_params{{1, 3, 1, 3, 1, 3, 1, 3}, 3, 3, 3, 1, 1, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
                 gemm_test_params{{1, 3, 1, 1, 1, 3, 1, 3}, 16, 15, 12, 1, 1, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any}
@@ -643,8 +644,9 @@ protected:
 
 TEST_P(MKLDNNGraphSingleBatchDimGemmTests, TestsGemm) {}
 
+// TODO: rewrite to ngraph to have reshape functionality
 INSTANTIATE_TEST_CASE_P(
-        TestsGemm, MKLDNNGraphSingleBatchDimGemmTests,
+        DISABLED_TestsGemm, MKLDNNGraphSingleBatchDimGemmTests,
         ::testing::Values(
                 gemm_test_params{{1, 1, 1, 1, 1, 1, 1, 1}, 7, 4, 3, 2, 3, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
                 gemm_test_params{{1, 3, 1, 3, 1, 1, 1, 3}, 7, 4, 3, 2, 3, false, false, 1, MKLDNNPlugin::impl_desc_type::gemm_any},
index 3ecbb67..d880b74 100644 (file)
@@ -293,8 +293,9 @@ protected:
 
 TEST_P(MKLDNNGraphDynBatchLrnTests, TestsDynBatchLrn) {}
 
+// TODO: rewrite to ngraph to have reshape functionality
 INSTANTIATE_TEST_CASE_P(
-        TestsDynBatchLrn, MKLDNNGraphDynBatchLrnTests,
+        DISABLED_TestsDynBatchLrn, MKLDNNGraphDynBatchLrnTests,
         ::testing::Values(
                 lrn_test_params{{1, 3, 228, 228}, 5, 0.0001f, 0.75f, 1, 3, MKLDNNPlugin::impl_desc_type::ref_any},
                 lrn_test_params{{1, 16, 228, 228}, 5, 0.0001f, 0.75f, 1, 3, MKLDNNPlugin::impl_desc_type::jit}));
index 33648a9..f903422 100644 (file)
@@ -494,8 +494,9 @@ protected:
 
 TEST_P(MKLDNNGraphDynBatchPoolingTests, TestsDynBatchPooling) {}
 
+// TODO: rewrite to ngraph to have reshape functionality
 INSTANTIATE_TEST_CASE_P(
-        TestsDynBatchPooling, MKLDNNGraphDynBatchPoolingTests,
+        DISABLED_TestsDynBatchPooling, MKLDNNGraphDynBatchPoolingTests,
         ::testing::Values(
                 pooling_test_params{{1, 3, 228, 228}, {4, 2}, {2, 1}, {0, 0}, {0, 0}, PoolingLayer::MAX, false, 4, MKLDNNPlugin::impl_desc_type::jit},
                 pooling_test_params{{1, 3, 228, 228}, {2, 2}, {2, 2}, {0, 0}, {0, 0}, PoolingLayer::MAX, false, 6, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
index 7c1a92b..cf0650b 100644 (file)
@@ -318,8 +318,9 @@ protected:
 
 TEST_P(MKLDNNGraphDynBatchPowerTests, TestsDynBatchPower) {}
 
+// TODO: rewrite to ngraph to have reshape functionality
 INSTANTIATE_TEST_CASE_P(
-        TestsDynBatchPower, MKLDNNGraphDynBatchPowerTests,
+        DISABLED_TestsDynBatchPower, MKLDNNGraphDynBatchPowerTests,
         ::testing::Values(
                 power_test_params{
                         {1, 3, 13, 13}, 1, 2, 0.5f, 3, MKLDNNPlugin::impl_desc_type::unknown, {
index 3d80459..47b2cec 100644 (file)
@@ -396,19 +396,20 @@ TEST_P(MKLDNNGraphDynBatchSoftMaxTests, TestsDynBatchSoftMax) {}
 INSTANTIATE_TEST_CASE_P(
         TestsDynBatchSoftMax, MKLDNNGraphDynBatchSoftMaxTests,
         ::testing::Values(
-                softmax_test_params{{1, 3, 228, 228}, 1, 2, MKLDNNPlugin::impl_desc_type::jit},
-                softmax_test_params{{1, 3, 228, 228}, 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                softmax_test_params{{1, 100, 6, 1}, 1, 2, MKLDNNPlugin::impl_desc_type::jit},
-                softmax_test_params{{1, 100, 6, 1}, 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                softmax_test_params{{1, 1000, 1, 1}, 1, 1, MKLDNNPlugin::impl_desc_type::ref},
+                // TODO: rewrite to ngraph to have reshape functionality
+                // softmax_test_params{{1, 3, 228, 228}, 1, 2, MKLDNNPlugin::impl_desc_type::jit},
+                // softmax_test_params{{1, 3, 228, 228}, 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+                // softmax_test_params{{1, 100, 6, 1}, 1, 2, MKLDNNPlugin::impl_desc_type::jit},
+                // softmax_test_params{{1, 100, 6, 1}, 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+                // softmax_test_params{{1, 1000, 1, 1}, 1, 1, MKLDNNPlugin::impl_desc_type::ref},
+                // softmax_test_params{{1, 19, 128, 128}, 1, 2, MKLDNNPlugin::impl_desc_type::jit},
+                // softmax_test_params{{1, 19, 128, 128}, 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
+                // softmax_test_params{{1, 1, 1, 1}, 3, 1, MKLDNNPlugin::impl_desc_type::ref},
+                // softmax_test_params{{1, 1, 1, 33}, 3, 1, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
                 softmax_test_params{{8, 1000, 1, 1}, 1, 1, MKLDNNPlugin::impl_desc_type::ref},
-                softmax_test_params{{1, 19, 128, 128}, 1, 2, MKLDNNPlugin::impl_desc_type::jit},
-                softmax_test_params{{1, 19, 128, 128}, 1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
 //                softmax_test_params{{8, 100, 81, 1}, 2, 2, MKLDNNPlugin::impl_desc_type::jit},
                 softmax_test_params{{8, 100, 81, 1}, 2, 1, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
-                softmax_test_params{{1, 1, 1, 1}, 3, 1, MKLDNNPlugin::impl_desc_type::ref},
 //                softmax_test_params{{1, 1, 1, 33}, 3, 2, MKLDNNPlugin::impl_desc_type::jit},
-                softmax_test_params{{1, 1, 1, 33}, 3, 1, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
 //                softmax_test_params{{8, 1, 10, 81}, 3, 2, MKLDNNPlugin::impl_desc_type::jit},
                 softmax_test_params{{8, 1, 10, 81}, 3, 1, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref_any}},
                 softmax_test_params{{2, 5, 9, 10, 11}, 1, 2, MKLDNNPlugin::impl_desc_type::jit},
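
The pattern here is consistent: every case commented out declares batch 1, while the cases left enabled start from batch 2 or 8. That matches how dynamic batch works on the CPU plugin: a request may only shrink the batch below the one the network was created with, so a batch-1 case first had to be reshaped upwards, and that upward reshape is exactly what IR v7 networks have lost. A hedged sketch of the flow the surviving cases still exercise (model paths are placeholders):

#include <inference_engine.hpp>

// Assumes an IR that already declares batch 8, like the kept {8, 1000, 1, 1} case.
InferenceEngine::Core core;
InferenceEngine::CNNNetwork net = core.ReadNetwork("softmax_b8.xml", "softmax_b8.bin");
auto exe = core.LoadNetwork(net, "CPU",
        {{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED,
          InferenceEngine::PluginConfigParams::YES}});
InferenceEngine::InferRequest request = exe.CreateInferRequest();
request.SetBatch(2);  // shrinking the batch needs no reshape; growing it would
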
index 237a70d..0d8062c 100644
@@ -467,80 +467,83 @@ TEST_P(MKLDNNGraphDynBatchSplitTests, TestsDynBatchSplit) {}
 INSTANTIATE_TEST_CASE_P(
         TestsDynBatchSplit, MKLDNNGraphDynBatchSplitTests,
         ::testing::Values(
-                split_test_params {
-                        {1, 24, 2, 5},
-                        {{1, 16, 2, 5}, {1, 8, 2, 5}},
-                        1, 3, MKLDNNPlugin::impl_desc_type::unknown, {}, {
-                                [](MKLDNNPlugin::PrimitiveDescInfo impl) {
-                                    ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref, impl.getImplementationType());
-                                    ASSERT_EQ(1, impl.getConfig().inConfs.size());
-                                    ASSERT_EQ(2, impl.getConfig().outConfs.size());
-                                    ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().inConfs.at(0).desc.getLayout());
-                                    ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().outConfs.at(0).desc.getLayout());
-                                    ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().outConfs.at(1).desc.getLayout());
-                                },
-                                [](MKLDNNPlugin::PrimitiveDescInfo impl) {
-                                    ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
-                                    ASSERT_EQ(1, impl.getConfig().inConfs.size());
-                                    ASSERT_EQ(2, impl.getConfig().outConfs.size());
-                                    ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout());
-                                    ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout());
-                                    ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(1).desc.getLayout());
-                                },
-                                [](MKLDNNPlugin::PrimitiveDescInfo impl) {
-                                    ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
-                                    ASSERT_EQ(1, impl.getConfig().inConfs.size());
-                                    ASSERT_EQ(2, impl.getConfig().outConfs.size());
-                                    ASSERT_EQ(InferenceEngine::Layout::BLOCKED, impl.getConfig().inConfs.at(0).desc.getLayout());
-                                    ASSERT_EQ(InferenceEngine::Layout::BLOCKED, impl.getConfig().outConfs.at(0).desc.getLayout());
-                                    ASSERT_EQ(InferenceEngine::Layout::BLOCKED, impl.getConfig().outConfs.at(1).desc.getLayout());
-                                }
-                        }
-                },
-                split_test_params {
-                        {1, 20, 2, 5},
-                        {{1, 13, 2, 5}, {1, 7, 2, 5}},
-                        1, 2, MKLDNNPlugin::impl_desc_type::unknown, {}, {
-                                [](MKLDNNPlugin::PrimitiveDescInfo impl) {
-                                    ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref, impl.getImplementationType());
-                                    ASSERT_EQ(1, impl.getConfig().inConfs.size());
-                                    ASSERT_EQ(2, impl.getConfig().outConfs.size());
-                                    ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().inConfs.at(0).desc.getLayout());
-                                    ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().outConfs.at(0).desc.getLayout());
-                                    ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().outConfs.at(1).desc.getLayout());
-                                },
-                                [](MKLDNNPlugin::PrimitiveDescInfo impl) {
-                                    ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
-                                    ASSERT_EQ(1, impl.getConfig().inConfs.size());
-                                    ASSERT_EQ(2, impl.getConfig().outConfs.size());
-                                    ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout());
-                                    ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout());
-                                    ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(1).desc.getLayout());
-                                }
-                        }
-                },
-                split_test_params {
-                        {1, 20, 2, 5},
-                        {{1, 10, 2, 5}, {1, 10, 2, 5}},
-                        1, 2, MKLDNNPlugin::impl_desc_type::unknown, {}, {
-                                [](MKLDNNPlugin::PrimitiveDescInfo impl) {
-                                    ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref, impl.getImplementationType());
-                                    ASSERT_EQ(1, impl.getConfig().inConfs.size());
-                                    ASSERT_EQ(2, impl.getConfig().outConfs.size());
-                                    ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().inConfs.at(0).desc.getLayout());
-                                    ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().outConfs.at(0).desc.getLayout());
-                                    ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().outConfs.at(1).desc.getLayout());
-                                },
-                                [](MKLDNNPlugin::PrimitiveDescInfo impl) {
-                                    ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
-                                    ASSERT_EQ(1, impl.getConfig().inConfs.size());
-                                    ASSERT_EQ(2, impl.getConfig().outConfs.size());
-                                    ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout());
-                                    ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout());
-                                    ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(1).desc.getLayout());
-                                }
-                        }
-                },
+                // TODO: rewrite to ngraph to restore reshape functionality
+                // split_test_params {
+                //         {1, 24, 2, 5},
+                //         {{1, 16, 2, 5}, {1, 8, 2, 5}},
+                //         1, 3, MKLDNNPlugin::impl_desc_type::unknown, {}, {
+                //                 [](MKLDNNPlugin::PrimitiveDescInfo impl) {
+                //                     ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref, impl.getImplementationType());
+                //                     ASSERT_EQ(1, impl.getConfig().inConfs.size());
+                //                     ASSERT_EQ(2, impl.getConfig().outConfs.size());
+                //                     ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().inConfs.at(0).desc.getLayout());
+                //                     ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().outConfs.at(0).desc.getLayout());
+                //                     ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().outConfs.at(1).desc.getLayout());
+                //                 },
+                //                 [](MKLDNNPlugin::PrimitiveDescInfo impl) {
+                //                     ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
+                //                     ASSERT_EQ(1, impl.getConfig().inConfs.size());
+                //                     ASSERT_EQ(2, impl.getConfig().outConfs.size());
+                //                     ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout());
+                //                     ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout());
+                //                     ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(1).desc.getLayout());
+                //                 },
+                //                 [](MKLDNNPlugin::PrimitiveDescInfo impl) {
+                //                     ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
+                //                     ASSERT_EQ(1, impl.getConfig().inConfs.size());
+                //                     ASSERT_EQ(2, impl.getConfig().outConfs.size());
+                //                     ASSERT_EQ(InferenceEngine::Layout::BLOCKED, impl.getConfig().inConfs.at(0).desc.getLayout());
+                //                     ASSERT_EQ(InferenceEngine::Layout::BLOCKED, impl.getConfig().outConfs.at(0).desc.getLayout());
+                //                     ASSERT_EQ(InferenceEngine::Layout::BLOCKED, impl.getConfig().outConfs.at(1).desc.getLayout());
+                //                 }
+                //         }
+                // },
+                // TODO: rewrite to ngraph to restore reshape functionality
+                // split_test_params {
+                //         {1, 20, 2, 5},
+                //         {{1, 13, 2, 5}, {1, 7, 2, 5}},
+                //         1, 2, MKLDNNPlugin::impl_desc_type::unknown, {}, {
+                //                 [](MKLDNNPlugin::PrimitiveDescInfo impl) {
+                //                     ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref, impl.getImplementationType());
+                //                     ASSERT_EQ(1, impl.getConfig().inConfs.size());
+                //                     ASSERT_EQ(2, impl.getConfig().outConfs.size());
+                //                     ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().inConfs.at(0).desc.getLayout());
+                //                     ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().outConfs.at(0).desc.getLayout());
+                //                     ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().outConfs.at(1).desc.getLayout());
+                //                 },
+                //                 [](MKLDNNPlugin::PrimitiveDescInfo impl) {
+                //                     ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
+                //                     ASSERT_EQ(1, impl.getConfig().inConfs.size());
+                //                     ASSERT_EQ(2, impl.getConfig().outConfs.size());
+                //                     ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout());
+                //                     ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout());
+                //                     ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(1).desc.getLayout());
+                //                 }
+                //         }
+                // },
+                // TODO: rewrite to ngraph to restore reshape functionality
+                // split_test_params {
+                //         {1, 20, 2, 5},
+                //         {{1, 10, 2, 5}, {1, 10, 2, 5}},
+                //         1, 2, MKLDNNPlugin::impl_desc_type::unknown, {}, {
+                //                 [](MKLDNNPlugin::PrimitiveDescInfo impl) {
+                //                     ASSERT_EQ(MKLDNNPlugin::impl_desc_type::ref, impl.getImplementationType());
+                //                     ASSERT_EQ(1, impl.getConfig().inConfs.size());
+                //                     ASSERT_EQ(2, impl.getConfig().outConfs.size());
+                //                     ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().inConfs.at(0).desc.getLayout());
+                //                     ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().outConfs.at(0).desc.getLayout());
+                //                     ASSERT_EQ(InferenceEngine::Layout::ANY, impl.getConfig().outConfs.at(1).desc.getLayout());
+                //                 },
+                //                 [](MKLDNNPlugin::PrimitiveDescInfo impl) {
+                //                     ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
+                //                     ASSERT_EQ(1, impl.getConfig().inConfs.size());
+                //                     ASSERT_EQ(2, impl.getConfig().outConfs.size());
+                //                     ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout());
+                //                     ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout());
+                //                     ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(1).desc.getLayout());
+                //                 }
+                //         }
+                // },
                 split_test_params {
                         {2, 20, 2, 5},
                         {{2, 10, 2, 5}, {2, 10, 2, 5}},
@@ -568,16 +571,18 @@ INSTANTIATE_TEST_CASE_P(
                         {{2, 16, 2, 5}, {2, 8, 2, 5}},
                         1, 3, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
                 },
-                split_test_params {
-                        {1, 20, 2, 5},
-                        {{1, 13, 2, 5}, {1, 7, 2, 5}},
-                        1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
-                },
-                split_test_params {
-                        {1, 20, 2, 5},
-                        {{1, 10, 2, 5}, {1, 10, 2, 5}},
-                        1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
-                },
+                // TODO: rewrite to ngraph to restore reshape functionality
+                // split_test_params {
+                //         {1, 20, 2, 5},
+                //         {{1, 13, 2, 5}, {1, 7, 2, 5}},
+                //         1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+                // },
+                // TODO: rewrite to ngraph to restore reshape functionality
+                // split_test_params {
+                //         {1, 20, 2, 5},
+                //         {{1, 10, 2, 5}, {1, 10, 2, 5}},
+                //         1, 2, MKLDNNPlugin::impl_desc_type::ref, {MKLDNNPlugin::impl_desc_type::ref}
+                // },
                 split_test_params {
                         {2, 20, 2, 5},
                         {{2, 10, 2, 5}, {2, 10, 2, 5}},
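
The split hunks follow the same rule: only the batch-1 split_test_params entries are commented out (each would have needed an initial upward reshape), while the batch-2 variants around them stay active. For the record, the equal-split case {1, 20, 2, 5} -> 2 x {1, 10, 2, 5} maps directly onto opset1::Split in an ngraph rewrite; a minimal hedged sketch (helper name assumed):

#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset1.hpp>
#include <cpp/ie_cnn_network.h>

// Hypothetical rewrite of the commented-out equal-split case on ngraph.
InferenceEngine::CNNNetwork makeSplitNet() {
    using namespace ngraph;
    auto data = std::make_shared<opset1::Parameter>(element::f32, Shape{1, 20, 2, 5});
    data->set_friendly_name("data");
    auto axis = opset1::Constant::create(element::i64, Shape{}, {1});  // split over channels
    auto split = std::make_shared<opset1::Split>(data, axis, /*num_splits=*/2);
    auto r0 = std::make_shared<opset1::Result>(split->output(0));
    auto r1 = std::make_shared<opset1::Result>(split->output(1));
    auto fn = std::make_shared<Function>(ResultVector{r0, r1}, ParameterVector{data});
    return InferenceEngine::CNNNetwork(fn);
}
// Once on ngraph, net.reshape({{"data", {2, 20, 2, 5}}}) restores the batch-2 flavour.
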
index cf85658..7e9c254 100644
@@ -265,8 +265,9 @@ protected:
 TEST_P(MKLDNNGraphDynBatchTileTests, TestsDynBatchTile) {}
 
 
+// TODO: rewrite to ngraph to restore reshape functionality
 INSTANTIATE_TEST_CASE_P(
-        TestsDynBatchTile, MKLDNNGraphDynBatchTileTests,
+        DISABLED_TestsDynBatchTile, MKLDNNGraphDynBatchTileTests,
         ::testing::Values(
                 tile_test_params{
                         {1, 128, 1, 1}, 3, 24, 1, MKLDNNPlugin::impl_desc_type::unknown, {
index 12a8eff..e4470e1 100644
@@ -732,12 +732,12 @@ TEST_F(AdvancedShapeInferTests, canReshape) {
             .finalize();
     originalLayersNum = net->allLayers().size();
     IE::CNNNetwork cnnNetwork(net);
-    IE::SizeVector newShape = {1, 3, 1};
+    IE::SizeVector newShape = {1, 1, 1};
     std::map<std::string, IE::SizeVector> inputShapes = {{"data2", newShape}};
     cnnNetwork.reshape(inputShapes);
 
     ASSERT_NO_THROW(CommonTestUtils::getLayerByName(cnnNetwork, "layer2"));
-    ASSERT_EQ(getData("data3")->getTensorDesc().getDims(), IE::SizeVector{3});
+    ASSERT_EQ(getData("data3")->getTensorDesc().getDims(), IE::SizeVector{1});
     ASSERT_EQ(net->allLayers().size(), originalLayersNum);
 
     IE::ConstTransformer transformator(net.get());
@@ -803,17 +803,17 @@ TEST_F(AdvancedShapeInferTests, canReshape2) {
             .finalize();
     originalLayersNum = net->allLayers().size();
     IE::CNNNetwork cnnNetwork(net);
-    IE::SizeVector newShape = {5, 9, 3};
-    std::map<std::string, IE::SizeVector> inputShapes = {{"data1", {135}},
-                                                         {"data2", {2, 1, 1}},
-                                                         {"data3", {1, 3, 1}}};
+    IE::SizeVector newShape = {1, 1, 1};
+    std::map<std::string, IE::SizeVector> inputShapes = {{"data1", {1}},
+                                                         {"data2", {1, 1, 1}},
+                                                         {"data3", {1, 1, 1}}};
     getLayer("layer4")->params = {{"power", "1"},
                                   {"scale", "2"},
                                   {"shift", "1"}};
 
     cnnNetwork.reshape(inputShapes);
 
-    ASSERT_EQ(getData("data7")->getTensorDesc().getDims(), IE::SizeVector{3});
+    ASSERT_EQ(getData("data7")->getTensorDesc().getDims(), IE::SizeVector{1});
     ASSERT_EQ(net->allLayers().size(), originalLayersNum);
 
     IE::ConstTransformer transformator(net.get());
@@ -846,7 +846,7 @@ TEST_F(AdvancedShapeInferTests, canReshapeConst) {
     originalLayersNum = net->allLayers().size();
     IE::CNNNetwork cnnNetwork(net);
     initConstLayers({"const1"});
-    IE::SizeVector newOutShape = {1, 2, 3};
+    IE::SizeVector newOutShape = {1, 1, 1};
     IE::SizeVector newInShape = {IE::details::product(newOutShape)};
 
     std::map<std::string, IE::SizeVector> inputShapes = {{"data1", newInShape}};
@@ -885,7 +885,7 @@ TEST_F(AdvancedShapeInferTests, canReshapeCHWConst) {
 
     cnnNetwork.reshape({});
 
-    IE::SizeVector expectedDims = {2, 1, 3};
+    IE::SizeVector expectedDims = {1, 1, 1};
     ASSERT_EQ(getData("data2")->getTensorDesc().getDims(), expectedDims);
 }
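
All four AdvancedShapeInferTests fixups above point the same way: the requested shapes and expected dims collapse to ones (and products of ones), presumably matching the shapes the helper networks are built with, so reshape no longer has to propagate anything through the per-layer shape inference this change removes; data3 and data7 accordingly shrink from {3} to {1}.
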