Removed CNNNetwork BlobTransformer (#1709)
author Gleb Kazantaev <gleb.kazantaev@intel.com>
Tue, 11 Aug 2020 09:14:14 +0000 (12:14 +0300)
committer GitHub <noreply@github.com>
Tue, 11 Aug 2020 09:14:14 +0000 (12:14 +0300)
* Removed CNNNetwork BlobTransformer

* Removed inference_engine_lp_transformations dependency for GNA and VPU plugins

inference-engine/src/gna_plugin/CMakeLists.txt
inference-engine/src/gna_plugin/gna_plugin.cpp
inference-engine/src/low_precision_transformations/include/low_precision_transformations/blob_transformation.hpp [deleted file]
inference-engine/src/low_precision_transformations/src/blob_transformation.cpp [deleted file]
inference-engine/src/vpu/graph_transformer/CMakeLists.txt
inference-engine/src/vpu/graph_transformer/include/vpu/frontend/frontend.hpp
inference-engine/src/vpu/graph_transformer/src/frontend/frontend.cpp
inference-engine/src/vpu/graph_transformer/src/frontend/move_const_inputs_to_blobs.cpp [deleted file]

index 38a7e88..8164990 100644 (file)
@@ -31,7 +31,7 @@ endif()
 #saving rpath to GNA shared library be used by CI
 log_rpath_from_dir(GNA ${libGNA_LIBRARIES_BASE_PATH})
 
-target_link_libraries(${TARGET_NAME} PRIVATE inference_engine inference_engine_lp_transformations Threads::Threads libGNA)
+target_link_libraries(${TARGET_NAME} PRIVATE inference_engine Threads::Threads libGNA)
 target_include_directories(${TARGET_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
 target_compile_definitions(${TARGET_NAME}
     PRIVATE
index b77bfe7..9254a96 100644 (file)
@@ -18,7 +18,6 @@
 #include <utility>
 #include <limits>
 
-#include <low_precision_transformations/blob_transformation.hpp>
 #include <legacy/graph_tools.hpp>
 #include <legacy/net_pass.h>
 #include <debug.h>
@@ -349,10 +348,6 @@ void GNAPlugin::LoadNetwork(ICNNNetwork & _network) {
     NetPass::ConvertPrecision(network, Precision::U64, Precision::I32);
     NetPass::ConvertPrecision(network, Precision::U32, Precision::I32);
 
-    // move blobs from Constant layers to Convolution, Deconvolution, FullyConnected layers attributes
-    BlobTransformation blobsTransformation;
-    blobsTransformation.transform(network, true);
-
     //  Check the input network
     std::string error;
     if (!AreLayersSupported(network, error)) {
diff --git a/inference-engine/src/low_precision_transformations/include/low_precision_transformations/blob_transformation.hpp b/inference-engine/src/low_precision_transformations/include/low_precision_transformations/blob_transformation.hpp
deleted file mode 100644 (file)
index 9d582e1..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <algorithm>
-#include <string>
-#include <unordered_set>
-
-#include <ie_icnn_network.hpp>
-
-namespace InferenceEngine {
-namespace details {
-
-class INFERENCE_ENGINE_API_CLASS(BlobTransformation) {
-public:
-    BlobTransformation() = default;
-    void transform(ICNNNetwork& network, bool transformWithFakeQuantizeOnWeights = false) const;
-
-private:
-    const std::unordered_set<std::string> layersForTransformations = {
-        "Convolution",
-        "Deconvolution",
-        "FullyConnected"
-    };
-};
-
-}  // namespace details
-}  // namespace InferenceEngine
diff --git a/inference-engine/src/low_precision_transformations/src/blob_transformation.cpp b/inference-engine/src/low_precision_transformations/src/blob_transformation.cpp
deleted file mode 100644 (file)
index 81c9ec2..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "low_precision_transformations/blob_transformation.hpp"
-#include "low_precision_transformations/network_helper.hpp"
-#include <legacy/details/ie_cnn_network_tools.h>
-
-#include <algorithm>
-#include <vector>
-
-
-using namespace InferenceEngine;
-using namespace InferenceEngine::details;
-
-void BlobTransformation::transform(ICNNNetwork& network, bool transformWithFakeQuantizeOnWeights) const {
-    const std::vector<CNNLayerPtr> layers = CNNNetSortTopologically(network);
-
-    for (const CNNLayerPtr& layer : layers) {
-        if (layer->insData.size() < 2) {
-            continue;
-        }
-        if (this->layersForTransformations.find(layer->type) == this->layersForTransformations.end()) {
-            continue;
-        }
-
-        const CNNLayerPtr weightsLayer = CNNNetworkHelper::getParent(*layer, 1);
-        if ((!transformWithFakeQuantizeOnWeights) &&
-            ((weightsLayer->type == "FakeQuantize") || (weightsLayer->type == "Quantize"))) {
-            continue;
-        }
-
-        WeightableLayer* weightableLayer = dynamic_cast<WeightableLayer*>(layer.get());
-        if (weightableLayer == nullptr) {
-            continue;
-        }
-
-        const Blob::Ptr weightsBlob = CNNNetworkHelper::getWeights(*layer, false);
-        if (weightsBlob != nullptr) {
-            weightableLayer->blobs["weights"] = weightsBlob;
-            weightableLayer->_weights = weightsBlob;
-        }
-
-        if (layer->insData.size() >= 3) {
-            const Blob::Ptr biasesBlob = CNNNetworkHelper::getBiases(*layer);
-            if (biasesBlob != nullptr) {
-                weightableLayer->blobs["biases"] = biasesBlob;
-                weightableLayer->_biases = biasesBlob;
-            }
-
-            CNNLayerPtr biasesLayer = CNNNetworkHelper::getParent(*layer, 2);
-            CNNNetworkHelper::removeLayer(network, biasesLayer);
-        }
-
-        CNNNetworkHelper::removeLayer(network, weightsLayer);
-    }
-}
index 19ec225..a454374 100644 (file)
@@ -45,7 +45,7 @@ function(add_graph_transformer_target TARGET_NAME STATIC_IE)
         target_link_libraries(${TARGET_NAME} PUBLIC pugixml vpu_common_lib)
     endif()
 
-    target_link_libraries(${TARGET_NAME} PUBLIC ${NGRAPH_LIBRARIES} inference_engine_lp_transformations
+    target_link_libraries(${TARGET_NAME} PUBLIC ${NGRAPH_LIBRARIES}
                                          PRIVATE openvino::itt)
 
     if(WIN32)
index 8eaf3f3..07c32ce 100644 (file)
@@ -66,9 +66,6 @@ private:
     void removeConstLayers(
             ie::ICNNNetwork& network);
 
-    void moveConstInputsToBlobs(
-            ie::ICNNNetwork& network);
-
     //
     // Process internal VPU Model
     //
index a64da5e..368cef9 100644 (file)
@@ -426,8 +426,6 @@ ModelPtr FrontEnd::runCommonPasses(ie::ICNNNetwork& network, const UnsupportedLa
         ie::NetPass::ConvertPrecision(*originalOrConvertNetwork, ie::Precision::U64, ie::Precision::I32);
         ie::NetPass::ConvertPrecision(*originalOrConvertNetwork, ie::Precision::BOOL, ie::Precision::I32);
 
-        moveConstInputsToBlobs(*originalOrConvertNetwork);
-
         removeConstLayers(*originalOrConvertNetwork);
 
         unrollLoops(*originalOrConvertNetwork);
diff --git a/inference-engine/src/vpu/graph_transformer/src/frontend/move_const_inputs_to_blobs.cpp b/inference-engine/src/vpu/graph_transformer/src/frontend/move_const_inputs_to_blobs.cpp
deleted file mode 100644 (file)
index a9e0125..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <vpu/frontend/frontend.hpp>
-
-#include <low_precision_transformations/blob_transformation.hpp>
-
-#include <vpu/compile_env.hpp>
-
-namespace vpu {
-
-void FrontEnd::moveConstInputsToBlobs(ie::ICNNNetwork& network) {
-    VPU_PROFILE(moveConstInputsToBlobs);
-
-    const auto& env = CompileEnv::get();
-
-    env.log->trace("Move const inputs to blobs");
-    VPU_LOGGER_SECTION(env.log);
-
-    ie::details::BlobTransformation blobsTransformation;
-    blobsTransformation.transform(network, true);
-}
-
-}  // namespace vpu