# Saving rpath to GNA shared library to be used by CI
log_rpath_from_dir(GNA ${libGNA_LIBRARIES_BASE_PATH})
-target_link_libraries(${TARGET_NAME} PRIVATE inference_engine inference_engine_lp_transformations Threads::Threads libGNA)
+target_link_libraries(${TARGET_NAME} PRIVATE inference_engine Threads::Threads libGNA)
target_include_directories(${TARGET_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
target_compile_definitions(${TARGET_NAME}
PRIVATE
#include <utility>
#include <limits>
-#include <low_precision_transformations/blob_transformation.hpp>
#include <legacy/graph_tools.hpp>
#include <legacy/net_pass.h>
#include <debug.h>
NetPass::ConvertPrecision(network, Precision::U64, Precision::I32);
NetPass::ConvertPrecision(network, Precision::U32, Precision::I32);
- // move blobs from Constant layers to Convolution, Deconvolution, FullyConnected layers attributes
- BlobTransformation blobsTransformation;
- blobsTransformation.transform(network, true);
-
// Check the input network
std::string error;
if (!AreLayersSupported(network, error)) {
+++ /dev/null
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <algorithm>
-#include <string>
-#include <unordered_set>
-
-#include <ie_icnn_network.hpp>
-
-namespace InferenceEngine {
-namespace details {
-
-class INFERENCE_ENGINE_API_CLASS(BlobTransformation) {
-public:
- BlobTransformation() = default;
- void transform(ICNNNetwork& network, bool transformWithFakeQuantizeOnWeights = false) const;
-
-private:
- const std::unordered_set<std::string> layersForTransformations = {
- "Convolution",
- "Deconvolution",
- "FullyConnected"
- };
-};
-
-} // namespace details
-} // namespace InferenceEngine
+++ /dev/null
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "low_precision_transformations/blob_transformation.hpp"
-#include "low_precision_transformations/network_helper.hpp"
-#include <legacy/details/ie_cnn_network_tools.h>
-
-#include <algorithm>
-#include <vector>
-
-
-using namespace InferenceEngine;
-using namespace InferenceEngine::details;
-
-void BlobTransformation::transform(ICNNNetwork& network, bool transformWithFakeQuantizeOnWeights) const {
- const std::vector<CNNLayerPtr> layers = CNNNetSortTopologically(network);
-
- for (const CNNLayerPtr& layer : layers) {
- if (layer->insData.size() < 2) {
- continue;
- }
- if (this->layersForTransformations.find(layer->type) == this->layersForTransformations.end()) {
- continue;
- }
-
- const CNNLayerPtr weightsLayer = CNNNetworkHelper::getParent(*layer, 1);
- if ((!transformWithFakeQuantizeOnWeights) &&
- ((weightsLayer->type == "FakeQuantize") || (weightsLayer->type == "Quantize"))) {
- continue;
- }
-
- WeightableLayer* weightableLayer = dynamic_cast<WeightableLayer*>(layer.get());
- if (weightableLayer == nullptr) {
- continue;
- }
-
- const Blob::Ptr weightsBlob = CNNNetworkHelper::getWeights(*layer, false);
- if (weightsBlob != nullptr) {
- weightableLayer->blobs["weights"] = weightsBlob;
- weightableLayer->_weights = weightsBlob;
- }
-
- if (layer->insData.size() >= 3) {
- const Blob::Ptr biasesBlob = CNNNetworkHelper::getBiases(*layer);
- if (biasesBlob != nullptr) {
- weightableLayer->blobs["biases"] = biasesBlob;
- weightableLayer->_biases = biasesBlob;
- }
-
- CNNLayerPtr biasesLayer = CNNNetworkHelper::getParent(*layer, 2);
- CNNNetworkHelper::removeLayer(network, biasesLayer);
- }
-
- CNNNetworkHelper::removeLayer(network, weightsLayer);
- }
-}
target_link_libraries(${TARGET_NAME} PUBLIC pugixml vpu_common_lib)
endif()
- target_link_libraries(${TARGET_NAME} PUBLIC ${NGRAPH_LIBRARIES} inference_engine_lp_transformations
+ target_link_libraries(${TARGET_NAME} PUBLIC ${NGRAPH_LIBRARIES}
PRIVATE openvino::itt)
if(WIN32)
void removeConstLayers(
ie::ICNNNetwork& network);
- void moveConstInputsToBlobs(
- ie::ICNNNetwork& network);
-
//
// Process internal VPU Model
//
ie::NetPass::ConvertPrecision(*originalOrConvertNetwork, ie::Precision::U64, ie::Precision::I32);
ie::NetPass::ConvertPrecision(*originalOrConvertNetwork, ie::Precision::BOOL, ie::Precision::I32);
- moveConstInputsToBlobs(*originalOrConvertNetwork);
-
removeConstLayers(*originalOrConvertNetwork);
unrollLoops(*originalOrConvertNetwork);
+++ /dev/null
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <vpu/frontend/frontend.hpp>
-
-#include <low_precision_transformations/blob_transformation.hpp>
-
-#include <vpu/compile_env.hpp>
-
-namespace vpu {
-
-void FrontEnd::moveConstInputsToBlobs(ie::ICNNNetwork& network) {
- VPU_PROFILE(moveConstInputsToBlobs);
-
- const auto& env = CompileEnv::get();
-
- env.log->trace("Move const inputs to blobs");
- VPU_LOGGER_SECTION(env.log);
-
- ie::details::BlobTransformation blobsTransformation;
- blobsTransformation.transform(network, true);
-}
-
-} // namespace vpu