[LPT] nGraph nodes naming fix (#2822)
authorEdward Shogulin <edward.shogulin@intel.com>
Fri, 30 Oct 2020 20:23:35 +0000 (23:23 +0300)
committerGitHub <noreply@github.com>
Fri, 30 Oct 2020 20:23:35 +0000 (23:23 +0300)
* [LPT] functional tests: FakeQuantize with dynamic intervals

* [LPT] decomposeFakeQuantize: removed debug info

* [LPT] Add NetworkHelper::markAsDequantizationOp function (usage sketch below)

[ngraph] Fix compare runtime info function

[LPT] Fix test cases with no DEQUANTIZATION runtime attribute

[LPT] Change include path for dequantization op

* [LPT] Remove Subtract functional test, enable and rename legacy tests

Co-authored-by: Vladislav Golubev <vladislav.golubev@intel.com>
Co-authored-by: Aleksandr Pertovsky <aleksandr.pertovsky@intel.com>
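
For reference, a minimal usage sketch of the new helper (illustration only; includes and the relu/weights/bias setup from the test builders are omitted):

    // Build a legacy ScaleShiftIE node, then swap it for a copy whose runtime
    // info carries the "DEQUANTIZATION" attribute, as the updated test
    // builders in this change do.
    std::shared_ptr<ngraph::Node> scaleShift =
        std::make_shared<ngraph::op::ScaleShiftIE>(relu, weights, bias);
    scaleShift = ngraph::pass::low_precision::NetworkHelper::markAsDequantizationOp(scaleShift);
    assert(scaleShift->get_rt_info().count("DEQUANTIZATION") == 1);
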
17 files changed:
inference-engine/src/low_precision_transformations/include/low_precision/network_helper.hpp
inference-engine/src/low_precision_transformations/src/common/network_helper.cpp
inference-engine/tests/functional/inference_engine/lp_transformations/fake_quantize_with_dynamic_intervals_transformation.cpp [new file with mode: 0644]
inference-engine/tests/functional/inference_engine/lp_transformations/subtract_transformation.cpp [deleted file]
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/transpose_after_matmul_transformation.cpp [moved from inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/permute_transformation.cpp with 85% similarity]
inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/subtract_transformation.cpp
inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/transpose_after_matmul_transformation.cpp [moved from inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/permute_transformation.cpp with 82% similarity]
inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/transpose_after_matmul_transformation.hpp [moved from inference-engine/tests/functional/plugin/shared/include/low_precision_transformations/permute_transformation.hpp with 66% similarity]
inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/multiply_with_one_parent_transformation.cpp [moved from inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/multiply_with_one_parent_transformaion.cpp with 100% similarity]
inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/transpose_after_matmul_transformation.cpp [moved from inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/permute_transformation.cpp with 87% similarity]
inference-engine/tests/ie_test_utils/common_test_utils/ngraph_test_utils.cpp
inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/subtract_function.hpp [deleted file]
inference-engine/tests/ngraph_functions/src/low_precision_transformations/convert_mul_or_add_finally_with_dequantization_function.cpp
inference-engine/tests/ngraph_functions/src/low_precision_transformations/mul_add_to_scaleshift_or_power_function.cpp
inference-engine/tests/ngraph_functions/src/low_precision_transformations/normalize_l2_function.cpp
inference-engine/tests/ngraph_functions/src/low_precision_transformations/subtract_function.cpp [deleted file]

index f27462b..306ba73 100644 (file)
@@ -160,6 +160,8 @@ public:
     // handles only specific case: Constant -> [dequantization operations] -> [node]
     static void foldDequantization(std::shared_ptr<Node>& node, const size_t branchIndex, const bool inPlace = false);
 
+    static std::shared_ptr<Node> markAsDequantizationOp(std::shared_ptr<Node> op);
+
 private:
     static std::shared_ptr<Node> foldFakeQuantize(const std::shared_ptr<opset1::FakeQuantize>& fq, const bool roundValues, const bool roundValuesWasSet);
 
index f7b55c8..1acc8d9 100644 (file)
@@ -632,8 +632,7 @@ std::tuple<std::shared_ptr<Node>, std::shared_ptr<Node>> NetworkHelper::decompos
             fq->get_levels(),
             fq->get_auto_broadcast()),
         true);
-    // TODO: for debuging only - remove later
-    newFQ->set_friendly_name(fq->get_friendly_name() + "_original");
+    newFQ->set_friendly_name(fq->get_friendly_name());
 
     std::shared_ptr<ngraph::Node> convert2;
     if (updatePrecision) {
@@ -1037,6 +1036,12 @@ std::shared_ptr<Node> NetworkHelper::toScalarIfPossible(std::shared_ptr<Node> no
     return NetworkHelper::toScalar(constant);
 }
 
+std::shared_ptr<Node> NetworkHelper::markAsDequantizationOp(std::shared_ptr<Node> op) {
+    auto opCopy = op->clone_with_new_inputs(op->input_values());
+    auto& rtInfo = opCopy->get_rt_info();
+    rtInfo["DEQUANTIZATION"] = std::make_shared<VariantWrapper<DequantizationAttr>>(DequantizationAttr());
+    return opCopy;
+}
 
 }  // namespace low_precision
 }  // namespace pass
diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/fake_quantize_with_dynamic_intervals_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/fake_quantize_with_dynamic_intervals_transformation.cpp
new file mode 100644 (file)
index 0000000..35de3b8
--- /dev/null
@@ -0,0 +1,169 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "layer_transformation.hpp"
+
+#include <map>
+#include <memory>
+#include <sstream>
+#include <string>
+
+#include <gtest/gtest.h>
+
+#include <ngraph/pass/visualize_tree.hpp>
+#include <low_precision/fake_quantize.hpp>
+
+#include "common_test_utils/ngraph_test_utils.hpp"
+#include "simple_low_precision_transformer.hpp"
+
+using namespace testing;
+using namespace ngraph;
+using namespace ngraph::pass;
+
+class FakeQuantizeWithDynamicIntervalsTransformationTestValues {
+public:
+    low_precision::LayerTransformation::Params params;
+    bool inputLowConst;
+    bool inputHighConst;
+    bool outputLowConst;
+    bool outputHighConst;
+};
+
+inline std::ostream& operator<<(std::ostream& os, const std::vector<float>& values) {
+    os << "{ ";
+    for (size_t i = 0; i < values.size(); ++i) {
+        os << values[i];
+        if (i != (values.size() - 1ul)) {
+            os << ", ";
+        }
+    }
+    os << " }";
+    return os;
+}
+
+inline std::ostream& operator<<(std::ostream& out, const FakeQuantizeWithDynamicIntervalsTransformationTestValues& testValue) {
+    return out << "_" <<
+        testValue.inputLowConst << "_" <<
+        testValue.inputHighConst << "_" <<
+        testValue.outputLowConst << "_" <<
+        testValue.outputHighConst;
+}
+
+typedef std::tuple<
+    ngraph::element::Type,
+    ngraph::Shape,
+    FakeQuantizeWithDynamicIntervalsTransformationTestValues> FakeQuantizeTransformationParams;
+
+class FakeQuantizeWithDynamicIntervalsTransformation : public LayerTransformation, public testing::WithParamInterface<FakeQuantizeTransformationParams> {
+public:
+    void SetUp() override {
+        const ngraph::element::Type precision = std::get<0>(GetParam());
+        const ngraph::Shape shape = std::get<1>(GetParam());
+        const FakeQuantizeWithDynamicIntervalsTransformationTestValues testValues = std::get<2>(GetParam());
+
+        actualFunction = get(precision, shape, testValues.inputLowConst, testValues.inputHighConst, testValues.outputLowConst, testValues.outputHighConst);
+
+        SimpleLowPrecisionTransformer transform;
+        transform.add<ngraph::pass::low_precision::FakeQuantizeTransformation, ngraph::opset1::FakeQuantize>(testValues.params);
+        transform.transform(actualFunction);
+
+        referenceFunction = get(precision, shape, testValues.inputLowConst, testValues.inputHighConst, testValues.outputLowConst, testValues.outputHighConst);
+    }
+
+    static std::string getTestCaseName(testing::TestParamInfo<FakeQuantizeTransformationParams> obj) {
+        ngraph::element::Type precision;
+        ngraph::Shape shape;
+        FakeQuantizeWithDynamicIntervalsTransformationTestValues testValues;
+        std::tie(precision, shape, testValues) = obj.param;
+
+        std::ostringstream result;
+        result << LayerTransformation::getTestCaseNameByParams(precision, shape, testValues.params) << testValues;
+        return result.str();
+    }
+
+private:
+    std::shared_ptr<ngraph::Function> get(
+        ngraph::element::Type precision,
+        ngraph::Shape inputShape,
+        const bool inputLowConst,
+        const bool inputHighConst,
+        const bool outputLowConst,
+        const bool outputHighConst) {
+        const auto input = std::make_shared<ngraph::opset1::Parameter>(precision, inputShape);
+        input->set_friendly_name("input");
+
+        const auto constantPrecision = element::f32;
+        const auto constantShape = Shape{ 1, 1, 1, 1 };
+        const std::vector<float> low = { 0.f };
+        const std::vector<float> high = { 1.f };
+
+        const auto inputLow = inputLowConst ?
+            std::dynamic_pointer_cast<ngraph::Node>(std::make_shared<opset1::Constant>(constantPrecision, constantShape, low)) :
+            std::make_shared<ngraph::opset1::Parameter>(constantPrecision, constantShape);
+
+        const auto inputHigh = inputHighConst ?
+            std::dynamic_pointer_cast<ngraph::Node>(std::make_shared<opset1::Constant>(constantPrecision, constantShape, high)) :
+            std::make_shared<ngraph::opset1::Parameter>(constantPrecision, constantShape);
+
+        const auto outputLow = outputLowConst ?
+            std::dynamic_pointer_cast<ngraph::Node>(std::make_shared<opset1::Constant>(constantPrecision, constantShape, low)) :
+            std::make_shared<ngraph::opset1::Parameter>(constantPrecision, constantShape);
+
+        const auto outputHigh = outputHighConst ?
+            std::dynamic_pointer_cast<ngraph::Node>(std::make_shared<opset1::Constant>(constantPrecision, constantShape, high)) :
+            std::make_shared<ngraph::opset1::Parameter>(constantPrecision, constantShape);
+
+        const auto levels = 256ul;
+
+        auto fakeQuantize = std::make_shared<ngraph::opset1::FakeQuantize>(input, inputLow, inputHigh, outputLow, outputHigh, levels);
+        fakeQuantize->set_friendly_name("fakeQuantize");
+
+        ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(fakeQuantize) };
+
+        ngraph::ParameterVector inputs{ input };
+        if (as_type_ptr<ngraph::opset1::Parameter>(inputLow)) {
+            inputs.push_back(as_type_ptr<ngraph::opset1::Parameter>(inputLow));
+        }
+        if (as_type_ptr<ngraph::opset1::Parameter>(inputHigh)) {
+            inputs.push_back(as_type_ptr<ngraph::opset1::Parameter>(inputHigh));
+        }
+        if (as_type_ptr<ngraph::opset1::Parameter>(outputLow)) {
+            inputs.push_back(as_type_ptr<ngraph::opset1::Parameter>(outputLow));
+        }
+        if (as_type_ptr<ngraph::opset1::Parameter>(outputHigh)) {
+            inputs.push_back(as_type_ptr<ngraph::opset1::Parameter>(outputHigh));
+        }
+
+        return std::make_shared<ngraph::Function>(results, inputs, "FakeQuantizeWithDynamicIntervalsTransformation");
+    }
+};
+
+TEST_P(FakeQuantizeWithDynamicIntervalsTransformation, CompareFunctions) {
+    actualFunction->validate_nodes_and_infer_types();
+    auto res = compare_functions(referenceFunction, actualFunction, true, true, true);
+    ASSERT_TRUE(res.first) << res.second;
+}
+
+const std::vector<ngraph::element::Type> precisions = {
+    ngraph::element::f32,
+    ngraph::element::i32,
+    ngraph::element::f16
+};
+
+const std::vector<FakeQuantizeWithDynamicIntervalsTransformationTestValues> fakeQuantizeTransformationTestValues = {
+    { LayerTransformation::createParamsU8I8(), false, false, false, true },
+    { LayerTransformation::createParamsU8I8(), true, false, false, false },
+    { LayerTransformation::createParamsU8I8(), false, false, false, false }
+};
+
+const std::vector<ngraph::Shape> shapes = { { 1, 32, 72, 48 } };
+
+INSTANTIATE_TEST_CASE_P(
+    LPT,
+    FakeQuantizeWithDynamicIntervalsTransformation,
+    ::testing::Combine(
+        ::testing::ValuesIn(precisions),
+        ::testing::ValuesIn(shapes),
+        ::testing::ValuesIn(fakeQuantizeTransformationTestValues)),
+    FakeQuantizeWithDynamicIntervalsTransformation::getTestCaseName);
diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/subtract_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/subtract_transformation.cpp
deleted file mode 100644 (file)
index c8898c7..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright (C) 2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "layer_transformation.hpp"
-
-#include <string>
-#include <sstream>
-#include <memory>
-
-#include <gtest/gtest.h>
-
-#include <transformations/utils/utils.hpp>
-#include <transformations/init_node_info.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
-#include "ngraph_functions/low_precision_transformations/subtract_function.hpp"
-
-using namespace testing;
-using namespace ngraph::pass;
-
-class SubtractTransformation : public LayerTransformation, public testing::WithParamInterface<LayerTransformationParams> {
-public:
-    void SetUp() override {
-        const ngraph::element::Type precision = std::get<0>(GetParam());
-        const ngraph::Shape shape = std::get<1>(GetParam());
-
-        actualFunction = ngraph::builder::subgraph::SubtractFunction::getOriginal(precision, shape);
-        // transform(actualFunction);
-        referenceFunction = ngraph::builder::subgraph::SubtractFunction::getReference(precision, shape);
-    }
-
-    static std::string getTestCaseName(testing::TestParamInfo<LayerTransformationParams> obj) {
-        ngraph::element::Type precision;
-        ngraph::Shape shape;
-        low_precision::LayerTransformation::Params params;
-        std::tie(precision, shape, params) = obj.param;
-
-        return LayerTransformation::getTestCaseNameByParams(precision, shape, params);
-    }
-};
-
-TEST_P(SubtractTransformation, CompareFunctions) {
-    // InitNodeInfo().run_on_function(actualFunction);
-    // ConvFusion().run_on_function(actualFunction);
-
-    // actualFunction->validate_nodes_and_infer_types();
-
-    // auto res = compare_functions(referenceFunction, actualFunction);
-    // ASSERT_TRUE(res.first) << res.second;
-}
-
-const std::vector<ngraph::element::Type> precisions = {
-    ngraph::element::f32,
-    ngraph::element::f16
-};
-
-const std::vector<ngraph::Shape> shapes = {
-    { 1, 32, 72, 48 }
-};
-
-const std::vector<low_precision::LayerTransformation::Params> trasformationParamValues = {
-    LayerTransformation::createParamsI8I8(),
-    LayerTransformation::createParamsU8I8()
-};
-
-INSTANTIATE_TEST_CASE_P(
-    LPT,
-    SubtractTransformation,
-    ::testing::Combine(
-        ::testing::ValuesIn(precisions),
-        ::testing::ValuesIn(shapes),
-        ::testing::ValuesIn(trasformationParamValues)),
-    SubtractTransformation::getTestCaseName);
index 8d549d7..e735953 100644 (file)
@@ -22,7 +22,7 @@ const std::vector<LayerTransformation::Params> trasformationParamValues = {
     LayerTestsUtils::LayerTransformationParamsFactory::createParamsU8I8()
 };
 
-INSTANTIATE_TEST_CASE_P(DISABLED_smoke_LPT, SubtractTransformation,
+INSTANTIATE_TEST_CASE_P(smoke_LPT, SubtractTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(netPrecisions),
         ::testing::Values(InferenceEngine::SizeVector({ 1, 3, 16, 16 })),
@@ -4,7 +4,7 @@
 
 #include <vector>
 
-#include "low_precision_transformations/permute_transformation.hpp"
+#include "low_precision_transformations/transpose_after_matmul_transformation.hpp"
 #include "common_test_utils/test_constants.hpp"
 
 using namespace LayerTestsDefinitions;
@@ -26,7 +26,7 @@ const std::vector<bool> perTensorValues = { true, false };
 
 const std::vector<bool> transposeChannelDimValues = { true, false };
 
-INSTANTIATE_TEST_CASE_P(smoke_LPT, PermuteTransformation,
+INSTANTIATE_TEST_CASE_P(smoke_LPT, TransposeAfterMatMulTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(netPrecisions),
         ::testing::Values(InferenceEngine::SizeVector({ 1, 3, 16, 16 })),
@@ -34,7 +34,7 @@ INSTANTIATE_TEST_CASE_P(smoke_LPT, PermuteTransformation,
         ::testing::ValuesIn(trasformationParamValues),
         ::testing::ValuesIn(perTensorValues),
         ::testing::ValuesIn(transposeChannelDimValues)),
-    PermuteTransformation::getTestCaseName);
+    TransposeAfterMatMulTransformation::getTestCaseName);
 }  // namespace
 
 
index 357326b..9febfd7 100644 (file)
@@ -4,7 +4,7 @@
 
 #include <vector>
 
-#include "low_precision_transformations/multiply_transformation.hpp"
+#include "low_precision_transformations/subtract_transformation.hpp"
 #include "common_test_utils/test_constants.hpp"
 
 using namespace LayerTestsDefinitions;
@@ -19,11 +19,11 @@ const std::vector<LayerTransformation::Params> trasformationParamValues = {
     LayerTestsUtils::LayerTransformationParamsFactory::createParams()
 };
 
-//INSTANTIATE_TEST_CASE_P(LPT, MultiplyTransformation,
-//    ::testing::Combine(
-//        ::testing::ValuesIn(netPrecisions),
-//        ::testing::Values(InferenceEngine::SizeVector({ 1, 3, 16, 16 })),
-//        ::testing::Values(CommonTestUtils::DEVICE_GPU),
-//        ::testing::ValuesIn(trasformationParamValues)),
-//    MultiplyTransformation::getTestCaseName);
+INSTANTIATE_TEST_CASE_P(smoke_LPT, SubtractTransformation,
+    ::testing::Combine(
+        ::testing::ValuesIn(netPrecisions),
+        ::testing::Values(InferenceEngine::SizeVector({ 1, 3, 16, 16 })),
+        ::testing::Values(CommonTestUtils::DEVICE_GPU),
+        ::testing::ValuesIn(trasformationParamValues)),
+    SubtractTransformation::getTestCaseName);
 }  // namespace
@@ -4,7 +4,7 @@
 
 #include <vector>
 
-#include "low_precision_transformations/permute_transformation.hpp"
+#include "low_precision_transformations/transpose_after_matmul_transformation.hpp"
 #include "common_test_utils/test_constants.hpp"
 
 using namespace LayerTestsDefinitions;
@@ -23,7 +23,7 @@ const std::vector<bool> perTensorValues = { true, false };
 
 const std::vector<bool> transposeChannelDimValues = { true, false };
 
-INSTANTIATE_TEST_CASE_P(DISABLED_smoke_LPT, PermuteTransformation,
+INSTANTIATE_TEST_CASE_P(smoke_LPT, TransposeAfterMatMulTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(netPrecisions),
         ::testing::Values(InferenceEngine::SizeVector({ 1, 3, 16, 16 })),
@@ -31,7 +31,7 @@ INSTANTIATE_TEST_CASE_P(DISABLED_smoke_LPT, PermuteTransformation,
         ::testing::ValuesIn(trasformationParamValues),
         ::testing::ValuesIn(perTensorValues),
         ::testing::ValuesIn(transposeChannelDimValues)),
-    PermuteTransformation::getTestCaseName);
+    TransposeAfterMatMulTransformation::getTestCaseName);
 }  // namespace
 
 
@@ -17,13 +17,13 @@ typedef std::tuple<
     std::string,
     ngraph::pass::low_precision::LayerTransformation::Params,
     bool,
-    bool> PermuteTransformationParams;
+    bool> TransposeAfterMatMulTransformationParams;
 
-class PermuteTransformation :
-    public testing::WithParamInterface<PermuteTransformationParams>,
+class TransposeAfterMatMulTransformation :
+    public testing::WithParamInterface<TransposeAfterMatMulTransformationParams>,
     public LayerTestsUtils::LayerTransformation {
 public:
-    static std::string getTestCaseName(testing::TestParamInfo<PermuteTransformationParams> obj);
+    static std::string getTestCaseName(testing::TestParamInfo<TransposeAfterMatMulTransformationParams> obj);
 
 protected:
     void SetUp() override;
@@ -2,7 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "low_precision_transformations/permute_transformation.hpp"
+#include "low_precision_transformations/transpose_after_matmul_transformation.hpp"
 
 #include <memory>
 #include <tuple>
@@ -21,7 +21,7 @@
 
 namespace LayerTestsDefinitions {
 
-std::string PermuteTransformation::getTestCaseName(testing::TestParamInfo<PermuteTransformationParams> obj) {
+std::string TransposeAfterMatMulTransformation::getTestCaseName(testing::TestParamInfo<TransposeAfterMatMulTransformationParams> obj) {
     InferenceEngine::Precision netPrecision;
     InferenceEngine::SizeVector inputShapes;
     std::string targetDevice;
@@ -37,7 +37,7 @@ std::string PermuteTransformation::getTestCaseName(testing::TestParamInfo<Permut
     return result.str();
 }
 
-void PermuteTransformation::SetUp() {
+void TransposeAfterMatMulTransformation::SetUp() {
     InferenceEngine::SizeVector inputShape;
     InferenceEngine::Precision netPrecision;
     ngraph::pass::low_precision::LayerTransformation::Params params;
@@ -66,10 +66,10 @@ void PermuteTransformation::SetUp() {
     transpose->set_friendly_name("transpose");
 
     ngraph::ResultVector results{ std::make_shared<ngraph::opset1::Result>(transpose) };
-    function = std::make_shared<ngraph::Function>(results, ngraph::ParameterVector{ input1, input2 }, "PermuteTransformation");
+    function = std::make_shared<ngraph::Function>(results, ngraph::ParameterVector{ input1, input2 }, "TransposeAfterMatMulTransformation");
 }
 
-TEST_P(PermuteTransformation, CompareWithRefImpl) {
+TEST_P(TransposeAfterMatMulTransformation, CompareWithRefImpl) {
     Run();
 };
 
index 731c63f..aba6838 100644 (file)
@@ -47,22 +47,22 @@ bool compare_rt_keys(const std::shared_ptr<ngraph::Node>& node1, const std::shar
     const auto& first_node_rt_info = node1->get_rt_info();
     const auto& second_node_rt_info = node2->get_rt_info();
 
-    // TODO: should be uncommented
-    // if (first_node_rt_info.size() != second_node_rt_info.size()) {
-    //    return false;
-    // }
-
-    for (auto first_it = first_node_rt_info.begin(); first_it != first_node_rt_info.end(); ++first_it) {
-        bool was_found = false;
-        for (auto secont_it = second_node_rt_info.begin(); secont_it != second_node_rt_info.end(); ++secont_it) {
-            if (first_it->first == secont_it->first) {
-                was_found = true;
-                break;
-            }
-        }
-        if (!was_found) {
+    if (first_node_rt_info.empty() && second_node_rt_info.empty()) {
+        return true;
+    }
+
+    if (first_node_rt_info.size() != second_node_rt_info.size()) {
+        return false;
+    }
+
+    auto first_node_rt_info_it = first_node_rt_info.begin();
+    auto second_node_rt_info_it = second_node_rt_info.begin();
+    while (first_node_rt_info_it != first_node_rt_info.end()) {
+        if (first_node_rt_info_it->first != second_node_rt_info_it->first) {
             return false;
         }
+        ++first_node_rt_info_it;
+        ++second_node_rt_info_it;
     }
 
     return true;
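
The stricter key comparison above relies on rt_info being a std::map: both maps are walked in lockstep, so the sorted key sequences must match exactly. A standalone sketch of the same idea (illustration only, not part of the patch):

    #include <map>
    #include <string>

    static bool same_keys(const std::map<std::string, int>& a, const std::map<std::string, int>& b) {
        if (a.size() != b.size()) return false;              // differing key counts can never match
        auto ia = a.begin();
        auto ib = b.begin();
        for (; ia != a.end(); ++ia, ++ib) {
            if (ia->first != ib->first) return false;        // maps are sorted, so lockstep keys must agree
        }
        return true;
    }
    // same_keys({{"DEQUANTIZATION", 0}}, {}) == false, whereas the previous
    // one-sided lookup returned true whenever the first map was empty.
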
diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/subtract_function.hpp b/inference-engine/tests/ngraph_functions/include/ngraph_functions/low_precision_transformations/subtract_function.hpp
deleted file mode 100644 (file)
index 1a42712..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (C) 2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include <memory>
-#include <ngraph/ngraph.hpp>
-
-namespace ngraph {
-namespace builder {
-namespace subgraph {
-
-class SubtractFunction {
-public:
-    static std::shared_ptr<ngraph::Function> getOriginal(const ngraph::element::Type ngPrecision, const ngraph::Shape& inputShape);
-    static std::shared_ptr<ngraph::Function> getReference(const ngraph::element::Type ngPrecision, const ngraph::Shape& inputShape);
-};
-
-}  // namespace subgraph
-}  // namespace builder
-}  // namespace ngraph
index d94d90e..f4733ac 100644 (file)
@@ -61,7 +61,9 @@ std::shared_ptr<ngraph::Function> ConvertMulOrAddWithDequantizationFunction::get
 
     const auto weights = std::make_shared<opset1::Constant>(element::f32, inputShape, multiplyConst);
     const auto bias = std::make_shared<opset1::Constant>(element::f32, inputShape, 0.0);
-    const auto scaleShift = std::make_shared<ngraph::op::ScaleShiftIE>(relu, weights, bias);
+    std::shared_ptr<Node> scaleShift = std::make_shared<ngraph::op::ScaleShiftIE>(relu, weights, bias);
+
+    scaleShift = low_precision::NetworkHelper::markAsDequantizationOp(scaleShift);
 
     scaleShift->set_friendly_name("output");
 
index 73b83ae..e30526f 100644 (file)
@@ -61,7 +61,8 @@ namespace subgraph {
 
         std::shared_ptr<ngraph::Node> lastNode;
         if (isDequantization) {
-            const auto scaleshift = std::make_shared<ngraph::op::ScaleShiftIE>(input, weights, biases, precisionAfterOperation);
+            std::shared_ptr<Node> scaleshift = std::make_shared<ngraph::op::ScaleShiftIE>(input, weights, biases, precisionAfterOperation);
+            scaleshift = low_precision::NetworkHelper::markAsDequantizationOp(scaleshift);
             scaleshift->set_friendly_name("add");
             lastNode = scaleshift;
         } else {
index e3ff408..89255db 100644 (file)
@@ -7,6 +7,7 @@
 #include <ngraph_ops/type_relaxed.hpp>
 #include <ngraph/opsets/opset1.hpp>
 #include "ngraph_functions/subgraph_builders.hpp"
+#include "low_precision/common/dequantization_op.hpp"
 
 namespace ngraph {
 namespace builder {
@@ -125,7 +126,7 @@ std::shared_ptr<ngraph::Function> NormalizeL2Function::getReference(
     std::shared_ptr<ngraph::Node> output = normalizeL2;
 
     if (!expectedValues.mutliplyValues.empty()) {
-        const std::shared_ptr<ngraph::Node> multiply = std::make_shared<ngraph::op::TypeRelaxed<ngraph::opset1::Multiply>>(
+        const std::shared_ptr<ngraph::Node> multiply = std::make_shared<ngraph::op::TypeRelaxed<pass::low_precision::DequantizationMultiply>>(
             std::vector<ngraph::element::Type>{ element::f32, element::f32 }, std::vector<ngraph::element::Type>{element::f32},
             ngraph::op::TemporaryReplaceOutputType(output, element::f32).get(),
             ngraph::op::TemporaryReplaceOutputType(std::make_shared<ngraph::opset1::Constant>(
diff --git a/inference-engine/tests/ngraph_functions/src/low_precision_transformations/subtract_function.cpp b/inference-engine/tests/ngraph_functions/src/low_precision_transformations/subtract_function.cpp
deleted file mode 100644 (file)
index 3cf42ee..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (C) 2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "ngraph_functions/low_precision_transformations/subtract_function.hpp"
-
-#include <ngraph/opsets/opset1.hpp>
-#include "ngraph_functions/subgraph_builders.hpp"
-
-namespace ngraph {
-namespace builder {
-namespace subgraph {
-
-std::shared_ptr<ngraph::Function> SubtractFunction::getOriginal(
-    const ngraph::element::Type ngPrecision,
-    const ngraph::Shape& inputShape) {
-    return nullptr;
-}
-
-std::shared_ptr<ngraph::Function> SubtractFunction::getReference(
-    const ngraph::element::Type ngPrecision,
-    const ngraph::Shape& inputShape) {
-    return nullptr;
-}
-
-}  // namespace subgraph
-}  // namespace builder
-}  // namespace ngraph