Remove obsoleted Dequantize op (#2780)
author Mateusz Tabaka <mateusz.tabaka@intel.com>
Fri, 23 Oct 2020 08:25:08 +0000 (10:25 +0200)
committer GitHub <noreply@github.com>
Fri, 23 Oct 2020 08:25:08 +0000 (11:25 +0300)
* Remove obsoleted Dequantize op

* apply code style

17 files changed:
ngraph/core/include/ngraph/op/dequantize.hpp [deleted file]
ngraph/core/include/ngraph/op/op_version_tbl.hpp
ngraph/core/include/ngraph/ops.hpp
ngraph/core/include/ngraph/pass/constant_folding.hpp
ngraph/core/reference/include/ngraph/runtime/reference/dequantize.hpp [deleted file]
ngraph/core/src/op/dequantize.cpp [deleted file]
ngraph/core/src/op/fake_quantize.cpp
ngraph/core/src/pass/constant_folding_dequantize.cpp [deleted file]
ngraph/frontend/onnx_import/src/op/dequantize_linear.cpp
ngraph/test/CMakeLists.txt
ngraph/test/backend/quantize_dequantize.in.cpp
ngraph/test/constant_folding.cpp
ngraph/test/op_is.cpp
ngraph/test/runtime/interpreter/int_executable.cpp
ngraph/test/runtime/interpreter/int_executable.hpp
ngraph/test/runtime/opset0_tbl.hpp
ngraph/test/type_prop/dequantize.cpp [deleted file]

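Note: v0::Dequantize is removed without a one-to-one replacement in this patch; the same mapping r = (q - z) * s is expressible with standard opset operations (roughly the pattern the onnx_import DequantizeLinear converter touched below already produces). A minimal sketch, not from this patch, assuming the nGraph opset1 C++ API and an illustrative scalar scale/zero point:

    #include "ngraph/ngraph.hpp"
    #include "ngraph/opsets/opset1.hpp"

    using namespace ngraph;

    // Builds r = (q - z) * s from a quantized u8 input, a scalar f32 scale and a u8 zero point.
    std::shared_ptr<Function> make_dequantize_like(const Shape& data_shape)
    {
        auto q = std::make_shared<opset1::Parameter>(element::u8, data_shape);
        auto s = std::make_shared<opset1::Parameter>(element::f32, Shape{});
        auto z = std::make_shared<opset1::Parameter>(element::u8, Shape{});

        // Convert the quantized data and the zero point to the real type first,
        // then shift and scale.
        auto q_f = std::make_shared<opset1::Convert>(q, element::f32);
        auto z_f = std::make_shared<opset1::Convert>(z, element::f32);
        auto shifted = std::make_shared<opset1::Subtract>(q_f, z_f);
        auto r = std::make_shared<opset1::Multiply>(shifted, s);

        return std::make_shared<Function>(r, ParameterVector{q, s, z});
    }
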
diff --git a/ngraph/core/include/ngraph/op/dequantize.hpp b/ngraph/core/include/ngraph/op/dequantize.hpp
deleted file mode 100644 (file)
index 32e24ee..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#pragma once
-
-#include "ngraph/axis_set.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/type/element_type.hpp"
-
-namespace ngraph
-{
-    namespace op
-    {
-        namespace v0
-        {
-            /// \brief Dequantize operation
-            ///        Maps quantized input (q) to real output (r) using scale (s) and zero point
-            ///        (z):
-            ///        r = (q - z) * s
-            class NGRAPH_DEPRECATED(
-                "This operation is deprecated and will be removed soon. Please do not use it.")
-                NGRAPH_API Dequantize : public ngraph::op::Op
-            {
-                NGRAPH_SUPPRESS_DEPRECATED_START
-            public:
-                static constexpr NodeTypeInfo type_info{"Dequantize", 0};
-                const NodeTypeInfo& get_type_info() const override { return type_info; }
-                /// \brief Constructs a Dequantize operation
-                Dequantize() = default;
-
-                /// \brief Constructs a Dequantize operation
-                /// \param input quantized input
-                /// \param scale scale used for mapping
-                /// \param zero_point zero point used for mapping
-                /// \param type output element type
-                /// \param axes axis positions on which `scale` and `zero_point` are specified
-                Dequantize(const Output<Node>& input,
-                           const Output<Node>& scale,
-                           const Output<Node>& zero_point,
-                           const element::Type& type,
-                           const AxisSet& axes);
-
-                void validate_and_infer_types() override;
-
-                virtual std::shared_ptr<Node>
-                    clone_with_new_inputs(const OutputVector& new_args) const override;
-
-                const AxisSet& get_axes() const { return m_axes; }
-                void set_axes(const AxisSet& axes) { m_axes = axes; }
-                const element::Type& get_type() const { return m_type; }
-                void set_type(const element::Type& type) { m_type = type; }
-            private:
-                element::Type m_type;
-                AxisSet m_axes;
-                NGRAPH_SUPPRESS_DEPRECATED_END
-            };
-        }
-        NGRAPH_SUPPRESS_DEPRECATED_START
-        using v0::Dequantize;
-        NGRAPH_SUPPRESS_DEPRECATED_END
-    }
-}
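
For reference, the mapping documented in the removed header, r = (q - z) * s, turns e.g. a u8 value q = 130 with zero point z = 128 and scale s = 0.5 into r = (130 - 128) * 0.5 = 1.0 (illustrative numbers, not taken from this commit).
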
diff --git a/ngraph/core/include/ngraph/op/op_version_tbl.hpp b/ngraph/core/include/ngraph/op/op_version_tbl.hpp
index e970533..345e83b 100644 (file)
@@ -61,7 +61,6 @@ NGRAPH_OP(CumSum, ngraph::op::v0, 0)
 NGRAPH_OP(DeformableConvolution, ngraph::op::v1, 1)
 NGRAPH_OP(DeformablePSROIPooling, ngraph::op::v1, 1)
 NGRAPH_OP(DepthToSpace, ngraph::op::v0, 0)
-NGRAPH_OP(Dequantize, ngraph::op::v0, 0)
 NGRAPH_OP(DetectionOutput, ngraph::op::v0, 0)
 NGRAPH_OP(Divide, ngraph::op::v0, 0)
 NGRAPH_OP(Divide, ngraph::op::v1, 1)
diff --git a/ngraph/core/include/ngraph/ops.hpp b/ngraph/core/include/ngraph/ops.hpp
index 6f28093..6d3983a 100644 (file)
@@ -49,7 +49,6 @@
 #include "ngraph/op/deformable_convolution.hpp"
 #include "ngraph/op/deformable_psroi_pooling.hpp"
 #include "ngraph/op/depth_to_space.hpp"
-#include "ngraph/op/dequantize.hpp"
 #include "ngraph/op/detection_output.hpp"
 #include "ngraph/op/divide.hpp"
 #include "ngraph/op/dot.hpp"
diff --git a/ngraph/core/include/ngraph/pass/constant_folding.hpp b/ngraph/core/include/ngraph/pass/constant_folding.hpp
index b064245..648f36b 100644 (file)
@@ -38,7 +38,6 @@ public:
         m_cfmap = cfmap;
         m_enable_shape_inference = true;
         construct_constant_quantize();
-        construct_constant_dequantize();
         construct_constant_convert();
         construct_constant_arithmetic_reduction();
         construct_constant_logical_reduction();
@@ -51,7 +50,6 @@ public:
 
 private:
     void construct_constant_quantize();
-    void construct_constant_dequantize();
     void construct_constant_convert();
     void construct_constant_arithmetic_reduction();
     void construct_constant_logical_reduction();
diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/dequantize.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/dequantize.hpp
deleted file mode 100644 (file)
index d76b7dd..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#pragma once
-
-#include <cmath>
-
-#include "ngraph/axis_set.hpp"
-#include "ngraph/coordinate_transform.hpp"
-#include "ngraph/shape_util.hpp"
-
-namespace ngraph
-{
-    namespace runtime
-    {
-        namespace reference
-        {
-            template <typename QUANT, typename REAL>
-            void dequantize(const QUANT* input,
-                            const REAL* scale,
-                            const QUANT* zero_point,
-                            REAL* output,
-                            const Shape& input_shape,
-                            const Shape& scale_zero_point_shape,
-                            const AxisSet& axes)
-            {
-                CoordinateTransform input_transform(input_shape);
-                CoordinateTransform scale_zero_point_transform(scale_zero_point_shape);
-
-                for (const Coordinate& input_coord : input_transform)
-                {
-                    Coordinate scale_zero_point_coord = project(input_coord, axes);
-
-                    output[input_transform.index(input_coord)] =
-                        static_cast<REAL>((
-                            input[input_transform.index(input_coord)] -
-                            zero_point[scale_zero_point_transform.index(scale_zero_point_coord)])) *
-                        scale[scale_zero_point_transform.index(scale_zero_point_coord)];
-                }
-            }
-        }
-    }
-}
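
A minimal standalone sketch (not from this patch) of the arithmetic the deleted reference kernel performs: with quantization axes {0} on a {2, 3} input, each row uses the scale and zero point at the coordinate projected onto the axes. The values below are made up for illustration:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // r[i][j] = (q[i][j] - z[i]) * s[i]: one scale/zero point per row (axis 0)
        const uint8_t input[2][3] = {{10, 12, 14}, {22, 24, 26}};
        const float scale[2] = {0.5f, 2.0f};
        const uint8_t zero_point[2] = {10, 20};
        float output[2][3];

        for (int i = 0; i < 2; ++i)
            for (int j = 0; j < 3; ++j)
                output[i][j] = static_cast<float>(input[i][j] - zero_point[i]) * scale[i];

        for (int i = 0; i < 2; ++i)
            std::printf("%g %g %g\n", output[i][0], output[i][1], output[i][2]);
        // prints: 0 1 2
        //         4 8 12
        return 0;
    }
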
diff --git a/ngraph/core/src/op/dequantize.cpp b/ngraph/core/src/op/dequantize.cpp
deleted file mode 100644 (file)
index df3a0fb..0000000
+++ /dev/null
@@ -1,160 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#include "ngraph/op/dequantize.hpp"
-#include "ngraph/shape_util.hpp"
-
-NGRAPH_SUPPRESS_DEPRECATED_START
-
-using namespace std;
-using namespace ngraph;
-
-constexpr NodeTypeInfo op::Dequantize::type_info;
-
-op::Dequantize::Dequantize(const Output<Node>& input,
-                           const Output<Node>& scale,
-                           const Output<Node>& zero_point,
-                           const element::Type& type,
-                           const AxisSet& axes)
-
-    : Op({input, scale, zero_point})
-    , m_type(type)
-    , m_axes(axes)
-{
-    constructor_validate_and_infer_types();
-}
-
-void op::Dequantize::validate_and_infer_types()
-{
-    enum
-    {
-        INPUT,
-        SCALE,
-        ZERO_POINT
-    };
-
-    NODE_VALIDATION_CHECK(this, m_type.is_static(), "Output element type must not be dynamic");
-
-    NODE_VALIDATION_CHECK(
-        this, m_type.is_real(), "Output element type (", m_type, ") must be a floating point type");
-
-    element::Type quantized_type;
-
-    NODE_VALIDATION_CHECK(this,
-                          element::Type::merge(quantized_type,
-                                               get_input_element_type(INPUT),
-                                               get_input_element_type(ZERO_POINT)),
-                          "Zero point element type (",
-                          get_input_element_type(ZERO_POINT),
-                          ") must match input element type (",
-                          get_input_element_type(INPUT),
-                          ")");
-
-    NODE_VALIDATION_CHECK(this,
-                          quantized_type.is_dynamic() || quantized_type.is_quantized(),
-                          "Zero point / input element type (",
-                          quantized_type,
-                          ") must be a quantized type");
-
-    element::Type unquantized_type;
-
-    NODE_VALIDATION_CHECK(
-        this,
-        element::Type::merge(unquantized_type, get_input_element_type(SCALE), m_type),
-        "Scale element type (",
-        get_input_element_type(SCALE),
-        ") must match output element type (",
-        m_type,
-        ")");
-
-    PartialShape input_shape = get_input_partial_shape(0);
-    Dimension input_rank = input_shape.rank();
-
-    for (auto axis : m_axes)
-    {
-        NODE_VALIDATION_CHECK(this,
-                              input_rank.is_dynamic() || axis < input_rank.get_length(),
-                              "Quantization axis (",
-                              axis,
-                              ") must be less than input shape rank (",
-                              input_rank,
-                              ")");
-    }
-
-    PartialShape scale_zero_point_shape = get_input_partial_shape(SCALE);
-
-    NODE_VALIDATION_CHECK(
-        this,
-        PartialShape::merge_into(scale_zero_point_shape, get_input_partial_shape(ZERO_POINT)),
-        "Scale shape (",
-        get_input_partial_shape(SCALE),
-        ") and zero point shape (",
-        get_input_partial_shape(ZERO_POINT),
-        ") must match");
-
-    NODE_VALIDATION_CHECK(this,
-                          scale_zero_point_shape.rank().compatible(m_axes.size()),
-                          "Scale / zero point rank (",
-                          scale_zero_point_shape.rank(),
-                          ") does not match the number of ",
-                          "quantization axes (",
-                          m_axes.size(),
-                          ")");
-
-    set_output_size(1);
-
-    if (input_shape.rank().is_static() && scale_zero_point_shape.rank().is_static())
-    {
-        size_t i = 0;
-
-        vector<Dimension> injected_scale_zero_point_dims;
-
-        for (size_t j = 0; j < input_shape.rank().get_length(); j++)
-        {
-            if (m_axes.count(j) != 0)
-            {
-                injected_scale_zero_point_dims.push_back(scale_zero_point_shape[i++]);
-            }
-            else
-            {
-                injected_scale_zero_point_dims.push_back(Dimension::dynamic());
-            }
-        }
-
-        PartialShape result_shape = input_shape;
-        NODE_VALIDATION_CHECK(
-            this,
-            PartialShape::merge_into(result_shape, PartialShape{injected_scale_zero_point_dims}),
-            "Scale / zero point shape (",
-            scale_zero_point_shape,
-            ") must match input shape (",
-            input_shape,
-            ") at the quantization axes (",
-            m_axes,
-            ")");
-        set_output_type(0, unquantized_type, result_shape);
-    }
-    else
-    {
-        set_output_type(0, unquantized_type, PartialShape::dynamic());
-    }
-}
-
-shared_ptr<Node> op::Dequantize::clone_with_new_inputs(const OutputVector& new_args) const
-{
-    check_new_args_count(this, new_args);
-    return make_shared<Dequantize>(new_args.at(0), new_args.at(1), new_args.at(2), m_type, m_axes);
-}
diff --git a/ngraph/core/src/op/fake_quantize.cpp b/ngraph/core/src/op/fake_quantize.cpp
index 5646920..fec59b9 100644 (file)
@@ -22,7 +22,6 @@
 #include "ngraph/op/add.hpp"
 #include "ngraph/op/constant.hpp"
 #include "ngraph/op/convert.hpp"
-#include "ngraph/op/dequantize.hpp"
 #include "ngraph/op/divide.hpp"
 #include "ngraph/op/greater.hpp"
 #include "ngraph/op/less_eq.hpp"
diff --git a/ngraph/core/src/pass/constant_folding_dequantize.cpp b/ngraph/core/src/pass/constant_folding_dequantize.cpp
deleted file mode 100644 (file)
index 83f6aab..0000000
+++ /dev/null
@@ -1,111 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#include "constant_folding.hpp"
-#include "ngraph/log.hpp"
-#include "ngraph/op/dequantize.hpp"
-#include "ngraph/runtime/reference/dequantize.hpp"
-
-NGRAPH_SUPPRESS_DEPRECATED_START
-
-using namespace std;
-using namespace ngraph;
-
-template <class QUANT, class REAL>
-shared_ptr<op::Constant> fold_constant_dequantize(shared_ptr<op::Constant> constant,
-                                                  shared_ptr<op::Dequantize> dequant,
-                                                  shared_ptr<op::Constant> scale,
-                                                  shared_ptr<op::Constant> offset)
-{
-    const Shape& out_shape = constant->get_shape();
-    runtime::AlignedBuffer buffer(shape_size(out_shape) * sizeof(REAL));
-    REAL* data_ptr = buffer.get_ptr<REAL>();
-
-    runtime::reference::dequantize<QUANT, REAL>(constant->get_data_ptr<QUANT>(),
-                                                scale->get_data_ptr<REAL>(),
-                                                offset->get_data_ptr<QUANT>(),
-                                                data_ptr,
-                                                constant->get_shape(),
-                                                scale->get_shape(),
-                                                dequant->get_axes());
-
-    return make_shared<op::Constant>(dequant->get_element_type(), out_shape, data_ptr);
-}
-
-void pass::ConstantFolding::construct_constant_dequantize()
-{
-    auto constant_label =
-        make_shared<pattern::op::Label>(element::u8, Shape{2}, pattern::has_class<op::Constant>());
-    auto dq_scale = op::Constant::create(element::f32, Shape{}, {1});
-    auto dq_offset = op::Constant::create(element::u8, Shape{}, {1});
-    auto dequant_op =
-        make_shared<op::Dequantize>(constant_label, dq_scale, dq_offset, element::f32, AxisSet{});
-    auto dequant = make_shared<pattern::op::Label>(dequant_op, nullptr, NodeVector{dequant_op});
-
-    auto constant_dequantize_callback = [this, constant_label, dequant](pattern::Matcher& m) {
-        NGRAPH_DEBUG << "In callback for constant_dequantize_callback against node = "
-                     << m.get_match_root()->get_name();
-
-        auto pattern_map = m.get_pattern_map();
-
-        auto constant_match = as_type_ptr<op::Constant>(pattern_map[constant_label]);
-        auto dequant_match = pattern_map[dequant];
-        auto dequantize_op = as_type_ptr<op::Dequantize>(dequant_match);
-
-        if (cf_is_disabled(dequantize_op))
-            return false;
-
-        auto scale = as_type_ptr<op::Constant>(dequant_match->input_value(1).get_node_shared_ptr());
-        auto offset =
-            as_type_ptr<op::Constant>(dequant_match->input_value(2).get_node_shared_ptr());
-
-        NGRAPH_CHECK(revalidate_and_ensure_static(dequantize_op));
-        auto type = constant_match->get_element_type();
-
-        if (dequant_match->get_element_type() != element::f32)
-        {
-            return false;
-        }
-
-        if (type == element::u8)
-        {
-            auto const_node = fold_constant_dequantize<uint8_t, float>(
-                constant_match, dequantize_op, scale, offset);
-            const_node->set_friendly_name(m.get_match_root()->get_friendly_name());
-            replace_node(m.get_match_root(), const_node);
-            copy_runtime_info_to_target_inputs(m.get_match_root(), const_node);
-            return true;
-        }
-        else if (type == element::i8)
-        {
-            auto const_node = fold_constant_dequantize<int8_t, float>(
-                constant_match, dequantize_op, scale, offset);
-            const_node->set_friendly_name(m.get_match_root()->get_friendly_name());
-            replace_node(m.get_match_root(), const_node);
-            copy_runtime_info_to_target_inputs(m.get_match_root(), const_node);
-            return true;
-        }
-
-        return false;
-    };
-
-    auto dequantize_matcher =
-        make_shared<pattern::Matcher>(dequant, "ConstantFolding.ConstantDequantize");
-    NGRAPH_SUPPRESS_DEPRECATED_START
-    this->add_matcher(
-        dequantize_matcher, constant_dequantize_callback, PassProperty::CHANGE_DYNAMIC_STATE);
-    NGRAPH_SUPPRESS_DEPRECATED_END
-}
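
In effect, the deleted pass replaced a Dequantize whose inputs were all constant with the folded float constant. For example, the const_dequantize unit test removed further below dequantizes the u8 constant {1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7} with scale 2 and offset 1 and expects the folded f32 constant {0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12}.
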
diff --git a/ngraph/frontend/onnx_import/src/op/dequantize_linear.cpp b/ngraph/frontend/onnx_import/src/op/dequantize_linear.cpp
index 6573839..cbe0c49 100644 (file)
@@ -21,7 +21,6 @@
 #include "ngraph/axis_set.hpp"
 #include "ngraph/builder/make_constant.hpp"
 #include "ngraph/op/convert.hpp"
-#include "ngraph/op/dequantize.hpp"
 #include "ngraph/shape.hpp"
 #include "ngraph/validation_util.hpp"
 #include "onnx_import/core/null_node.hpp"
diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt
index 67cdbe3..fcfbe27 100644 (file)
@@ -114,7 +114,6 @@ set(SRC
     type_prop/deformable_convolution.cpp
     type_prop/deformable_psroi_pooling.cpp
     type_prop/depth_to_space.cpp
-    type_prop/dequantize.cpp
     type_prop/dot.cpp
     type_prop/dyn_reshape.cpp
     type_prop/strided_slice.cpp
diff --git a/ngraph/test/backend/quantize_dequantize.in.cpp b/ngraph/test/backend/quantize_dequantize.in.cpp
index c90143f..0da1e80 100644 (file)
@@ -61,37 +61,6 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize)
     test_case.run();
 }
 
-NGRAPH_TEST(${BACKEND_NAME}, dequantize)
-{
-    Shape input_shape{4, 3};
-    Shape scale_offset_shape;
-    AxisSet quantization_axes;
-
-    auto input_type = element::u8;
-    auto output_type = element::f32;
-
-    typedef uint8_t input_c_type;
-    typedef float output_c_type;
-
-    auto X = make_shared<op::Parameter>(input_type, input_shape);
-    auto scale = op::Constant::create(output_type, scale_offset_shape, {2});
-    auto offset = op::Constant::create(input_type, scale_offset_shape, {1});
-    auto dequantize = make_shared<op::Dequantize>(X, scale, offset, output_type, quantization_axes);
-    auto f = make_shared<Function>(dequantize, ParameterVector{X});
-
-    std::vector<input_c_type> x{{1, 1, 2, 3, 3, 3, 4, 5, 5, 5, 6, 7}};
-    // minus offset                   1  1  1  1  1  1  1  1  1  1  1  1
-    // equals                         0  0  1  2  2  2  3  4  4  4  5  6
-    // multiplied by scale            2  2  2  2  2  2  2  2  2  2  2  2
-    // equals                         0  0  2  4  4  4  6  8  8  8 10 12
-
-    auto test_case = test::TestCase<TestEngine>(f);
-    test_case.add_input<input_c_type>({x});
-    test_case.add_expected_output<output_c_type>(input_shape,
-                                                 {0, 0, 2, 4, 4, 4, 6, 8, 8, 8, 10, 12});
-    test_case.run(MIN_FLOAT_TOLERANCE_BITS);
-}
-
 NGRAPH_TEST(${BACKEND_NAME}, quantize_zero_offset)
 {
     Shape input_shape{4, 3};
@@ -125,37 +94,6 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_zero_offset)
     test_case.run();
 }
 
-NGRAPH_TEST(${BACKEND_NAME}, dequantize_zero_offset)
-{
-    Shape input_shape{4, 3};
-    Shape scale_offset_shape;
-    AxisSet quantization_axes;
-
-    auto input_type = element::u8;
-    auto output_type = element::f32;
-
-    typedef uint8_t input_c_type;
-    typedef float output_c_type;
-
-    auto X = make_shared<op::Parameter>(input_type, input_shape);
-    auto scale = op::Constant::create(output_type, scale_offset_shape, {2});
-    auto offset = op::Constant::create(input_type, scale_offset_shape, {0});
-    auto dequantize = make_shared<op::Dequantize>(X, scale, offset, output_type, quantization_axes);
-    auto f = make_shared<Function>(dequantize, ParameterVector{X});
-
-    std::vector<input_c_type> x{0, 0, 1, 2, 2, 2, 3, 4, 4, 4, 5, 6};
-    // minus offset                   0  0  0  0  0  0  0  0  0  0  0  0
-    // equals                         0  0  1  2  2  2  3  4  4  4  5  6
-    // multiplied by scale            2  2  2  2  2  2  2  2  2  2  2  2
-    // equals                         0  0  2  4  4  4  6  8  8  8 10 12
-
-    auto test_case = test::TestCase<TestEngine>(f);
-    test_case.add_input<input_c_type>({x});
-    test_case.add_expected_output<output_c_type>(input_shape,
-                                                 {0, 0, 2, 4, 4, 4, 6, 8, 8, 8, 10, 12});
-    test_case.run(MIN_FLOAT_TOLERANCE_BITS);
-}
-
 NGRAPH_TEST(${BACKEND_NAME}, quantize_axes)
 {
     Shape input_shape{4, 3};
@@ -190,37 +128,6 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_axes)
     test_case.run();
 }
 
-NGRAPH_TEST(${BACKEND_NAME}, dequantize_axes)
-{
-    Shape input_shape{4, 3};
-    Shape scale_offset_shape{4};
-    AxisSet quantization_axes{0};
-
-    auto input_type = element::u8;
-    auto output_type = element::f32;
-
-    typedef uint8_t input_c_type;
-    typedef float output_c_type;
-
-    auto X = make_shared<op::Parameter>(input_type, input_shape);
-    auto scale = op::Constant::create(output_type, scale_offset_shape, {2, 3, 4, 5});
-    auto offset = op::Constant::create(input_type, scale_offset_shape, {10, 20, 30, 40});
-    auto dequantize = make_shared<op::Dequantize>(X, scale, offset, output_type, quantization_axes);
-    auto f = make_shared<Function>(dequantize, ParameterVector{X});
-
-    std::vector<input_c_type> x{10, 11, 11, 21, 21, 22, 32, 32, 32, 42, 42, 42};
-    // minus offset                   10  10  10  20  20  20  30  30  30  40  40  40
-    // equals                          0   1   1   1   1   2   2   2   2   2   2   2
-    // multiplied by scale             2   2   2   3   3   3   4   4   4   5   5   5
-    // equals                          0   2   2   3   3   6   8   8   8  10  10  10
-
-    auto test_case = test::TestCase<TestEngine>(f);
-    test_case.add_input<input_c_type>({x});
-    test_case.add_expected_output<output_c_type>(input_shape,
-                                                 {0, 2, 2, 3, 3, 6, 8, 8, 8, 10, 10, 10});
-    test_case.run(MIN_FLOAT_TOLERANCE_BITS);
-}
-
 NGRAPH_TEST(${BACKEND_NAME}, quantize_int8)
 {
     Shape input_shape{4, 3};
@@ -255,37 +162,6 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_int8)
     test_case.run();
 }
 
-NGRAPH_TEST(${BACKEND_NAME}, dequantize_int8)
-{
-    Shape input_shape{4, 3};
-    Shape scale_offset_shape;
-    AxisSet quantization_axes;
-
-    auto input_type = element::i8;
-    auto output_type = element::f32;
-
-    typedef int8_t input_c_type;
-    typedef float output_c_type;
-
-    auto X = make_shared<op::Parameter>(input_type, input_shape);
-    auto scale = op::Constant::create(output_type, scale_offset_shape, {2});
-    auto offset = op::Constant::create(input_type, scale_offset_shape, {1});
-    auto dequantize = make_shared<op::Dequantize>(X, scale, offset, output_type, quantization_axes);
-    auto f = make_shared<Function>(dequantize, ParameterVector{X});
-
-    std::vector<input_c_type> x{1, 1, 2, -1, 3, -1, 4, -3, 5, -3, 6, -5};
-    // minus offset                   1  1  1   1  1   1  1   1  1   1  1   1
-    // equals                         0  0  1  -2  2  -2  3  -4  4  -4  5  -6
-    // multiplied by scale            2  2  2   2  2   2  2   2  2   2  2   2
-    // equals                         0  0  2  -4  4  -4  6  -8  8  -8 10 -12
-
-    auto test_case = test::TestCase<TestEngine>(f);
-    test_case.add_input<input_c_type>({x});
-    test_case.add_expected_output<output_c_type>(input_shape,
-                                                 {0, 0, 2, -4, 4, -4, 6, -8, 8, -8, 10, -12});
-    test_case.run(MIN_FLOAT_TOLERANCE_BITS);
-}
-
 NGRAPH_TEST(${BACKEND_NAME}, quantize_int8_zero_offset)
 {
     Shape input_shape{4, 3};
@@ -320,37 +196,6 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_int8_zero_offset)
     test_case.run();
 }
 
-NGRAPH_TEST(${BACKEND_NAME}, dequantize_int8_zero_offset)
-{
-    Shape input_shape{4, 3};
-    Shape scale_offset_shape;
-    AxisSet quantization_axes;
-
-    auto input_type = element::i8;
-    auto output_type = element::f32;
-
-    typedef int8_t input_c_type;
-    typedef float output_c_type;
-
-    auto X = make_shared<op::Parameter>(input_type, input_shape);
-    auto scale = op::Constant::create(output_type, scale_offset_shape, {2});
-    auto offset = op::Constant::create(input_type, scale_offset_shape, {0});
-    auto dequantize = make_shared<op::Dequantize>(X, scale, offset, output_type, quantization_axes);
-    auto f = make_shared<Function>(dequantize, ParameterVector{X});
-
-    std::vector<input_c_type> x{0, 0, 1, -2, 2, -2, 3, -4, 4, -4, 5, -6};
-    // minus offset                   0  0  0   0  0   0  0   0  0   0  0   0
-    // equals                         0  0  1  -2  2  -2  3  -4  4  -4  5  -6
-    // multiplied by scale            2  2  2   2  2   2  2   2  2   2  2   2
-    // equals                         0  0  2  -4  4  -4  6  -8  8  -8 10 -12
-
-    auto test_case = test::TestCase<TestEngine>(f);
-    test_case.add_input<input_c_type>({x});
-    test_case.add_expected_output<output_c_type>(input_shape,
-                                                 {0, 0, 2, -4, 4, -4, 6, -8, 8, -8, 10, -12});
-    test_case.run(MIN_FLOAT_TOLERANCE_BITS);
-}
-
 NGRAPH_TEST(${BACKEND_NAME}, quantize_int32)
 {
     Shape input_shape{4, 3};
@@ -385,37 +230,6 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_int32)
     test_case.run();
 }
 
-NGRAPH_TEST(${BACKEND_NAME}, dequantize_int32)
-{
-    Shape input_shape{4, 3};
-    Shape scale_offset_shape;
-    AxisSet quantization_axes;
-
-    auto input_type = element::i32;
-    auto output_type = element::f32;
-
-    typedef int32_t input_c_type;
-    typedef float output_c_type;
-
-    auto X = make_shared<op::Parameter>(input_type, input_shape);
-    auto scale = op::Constant::create(output_type, scale_offset_shape, {2});
-    auto offset = op::Constant::create(input_type, scale_offset_shape, {1});
-    auto dequantize = make_shared<op::Dequantize>(X, scale, offset, output_type, quantization_axes);
-    auto f = make_shared<Function>(dequantize, ParameterVector{X});
-
-    std::vector<input_c_type> x{1, 1, 2, -1, 3, -1, 4, -3, 5, -3, 6, -5};
-    // minus offset                   1  1  1   1  1   1  1   1  1   1  1   1
-    // equals                         0  0  1  -2  2  -2  3  -4  4  -4  5  -6
-    // multiplied by scale            2  2  2   2  2   2  2   2  2   2  2   2
-    // equals                         0  0  2  -4  4  -4  6  -8  8  -8 10 -12
-
-    auto test_case = test::TestCase<TestEngine>(f);
-    test_case.add_input<input_c_type>({x});
-    test_case.add_expected_output<output_c_type>(input_shape,
-                                                 {0, 0, 2, -4, 4, -4, 6, -8, 8, -8, 10, -12});
-    test_case.run(MIN_FLOAT_TOLERANCE_BITS);
-}
-
 NGRAPH_TEST(${BACKEND_NAME}, quantize_int32_zero_offset)
 {
     Shape input_shape{4, 3};
@@ -450,37 +264,6 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_int32_zero_offset)
     test_case.run();
 }
 
-NGRAPH_TEST(${BACKEND_NAME}, dequantize_int32_zero_offset)
-{
-    Shape input_shape{4, 3};
-    Shape scale_offset_shape;
-    AxisSet quantization_axes;
-
-    auto input_type = element::i32;
-    auto output_type = element::f32;
-
-    typedef int32_t input_c_type;
-    typedef float output_c_type;
-
-    auto X = make_shared<op::Parameter>(input_type, input_shape);
-    auto scale = op::Constant::create(output_type, scale_offset_shape, {2});
-    auto offset = op::Constant::create(input_type, scale_offset_shape, {0});
-    auto dequantize = make_shared<op::Dequantize>(X, scale, offset, output_type, quantization_axes);
-    auto f = make_shared<Function>(dequantize, ParameterVector{X});
-
-    std::vector<input_c_type> x{0, 0, 1, -2, 2, -2, 3, -4, 4, -4, 5, -6};
-    // minus offset                   0  0  0   0  0   0  0   0  0   0  0   0
-    // equals                         0  0  1  -2  2  -2  3  -4  4  -4  5  -6
-    // multiplied by scale            2  2  2   2  2   2  2   2  2   2  2   2
-    // equals                         0  0  2  -4  4  -4  6  -8  8  -8 10 -12
-
-    auto test_case = test::TestCase<TestEngine>(f);
-    test_case.add_input<input_c_type>({x});
-    test_case.add_expected_output<output_c_type>(input_shape,
-                                                 {0, 0, 2, -4, 4, -4, 6, -8, 8, -8, 10, -12});
-    test_case.run(MIN_FLOAT_TOLERANCE_BITS);
-}
-
 NGRAPH_TEST(${BACKEND_NAME}, quantize_clamp_uint8)
 {
     Shape input_shape{4, 3};
@@ -878,37 +661,6 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_DOWN)
     test_case.run();
 }
 
-NGRAPH_TEST(${BACKEND_NAME}, dequantize_dynamic_offset)
-{
-    Shape input_shape{4};
-    Shape scale_offset_shape = {};
-    AxisSet quantization_axes;
-
-    auto input_type = element::u8;
-    auto output_type = element::f32;
-
-    typedef uint8_t input_c_type;
-    typedef float output_c_type;
-
-    auto X = make_shared<op::Parameter>(input_type, input_shape);
-    auto scale = make_shared<op::Parameter>(output_type, scale_offset_shape);
-    auto offset = make_shared<op::Parameter>(input_type, scale_offset_shape);
-    auto dequantize = make_shared<op::Dequantize>(X, scale, offset, output_type, quantization_axes);
-    auto f = make_shared<Function>(dequantize, ParameterVector{X, scale, offset});
-
-    std::vector<input_c_type> x{0, 3, 128, 255};
-    std::vector<output_c_type> Scale{2};
-    std::vector<input_c_type> Offset{128};
-
-    auto test_case = test::TestCase<TestEngine>(f);
-    test_case.add_input<input_c_type>({x});
-    test_case.add_input<output_c_type>({Scale});
-    test_case.add_input<input_c_type>({Offset});
-
-    test_case.add_expected_output<output_c_type>(input_shape, {-256.0f, -250.0f, 0.0f, 254.0f});
-    test_case.run(MIN_FLOAT_TOLERANCE_BITS);
-}
-
 NGRAPH_TEST(${BACKEND_NAME}, quantize_dynamic_offset)
 {
     Shape input_shape{4, 3};
diff --git a/ngraph/test/constant_folding.cpp b/ngraph/test/constant_folding.cpp
index 614bf74..577bc0b 100644 (file)
@@ -512,42 +512,6 @@ TEST(constant_folding, constant_unary_binary)
     ASSERT_NO_THROW(pass_manager.run_passes(func_error));
 }
 
-TEST(constant_folding, const_dequantize)
-{
-    Shape input_shape{12};
-    Shape scale_offset_shape;
-    AxisSet quantization_axes;
-
-    auto quant_type = element::u8;
-    auto output_type = element::f32;
-    typedef float output_c_type;
-
-    vector<uint8_t> values_in{1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7};
-    auto constant = op::Constant::create(quant_type, input_shape, values_in);
-    auto scale = op::Constant::create(output_type, scale_offset_shape, {2});
-    auto offset = op::Constant::create(quant_type, scale_offset_shape, {1});
-    auto dequantize =
-        make_shared<op::Dequantize>(constant, scale, offset, output_type, quantization_axes);
-    dequantize->set_friendly_name("test");
-    auto f = make_shared<Function>(dequantize, ParameterVector{});
-
-    pass::Manager pass_manager;
-    pass_manager.register_pass<pass::ConstantFolding>();
-    pass_manager.run_passes(f);
-
-    ASSERT_EQ(count_ops_of_type<op::Dequantize>(f), 0);
-    ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
-
-    auto new_const =
-        as_type_ptr<op::Constant>(f->get_results().at(0)->input_value(0).get_node_shared_ptr());
-    ASSERT_TRUE(new_const);
-    ASSERT_EQ(new_const->get_friendly_name(), "test");
-    auto values_out = new_const->get_vector<output_c_type>();
-
-    vector<output_c_type> values_dequantize{0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12};
-    ASSERT_EQ(values_dequantize, values_out);
-}
-
 TEST(constant_folding, const_quantize)
 {
     Shape input_shape{12};
diff --git a/ngraph/test/op_is.cpp b/ngraph/test/op_is.cpp
index fe64a5f..b8cc9ae 100644 (file)
@@ -209,15 +209,6 @@ namespace
         EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
     }
 
-    void op_is_Dequantize()
-    {
-        op::Dequantize node;
-        EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
-        EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
-        EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
-        EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
-    }
-
     void op_is_Divide()
     {
         op::Divide node;
diff --git a/ngraph/test/runtime/interpreter/int_executable.cpp b/ngraph/test/runtime/interpreter/int_executable.cpp
index 9b54f73..ed4a961 100644 (file)
@@ -177,8 +177,7 @@ bool runtime::interpreter::INTExecutable::call(const vector<shared_ptr<runtime::
 
         // get op type
         element::Type type;
-        if (is_type<op::Convert>(op) || is_type<op::Quantize>(op) || is_type<op::Dequantize>(op) ||
-            is_type<op::PriorBox>(op))
+        if (is_type<op::Convert>(op) || is_type<op::Quantize>(op) || is_type<op::PriorBox>(op))
         {
             type = op->get_input_element_type(0);
         }
diff --git a/ngraph/test/runtime/interpreter/int_executable.hpp b/ngraph/test/runtime/interpreter/int_executable.hpp
index b3b3550..9e473a3 100644 (file)
@@ -46,7 +46,6 @@
 #include "ngraph/runtime/reference/ctc_greedy_decoder.hpp"
 #include "ngraph/runtime/reference/ctc_loss.hpp"
 #include "ngraph/runtime/reference/cum_sum.hpp"
-#include "ngraph/runtime/reference/dequantize.hpp"
 #include "ngraph/runtime/reference/detection_output.hpp"
 #include "ngraph/runtime/reference/dot.hpp"
 #include "ngraph/runtime/reference/elu.hpp"
@@ -474,40 +473,6 @@ protected:
             }
             break;
         }
-        case OP_TYPEID::Dequantize:
-        {
-            const op::Dequantize* dequantize = static_cast<const op::Dequantize*>(&node);
-            auto type = dequantize->get_element_type();
-
-            if (type == element::f32)
-            {
-                reference::dequantize<T>(args[0]->get_data_ptr<const T>(),
-                                         args[1]->get_data_ptr<const float>(),
-                                         args[2]->get_data_ptr<const T>(),
-                                         out[0]->get_data_ptr<float>(),
-                                         node.get_input_shape(0),
-                                         node.get_input_shape(1),
-                                         dequantize->get_axes());
-            }
-            else if (type == element::f64)
-            {
-                reference::dequantize<T>(args[0]->get_data_ptr<const T>(),
-                                         args[1]->get_data_ptr<const double>(),
-                                         args[2]->get_data_ptr<const T>(),
-                                         out[0]->get_data_ptr<double>(),
-                                         node.get_input_shape(0),
-                                         node.get_input_shape(1),
-                                         dequantize->get_axes());
-            }
-            else
-            {
-                std::stringstream ss;
-                ss << "unsupported element type " << type << " op Dequantize";
-                throw std::runtime_error(ss.str());
-            }
-
-            break;
-        }
         case OP_TYPEID::Dot:
         {
             const op::Dot* dot = static_cast<const op::Dot*>(&node);
diff --git a/ngraph/test/runtime/opset0_tbl.hpp b/ngraph/test/runtime/opset0_tbl.hpp
index abf6a58..6c2ff78 100644 (file)
@@ -70,7 +70,6 @@ NGRAPH_OP(Cos, ngraph::op)
 NGRAPH_OP(Cosh, ngraph::op)
 NGRAPH_OP(CumSum, ngraph::op::v0)
 NGRAPH_OP(DepthToSpace, ngraph::op)
-NGRAPH_OP(Dequantize, ngraph::op)
 NGRAPH_OP(Divide, ngraph::op)
 NGRAPH_OP(Dot, ngraph::op)
 NGRAPH_OP(Elu, ngraph::op)
diff --git a/ngraph/test/type_prop/dequantize.cpp b/ngraph/test/type_prop/dequantize.cpp
deleted file mode 100644 (file)
index 2c55ddb..0000000
+++ /dev/null
@@ -1,724 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
-#include "util/type_prop.hpp"
-
-NGRAPH_SUPPRESS_DEPRECATED_START
-
-using namespace std;
-using namespace ngraph;
-
-TEST(type_prop, dequantize_f32_from_i8_nchw_per_channel_ok)
-{
-    Shape batch_shape{64, 3, 480, 640};
-    Shape scale_shape{3};
-    Shape zero_point_shape{3};
-    element::Type unquantized_type = element::f32;
-    element::Type quantized_type = element::i8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{1};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-    auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-
-    ASSERT_EQ(quant->get_output_element_type(0), unquantized_type);
-    ASSERT_EQ(quant->get_output_shape(0), batch_shape);
-}
-
-TEST(type_prop, dequantize_f32_from_i8_nchw_per_image_ok)
-{
-    Shape batch_shape{64, 3, 480, 640};
-    Shape scale_shape{64};
-    Shape zero_point_shape{64};
-    element::Type unquantized_type = element::f32;
-    element::Type quantized_type = element::i8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{0};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-    auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-
-    ASSERT_EQ(quant->get_output_element_type(0), unquantized_type);
-    ASSERT_EQ(quant->get_output_shape(0), batch_shape);
-}
-
-TEST(type_prop, dequantize_f32_from_i8_nchw_per_row_ok)
-{
-    Shape batch_shape{64, 3, 480, 640};
-    Shape scale_shape{480};
-    Shape zero_point_shape{480};
-    element::Type unquantized_type = element::f32;
-    element::Type quantized_type = element::i8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{2};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-    auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-
-    ASSERT_EQ(quant->get_output_element_type(0), unquantized_type);
-    ASSERT_EQ(quant->get_output_shape(0), batch_shape);
-}
-
-TEST(type_prop, dequantize_f32_from_i8_nchw_per_image_channel_ok)
-{
-    Shape batch_shape{64, 3, 480, 640};
-    Shape scale_shape{64, 3};
-    Shape zero_point_shape{64, 3};
-    element::Type unquantized_type = element::f32;
-    element::Type quantized_type = element::i8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{0, 1};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-    auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-
-    ASSERT_EQ(quant->get_output_element_type(0), unquantized_type);
-    ASSERT_EQ(quant->get_output_shape(0), batch_shape);
-}
-
-TEST(type_prop, dequantize_f32_from_i8_nchw_whole_batch_ok)
-{
-    Shape batch_shape{64, 3, 480, 640};
-    Shape scale_shape{};
-    Shape zero_point_shape{};
-    element::Type unquantized_type = element::f32;
-    element::Type quantized_type = element::i8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-    auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-
-    ASSERT_EQ(quant->get_output_element_type(0), unquantized_type);
-    ASSERT_EQ(quant->get_output_shape(0), batch_shape);
-}
-
-TEST(type_prop, dequantize_f64_from_i8_ok)
-{
-    Shape batch_shape{64, 3, 480, 640};
-    Shape scale_shape{};
-    Shape zero_point_shape{};
-    element::Type unquantized_type = element::f64;
-    element::Type quantized_type = element::i8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-    auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-
-    ASSERT_EQ(quant->get_output_element_type(0), unquantized_type);
-    ASSERT_EQ(quant->get_output_shape(0), batch_shape);
-}
-
-TEST(type_prop, dequantize_f64_to_u8_ok)
-{
-    Shape batch_shape{64, 3, 480, 640};
-    Shape scale_shape{};
-    Shape zero_point_shape{};
-    element::Type unquantized_type = element::f64;
-    element::Type quantized_type = element::u8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-    auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-
-    ASSERT_EQ(quant->get_output_element_type(0), unquantized_type);
-    ASSERT_EQ(quant->get_output_shape(0), batch_shape);
-}
-
-TEST(type_prop, dequantize_i8_from_u8_fails)
-{
-    Shape batch_shape{64, 3, 480, 640};
-    Shape scale_shape{};
-    Shape zero_point_shape{};
-    element::Type unquantized_type = element::i8;
-    element::Type quantized_type = element::u8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-
-    try
-    {
-        auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-        FAIL() << "Attempt to dequantize to non-floating point type not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             "Output element type (i8) must be a floating point type");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, dequantize_f32_from_f32_fails)
-{
-    Shape batch_shape{64, 3, 480, 640};
-    Shape scale_shape{};
-    Shape zero_point_shape{};
-    element::Type unquantized_type = element::f32;
-    element::Type quantized_type = element::f32;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-
-    try
-    {
-        auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-        FAIL() << "Attempt to dequantize from non-quantized type not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             "Zero point / input element type (f32) must be a quantized type");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, dequantize_batch_zero_point_type_mismatch_fails)
-{
-    Shape batch_shape{64, 3, 480, 640};
-    Shape scale_shape{};
-    Shape zero_point_shape{};
-    element::Type unquantized_type = element::f32;
-    element::Type quantized_type = element::i8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = element::u8;
-    AxisSet axes{};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-
-    try
-    {
-        auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-        FAIL() << "Mismatch of batch and zero point element types not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             "Zero point element type (u8) must match input element type (i8)");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, dequantize_scale_type_mismatch_fails)
-{
-    Shape batch_shape{64, 3, 480, 640};
-    Shape scale_shape{};
-    Shape zero_point_shape{};
-    element::Type unquantized_type = element::f32;
-    element::Type quantized_type = element::i8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = element::f64;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-
-    try
-    {
-        auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-        FAIL() << "Mismatch of scale element type with scale argument not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             "Scale element type (f64) must match output element type (f32)");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, dequantize_oob_axis_fails)
-{
-    Shape batch_shape{64, 3, 480, 640};
-    Shape scale_shape{320};
-    Shape zero_point_shape{320};
-    element::Type unquantized_type = element::f32;
-    element::Type quantized_type = element::i8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{3, 4};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-
-    try
-    {
-        auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-        FAIL() << "Out-of-bounds quantization axis not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             "Quantization axis (4) must be less than input shape rank (4)");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, dequantize_scale_shape_mismatch_same_rank_fails)
-{
-    Shape batch_shape{64, 3, 480, 640};
-    Shape scale_shape{64, 4};
-    Shape zero_point_shape{64, 3};
-    element::Type unquantized_type = element::f32;
-    element::Type quantized_type = element::i8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{0, 1};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-
-    try
-    {
-        auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-        FAIL() << "Mismatch of scale argument shape with required shape not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             "Scale shape ({64,4}) and zero point shape ({64,3}) must match");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, dequantize_scale_shape_mismatch_different_rank_fails)
-{
-    Shape batch_shape{64, 3, 480, 640};
-    Shape scale_shape{64, 3, 2};
-    Shape zero_point_shape{64, 3};
-    element::Type unquantized_type = element::f32;
-    element::Type quantized_type = element::i8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{0, 1};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-
-    try
-    {
-        auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-        FAIL() << "Mismatch of scale argument shape with required shape not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             "Scale shape ({64,3,2}) and zero point shape ({64,3}) must match");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, dequantize_zero_point_shape_mismatch_same_rank_fails)
-{
-    Shape batch_shape{64, 3, 480, 640};
-    Shape scale_shape{64, 3};
-    Shape zero_point_shape{64, 4};
-    element::Type unquantized_type = element::f32;
-    element::Type quantized_type = element::i8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{0, 1};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-
-    try
-    {
-        auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-        FAIL() << "Mismatch of zero point argument shape with required shape not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             "Scale shape ({64,3}) and zero point shape ({64,4}) must match");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, dequantize_zero_point_shape_mismatch_different_rank_fails)
-{
-    Shape batch_shape{64, 3, 480, 640};
-    Shape scale_shape{64, 3};
-    Shape zero_point_shape{64, 3, 2};
-    element::Type unquantized_type = element::f32;
-    element::Type quantized_type = element::i8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{0, 1};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-
-    try
-    {
-        auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-        FAIL() << "Mismatch of zero point argument shape with required shape not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             "Scale shape ({64,3}) and zero point shape ({64,3,2}) must match");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, dequantize_partial_all_rank_dynamic_ok)
-{
-    PartialShape batch_shape{PartialShape::dynamic()};
-    PartialShape scale_shape{PartialShape::dynamic()};
-    PartialShape zero_point_shape{PartialShape::dynamic()};
-    element::Type unquantized_type = element::f32;
-    element::Type quantized_type = element::i8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{0, 1, 2000};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-    auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-
-    ASSERT_EQ(quant->get_output_element_type(0), unquantized_type);
-    ASSERT_TRUE(quant->get_output_partial_shape(0).rank().is_dynamic());
-}
-
-TEST(type_prop,
-     dequantize_partial_input_rank_dynamic_scale_rank_static_dynamic_zero_point_rank_dynamic_ok)
-{
-    PartialShape batch_shape{PartialShape::dynamic()};
-    PartialShape scale_shape{64, Dimension::dynamic(), 96};
-    PartialShape zero_point_shape{PartialShape::dynamic()};
-    element::Type unquantized_type = element::f32;
-    element::Type quantized_type = element::i8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{0, 1, 2000};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-    auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-
-    ASSERT_EQ(quant->get_output_element_type(0), unquantized_type);
-    ASSERT_TRUE(quant->get_output_partial_shape(0).rank().is_dynamic());
-}
-
-TEST(
-    type_prop,
-    dequantize_partial_input_rank_dynamic_scale_rank_static_dynamic_zero_point_rank_dynamic_axis_count_inconsistent)
-{
-    PartialShape batch_shape{PartialShape::dynamic()};
-    PartialShape scale_shape{64, Dimension::dynamic(), 96};
-    PartialShape zero_point_shape{PartialShape::dynamic()};
-    element::Type unquantized_type = element::f32;
-    element::Type quantized_type = element::i8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{0, 1};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-
-    try
-    {
-        auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-        FAIL() << "Mismatch of scale / zero point rank with axis count not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(
-            error.what(),
-            "Scale / zero point rank (3) does not match the number of quantization axes (2)");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(
-    type_prop,
-    dequantize_partial_input_rank_dynamic_scale_rank_static_dynamic_zero_point_rank_static_dynamic_ok)
-{
-    PartialShape batch_shape{PartialShape::dynamic()};
-    PartialShape scale_shape{64, Dimension::dynamic(), 96, Dimension::dynamic()};
-    PartialShape zero_point_shape{64, 22, Dimension::dynamic(), Dimension::dynamic()};
-    element::Type unquantized_type = element::f32;
-    element::Type quantized_type = element::i8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{0, 1, 5, 88};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-    auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-
-    ASSERT_EQ(quant->get_output_element_type(0), unquantized_type);
-    ASSERT_TRUE(quant->get_output_partial_shape(0).rank().is_dynamic());
-}
-
-TEST(
-    type_prop,
-    dequantize_partial_input_rank_dynamic_scale_rank_static_dynamic_zero_point_rank_static_dynamic_ranks_inconsistent)
-{
-    PartialShape batch_shape{PartialShape::dynamic()};
-    PartialShape scale_shape{64, Dimension::dynamic(), 96, Dimension::dynamic()};
-    PartialShape zero_point_shape{64, 22, Dimension::dynamic(), Dimension::dynamic(), 3};
-    element::Type unquantized_type = element::f32;
-    element::Type quantized_type = element::i8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{0, 1, 5, 88};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-
-    try
-    {
-        auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-        FAIL() << "Inconsistent scale / zero point ranks not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(
-            error.what(),
-            "Scale shape ({64,?,96,?}) and zero point shape ({64,22,?,?,3}) must match");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(
-    type_prop,
-    dequantize_partial_input_rank_dynamic_scale_rank_static_dynamic_zero_point_rank_static_dynamic_dims_inconsistent)
-{
-    PartialShape batch_shape{PartialShape::dynamic()};
-    PartialShape scale_shape{64, Dimension::dynamic(), 96, Dimension::dynamic()};
-    PartialShape zero_point_shape{65, 22, Dimension::dynamic(), Dimension::dynamic()};
-    element::Type unquantized_type = element::f32;
-    element::Type quantized_type = element::i8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{0, 1, 5, 88};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-
-    try
-    {
-        auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-        FAIL() << "Inconsistent scale / zero point dims not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(
-            error.what(),
-            "Scale shape ({64,?,96,?}) and zero point shape ({65,22,?,?}) must match");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(
-    type_prop,
-    dequantize_partial_input_static_rank_dynamic_scale_rank_static_dynamic_zero_point_rank_static_dynamic_ok)
-{
-    PartialShape batch_shape{2, 4, 6, Dimension::dynamic(), 10, Dimension::dynamic()};
-    PartialShape scale_shape{4, Dimension::dynamic(), Dimension::dynamic()};
-    PartialShape zero_point_shape{Dimension::dynamic(), 8, Dimension::dynamic()};
-    element::Type unquantized_type = element::f32;
-    element::Type quantized_type = element::i8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{1, 3, 5};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-    auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-
-    ASSERT_EQ(quant->get_output_element_type(0), unquantized_type);
-    ASSERT_TRUE(quant->get_output_partial_shape(0).same_scheme(
-        PartialShape{2, 4, 6, 8, 10, Dimension::dynamic()}));
-}
-
-TEST(
-    type_prop,
-    dequantize_partial_input_static_rank_dynamic_scale_rank_static_dynamic_zero_point_rank_static_dynamic_axis_oob)
-{
-    PartialShape batch_shape{2, 4, 6, Dimension::dynamic(), 10, Dimension::dynamic()};
-    PartialShape scale_shape{4, Dimension::dynamic(), Dimension::dynamic()};
-    PartialShape zero_point_shape{Dimension::dynamic(), 8, Dimension::dynamic()};
-    element::Type unquantized_type = element::f32;
-    element::Type quantized_type = element::i8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{1, 3, 6};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-
-    try
-    {
-        auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-        FAIL() << "Out-of-bound quantization axis not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             "Quantization axis (6) must be less than input shape rank (6)");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(
-    type_prop,
-    dequantize_partial_input_static_rank_dynamic_scale_rank_static_dynamic_zero_point_rank_static_dynamic_dims_inconsistent)
-{
-    PartialShape batch_shape{2, 5, 6, Dimension::dynamic(), 10, Dimension::dynamic()};
-    PartialShape scale_shape{4, Dimension::dynamic(), Dimension::dynamic()};
-    PartialShape zero_point_shape{Dimension::dynamic(), 8, Dimension::dynamic()};
-    element::Type unquantized_type = element::f32;
-    element::Type quantized_type = element::i8;
-    element::Type batch_type = quantized_type;
-    element::Type scale_type = unquantized_type;
-    element::Type zero_point_type = quantized_type;
-    AxisSet axes{1, 3, 5};
-
-    auto batch = make_shared<op::Parameter>(batch_type, batch_shape);
-    auto scale = make_shared<op::Parameter>(scale_type, scale_shape);
-    auto zero_point = make_shared<op::Parameter>(zero_point_type, zero_point_shape);
-
-    try
-    {
-        auto quant = make_shared<op::Dequantize>(batch, scale, zero_point, unquantized_type, axes);
-        FAIL() << "Inconsistent dimensions not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(
-            error.what(),
-            "Scale / zero point shape ({4,8,?}) must match input shape ({2,5,6,?,10,?}) "
-            "at the quantization axes (AxisSet{1, 3, 5})");
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
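
For reference, the deleted type_prop tests above exercise a small set of shape-inference rules for the removed Dequantize op: the scale and zero point shapes must match, their rank must equal the number of quantization axes, every quantization axis must be less than the input rank (when the input rank is static), and the scale/zero point dimensions must match the input dimensions at those axes, with the output taking the input shape merged at those positions. The following is a minimal standalone sketch of those checks only, not ngraph code: it uses plain std::vector shapes with -1 standing in for a dynamic dimension instead of PartialShape/AxisSet, and it does not model the fully dynamic-rank cases covered by some tests above.

    // Illustrative sketch of the validation rules the removed tests checked.
    // -1 stands in for a dynamic dimension; input rank is assumed static.
    #include <cstdint>
    #include <set>
    #include <stdexcept>
    #include <vector>

    using Shape = std::vector<int64_t>; // -1 == dynamic dimension

    // Merge two possibly-dynamic dimensions; throw if both are static and differ.
    static int64_t merge_dim(int64_t a, int64_t b)
    {
        if (a == -1) return b;
        if (b == -1) return a;
        if (a != b) throw std::runtime_error("dimension mismatch");
        return a;
    }

    // Output shape = input shape, with the dims at `axes` merged against the
    // (already merged) scale / zero point shape.
    static Shape infer_dequantize_shape(const Shape& input,
                                        const Shape& scale,
                                        const Shape& zero_point,
                                        const std::set<size_t>& axes)
    {
        // Scale shape and zero point shape must match (rank and dims).
        if (scale.size() != zero_point.size())
            throw std::runtime_error("Scale shape and zero point shape must match");
        Shape merged(scale.size());
        for (size_t i = 0; i < scale.size(); ++i)
            merged[i] = merge_dim(scale[i], zero_point[i]); // throws on dim mismatch

        // Scale / zero point rank must equal the number of quantization axes.
        if (merged.size() != axes.size())
            throw std::runtime_error(
                "Scale / zero point rank does not match the number of quantization axes");

        // Each axis must be in range; scale / zero point dims must match the
        // input dims at the quantization axes (in ascending axis order).
        Shape output = input;
        size_t i = 0;
        for (size_t axis : axes)
        {
            if (axis >= input.size())
                throw std::runtime_error(
                    "Quantization axis must be less than input shape rank");
            output[axis] = merge_dim(input[axis], merged[i++]);
        }
        return output;
    }

    int main()
    {
        // Mirrors the "...zero_point_rank_static_dynamic_ok" case above:
        // input {2,4,6,?,10,?}, scale {4,?,?}, zero point {?,8,?}, axes {1,3,5}
        // yields {2,4,6,8,10,?}.
        Shape out = infer_dequantize_shape(
            {2, 4, 6, -1, 10, -1}, {4, -1, -1}, {-1, 8, -1}, {1, 3, 5});
        return out == Shape{2, 4, 6, 8, 10, -1} ? 0 : 1;
    }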