Remove some builders for old operations (#1736)
author    Ilya Churaev <ilya.churaev@intel.com>
          Thu, 13 Aug 2020 04:17:24 +0000 (07:17 +0300)
committer GitHub <noreply@github.com>
          Thu, 13 Aug 2020 04:17:24 +0000 (07:17 +0300)
* Remove some builders

* Removed reshape v0 builder

* Fixed code style
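
Migration note: the removed v0 builder helpers (builder::reshape,
builder::reorder_axes, builder::transpose, builder::squeeze,
builder::expand_dims, and friends) have counterparts in the
builder::opset1 namespace, and the hunks below switch call sites over to
them. A minimal before/after sketch, assuming an existing Output<Node>
named value (the concrete shape here is hypothetical):

    #include "ngraph/builder/reshape.hpp"

    // v0 form, removed by this commit:
    //   auto r = ngraph::builder::reshape(value, ngraph::Shape{2, 3});
    // opset1 form used instead:
    auto r = ngraph::builder::opset1::reshape(value, ngraph::Shape{2, 3});
    auto t = ngraph::builder::opset1::transpose(value);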

20 files changed:
ngraph/core/include/ngraph/builder/quantization_utils.hpp [deleted file]
ngraph/core/include/ngraph/builder/quantized_concat_builder.hpp [deleted file]
ngraph/core/include/ngraph/builder/reduce_ops.hpp
ngraph/core/include/ngraph/builder/reshape.hpp
ngraph/core/src/builder/autobroadcast.cpp
ngraph/core/src/builder/matmul_factory.cpp
ngraph/core/src/builder/quantization_utils.cpp [deleted file]
ngraph/core/src/builder/quantized_concat_builder.cpp [deleted file]
ngraph/core/src/builder/reduce_ops.cpp
ngraph/core/src/builder/reshape.cpp
ngraph/core/src/op/fused/grn.cpp
ngraph/core/src/op/fused/gru_cell.cpp
ngraph/core/src/op/fused/lstm_cell.cpp
ngraph/core/src/op/fused/matmul.cpp
ngraph/core/src/op/fused/mvn.cpp
ngraph/core/src/op/fused/rnn_cell.cpp
ngraph/core/src/op/fused/shuffle_channels.cpp
ngraph/test/builder.cpp
ngraph/test/runtime/pass/opset0_downgrade.cpp
ngraph/test/runtime/pass/opset1_upgrade.cpp

diff --git a/ngraph/core/include/ngraph/builder/quantization_utils.hpp b/ngraph/core/include/ngraph/builder/quantization_utils.hpp
deleted file mode 100644 (file)
index f17b18d..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#pragma once
-
-#include <limits>
-#include <vector>
-#include "ngraph/builder/make_constant.hpp"
-#include "ngraph/node.hpp"
-#include "ngraph/op/abs.hpp"
-#include "ngraph/op/add.hpp"
-#include "ngraph/op/broadcast.hpp"
-#include "ngraph/op/constant.hpp"
-#include "ngraph/op/divide.hpp"
-#include "ngraph/op/maximum.hpp"
-#include "ngraph/op/minimum.hpp"
-#include "ngraph/op/multiply.hpp"
-#include "ngraph/op/subtract.hpp"
-#include "ngraph/util.hpp"
-
-namespace ngraph
-{
-    namespace builder
-    {
-        namespace quantization_utils
-        {
-            std::shared_ptr<Node> max_abs(const Output<Node>& a, const Output<Node>& b);
-
-            std::shared_ptr<Node> get_scale(const Output<Node>& input_min_range,
-                                            const Output<Node>& input_max_range,
-                                            const ngraph::element::Type& quant_type,
-                                            bool bump_by_eps = false);
-
-            std::shared_ptr<Node> get_bias_scale(Output<Node> min_input,
-                                                 Output<Node> max_input,
-                                                 Output<Node> min_filter,
-                                                 Output<Node> max_filter);
-
-            std::shared_ptr<Node> get_sum_scale(Output<Node> min_freezed_output_conv_1,
-                                                Output<Node> max_freezed_output_conv_1,
-                                                Output<Node> min_freezed_output_conv_2,
-                                                Output<Node> max_freezed_output_conv_2);
-
-            std::shared_ptr<Node> get_dot_scale(Output<Node> min_input,
-                                                Output<Node> max_input,
-                                                Output<Node> min_filter,
-                                                Output<Node> max_filter,
-                                                Output<Node> min_freezed_output,
-                                                Output<Node> max_freezed_output,
-                                                const ngraph::element::Type& input_type,
-                                                const ngraph::element::Type& output_type,
-                                                const bool requantize = true);
-
-            void check_concat(const OutputVector& args,
-                              const OutputVector& mins,
-                              const OutputVector& maxs);
-        }
-    }
-}
diff --git a/ngraph/core/include/ngraph/builder/quantized_concat_builder.hpp b/ngraph/core/include/ngraph/builder/quantized_concat_builder.hpp
deleted file mode 100644 (file)
index 71b2c1f..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#pragma once
-
-#include "ngraph/builder/make_constant.hpp"
-#include "ngraph/coordinate_diff.hpp"
-#include "ngraph/node.hpp"
-#include "ngraph/op/concat.hpp"
-#include "ngraph/op/dequantize.hpp"
-#include "ngraph/op/max.hpp"
-#include "ngraph/op/min.hpp"
-#include "ngraph/op/quantize.hpp"
-#include "ngraph/op/reshape.hpp"
-#include "quantization_utils.hpp"
-
-namespace ngraph
-{
-    namespace builder
-    {
-        NGRAPH_API
-        std::shared_ptr<Node> QuantizedConcatBuilder(const OutputVector& args,
-                                                     size_t concatenation_axis,
-                                                     const OutputVector& mins,
-                                                     const OutputVector& maxs);
-    }
-}
diff --git a/ngraph/core/include/ngraph/builder/reduce_ops.hpp b/ngraph/core/include/ngraph/builder/reduce_ops.hpp
index 2fc553e..ded40b0 100644 (file)
@@ -23,120 +23,6 @@ namespace ngraph
 {
     namespace builder
     {
-        // clang-format off
-        /// \brief Sum-based L2 Norm of a Tensor.
-        ///
-        /// Calculates
-        ///
-        /// \f$\left(\sum_{i=1}^{N} x_i^2\right)^{0.5}\f$
-        ///
-        /// Where `i` traverses all of the axes provided in `reduction_axes`
-        ///
-        /// ## Inputs
-        ///
-        /// |                  | Type                              | Description                                          |
-        /// | ---------------- | --------------------------------- | ---------------------------------------------------- |
-        /// | `value`          | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape                         |
-        /// | `reduction_axes` | AxesSet                           | The axes to eliminate through reduction (0 indexed). |
-        ///
-        /// ## Output
-        ///
-        /// | Type                                      | Description                                                                                                      |
-        /// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
-        /// | \f$E[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by reduction. |
-        // clang-format on
-        NGRAPH_API
-        std::shared_ptr<Node> l2_norm(const Output<Node>& value, const AxisSet& reduction_axes);
-
-        // clang-format off
-        /// \brief Sum-based Mean of a Tensor.
-        ///
-        /// Calculates
-        ///
-        /// \f$\sum_{i=1}^{N} \frac{x_i}{N}\f$
-        ///
-        /// Where `i` traverses all of the axes provided in `reduction_axes`
-        ///
-        /// ## Inputs
-        ///
-        /// |                  | Type                              | Description                                          |
-        /// | ---------------- | --------------------------------- | ---------------------------------------------------- |
-        /// | `node`           | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape                         |
-        /// | `reduction_axes` | AxesSet                           | The axes to eliminate through reduction (0 indexed). |
-        ///
-        /// ## Output
-        ///
-        /// | Type                                      | Description                                                                                                      |
-        /// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
-        /// | \f$E[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by reduction. |
-        // clang-format on
-        NGRAPH_API
-        std::shared_ptr<Node> mean(const Output<Node>& node, const AxisSet& reduction_axes);
-
-        // clang-format off
-        /// \brief Sum-based Standard Deviation of a Tensor.
-        ///
-        /// If bessel_correction is true, calculates
-        ///
-        /// \f$\sqrt{\frac{\sum_{i=1}^{N}\left(x_i-\bar{x}\right)^2}{N-1}}\f$
-        ///
-        /// else, calculates
-        ///
-        /// \f$\sqrt{\frac{\sum_{i=1}^{N}\left(x_i-\bar{x}\right)^2}{N}}\f$
-        ///
-        /// Where `i` traverses all of the axes provided in `reduction_axes` and \f$\bar{x} = \sum_{i=1}^{N} \frac{x_i}{N}\f$
-        ///
-        /// ## Inputs
-        ///
-        /// |                     | Type                              | Description                                                  |
-        /// | ------------------- | --------------------------------- | ------------------------------------------------------------ |
-        /// | `value`             | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape                                 |
-        /// | `reduction_axes`    | AxesSet                           | The axes to eliminate through reduction (0 indexed).         |
-        /// | `bessel_correction` | bool (default = false)            | Enable Bessel's correction to std_dev for small sample sizes |
-        ///
-        /// ## Output
-        ///
-        /// | Type                                      | Description                                                                                                      |
-        /// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
-        /// | \f$E[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by reduction. |
-        // clang-format on
-        NGRAPH_API
-        std::shared_ptr<Node> std_dev(const Output<Node>& value,
-                                      const AxisSet& reduction_axes,
-                                      const bool bessel_correction = false);
-
-        // clang-format off
-        /// \brief Sum-based Variance of a Tensor.
-        ///
-        /// If bessel_correction is true, calculates
-        ///
-        /// \f$\frac{\sum_{i=1}^{N}\left(x_i-\bar{x}\right)^2}{N-1}\f$
-        ///
-        /// else, calculates
-        ///
-        /// \f$\frac{\sum_{i=1}^{N}\left(x_i-\bar{x}\right)^2}{N}\f$
-        ///
-        /// Where `i` traverses all of the axes provided in `reduction_axes` and \f$\bar{x} = \sum_{i=1}^{N} \frac{x_i}{N}\f$
-        ///
-        /// ## Inputs
-        ///
-        /// |                     | Type                              | Description                                                  |
-        /// | ------------------- | --------------------------------- | ------------------------------------------------------------ |
-        /// | `value`             | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape                                 |
-        /// | `reduction_axes`    | AxesSet                           | The axes to eliminate through reduction (0 indexed).         |
-        /// | `bessel_correction` | bool (default = false)            | Enable Bessel's correction to variance for small sample sizes |
-        ///
-        /// ## Output
-        ///
-        /// | Type                                      | Description                                                                                                      |
-        /// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
-        /// | \f$E[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by reduction. |
-        // clang-format on
-        NGRAPH_API
-        std::shared_ptr<Node> variance(const Output<Node>& value,
-                                       const AxisSet& reduction_axes,
-                                       const bool bessel_correction = false);
-
         namespace opset1
         {
             // clang-format off
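
The reduction helpers deleted above (l2_norm, mean, std_dev, variance)
likewise have builder::opset1 replacements; as the reduce_ops.cpp hunk
later in this diff shows, opset1::mean additionally accepts a keep_dims
flag. A minimal caller migration sketch (passing keep_dims explicitly is a
hypothetical choice, not taken from this diff):

    // v0 form, removed by this commit:
    //   auto m = ngraph::builder::mean(value, reduction_axes);
    // opset1 form:
    auto m = ngraph::builder::opset1::mean(value, reduction_axes, /*keep_dims=*/false);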
diff --git a/ngraph/core/include/ngraph/builder/reshape.hpp b/ngraph/core/include/ngraph/builder/reshape.hpp
index 144351f..1c974fe 100644 (file)
@@ -27,77 +27,6 @@ namespace ngraph
 {
     namespace builder
     {
-        /// \brief      Change shape of a value
-        ///
-        /// \param[in]  value  The value to be reshaped.
-        /// \param[in]  shape  The new shape.
-        ///
-        /// \return     The reshaped value.
-        ///
-        NGRAPH_API
-        std::shared_ptr<Node> reshape(const Output<Node>& value, const Shape& shape);
-
-        /// \brief Permute axes according to specified axes_order parameter.
-        ///
-        /// \param value The value whose axes we want to permute.
-        /// \param axes_order The permutation of axes.
-        ///
-        /// \return: Value with permuted axes.
-        std::shared_ptr<Node> reorder_axes(const Output<Node>& value,
-                                           std::vector<size_t> axes_order = {});
-
-        /// \brief Return transposed value (with axes in reversed order).
-        ///
-        /// \param value Value to transpose.
-        ///
-        /// \return: Value with reversed dimensions.
-        std::shared_ptr<Node> transpose(const Output<Node>& value);
-
-        /// \brief Flatten a value into a 2D matrix, with a static dividing axis.
-        ///
-        /// \param value The tensor to be flattened.
-        /// \param axis  The axis dividing shape.
-        ///
-        /// \return The new value will be a 2D matrix representing the flattened input node.
-        NGRAPH_API
-        std::shared_ptr<Node> flatten(const Output<Node>& value, int axis);
-
-        /// \brief      Remove empty axes from input tensor.
-        ///
-        /// \param[in]  value  The value to be squeezed.
-        /// \param[in]  axes   The vector defining indexes of axes to be removed.
-        ///
-        /// \return     The squeezed node.
-        ///
-        NGRAPH_API
-        std::shared_ptr<Node> squeeze(const Output<Node>& value,
-                                      std::vector<std::size_t> axes = {0});
-
-        /// \brief      Collapse specified axes into single one.
-        ///
-        /// \note       Collapsed axes create a continuous range starting from outermost axis.
-        ///
-        /// \param[in]  value       The value to be reshaped.
-        /// \param[in]  start_axis  The start axis index.
-        /// \param[in]  end_axis    The end axis (inclusive) index.
-        ///
-        /// \return     The node with collapsed specified axes.
-        ///
-        std::shared_ptr<Node> collapse(const Output<Node>& value,
-                                       const std::size_t start_axis,
-                                       const std::size_t end_axis);
-
-        /// \brief      Expands node tensor shape with empty axis at
-        ///             specified position.
-        ///
-        /// \param[in]  value  The value to be expanded.
-        /// \param[in]  axis   The position in the expanded axes where the
-        ///                    new axis is placed.
-        ///
-        /// \return     The node with added empty axis.
-        ///
-        std::shared_ptr<Node> expand_dims(const Output<Node>& value, std::size_t axis = 0);
-
         namespace opset1
         {
             /// \brief      Change shape of a value
diff --git a/ngraph/core/src/builder/autobroadcast.cpp b/ngraph/core/src/builder/autobroadcast.cpp
index 18a6eec..8f91890 100644 (file)
@@ -171,7 +171,7 @@ namespace ngraph
 
             if (squeezed_shape != value.get_shape())
             {
-                broadcasted_node = builder::reshape(value, squeezed_shape);
+                broadcasted_node = builder::opset1::reshape(value, squeezed_shape);
             }
 
             if (!broadcast_axes.empty())
diff --git a/ngraph/core/src/builder/matmul_factory.cpp b/ngraph/core/src/builder/matmul_factory.cpp
index 10634db..4c27078 100644 (file)
@@ -17,6 +17,7 @@
 #include <cstddef>
 #include <iterator>
 #include <memory>
+#include <numeric>
 
 #include "ngraph/builder/autobroadcast.hpp"
 #include "ngraph/builder/make_constant.hpp"
@@ -55,7 +56,7 @@ static Output<Node> get_sub_matrix(const Output<Node>& node, size_t idx)
 
     auto sub_matrix = Output<Node>{make_shared<op::Slice>(node, lower_bounds, upper_bounds)};
     // Remove first single entry dim.
-    return builder::squeeze(sub_matrix);
+    return builder::opset1::squeeze(sub_matrix);
 }
 
 Output<Node> builder::MatmulFactory::get_left()
@@ -70,6 +71,19 @@ Output<Node> builder::MatmulFactory::get_right()
 
 OutputVector builder::MatmulFactory::make_matmul_op()
 {
+    auto collapse = [](const Output<Node>& value, const size_t start_axis, const size_t end_axis) {
+        auto shape = value.get_shape();
+        size_t collapsed_axis_size = accumulate(next(begin(shape), start_axis),
+                                                next(begin(shape), end_axis + 1),
+                                                1UL,
+                                                multiplies<size_t>());
+
+        Shape output_shape{collapsed_axis_size};
+        output_shape.insert(end(output_shape), next(begin(shape), end_axis + 1), end(shape));
+        return make_shared<op::Reshape>(
+                   value, get_default_order(value.get_shape().size()), output_shape)
+            ->add_provenance_group_members_above({value});
+    };
     auto left = get_left();
     auto right = get_right();
 
@@ -104,11 +118,11 @@ OutputVector builder::MatmulFactory::make_matmul_op()
     // This will make easier further dot product calculations.
     if (left_shape.size() > 3)
     {
-        left = builder::collapse(left, 0, left_shape.size() - 3);
+        left = collapse(left, 0, left_shape.size() - 3);
     }
     if (right_shape.size() > 3)
     {
-        right = builder::collapse(right, 0, right_shape.size() - 3);
+        right = collapse(right, 0, right_shape.size() - 3);
     }
 
     // Perform multiple small dot products
@@ -129,7 +143,7 @@ OutputVector builder::MatmulFactory::make_matmul_op()
 
         // Expand sub_dot result with single empty outermost axis, in order to
         // later concatenate sub_dots at this axis.
-        small_dots.at(g) = builder::expand_dims(sub_dot);
+        small_dots.at(g) = builder::opset1::expand_dims(sub_dot);
     }
 
     // Concatenate sub_dots on groups axis.
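
For reference, the inlined collapse lambda reproduces the removed
builder::collapse helper: it folds the dimensions in the inclusive range
[start_axis, end_axis] into a single leading dimension and keeps the
trailing ones. A worked example with a hypothetical shape:

    // value shape {2, 3, 4, 5}, collapse(value, 0, 1):
    //   collapsed_axis_size = 2 * 3 = 6
    //   output_shape        = {6, 4, 5}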
diff --git a/ngraph/core/src/builder/quantization_utils.cpp b/ngraph/core/src/builder/quantization_utils.cpp
deleted file mode 100644 (file)
index 2b5c2c1..0000000
+++ /dev/null
@@ -1,204 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#include "quantization_utils.hpp"
-
-namespace ngraph
-{
-    namespace builder
-    {
-        namespace quantization_utils
-        {
-            std::shared_ptr<Node> max_abs(const Output<Node>& a, const Output<Node>& b)
-            {
-                auto abs_a = std::make_shared<op::Abs>(a);
-                auto abs_b = std::make_shared<op::Abs>(b);
-                return std::make_shared<op::Maximum>(abs_a, abs_b)
-                    ->add_provenance_group_members_above({a, b});
-            }
-
-            std::shared_ptr<Node> get_scale(const Output<Node>& input_min_range,
-                                            const Output<Node>& input_max_range,
-                                            const ngraph::element::Type& quant_type,
-                                            bool bump_by_eps)
-            {
-                auto type = input_min_range.get_element_type();
-                if (type != input_max_range.get_element_type())
-                {
-                    throw ngraph_error("get_scale: min and max must have same type");
-                }
-
-                auto shape = input_min_range.get_shape();
-                if (shape != input_max_range.get_shape())
-                {
-                    throw ngraph_error("get_scale: min and max must have same shape");
-                }
-
-                auto min_range = input_min_range;
-                auto max_range = input_max_range;
-
-                if (bump_by_eps)
-                {
-                    auto zero = make_constant(type, shape, 0);
-                    min_range = std::make_shared<op::Minimum>(zero, input_min_range);
-
-                    auto max_abs_input_range = max_abs(input_min_range, input_max_range);
-
-                    auto one = make_constant(type, shape, 1);
-                    auto hundred = make_constant(type, shape, 100);
-                    auto epsilon =
-                        std::make_shared<op::Maximum>(one, max_abs_input_range) / hundred;
-
-                    max_range = std::make_shared<op::Maximum>(input_max_range, min_range + epsilon);
-                    max_range = std::make_shared<op::Maximum>(zero, max_range);
-                }
-
-                size_t bw = quant_type.bitwidth();
-                float range = static_cast<float>(
-                    (quant_type.is_signed() ? std::pow(2, (bw - 1)) : std::pow(2, bw)) - 1);
-
-                auto max_abs_range = max_abs(min_range, max_range);
-                auto target_range = make_constant(type, shape, range);
-
-                return (max_abs_range / target_range)
-                    ->add_provenance_group_members_above({input_min_range, input_max_range});
-            }
-
-            std::shared_ptr<Node> get_bias_scale(Output<Node> min_input,
-                                                 Output<Node> max_input,
-                                                 Output<Node> min_filter,
-                                                 Output<Node> max_filter)
-            {
-                auto type = min_input.get_element_type();
-                if (type != max_input.get_element_type() || type != min_filter.get_element_type() ||
-                    type != max_filter.get_element_type())
-                {
-                    throw ngraph_error("get_bias_scale: min and max must have same type");
-                }
-
-                auto shape = min_input.get_shape();
-                if (shape != max_input.get_shape() || shape != min_filter.get_shape() ||
-                    shape != max_filter.get_shape())
-                {
-                    throw ngraph_error("get_bias_scale: min and max must have same shape");
-                }
-
-                auto max_abs_input_range = max_abs(min_input, max_input);
-                auto max_abs_filter_range = max_abs(min_filter, max_filter);
-                auto range = make_constant(type,
-                                           shape,
-                                           std::numeric_limits<uint8_t>::max() *
-                                               std::numeric_limits<int8_t>::max());
-
-                // Inverting the scale calculation here as the Quantize op passes scale as 1/scale.
-                return (max_abs_input_range * max_abs_filter_range) / range;
-            }
-
-            std::shared_ptr<Node> get_sum_scale(Output<Node> min_freezed_output_conv_1,
-                                                Output<Node> max_freezed_output_conv_1,
-                                                Output<Node> min_freezed_output_conv_2,
-                                                Output<Node> max_freezed_output_conv_2)
-            {
-                auto type = min_freezed_output_conv_1.get_element_type();
-                if (type != max_freezed_output_conv_1.get_element_type() ||
-                    type != min_freezed_output_conv_2.get_element_type() ||
-                    type != max_freezed_output_conv_2.get_element_type())
-                {
-                    throw ngraph_error("get_sum_scale: min and max must have same type");
-                }
-
-                auto shape = min_freezed_output_conv_1.get_shape();
-                if (shape != max_freezed_output_conv_1.get_shape() ||
-                    shape != min_freezed_output_conv_2.get_shape() ||
-                    shape != max_freezed_output_conv_2.get_shape())
-                {
-                    throw ngraph_error("get_sum_scale: min and max must have same shape");
-                }
-
-                auto max_abs_conv_1 = max_abs(min_freezed_output_conv_1, max_freezed_output_conv_1);
-                auto max_abs_conv_2 = max_abs(min_freezed_output_conv_2, max_freezed_output_conv_2);
-                return max_abs_conv_2 / max_abs_conv_1;
-            }
-
-            std::shared_ptr<Node> get_dot_scale(Output<Node> min_input,
-                                                Output<Node> max_input,
-                                                Output<Node> min_filter,
-                                                Output<Node> max_filter,
-                                                Output<Node> min_freezed_output,
-                                                Output<Node> max_freezed_output,
-                                                const ngraph::element::Type& input_type,
-                                                const ngraph::element::Type& output_type,
-                                                const bool requantize)
-            {
-                auto type = min_input.get_element_type();
-                if (type != max_input.get_element_type() || type != min_filter.get_element_type() ||
-                    type != max_filter.get_element_type() ||
-                    type != min_freezed_output.get_element_type() ||
-                    type != max_freezed_output.get_element_type())
-                {
-                    throw ngraph_error("get_dot_scale: min and max must have same type");
-                }
-
-                auto shape = min_input.get_shape();
-                if (shape != max_input.get_shape() || shape != min_filter.get_shape() ||
-                    shape != max_filter.get_shape() || shape != min_freezed_output.get_shape() ||
-                    shape != max_freezed_output.get_shape())
-                {
-                    throw ngraph_error("get_dot_scale: min and max must have same shape");
-                }
-                auto data_scale = get_scale(min_input, max_input, input_type);
-                auto weight_scale = get_scale(min_filter, max_filter, element::i8);
-                auto out_scale = get_scale(min_freezed_output, max_freezed_output, output_type);
-                if (requantize)
-                {
-                    return data_scale * weight_scale / out_scale;
-                }
-                else
-                {
-                    return data_scale * weight_scale;
-                }
-            }
-
-            void check_concat(const OutputVector& args,
-                              const OutputVector& mins,
-                              const OutputVector& maxs)
-            {
-                auto size = args.size();
-                if (size != mins.size() || size != maxs.size())
-                {
-                    throw ngraph_error("Min and Max node vectors must be of same length");
-                }
-                for (size_t i = 0; i < size; i++)
-                {
-                    auto min = mins[i];
-                    auto max = maxs[i];
-                    auto type = min.get_element_type();
-                    if (type != max.get_element_type())
-                    {
-                        throw ngraph_error("check_concat: min and max must have same type");
-                    }
-
-                    if (min.get_shape() != Shape{1} || max.get_shape() != Shape{1})
-                    {
-                        throw ngraph_error("check_concat: min/max shape not Shape{1}: " +
-                                           vector_to_string(min.get_shape()) +
-                                           vector_to_string(max.get_shape()));
-                    }
-                }
-            }
-        }
-    }
-}
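
For context on the removed get_scale: it maps a float range to a
quantization step size, max_abs(min_range, max_range) / (2^(bw-1) - 1) for
a signed quant_type of bitwidth bw, or / (2^bw - 1) when unsigned. A worked
example with hypothetical values:

    // min = -2.0f, max = 4.0f, quant_type = element::i8 (bw = 8, signed):
    //   target_range = 2^7 - 1 = 127
    //   scale        = max(|-2.0|, |4.0|) / 127 = 4.0 / 127 ≈ 0.0315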
diff --git a/ngraph/core/src/builder/quantized_concat_builder.cpp b/ngraph/core/src/builder/quantized_concat_builder.cpp
deleted file mode 100644 (file)
index 97a1916..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#include <memory>
-
-#include "ngraph/builder/quantized_concat_builder.hpp"
-
-using namespace std;
-using namespace ngraph;
-
-namespace ngraph
-{
-    namespace builder
-    {
-        shared_ptr<Node> QuantizedConcatBuilder(const OutputVector& args,
-                                                size_t concatenation_axis,
-                                                const OutputVector& mins,
-                                                const OutputVector& maxs)
-        {
-            quantization_utils::check_concat(args, mins, maxs);
-            auto quant_type = args[0].get_element_type();
-
-            // output scale
-            auto min = make_shared<op::Min>(make_shared<op::Concat>(mins, 0), ngraph::AxisSet{0});
-            auto max = make_shared<op::Max>(make_shared<op::Concat>(maxs, 0), ngraph::AxisSet{0});
-            auto out_scale = quantization_utils::get_scale(min, max, quant_type);
-
-            OutputVector rescaled_args(args.size());
-            for (size_t i = 0; i < args.size(); ++i)
-            {
-                auto q_type = args[i].get_element_type();
-                auto in_scale = make_shared<ngraph::op::Reshape>(
-                    quantization_utils::get_scale(mins[i], maxs[i], q_type),
-                    AxisVector{0},
-                    Shape{});
-                auto zero = make_constant(q_type, in_scale->get_output_shape(0), 0);
-
-                rescaled_args[i] =
-                    make_shared<op::Dequantize>(args[i], in_scale, zero, element::f32, AxisSet{});
-                rescaled_args[i] =
-                    make_shared<op::Quantize>(rescaled_args[i],
-                                              out_scale,
-                                              zero,
-                                              q_type,
-                                              AxisSet{},
-                                              op::Quantize::RoundMode::ROUND_NEAREST_TOWARD_EVEN);
-            }
-            OutputVector base = args;
-            for (auto node : mins)
-            {
-                base.push_back(node);
-            };
-            for (auto node : maxs)
-            {
-                base.push_back(node);
-            };
-            return make_shared<op::Concat>(rescaled_args, concatenation_axis)
-                ->add_provenance_group_members_above(base);
-        }
-    }
-}
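
The removed QuantizedConcatBuilder normalized all inputs to a common scale
before concatenating: it derived a shared out_scale from the Min/Max over
all per-input ranges, then passed each argument through Dequantize with its
own in_scale followed by Quantize with out_scale, so the concatenated
tensor is uniformly quantized. In outline:

    // per input i:  f32_i = Dequantize(args[i], in_scale_i, zero)
    //               q_i   = Quantize(f32_i, out_scale, zero, ROUND_NEAREST_TOWARD_EVEN)
    // result:       Concat({q_0, ..., q_n}, concatenation_axis)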
diff --git a/ngraph/core/src/builder/reduce_ops.cpp b/ngraph/core/src/builder/reduce_ops.cpp
index 96f8dd2..9c92576 100644 (file)
@@ -56,75 +56,6 @@ namespace ngraph
                 dim_values, ngraph::opset1::Constant::create(element::i64, {}, {0}));
         }
 
-        std::shared_ptr<Node> l2_norm(const Output<Node>& node, const AxisSet& reduction_axes)
-        {
-            auto x2 = node * node;
-            auto x2sum = std::make_shared<op::Sum>(x2, reduction_axes);
-
-            return std::make_shared<op::Sqrt>(x2sum)->add_provenance_group_members_above({node});
-        }
-
-        std::shared_ptr<Node> mean(const Output<Node>& value, const AxisSet& reduction_axes)
-        {
-            auto xsum = std::make_shared<op::Sum>(value, reduction_axes);
-
-            auto N = get_num_elements(value.get_shape(), reduction_axes);
-            const auto& et = value.get_element_type();
-
-            auto divisor = op::Constant::create(et, xsum->get_shape(), {N});
-
-            return (xsum / divisor)->add_provenance_group_members_above({value});
-        }
-
-        std::shared_ptr<Node> std_dev(const Output<Node>& node,
-                                      const AxisSet& reduction_axes,
-                                      const bool bessel_correction)
-        {
-            return std::make_shared<op::Sqrt>(variance(node, reduction_axes, bessel_correction))
-                ->add_provenance_group_members_above({node});
-        }
-
-        // This currently calculates [E[X^2] - E[X]^2] instead of [E[(X-\mu)^2]]
-        // The second might be more numerically stable/easier to pattern match
-        // It also requires adding a broadcast op, and would probably be slower
-        // TODO(mbrookhart): Switch to E[(X-\mu)^2]?
-        std::shared_ptr<Node> variance(const Output<Node>& value,
-                                       const AxisSet& reduction_axes,
-                                       const bool bessel_correction)
-        {
-            std::shared_ptr<Node> mu = mean(value, reduction_axes);
-
-            auto reshape = value.get_shape();
-            for (auto i : reduction_axes)
-            {
-                reshape[i] = 1;
-            }
-
-            ngraph::AxisVector order = ngraph::get_default_order(mu->get_shape());
-
-            mu = std::make_shared<op::Reshape>(mu, order, reshape);
-
-            Output<Node> diff = make_with_numpy_broadcast<op::Subtract>(value, mu);
-
-            diff = std::make_shared<op::Sum>(diff * diff, reduction_axes);
-
-            const auto& et = value.get_element_type();
-            auto N = get_num_elements(value.get_shape(), reduction_axes);
-
-            std::shared_ptr<Node> result;
-            if (bessel_correction)
-            {
-                auto N1const = op::Constant::create(et, diff.get_shape(), {N - 1});
-                result = diff / N1const;
-            }
-            else
-            {
-                auto Nconst = op::Constant::create(et, diff.get_shape(), {N});
-                result = diff / Nconst;
-            }
-            return result->add_provenance_group_members_above({value});
-        }
-
         std::shared_ptr<Node> builder::opset1::mean(const Output<Node>& value,
                                                     const AxisSet& reduction_axes,
                                                     bool keep_dims)
diff --git a/ngraph/core/src/builder/reshape.cpp b/ngraph/core/src/builder/reshape.cpp
index d0e19fb..26ce7bf 100644 (file)
 using namespace ngraph;
 using namespace std;
 
-shared_ptr<Node> builder::reshape(const Output<Node>& value, const Shape& shape)
-{
-    return make_shared<op::Reshape>(value, get_default_order(value.get_shape().size()), shape)
-        ->add_provenance_group_members_above({value});
-}
-
-shared_ptr<Node> builder::reorder_axes(const Output<Node>& value, vector<size_t> axes_order)
-{
-    Shape out_shape = value.get_shape();
-    if (axes_order.empty())
-    {
-        axes_order.resize(out_shape.size());
-        iota(begin(axes_order), end(axes_order), 0);
-    }
-    else
-    {
-        for (size_t i = 0; i < axes_order.size(); ++i)
-        {
-            out_shape[i] = value.get_shape().at(axes_order.at(i));
-        }
-    }
-
-    auto axis_vector = AxisVector{begin(axes_order), end(axes_order)};
-    return make_shared<op::Reshape>(value, axis_vector, out_shape)
-        ->add_provenance_group_members_above({value});
-}
-
-shared_ptr<Node> builder::transpose(const Output<Node>& value)
-{
-    vector<size_t> axes_order(value.get_shape().size());
-    iota(begin(axes_order), end(axes_order), 0);
-    reverse(begin(axes_order), end(axes_order));
-    return builder::reorder_axes(value, axes_order);
-}
-
-shared_ptr<Node> builder::flatten(const Output<Node>& value, int axis)
-{
-    auto data_shape = value.get_shape();
-
-    // First dimension of output tensor is the product of [d_0, ... d_{axis-1}] dimensions of input
-    // tensor. The last dimension is the product of the rest of input tensor dimensions:
-    // [d_{axis}, ..., d_n]
-    size_t first_dim_size =
-        accumulate(begin(data_shape), next(begin(data_shape), axis), 1UL, multiplies<size_t>());
-
-    size_t last_dim_size =
-        accumulate(next(begin(data_shape), axis), end(data_shape), 1UL, multiplies<size_t>());
-
-    return make_shared<op::Reshape>(
-               value, get_default_order(data_shape.size()), Shape{first_dim_size, last_dim_size})
-        ->add_provenance_group_members_above({value});
-}
-
-shared_ptr<Node> builder::squeeze(const Output<Node>& value, vector<size_t> axes)
-{
-    if (axes.empty())
-    {
-        return value.get_node_shared_ptr();
-    }
-
-    Shape in_shape{value.get_shape()};
-    for (size_t idx = 0; idx < axes.size(); ++idx)
-    {
-        in_shape.at(axes.at(idx)) = 0;
-    }
-    Shape output_shape;
-    for (auto axis : in_shape)
-    {
-        if (axis != 0)
-        {
-            output_shape.push_back(axis);
-        }
-    }
-    return builder::reshape(value, output_shape);
-}
-
-shared_ptr<Node>
-    builder::collapse(const Output<Node>& value, const size_t start_axis, const size_t end_axis)
-{
-    auto shape = value.get_shape();
-    size_t collapsed_axis_size = accumulate(next(begin(shape), start_axis),
-                                            next(begin(shape), end_axis + 1),
-                                            1UL,
-                                            multiplies<size_t>());
-
-    Shape output_shape{collapsed_axis_size};
-    output_shape.insert(end(output_shape), next(begin(shape), end_axis + 1), end(shape));
-    return builder::reshape(value, output_shape);
-}
-
-shared_ptr<Node> builder::expand_dims(const Output<Node>& value, size_t axis)
-{
-    Shape output_shape(value.get_shape());
-    // Add empty axis at specified position.
-    auto empty_axis_it = begin(output_shape);
-    advance(empty_axis_it, axis);
-    output_shape.insert(empty_axis_it, 1);
-    return make_shared<op::Reshape>(
-               value, get_default_order(value.get_shape().size()), output_shape)
-        ->add_provenance_group_members_above({value});
-}
-
-NGRAPH_API
 shared_ptr<Node> builder::opset1::reshape(const Output<Node>& value, const Shape& shape)
 {
     if (value.get_partial_shape().same_scheme(shape))
diff --git a/ngraph/core/src/op/fused/grn.cpp b/ngraph/core/src/op/fused/grn.cpp
index 5aa3070..27a2875 100644 (file)
@@ -72,7 +72,7 @@ OutputVector op::GRN::decompose_op() const
     {
         Shape data_shape(4 - input_shape.size(), 1);
         copy(begin(input_shape), end(input_shape), back_inserter(data_shape));
-        data = builder::reshape(data, data_shape);
+        data = builder::opset1::reshape(data, data_shape);
     }
 
     const auto axis_set_const = op::Constant::create(element::i64, {}, {1});
@@ -85,7 +85,7 @@ OutputVector op::GRN::decompose_op() const
     // get back original input tensor rank
     if (input_shape.size() != 4)
     {
-        data = builder::reshape(data, input_shape);
+        data = builder::opset1::reshape(data, input_shape);
     }
 
     return OutputVector{data};
diff --git a/ngraph/core/src/op/fused/gru_cell.cpp b/ngraph/core/src/op/fused/gru_cell.cpp
index 963d87d..3bad7cc 100644 (file)
@@ -207,8 +207,8 @@ OutputVector op::v3::GRUCell::decompose_op() const
     Output<Node> B = input_value(4);
 
     // Xt*(W^T)
-    auto Xt_W = make_shared<op::Dot>(X, builder::transpose(W));
-    auto R_transpose = builder::transpose(R);
+    auto Xt_W = make_shared<op::Dot>(X, builder::opset1::transpose(W));
+    auto R_transpose = builder::opset1::transpose(R);
     // Ht-1*(R^T)
     auto Ht_R = make_shared<op::Dot>(H_t, R_transpose);
 
diff --git a/ngraph/core/src/op/fused/lstm_cell.cpp b/ngraph/core/src/op/fused/lstm_cell.cpp
index 777c53f..f90de39 100644 (file)
@@ -285,9 +285,9 @@ OutputVector op::LSTMCell::decompose_op() const
     const auto& p_f = p_iof.at(2);
 
     // Xt*(W^T) -- for [iofc] gates.
-    auto Xt_W = make_shared<op::Dot>(X, builder::transpose(W));
+    auto Xt_W = make_shared<op::Dot>(X, builder::opset1::transpose(W));
     // Ht-1*(R^T)  -- for [iofc] gates.
-    auto Ht_R = make_shared<op::Dot>(H_t, builder::transpose(R));
+    auto Ht_R = make_shared<op::Dot>(H_t, builder::opset1::transpose(R));
     // Xt*(W^T) + Ht-1*(R^T) + Wb + Rb  -- for [iofc] gates.
     auto gates = add(Xt_W, add(Ht_R, bias));
 
diff --git a/ngraph/core/src/op/fused/matmul.cpp b/ngraph/core/src/op/fused/matmul.cpp
index bce107a..239231c 100644 (file)
@@ -84,7 +84,7 @@ OutputVector op::MatMul::decompose_op() const
         iota(axes_order.begin(), axes_order.end(), 0);
         // transpose the last 2 spatial dims
         swap(axes_order[a_rank - 1], axes_order[a_rank - 2]);
-        A = builder::reorder_axes(A, axes_order);
+        A = builder::opset1::reorder_axes(A, axes_order);
     }
 
     if (m_transpose_b && b_rank >= 2)
@@ -92,7 +92,7 @@ OutputVector op::MatMul::decompose_op() const
         vector<size_t> axes_order(b_rank);
         iota(axes_order.begin(), axes_order.end(), 0);
         swap(axes_order[b_rank - 1], axes_order[b_rank - 2]);
-        B = builder::reorder_axes(B, axes_order);
+        B = builder::opset1::reorder_axes(B, axes_order);
     }
 
     builder::MatmulFactory factory({A, B});
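
The transpose of the trailing pair of dimensions is expressed through
opset1::reorder_axes with an identity permutation whose last two entries
are swapped, e.g. for a hypothetical rank-3 input:

    // a_rank = 3: axes_order starts as {0, 1, 2}; after swap -> {0, 2, 1}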
diff --git a/ngraph/core/src/op/fused/mvn.cpp b/ngraph/core/src/op/fused/mvn.cpp
index 04621a7..dc327a9 100644 (file)
@@ -76,7 +76,7 @@ OutputVector op::MVN::decompose_op() const
     auto data_shape = data.get_shape(); // assume that data has n and c channels.
 
     // calculate mean normalization
-    auto mean = builder::mean(data, m_reduction_axes);
+    auto mean = builder::opset1::mean(data, m_reduction_axes);
     mean = std::make_shared<op::Broadcast>(mean, data_shape, m_reduction_axes);
     auto mean_normalization = data - mean;
 
@@ -87,7 +87,7 @@ OutputVector op::MVN::decompose_op() const
     else
     {
         // calculate variance
-        auto variance = builder::variance(data, m_reduction_axes);
+        auto variance = builder::opset1::variance(data, m_reduction_axes);
         variance = make_shared<op::Sqrt>(variance);
         // add epsilon
         auto eps_node = op::Constant::create(
diff --git a/ngraph/core/src/op/fused/rnn_cell.cpp b/ngraph/core/src/op/fused/rnn_cell.cpp
index 822eaa0..0579e95 100644 (file)
@@ -180,9 +180,9 @@ OutputVector op::RNNCell::decompose_op() const
     Output<Node> bias = input_value(4);
 
     // Xt*(W^T)
-    auto Xt_W = std::make_shared<op::Dot>(X, builder::transpose(W));
+    auto Xt_W = std::make_shared<op::Dot>(X, builder::opset1::transpose(W));
     // Ht-1*(R^T)
-    auto Ht_R = std::make_shared<op::Dot>(H_t, builder::transpose(R));
+    auto Ht_R = std::make_shared<op::Dot>(H_t, builder::opset1::transpose(R));
     // Xt*(W^T) + Ht-1*(R^T) + Wb + Rb
     auto i_t = add(Xt_W, add(Ht_R, bias));
 
diff --git a/ngraph/core/src/op/fused/shuffle_channels.cpp b/ngraph/core/src/op/fused/shuffle_channels.cpp
index ca1ded0..ade7c81 100644 (file)
@@ -90,10 +90,10 @@ OutputVector op::ShuffleChannels::decompose_op() const
     const auto data = input_value(0);
     const auto& data_shape = data.get_shape();
 
-    const auto reshaped = builder::reshape(data, get_pre_shuffle_shape(data_shape));
-    const auto shuffled = builder::reorder_axes(reshaped, {0, 2, 1, 3});
+    const auto reshaped = builder::opset1::reshape(data, get_pre_shuffle_shape(data_shape));
+    const auto shuffled = builder::opset1::reorder_axes(reshaped, {0, 2, 1, 3});
 
-    return {builder::reshape(shuffled, data_shape)};
+    return {builder::opset1::reshape(shuffled, data_shape)};
 }
 
 shared_ptr<Node> op::ShuffleChannels::clone_with_new_inputs(const OutputVector& new_args) const
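
This is the usual channel-shuffle decomposition: reshape into a 4-D
{outer, group, channels/group, rest} layout (which get_pre_shuffle_shape
presumably computes; its body is outside this diff), swap the two middle
axes via reorder_axes {0, 2, 1, 3}, then reshape back to the original
data_shape.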
diff --git a/ngraph/test/builder.cpp b/ngraph/test/builder.cpp
index a86b570..8658b0c 100644 (file)
@@ -75,34 +75,3 @@ shared_ptr<runtime::Tensor> make_reduce_result_false(
 
     return result;
 }
-
-TEST(builder, l2_norm)
-{
-    auto result = make_reduce_result(builder::l2_norm);
-    ASSERT_TRUE(test::all_close((vector<float>{5.9160797831f, 7.48331477355f}),
-                                read_vector<float>(result)));
-}
-
-TEST(builder, mean)
-{
-    auto result = make_reduce_result(builder::mean);
-    ASSERT_TRUE(test::all_close((vector<float>{3, 4}), read_vector<float>(result)));
-}
-
-TEST(builder, std_dev)
-{
-    auto result = make_reduce_result_false(builder::std_dev);
-    ASSERT_TRUE(test::all_close((vector<float>{1.63299316186f, 1.63299316186f}),
-                                read_vector<float>(result)));
-    result = make_reduce_result_true(builder::std_dev);
-    ASSERT_TRUE(test::all_close((vector<float>{2, 2}), read_vector<float>(result)));
-}
-
-TEST(builder, variance)
-{
-    auto result = make_reduce_result_false(builder::variance);
-    ASSERT_TRUE(test::all_close((vector<float>{2.66666666666f, 2.66666666666f}),
-                                read_vector<float>(result)));
-    result = make_reduce_result_true(builder::variance);
-    ASSERT_TRUE(test::all_close((vector<float>{4, 4}), read_vector<float>(result)));
-}
diff --git a/ngraph/test/runtime/pass/opset0_downgrade.cpp b/ngraph/test/runtime/pass/opset0_downgrade.cpp
index 7ecc7b5..f6dd906 100644 (file)
@@ -173,7 +173,31 @@ namespace
         }
         if (!empty_axes.empty())
         {
-            squeezed_arg = builder::squeeze(arg, empty_axes);
+            auto v0squeeze = [](const Output<Node>& value, vector<size_t> axes) {
+                if (axes.empty())
+                {
+                    return value.get_node_shared_ptr();
+                }
+
+                Shape in_shape{value.get_shape()};
+                for (size_t idx = 0; idx < axes.size(); ++idx)
+                {
+                    in_shape.at(axes.at(idx)) = 0;
+                }
+                Shape output_shape;
+                for (auto axis : in_shape)
+                {
+                    if (axis != 0)
+                    {
+                        output_shape.push_back(axis);
+                    }
+                }
+                return make_shared<op::Reshape>(
+                           value, get_default_order(value.get_shape().size()), output_shape)
+                    ->add_provenance_group_members_above({value});
+
+            };
+            squeezed_arg = v0squeeze(arg, empty_axes);
         }
 
         replacement_node =
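
The downgrade pass inlines the removed v0 squeeze logic as a lambda rather
than calling builder::opset1::squeeze, presumably because the pass must
emit opset0 nodes and the lambda builds a plain v0 op::Reshape. A worked
example of its shape computation with a hypothetical input:

    // v0squeeze(arg, {0, 2}) on Shape{1, 3, 1, 2}:
    //   in_shape with squeezed axes zeroed: {0, 3, 0, 2}
    //   output_shape (non-zero dims kept):  {3, 2}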
diff --git a/ngraph/test/runtime/pass/opset1_upgrade.cpp b/ngraph/test/runtime/pass/opset1_upgrade.cpp
index b0d9872..db0836e 100644 (file)
@@ -209,7 +209,7 @@ namespace
             filters_shape[0] /= groups;
             filters_shape.insert(filters_shape.begin(), groups);
 
-            auto reshaped_filters = builder::reshape(node->input_value(1), filters_shape);
+            auto reshaped_filters = builder::opset1::reshape(node->input_value(1), filters_shape);
 
             replacement_node = make_shared<op::v1::GroupConvolution>(node->input_value(0),
                                                                      reshaped_filters,
@@ -251,7 +251,7 @@ namespace
 
         filters_shape[0] /= groups;
         filters_shape.insert(filters_shape.begin(), groups);
-        auto reshaped_filters = builder::reshape(node->input_value(1), filters_shape);
+        auto reshaped_filters = builder::opset1::reshape(node->input_value(1), filters_shape);
 
         auto replacement_node = make_shared<op::v1::GroupConvolutionBackpropData>(
             node->input_value(2),