Remove obsoleted v0::Slice and v0::Split ops (#2908)
author    Mateusz Tabaka <mateusz.tabaka@intel.com>
Thu, 5 Nov 2020 06:11:45 +0000 (07:11 +0100)
committer    GitHub <noreply@github.com>
Thu, 5 Nov 2020 06:11:45 +0000 (09:11 +0300)
* Remove the obsolete v0::Slice op; existing uses migrate to v1::StridedSlice (see the sketch below)

* Remove the deprecated v0::Split op; existing uses migrate to v1::Split (see the sketch after the file list)

* Fix build_graph tests
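
For reference, a minimal sketch of the v0::Slice -> v1::StridedSlice migration pattern applied throughout this patch, based on the updated algebraic_simplification and nop_elimination tests: the Coordinate/Strides attributes become i64 Constant inputs, and all-zero begin/end masks keep the explicit bounds. The wrapper function name is illustrative only, not part of the patch.

    // Old (removed): make_shared<op::Slice>(a, Coordinate{0, 0}, Coordinate{32, 100}, Strides{1, 1})
    #include "ngraph/ngraph.hpp"

    using namespace ngraph;

    // Illustrative helper, not part of the patch.
    std::shared_ptr<Node> make_strided_slice(const Output<Node>& a)
    {
        // Bounds and strides are now i64 Constant inputs rather than node attributes.
        auto begin = op::Constant::create(element::i64, {2}, {0, 0});
        auto end = op::Constant::create(element::i64, {2}, {32, 100});
        auto strides = op::Constant::create(element::i64, {2}, {1, 1});
        // All-zero begin/end masks: every axis uses the explicit begin/end values,
        // matching the old v0::Slice semantics.
        std::vector<int64_t> mask(2, 0);
        return std::make_shared<op::v1::StridedSlice>(a, begin, end, strides, mask, mask);
    }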

31 files changed:
inference-engine/src/transformations/src/transformations/common_optimizations/nop_elimination.cpp
inference-engine/tests/functional/inference_engine/transformations/algebraic_simplification.cpp
inference-engine/tests/functional/inference_engine/transformations/nop_elimination.cpp
ngraph/core/builder/src/builder/matmul_factory.cpp
ngraph/core/builder/src/builder/split.cpp
ngraph/core/include/ngraph/op/op_version_tbl.hpp
ngraph/core/include/ngraph/op/slice.hpp [deleted file]
ngraph/core/include/ngraph/op/split.hpp
ngraph/core/include/ngraph/ops.hpp
ngraph/core/src/op/concat.cpp
ngraph/core/src/op/group_conv.cpp
ngraph/core/src/op/slice.cpp [deleted file]
ngraph/core/src/op/split.cpp
ngraph/frontend/onnx_import/src/op/conv.cpp
ngraph/test/CMakeLists.txt
ngraph/test/backend/fused_op.in.cpp
ngraph/test/backend/split.in.cpp [new file with mode: 0644]
ngraph/test/build_graph.cpp
ngraph/test/constant_folding.cpp
ngraph/test/copy.cpp
ngraph/test/op_is.cpp
ngraph/test/runtime/ie/unit_test.manifest
ngraph/test/runtime/interpreter/int_executable.hpp
ngraph/test/runtime/interpreter/opset_int_tbl.hpp
ngraph/test/runtime/op/group_conv.cpp
ngraph/test/runtime/opset0_tbl.hpp
ngraph/test/runtime/pass/dyn_elimination.cpp
ngraph/test/runtime/pass/opset0_downgrade.cpp
ngraph/test/runtime/pass/opset1_upgrade.cpp
ngraph/test/type_prop/slice.cpp [deleted file]
ngraph/test/type_prop/split.cpp
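
As referenced above, a minimal sketch of the v0::Split -> v1::Split migration, mirroring the updated build_graph tests and the new backend/split.in.cpp tests; the helper name and the even three-way split are illustrative only.

    // Old (removed): op::v0::Split was a FusedOp that decomposed into v0::Slice nodes.
    // op::v1::Split takes the same (data, axis, num_splits) arguments and produces
    // one output per chunk directly.
    #include "ngraph/ngraph.hpp"

    using namespace ngraph;

    // Illustrative helper, not part of the patch.
    OutputVector split_into_three(const Output<Node>& data)
    {
        const auto axis = op::Constant::create(element::i64, Shape{}, {0});
        const auto split = std::make_shared<op::v1::Split>(data, axis, 3);
        return split->outputs(); // three equal parts along axis 0
    }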

diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/nop_elimination.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/nop_elimination.cpp
index 0514b74..33cde3c 100644 (file)
@@ -337,7 +337,7 @@ bool pass::NopElimination::run_on_function(std::shared_ptr<Function> function) {
         dispatcher{{TI(opset3::Pad), &eliminate_nop},
                    {TI(op::v0::Sum), &eliminate_sum},
                    {TI(opset3::Convert), &eliminate_convert},
-                   {TI(op::v0::Slice), &eliminate_nop},
+                   {TI(op::v1::StridedSlice), &eliminate_nop},
                    {TI(opset3::Reshape), &eliminate_reshape_v1},
                    {TI(opset3::Concat), &eliminate_concat},
                    {TI(opset3::Squeeze), &eliminate_squeeze},
diff --git a/inference-engine/tests/functional/inference_engine/transformations/algebraic_simplification.cpp b/inference-engine/tests/functional/inference_engine/transformations/algebraic_simplification.cpp
index 1e3d5df..1b22009 100644 (file)
@@ -110,9 +110,20 @@ TEST(algebraic_simplification, multiply_sum_negative) {
 
 TEST(algebraic_simplification, concat_parameter_slices_reversed) {
     auto a = make_shared<op::Parameter>(element::f32, Shape{96, 100});
-    auto slice1 = make_shared<op::Slice>(a, Coordinate{0, 0}, Coordinate{32, 100}, Strides{1, 1});
-    auto slice2 = make_shared<op::Slice>(a, Coordinate{32, 0}, Coordinate{64, 100}, Strides{1, 1});
-    auto slice3 = make_shared<op::Slice>(a, Coordinate{64, 0}, Coordinate{96, 100}, Strides{1, 1});
+    auto strides = op::Constant::create(element::i64, {2}, {1, 1});
+    std::vector<int64_t> mask(2, 0);
+    auto slice1 = make_shared<op::v1::StridedSlice>(a,
+            op::Constant::create(element::i64, {2}, {0, 0}),
+            op::Constant::create(element::i64, {2}, {32, 100}),
+            strides, mask, mask);
+    auto slice2 = make_shared<op::v1::StridedSlice>(a,
+            op::Constant::create(element::i64, {2}, {32, 0}),
+            op::Constant::create(element::i64, {2}, {64, 100}),
+            strides, mask, mask);
+    auto slice3 = make_shared<op::v1::StridedSlice>(a,
+            op::Constant::create(element::i64, {2}, {64, 0}),
+            op::Constant::create(element::i64, {2}, {96, 100}),
+            strides, mask, mask);
 
     size_t concat_axis = 0;
     auto concat = make_shared<op::Concat>(NodeVector{slice3, slice2, slice1}, concat_axis);
@@ -128,9 +139,20 @@ TEST(algebraic_simplification, concat_parameter_slices_reversed) {
 TEST(algebraic_simplification, concat_parameter_slices_element_count) {
     auto a = make_shared<op::Parameter>(element::f32, Shape{96, 100});
     // slicing 30 elements out of 96; should trigger a check that some elements are missing
-    auto slice1 = make_shared<op::Slice>(a, Coordinate{0, 0}, Coordinate{10, 100}, Strides{1, 1});
-    auto slice2 = make_shared<op::Slice>(a, Coordinate{10, 0}, Coordinate{20, 100}, Strides{1, 1});
-    auto slice3 = make_shared<op::Slice>(a, Coordinate{20, 0}, Coordinate{30, 100}, Strides{1, 1});
+    auto strides = op::Constant::create(element::i64, {2}, {1, 1});
+    std::vector<int64_t> mask(2, 0);
+    auto slice1 = make_shared<op::v1::StridedSlice>(a,
+            op::Constant::create(element::i64, {2}, {0, 0}),
+            op::Constant::create(element::i64, {2}, {10, 100}),
+            strides, mask, mask);
+    auto slice2 = make_shared<op::v1::StridedSlice>(a,
+            op::Constant::create(element::i64, {2}, {10, 0}),
+            op::Constant::create(element::i64, {2}, {20, 100}),
+            strides, mask, mask);
+    auto slice3 = make_shared<op::v1::StridedSlice>(a,
+            op::Constant::create(element::i64, {2}, {20, 0}),
+            op::Constant::create(element::i64, {2}, {30, 100}),
+            strides, mask, mask);
 
     size_t concat_axis = 0;
     auto concat = make_shared<op::Concat>(NodeVector{slice1, slice2, slice3}, concat_axis);
@@ -145,9 +167,20 @@ TEST(algebraic_simplification, concat_parameter_slices_element_count) {
 
 TEST(algebraic_simplification, concat_parameter_non_uniform_slices) {
     auto a = make_shared<op::Parameter>(element::f32, Shape{96, 100});
-    auto slice1 = make_shared<op::Slice>(a, Coordinate{0, 0}, Coordinate{38, 100}, Strides{1, 1});
-    auto slice2 = make_shared<op::Slice>(a, Coordinate{38, 0}, Coordinate{64, 100}, Strides{1, 1});
-    auto slice3 = make_shared<op::Slice>(a, Coordinate{64, 0}, Coordinate{96, 100}, Strides{1, 1});
+    auto strides = op::Constant::create(element::i64, {2}, {1, 1});
+    std::vector<int64_t> mask(2, 0);
+    auto slice1 = make_shared<op::v1::StridedSlice>(a,
+            op::Constant::create(element::i64, {2}, {0, 0}),
+            op::Constant::create(element::i64, {2}, {38, 100}),
+            strides, mask, mask);
+    auto slice2 = make_shared<op::v1::StridedSlice>(a,
+            op::Constant::create(element::i64, {2}, {38, 0}),
+            op::Constant::create(element::i64, {2}, {64, 100}),
+            strides, mask, mask);
+    auto slice3 = make_shared<op::v1::StridedSlice>(a,
+            op::Constant::create(element::i64, {2}, {64, 0}),
+            op::Constant::create(element::i64, {2}, {96, 100}),
+            strides, mask, mask);
 
     size_t concat_axis = 0;
     auto concat = make_shared<op::Concat>(NodeVector{slice1, slice2, slice3}, concat_axis);
@@ -164,12 +197,20 @@ TEST(algebraic_simplification, concat_different_inputs) {
     auto a = make_shared<op::Parameter>(element::f32, Shape{96, 100});
     auto goe1 = -a;
     auto goe2 = -a;
-    auto slice1 =
-        make_shared<op::Slice>(goe1, Coordinate{0, 0}, Coordinate{32, 100}, Strides{1, 1});
-    auto slice2 =
-        make_shared<op::Slice>(goe2, Coordinate{32, 0}, Coordinate{64, 100}, Strides{1, 1});
-    auto slice3 =
-        make_shared<op::Slice>(goe1, Coordinate{64, 0}, Coordinate{96, 100}, Strides{1, 1});
+    auto strides = op::Constant::create(element::i64, {2}, {1, 1});
+    std::vector<int64_t> mask(2, 0);
+    auto slice1 = make_shared<op::v1::StridedSlice>(goe1,
+            op::Constant::create(element::i64, {2}, {0, 0}),
+            op::Constant::create(element::i64, {2}, {32, 100}),
+            strides, mask, mask);
+    auto slice2 = make_shared<op::v1::StridedSlice>(goe2,
+            op::Constant::create(element::i64, {2}, {32, 0}),
+            op::Constant::create(element::i64, {2}, {64, 100}),
+            strides, mask, mask);
+    auto slice3 = make_shared<op::v1::StridedSlice>(goe1,
+            op::Constant::create(element::i64, {2}, {64, 0}),
+            op::Constant::create(element::i64, {2}, {96, 100}),
+            strides, mask, mask);
 
     size_t concat_axis = 0;
     auto concat = make_shared<op::Concat>(NodeVector{slice1, slice2, slice3}, concat_axis);
diff --git a/inference-engine/tests/functional/inference_engine/transformations/nop_elimination.cpp b/inference-engine/tests/functional/inference_engine/transformations/nop_elimination.cpp
index f9d9560..49a566b 100644 (file)
@@ -70,17 +70,20 @@ TEST(nop_elimination, convert_type_agnostic) {
     ASSERT_EQ(count_ops_of_type<op::v0::Convert>(f), 0);
 }
 
-TEST(nop_elimination, eliminate_slice) {
+TEST(nop_elimination, eliminate_strided_slice) {
     Shape shape{2, 2};
     auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto s = make_shared<op::v0::Slice>(A, Coordinate{0, 0}, Coordinate{2, 2});
+    auto begin_node = op::Constant::create(element::i64, {2}, {0, 0});
+    auto end_node = op::Constant::create(element::i64, {2}, {2, 2});
+    std::vector<int64_t> mask(2, 0);
+    auto s = make_shared<op::v1::StridedSlice>(A, begin_node, end_node, mask, mask);
     auto f = make_shared<Function>(make_shared<op::v0::Abs>(s), ParameterVector{A});
 
     pass::Manager pass_manager;
     pass_manager.register_pass<pass::NopElimination>();
     pass_manager.run_passes(f);
 
-    ASSERT_EQ(count_ops_of_type<op::v0::Slice>(f), 0);
+    ASSERT_EQ(count_ops_of_type<op::v1::StridedSlice>(f), 0);
 }
 
 TEST(nop_elimination, eliminate_broadcast) {
diff --git a/ngraph/core/builder/src/builder/matmul_factory.cpp b/ngraph/core/builder/src/builder/matmul_factory.cpp
index 92df40a..aa16959 100644 (file)
@@ -27,7 +27,7 @@
 #include "ngraph/op/dot.hpp"
 #include "ngraph/op/quantized_dot.hpp"
 #include "ngraph/op/reshape.hpp"
-#include "ngraph/op/slice.hpp"
+#include "ngraph/op/strided_slice.hpp"
 
 NGRAPH_SUPPRESS_DEPRECATED_START
 
@@ -56,7 +56,10 @@ static Output<Node> get_sub_matrix(const Output<Node>& node, size_t idx)
     lower_bounds.at(0) = idx;
     upper_bounds.at(0) = idx + 1;
 
-    auto sub_matrix = Output<Node>{make_shared<op::Slice>(node, lower_bounds, upper_bounds)};
+    std::vector<int64_t> mask(shape.size(), 0);
+    auto begin = op::Constant::create(element::i64, {lower_bounds.size()}, lower_bounds);
+    auto end = op::Constant::create(element::i64, {upper_bounds.size()}, upper_bounds);
+    auto sub_matrix = make_shared<op::v1::StridedSlice>(node, begin, end, mask, mask);
     // Remove first single entry dim.
     return builder::opset1::squeeze(sub_matrix);
 }
diff --git a/ngraph/core/builder/src/builder/split.cpp b/ngraph/core/builder/src/builder/split.cpp
index 76293d6..7b254d3 100644 (file)
 //*****************************************************************************
 
 #include "builder/split.hpp"
-#include "ngraph/op/slice.hpp"
 #include "ngraph/opsets/opset1.hpp"
 
 NGRAPH_SUPPRESS_DEPRECATED_START
 
 using namespace ngraph;
 
-namespace
-{
-    inline size_t get_valid_array_index(size_t idx, size_t axis_size)
-    {
-        return std::min(idx, axis_size);
-    }
-
-    std::shared_ptr<op::Slice> make_ng_slice(const Output<Node>& output,
-                                             const std::vector<int64_t>& axes,
-                                             const std::vector<size_t>& starts,
-                                             const std::vector<size_t>& ends)
-    {
-        std::vector<size_t> upper_bounds{output.get_shape()};
-        std::vector<size_t> lower_bounds(upper_bounds.size());
-        for (size_t index{0}; index < axes.size(); ++index)
-        {
-            int64_t axis{axes.at(index)};
-            lower_bounds.at(axis) =
-                get_valid_array_index(starts.at(index), output.get_shape().at(axis));
-            upper_bounds.at(axis) =
-                get_valid_array_index(ends.at(index), output.get_shape().at(axis));
-        }
-        return std::static_pointer_cast<op::Slice>(
-            std::make_shared<op::Slice>(output, lower_bounds, upper_bounds)
-                ->add_provenance_group_members_above({output}));
-    }
-}
-
-OutputVector
-    builder::split(const Output<Node>& value, const std::vector<size_t>& length_parts, int64_t axis)
-{
-    size_t start_index{0};
-    OutputVector outputs;
-    for (const auto& length_part : length_parts)
-    {
-        size_t end_index{start_index + length_part};
-        outputs.push_back(make_ng_slice(value, {axis}, {start_index}, {end_index}));
-        start_index = end_index;
-    }
-    return outputs;
-}
-
-OutputVector builder::split(const Output<Node>& value, size_t split_parts, int axis)
-{
-    size_t axis_to_split{static_cast<size_t>(axis)};
-    if (axis < 0)
-    {
-        axis_to_split = value.get_shape().size() + axis;
-    }
-
-    size_t length_axis_to_split{value.get_shape().at(axis_to_split)};
-    std::vector<size_t> length_parts(split_parts, length_axis_to_split / split_parts);
-    return split(value, length_parts, axis_to_split);
-}
-
 OutputVector builder::opset1::split(const Output<Node>& value,
                                     const std::vector<size_t>& split_lengths,
                                     int64_t axis)
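
The builder::split overloads removed above produced v0::Slice nodes; the call sites updated in this patch (group_conv.cpp below) now go through builder::opset1::split instead. A minimal sketch of both retained entry points, inferred only from the signature kept above and those call sites; names and values are illustrative.

    #include "ngraph/builder/split.hpp"
    #include "ngraph/ngraph.hpp"

    using namespace ngraph;

    // Illustrative helper, not part of the patch.
    void split_usage(const Output<Node>& data)
    {
        // Even split into 4 chunks along axis 1, as the updated group_conv.cpp calls do.
        OutputVector chunks = builder::opset1::split(data, 4, 1);

        // Variable-length split along axis 0, via the overload whose signature appears above.
        OutputVector parts = builder::opset1::split(data, std::vector<size_t>{2, 4}, 0);

        (void)chunks;
        (void)parts;
    }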
diff --git a/ngraph/core/include/ngraph/op/op_version_tbl.hpp b/ngraph/core/include/ngraph/op/op_version_tbl.hpp
index f4e322b..b24efd7 100644 (file)
@@ -162,13 +162,11 @@ NGRAPH_OP(Sigmoid, ngraph::op::v0, 0)
 NGRAPH_OP(Sign, ngraph::op::v0, 0)
 NGRAPH_OP(Sin, ngraph::op::v0, 0)
 NGRAPH_OP(Sinh, ngraph::op::v0, 0)
-NGRAPH_OP(Slice, ngraph::op::v0, 0)
 NGRAPH_OP(Softmax, ngraph::op::v0, 0)
 NGRAPH_OP(Softmax, ngraph::op::v1, 1)
 NGRAPH_OP(SpaceToBatch, ngraph::op::v1, 1)
 NGRAPH_OP(SpaceToDepth, ngraph::op::v0, 0)
 NGRAPH_OP(Split, ngraph::op::v1, 1)
-NGRAPH_OP(Split, ngraph::op::v0, 0)
 NGRAPH_OP(Sqrt, ngraph::op::v0, 0)
 NGRAPH_OP(SquaredDifference, ngraph::op::v0, 0)
 NGRAPH_OP(Squeeze, ngraph::op::v0, 0)
diff --git a/ngraph/core/include/ngraph/op/slice.hpp b/ngraph/core/include/ngraph/op/slice.hpp
deleted file mode 100644 (file)
index 0cb0104..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#pragma once
-
-#include "ngraph/coordinate.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/strides.hpp"
-
-namespace ngraph
-{
-    namespace op
-    {
-        namespace v0
-        {
-            /// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a
-            ///        bounding box, optionally with stride.
-            class NGRAPH_DEPRECATED(
-                "This operation is deprecated and will be removed soon. Please do not use it.")
-                NGRAPH_API Slice : public Op
-            {
-                NGRAPH_SUPPRESS_DEPRECATED_START
-            public:
-                static constexpr NodeTypeInfo type_info{"Slice", 0};
-                const NodeTypeInfo& get_type_info() const override { return type_info; }
-                /// \brief Constructs a tensor slice operation
-                Slice() = default;
-                /// \brief Constructs a tensor slice operation.
-                ///
-                /// \param arg The tensor to be sliced.
-                /// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
-                /// \param upper_bounds The axiswise upper bounds of the slice (exclusive).
-                /// \param strides The slicing strides; for example, strides of `{n,m}` means to
-                /// take
-                ///                every nth row and every mth column of the input matrix.
-                Slice(const Output<Node>& arg,
-                      const Coordinate& lower_bounds,
-                      const Coordinate& upper_bounds,
-                      const Strides& strides);
-                /// \brief Constructs a tensor slice operation with unit strides; i.e., every
-                /// element
-                ///        inside the bounding box will be copied to the output slice.
-                ///
-                /// \param arg The tensor to be sliced.
-                /// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
-                /// \param upper_bounds The axiswise upper bounds of the slice (exclusive).
-                Slice(const Output<Node>& arg,
-                      const Coordinate& lower_bounds,
-                      const Coordinate& upper_bounds);
-
-                virtual std::shared_ptr<Node>
-                    clone_with_new_inputs(const OutputVector& new_args) const override;
-                void validate_and_infer_types() override;
-
-                /// \return The inclusive lower-bound coordinates.
-                const Coordinate& get_lower_bounds() const { return m_lower_bounds; }
-                /// \return The exclusive upper-bound coordinates.
-                const Coordinate& get_upper_bounds() const { return m_upper_bounds; }
-                /// \return The slicing strides.
-                const Strides& get_strides() const { return m_strides; }
-                bool evaluate(const HostTensorVector& outputs,
-                              const HostTensorVector& inputs) const override;
-
-            protected:
-                Coordinate m_lower_bounds;
-                Coordinate m_upper_bounds;
-                Strides m_strides;
-                NGRAPH_SUPPRESS_DEPRECATED_END
-            };
-        }
-        // default opset version
-        NGRAPH_SUPPRESS_DEPRECATED_START
-        using v0::Slice;
-        NGRAPH_SUPPRESS_DEPRECATED_END
-    }
-}
diff --git a/ngraph/core/include/ngraph/op/split.hpp b/ngraph/core/include/ngraph/op/split.hpp
index d5984d7..436c799 100644 (file)
 #include "ngraph/node.hpp"
 #include "ngraph/op/util/fused_op.hpp"
 
-NGRAPH_SUPPRESS_DEPRECATED_START
-
 namespace ngraph
 {
     namespace op
     {
-        namespace v0
-        {
-            /// \brief Splits the input tensor into a list of smaller tensors ("pieces")
-            class NGRAPH_DEPRECATED(
-                "This operation is deprecated and will be removed soon. "
-                "Use v1::Split instead of it.") NGRAPH_API Split : public ngraph::op::util::FusedOp
-            {
-                NGRAPH_SUPPRESS_DEPRECATED_START
-            public:
-                static constexpr NodeTypeInfo type_info{"Split", 0};
-                const NodeTypeInfo& get_type_info() const override { return type_info; }
-                Split() = default;
-                /// \brief Constructs a Split op that evenly divides the input tensor.
-                ///
-                /// \param data       Node producing the input tensor
-                /// \param axis       Node producing an axis along which the input tensor
-                ///                   should be split. Negative values mean counting from
-                ///                   the back of the input tensor's shape.
-                /// \param num_split  a number of "pieces" the input tensor will be split to
-                Split(const Output<Node>& data, const Output<Node>& axis, const size_t num_split);
-
-                /// \brief Constructs a Split op that splits the input tensor into variable length
-                ///        "pieces"
-                ///
-                /// \param data    Node producing the input tensor
-                /// \param axis    Node producing an axis along which the input tensor
-                ///                should be split. Negative values mean counting from
-                ///                the back of the input tensor's shape.
-                /// \param splits  a list of lengths that the input tensor should be
-                ///                split to. Use this constructor to split the input
-                ///                tensor to variable length chunks.
-                Split(const Output<Node>& data,
-                      const Output<Node>& axis,
-                      const std::vector<size_t>& splits);
-
-                void pre_validate_and_infer_types() override;
-
-                virtual OutputVector decompose_op() const override;
-
-                virtual std::shared_ptr<Node>
-                    clone_with_new_inputs(const OutputVector& new_args) const override;
-
-                size_t get_axis() const { return m_axis; }
-                const std::vector<size_t>& get_splits() const { return m_splits; }
-            private:
-                /// used internally for validation purposes, indicates which constructor was used
-                bool m_split_evenly;
-                int64_t m_axis;
-                size_t m_num_split;
-                /// contains lengths of chunks that the input tensor will be split into
-                std::vector<size_t> m_splits;
-                NGRAPH_SUPPRESS_DEPRECATED_END
-            };
-        }
-
         namespace v1
         {
             /// \brief Splits the input tensor into a list of equal sized tensors
@@ -115,11 +58,5 @@ namespace ngraph
                 size_t m_num_splits;
             };
         }
-
-        NGRAPH_SUPPRESS_DEPRECATED_START
-        using v0::Split;
-        NGRAPH_SUPPRESS_DEPRECATED_END
     }
 }
-
-NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/core/include/ngraph/ops.hpp b/ngraph/core/include/ngraph/ops.hpp
index 47e337c..95ffdc5 100644 (file)
 #include "ngraph/op/sign.hpp"
 #include "ngraph/op/sin.hpp"
 #include "ngraph/op/sinh.hpp"
-#include "ngraph/op/slice.hpp"
 #include "ngraph/op/softmax.hpp"
 #include "ngraph/op/softplus.hpp"
 #include "ngraph/op/space_to_batch.hpp"
diff --git a/ngraph/core/src/op/concat.cpp b/ngraph/core/src/op/concat.cpp
index 728399b..aa993f2 100644 (file)
@@ -19,7 +19,6 @@
 #include "itt.hpp"
 #include "ngraph/attribute_visitor.hpp"
 #include "ngraph/op/concat.hpp"
-#include "ngraph/op/slice.hpp"
 #include "ngraph/runtime/host_tensor.hpp"
 #include "ngraph/runtime/reference/concat.hpp"
 
diff --git a/ngraph/core/src/op/group_conv.cpp b/ngraph/core/src/op/group_conv.cpp
index 78c7596..0ae3c22 100644 (file)
@@ -23,7 +23,6 @@
 #include "ngraph/op/convolution.hpp"
 #include "ngraph/op/group_conv.hpp"
 #include "ngraph/op/reshape.hpp"
-#include "ngraph/op/slice.hpp"
 #include "ngraph/validation_util.hpp"
 
 using namespace std;
@@ -507,9 +506,9 @@ OutputVector op::v1::GroupConvolutionBackpropData::decompose_op() const
 
     auto groups = filters.get_shape()[0];
     // slice data
-    OutputVector sliced_data = builder::split(data, groups, 1);
+    OutputVector sliced_data = builder::opset1::split(data, groups, 1);
     // slice filters
-    OutputVector sliced_filters = builder::split(filters, groups, 0);
+    OutputVector sliced_filters = builder::opset1::split(filters, groups, 0);
     // We have to squeeze first empty dimension (groups).
     std::transform(
         std::begin(sliced_filters),
diff --git a/ngraph/core/src/op/slice.cpp b/ngraph/core/src/op/slice.cpp
deleted file mode 100644 (file)
index 5c28318..0000000
+++ /dev/null
@@ -1,166 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#include "ngraph/op/slice.hpp"
-
-#include "ngraph/runtime/host_tensor.hpp"
-#include "ngraph/runtime/reference/slice.hpp"
-
-NGRAPH_SUPPRESS_DEPRECATED_START
-
-using namespace std;
-using namespace ngraph;
-
-constexpr NodeTypeInfo op::Slice::type_info;
-
-op::Slice::Slice(const Output<Node>& arg,
-                 const Coordinate& lower_bounds,
-                 const Coordinate& upper_bounds,
-                 const Strides& strides)
-    : Op({arg})
-    , m_lower_bounds(lower_bounds)
-    , m_upper_bounds(upper_bounds)
-    , m_strides(strides)
-{
-    constructor_validate_and_infer_types();
-}
-
-op::Slice::Slice(const Output<Node>& arg,
-                 const Coordinate& lower_bounds,
-                 const Coordinate& upper_bounds)
-    : Op({arg})
-    , m_lower_bounds(lower_bounds)
-    , m_upper_bounds(upper_bounds)
-    , m_strides(Strides())
-{
-    constructor_validate_and_infer_types();
-}
-
-void op::Slice::validate_and_infer_types()
-{
-    // An empty stride vector with lower_bounds/upper_bounds filled in means that we need to
-    // construct the default value.
-    if (m_strides.size() == 0)
-    {
-        m_strides = Strides(m_lower_bounds.size(), 1);
-    }
-
-    NODE_VALIDATION_CHECK(this,
-                          m_lower_bounds.size() == m_upper_bounds.size() &&
-                              m_lower_bounds.size() == m_strides.size(),
-                          "Ranks of lower bounds (",
-                          m_lower_bounds,
-                          "), upper bounds (",
-                          m_upper_bounds,
-                          ") and strides (",
-                          m_strides,
-                          ") do not match.");
-
-    size_t output_rank = m_upper_bounds.size();
-
-    for (size_t i = 0; i < output_rank; i++)
-    {
-        NODE_VALIDATION_CHECK(this,
-                              m_lower_bounds[i] <= m_upper_bounds[i],
-                              "Lower bound for slice is greater than upper bound at axis ",
-                              i,
-                              " (lower bounds: ",
-                              m_lower_bounds,
-                              ", upper bounds: ",
-                              m_upper_bounds,
-                              ").");
-
-        NODE_VALIDATION_CHECK(this,
-                              m_strides[i] != 0,
-                              "Stride for slice is zero at axis ",
-                              i,
-                              " (strides: ",
-                              m_strides,
-                              ").");
-    }
-
-    const PartialShape& input_shape = get_input_partial_shape(0);
-    Dimension input_rank = input_shape.rank();
-
-    NODE_VALIDATION_CHECK(this,
-                          input_rank.is_dynamic() || input_rank.get_length() == output_rank,
-                          "Input rank does not match the rank of the lower bounds (",
-                          m_lower_bounds,
-                          "), upper bounds (",
-                          m_upper_bounds,
-                          "), and strides (",
-                          m_strides,
-                          ").");
-
-    std::vector<Dimension> result_dims(output_rank);
-
-    for (size_t i = 0; i < output_rank; i++)
-    {
-        NODE_VALIDATION_CHECK(this,
-                              input_rank.is_dynamic() || input_shape[i].is_dynamic() ||
-                                  m_upper_bounds[i] <= input_shape[i].get_length(),
-                              "Upper bound for slice at axis ",
-                              i,
-                              " is out of range ",
-                              "(upper bounds: ",
-                              m_upper_bounds,
-                              ", argument shape: ",
-                              input_shape,
-                              ").");
-
-        size_t result_axis_size = m_upper_bounds[i] - m_lower_bounds[i];
-        result_axis_size =
-            result_axis_size / m_strides[i] + ((result_axis_size % m_strides[i] == 0) ? 0 : 1);
-        result_dims[i] = result_axis_size;
-    }
-
-    set_output_type(0, get_input_element_type(0), PartialShape{result_dims});
-}
-
-shared_ptr<Node> op::Slice::clone_with_new_inputs(const OutputVector& new_args) const
-{
-    check_new_args_count(this, new_args);
-    return make_shared<Slice>(new_args.at(0), m_lower_bounds, m_upper_bounds, m_strides);
-}
-
-namespace
-{
-    bool evaluate_slice(const HostTensorPtr& in,
-                        const HostTensorPtr& out,
-                        const Coordinate& lower_bounds,
-                        const Coordinate& upper_bounds,
-                        const Strides& strides)
-    {
-        runtime::reference::slice(in->get_data_ptr<const char>(),
-                                  out->get_data_ptr<char>(),
-                                  in->get_shape(),
-                                  lower_bounds,
-                                  upper_bounds,
-                                  strides,
-                                  out->get_shape(),
-                                  in->get_element_type().size());
-
-        return true;
-    }
-}
-
-bool op::Slice::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
-{
-    const auto& data = inputs[0];
-    const auto& output = outputs[0];
-
-    return evaluate_slice(data, output, m_lower_bounds, m_upper_bounds, m_strides);
-}
diff --git a/ngraph/core/src/op/split.cpp b/ngraph/core/src/op/split.cpp
index c56c25f..e191bd9 100644 (file)
 #include "ngraph/validation_util.hpp"
 
 #include "ngraph/runtime/host_tensor.hpp"
-NGRAPH_SUPPRESS_DEPRECATED_START
 
 using namespace std;
 using namespace ngraph;
 
-NGRAPH_SUPPRESS_DEPRECATED_START
-
-constexpr NodeTypeInfo op::v0::Split::type_info;
-
-op::v0::Split::Split(const Output<Node>& data, const Output<Node>& axis, const size_t num_split)
-    : FusedOp({data, axis})
-    , m_split_evenly{true}
-    , m_num_split{num_split}
-{
-    constructor_validate_and_infer_types();
-}
-
-op::v0::Split::Split(const Output<Node>& data,
-                     const Output<Node>& axis,
-                     const std::vector<size_t>& splits)
-    : FusedOp({data, axis})
-    , m_split_evenly{false}
-    , m_num_split{0}
-    , m_splits{splits}
-{
-    constructor_validate_and_infer_types();
-}
-
-void op::v0::Split::pre_validate_and_infer_types()
-{
-    const auto axis_shape = get_input_shape(1);
-    NODE_VALIDATION_CHECK(this, is_scalar(axis_shape), "The 'axis' input node must be scalar");
-
-    const auto axis_node = input_value(1).get_node_shared_ptr();
-    NODE_VALIDATION_CHECK(
-        this, op::is_constant(axis_node), "The 'axis' input node must be constant");
-    const auto axis_node_const = as_type_ptr<op::Constant>(axis_node);
-    m_axis = axis_node_const->get_data_ptr<int64_t>()[0];
-
-    // Create dynamic-typed outputs. Actual shape/type will be computed during shape inference
-    for (size_t i = 0; i < std::max(m_splits.size(), m_num_split); i++)
-    {
-        set_output_type(i, get_input_element_type(0), PartialShape::dynamic());
-    }
-
-    if (is_dynamic())
-    {
-        return;
-    }
-
-    const auto shape = get_input_shape(0);
-
-    const auto data_rank = get_input_partial_shape(0).rank();
-    m_axis = ngraph::normalize_axis(this, m_axis, data_rank);
-    const auto dimension_at_axis = shape.at(m_axis);
-    if (m_split_evenly)
-    {
-        NODE_VALIDATION_CHECK(this,
-                              dimension_at_axis % m_num_split == 0,
-                              "The input tensor's dimension pointed by the 'axis' parameter: ",
-                              dimension_at_axis,
-                              " has to be a multiple of the 'num_split' parameter value: ",
-                              m_num_split);
-
-        m_splits.assign(m_num_split, dimension_at_axis / m_num_split);
-    }
-    else
-    {
-        const auto sum_splits = accumulate(begin(m_splits), end(m_splits), 0UL);
-        NODE_VALIDATION_CHECK(this,
-                              sum_splits == dimension_at_axis,
-                              "The input tensor's dimension pointed by the 'axis' parameter: ",
-                              dimension_at_axis,
-                              " has to be equal to the sum of splits passed to the op: ",
-                              sum_splits);
-
-        const bool all_splits_positive =
-            all_of(begin(m_splits), end(m_splits), [](const size_t v) { return v > 0; });
-
-        NODE_VALIDATION_CHECK(this,
-                              all_splits_positive == true,
-                              "All values of the 'splits' attribute must be greater than zero");
-    }
-    set_input_is_relevant_to_shape(0);
-}
-
-OutputVector op::v0::Split::decompose_op() const
-{
-    return builder::split(input_value(0), m_splits, m_axis);
-}
-
-shared_ptr<Node> op::v0::Split::clone_with_new_inputs(const OutputVector& new_args) const
-{
-    check_new_args_count(this, new_args);
-    return make_shared<Split>(new_args.at(0), new_args.at(1), m_splits);
-}
-
 NGRAPH_RTTI_DEFINITION(op::v1::Split, "Split", 1);
 
 op::v1::Split::Split(const Output<Node>& data, const Output<Node>& axis, const size_t num_splits)
diff --git a/ngraph/frontend/onnx_import/src/op/conv.cpp b/ngraph/frontend/onnx_import/src/op/conv.cpp
index be8937b..5e9a219 100644 (file)
@@ -21,7 +21,6 @@
 #include "conv.hpp"
 #include "ngraph/builder/reshape.hpp"
 #include "ngraph/op/group_conv.hpp"
-#include "ngraph/op/slice.hpp"
 #include "ngraph/op/util/attr_types.hpp"
 #include "onnx_import/default_opset.hpp"
 #include "onnx_import/exceptions.hpp"
diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt
index 97870dc..bdb0840 100644 (file)
@@ -172,7 +172,6 @@ set(SRC
     type_prop/select.cpp
     type_prop/shape_of.cpp
     type_prop/shuffle_channels.cpp
-    type_prop/slice.cpp
     type_prop/softplus.cpp
     type_prop/space_to_batch.cpp
     type_prop/space_to_depth.cpp
@@ -336,8 +335,8 @@ set(MULTI_TEST_SRC
     backend/sign.in.cpp
     backend/sin.in.cpp
     backend/sinh.in.cpp
-    backend/slice.in.cpp
     backend/softmax.in.cpp
+    backend/split.in.cpp
     backend/sqrt.in.cpp
     backend/subtract.in.cpp
     backend/sum.in.cpp
diff --git a/ngraph/test/backend/fused_op.in.cpp b/ngraph/test/backend/fused_op.in.cpp
index 47bd89a..8aabaaf 100644 (file)
@@ -1628,42 +1628,6 @@ NGRAPH_TEST(${BACKEND_NAME}, squared_difference_broadcast)
     test_case.run();
 }
 
-NGRAPH_TEST(${BACKEND_NAME}, split_3_equal_parts)
-{
-    const auto data = make_shared<op::Parameter>(element::i32, Shape{6});
-    const auto axis = op::Constant::create(element::i64, Shape{}, {0});
-
-    const auto tested_op = make_shared<op::Split>(data, axis, 3);
-    const auto function = make_shared<Function>(tested_op->decompose_op(), ParameterVector{data});
-
-    auto test_case = test::TestCase<TestEngine>(function);
-    test_case.add_input<int32_t>({1, 2, 3, 4, 5, 6});
-
-    test_case.add_expected_output<int32_t>(Shape{2}, {1, 2});
-    test_case.add_expected_output<int32_t>(Shape{2}, {3, 4});
-    test_case.add_expected_output<int32_t>(Shape{2}, {5, 6});
-
-    test_case.run();
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, split_var_len_parts)
-{
-    const auto data = make_shared<op::Parameter>(element::i32, Shape{2, 6});
-
-    const std::vector<size_t> splits = {2, 4};
-    const auto axis = op::Constant::create(element::i64, Shape{}, {1});
-    const auto tested_op = make_shared<op::Split>(data, axis, splits);
-    const auto function = make_shared<Function>(tested_op->decompose_op(), ParameterVector{data});
-
-    auto test_case = test::TestCase<TestEngine>(function);
-    test_case.add_input<int32_t>({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11});
-
-    test_case.add_expected_output<int32_t>(Shape{2, 2}, {0, 1, 6, 7});
-    test_case.add_expected_output<int32_t>(Shape{2, 4}, {2, 3, 4, 5, 8, 9, 10, 11});
-
-    test_case.run();
-}
-
 NGRAPH_TEST(${BACKEND_NAME}, lstm_cell_zero_bias_peepholes)
 {
     const size_t batch_size = 2;
diff --git a/ngraph/test/backend/split.in.cpp b/ngraph/test/backend/split.in.cpp
new file mode 100644 (file)
index 0000000..953295d
--- /dev/null
@@ -0,0 +1,236 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#include "gtest/gtest.h"
+#include "ngraph/ngraph.hpp"
+#include "util/engine/test_engines.hpp"
+#include "util/test_case.hpp"
+#include "util/test_control.hpp"
+
+using namespace std;
+using namespace ngraph;
+
+static string s_manifest = "${MANIFEST}";
+using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
+
+NGRAPH_TEST(${BACKEND_NAME}, split_1d)
+{
+    const auto data = make_shared<op::Parameter>(element::i32, Shape{6});
+    const auto axis = op::Constant::create(element::i64, Shape{}, {0});
+
+    const auto tested_op = make_shared<op::v1::Split>(data, axis, 3);
+    const auto function = make_shared<Function>(tested_op, ParameterVector{data});
+
+    auto test_case = test::TestCase<TestEngine>(function);
+    test_case.add_input<int32_t>({1, 2, 3, 4, 5, 6});
+
+    test_case.add_expected_output<int32_t>(Shape{2}, {1, 2});
+    test_case.add_expected_output<int32_t>(Shape{2}, {3, 4});
+    test_case.add_expected_output<int32_t>(Shape{2}, {5, 6});
+
+    test_case.run();
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, split_2d_axis_0)
+{
+    Shape shape{6, 2};
+    const auto data = make_shared<op::Parameter>(element::f32, shape);
+    const auto axis = op::Constant::create(element::i64, Shape{}, {0});
+
+    const auto tested_op = make_shared<op::v1::Split>(data, axis, 2);
+    const auto function = make_shared<Function>(tested_op, ParameterVector{data});
+
+    auto test_case = test::TestCase<TestEngine>(function);
+    std::vector<float> in(shape_size(shape));
+    std::iota(in.begin(), in.end(), 0);
+    test_case.add_input<float>(in);
+
+    test_case.add_expected_output<float>(Shape{3, 2}, {0, 1, 2, 3, 4, 5});
+    test_case.add_expected_output<float>(Shape{3, 2}, {6, 7, 8, 9, 10, 11});
+
+    test_case.run();
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, split_2d_axis_1)
+{
+    Shape shape{6, 2};
+    const auto data = make_shared<op::Parameter>(element::f32, shape);
+    const auto axis = op::Constant::create(element::i64, Shape{}, {1});
+
+    const auto tested_op = make_shared<op::v1::Split>(data, axis, 2);
+    const auto function = make_shared<Function>(tested_op, ParameterVector{data});
+
+    auto test_case = test::TestCase<TestEngine>(function);
+    std::vector<float> in(shape_size(shape));
+    std::iota(in.begin(), in.end(), 0);
+    test_case.add_input<float>(in);
+
+    test_case.add_expected_output<float>(Shape{6, 1}, {0, 2, 4, 6, 8, 10});
+    test_case.add_expected_output<float>(Shape{6, 1}, {1, 3, 5, 7, 9, 11});
+
+    test_case.run();
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, split_3d_axis_0)
+{
+    Shape shape{2, 2, 3};
+    const auto data = make_shared<op::Parameter>(element::f32, shape);
+    const auto axis = op::Constant::create(element::i64, Shape{}, {0});
+
+    const auto tested_op = make_shared<op::v1::Split>(data, axis, 2);
+    const auto function = make_shared<Function>(tested_op, ParameterVector{data});
+
+    auto test_case = test::TestCase<TestEngine>(function);
+    std::vector<float> in(shape_size(shape));
+    std::iota(in.begin(), in.end(), 0);
+    test_case.add_input<float>(in);
+
+    test_case.add_expected_output<float>(Shape{1, 2, 3}, {0, 1, 2, 3, 4, 5});
+    test_case.add_expected_output<float>(Shape{1, 2, 3}, {6, 7, 8, 9, 10, 11});
+
+    test_case.run();
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, split_3d_axis_1)
+{
+    Shape shape{2, 8, 2};
+    const auto data = make_shared<op::Parameter>(element::f32, shape);
+    const auto axis = op::Constant::create(element::i64, Shape{}, {1});
+
+    const auto tested_op = make_shared<op::v1::Split>(data, axis, 4);
+    const auto function = make_shared<Function>(tested_op, ParameterVector{data});
+
+    auto test_case = test::TestCase<TestEngine>(function);
+    std::vector<float> in(shape_size(shape));
+    std::iota(in.begin(), in.end(), 0);
+    test_case.add_input<float>(in);
+
+    test_case.add_expected_output<float>(Shape{2, 2, 2}, {0, 1, 2, 3, 16, 17, 18, 19});
+    test_case.add_expected_output<float>(Shape{2, 2, 2}, {4, 5, 6, 7, 20, 21, 22, 23});
+    test_case.add_expected_output<float>(Shape{2, 2, 2}, {8, 9, 10, 11, 24, 25, 26, 27});
+    test_case.add_expected_output<float>(Shape{2, 2, 2}, {12, 13, 14, 15, 28, 29, 30, 31});
+
+    test_case.run();
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, split_3d_axis_2)
+{
+    Shape shape{2, 1, 6};
+    const auto data = make_shared<op::Parameter>(element::f32, shape);
+    const auto axis = op::Constant::create(element::i64, Shape{}, {2});
+
+    const auto tested_op = make_shared<op::v1::Split>(data, axis, 2);
+    const auto function = make_shared<Function>(tested_op, ParameterVector{data});
+
+    auto test_case = test::TestCase<TestEngine>(function);
+    std::vector<float> in(shape_size(shape));
+    std::iota(in.begin(), in.end(), 0);
+    test_case.add_input<float>(in);
+
+    test_case.add_expected_output<float>(Shape{2, 1, 3}, {0, 1, 2, 6, 7, 8});
+    test_case.add_expected_output<float>(Shape{2, 1, 3}, {3, 4, 5, 9, 10, 11});
+
+    test_case.run();
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, split_4d_axis_0)
+{
+    Shape shape{3, 2, 3, 1};
+    const auto data = make_shared<op::Parameter>(element::f32, shape);
+    const auto axis = op::Constant::create(element::i64, Shape{}, {0});
+
+    const auto tested_op = make_shared<op::v1::Split>(data, axis, 3);
+    const auto function = make_shared<Function>(tested_op, ParameterVector{data});
+
+    auto test_case = test::TestCase<TestEngine>(function);
+    std::vector<float> in(shape_size(shape));
+    std::iota(in.begin(), in.end(), 0);
+    test_case.add_input<float>(in);
+
+    test_case.add_expected_output<float>(Shape{1, 2, 3, 1}, {0, 1, 2, 3, 4, 5});
+    test_case.add_expected_output<float>(Shape{1, 2, 3, 1}, {6, 7, 8, 9, 10, 11});
+    test_case.add_expected_output<float>(Shape{1, 2, 3, 1}, {12, 13, 14, 15, 16, 17});
+
+    test_case.run();
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, split_4d_axis_1)
+{
+    Shape shape{2, 8, 2, 2};
+    const auto data = make_shared<op::Parameter>(element::f32, shape);
+    const auto axis = op::Constant::create(element::i64, Shape{}, {1});
+
+    const auto tested_op = make_shared<op::v1::Split>(data, axis, 4);
+    const auto function = make_shared<Function>(tested_op, ParameterVector{data});
+
+    auto test_case = test::TestCase<TestEngine>(function);
+    std::vector<float> in(shape_size(shape));
+    std::iota(in.begin(), in.end(), 0);
+    test_case.add_input<float>(in);
+
+    test_case.add_expected_output<float>(Shape{2, 2, 2, 2},
+                                         {0, 1, 2, 3, 4, 5, 6, 7, 32, 33, 34, 35, 36, 37, 38, 39});
+    test_case.add_expected_output<float>(
+        Shape{2, 2, 2, 2}, {8, 9, 10, 11, 12, 13, 14, 15, 40, 41, 42, 43, 44, 45, 46, 47});
+    test_case.add_expected_output<float>(
+        Shape{2, 2, 2, 2}, {16, 17, 18, 19, 20, 21, 22, 23, 48, 49, 50, 51, 52, 53, 54, 55});
+    test_case.add_expected_output<float>(
+        Shape{2, 2, 2, 2}, {24, 25, 26, 27, 28, 29, 30, 31, 56, 57, 58, 59, 60, 61, 62, 63});
+
+    test_case.run();
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, split_4d_axis_2)
+{
+    Shape shape{2, 1, 6, 2};
+    const auto data = make_shared<op::Parameter>(element::f32, shape);
+    const auto axis = op::Constant::create(element::i64, Shape{}, {2});
+
+    const auto tested_op = make_shared<op::v1::Split>(data, axis, 3);
+    const auto function = make_shared<Function>(tested_op, ParameterVector{data});
+
+    auto test_case = test::TestCase<TestEngine>(function);
+    std::vector<float> in(shape_size(shape));
+    std::iota(in.begin(), in.end(), 0);
+    test_case.add_input<float>(in);
+
+    test_case.add_expected_output<float>(Shape{2, 1, 2, 2}, {0, 1, 2, 3, 12, 13, 14, 15});
+    test_case.add_expected_output<float>(Shape{2, 1, 2, 2}, {4, 5, 6, 7, 16, 17, 18, 19});
+    test_case.add_expected_output<float>(Shape{2, 1, 2, 2}, {8, 9, 10, 11, 20, 21, 22, 23});
+
+    test_case.run();
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, split_4d_axis_3)
+{
+    Shape shape{2, 1, 2, 6};
+    const auto data = make_shared<op::Parameter>(element::f32, shape);
+    const auto axis = op::Constant::create(element::i64, Shape{}, {3});
+
+    const auto tested_op = make_shared<op::v1::Split>(data, axis, 3);
+    const auto function = make_shared<Function>(tested_op, ParameterVector{data});
+
+    auto test_case = test::TestCase<TestEngine>(function);
+    std::vector<float> in(shape_size(shape));
+    std::iota(in.begin(), in.end(), 0);
+    test_case.add_input<float>(in);
+
+    test_case.add_expected_output<float>(Shape{2, 1, 2, 2}, {0, 1, 6, 7, 12, 13, 18, 19});
+    test_case.add_expected_output<float>(Shape{2, 1, 2, 2}, {2, 3, 8, 9, 14, 15, 20, 21});
+    test_case.add_expected_output<float>(Shape{2, 1, 2, 2}, {4, 5, 10, 11, 16, 17, 22, 23});
+
+    test_case.run();
+}
diff --git a/ngraph/test/build_graph.cpp b/ngraph/test/build_graph.cpp
index 04c83cb..fb6bdfa 100644 (file)
@@ -158,7 +158,7 @@ TEST(build_graph, multi_output_split_dynamic)
 {
     const auto data = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
     const auto axis = op::Constant::create(element::i64, Shape{}, {1});
-    const auto split = make_shared<op::Split>(data, axis, 2);
+    const auto split = make_shared<op::v1::Split>(data, axis, 2);
     auto abs = make_shared<op::Abs>(split->output(1));
     EXPECT_TRUE(abs->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
 
@@ -214,7 +214,7 @@ TEST(build_graph, build_graph_with_sink)
     auto pattern = make_shared<op::Concat>(args, 1);
     auto res = make_shared<op::Result>(pattern);
     const auto axis = op::Constant::create(element::i64, Shape{}, {1});
-    auto crop = make_shared<op::Split>(pattern, axis, 3);
+    auto crop = make_shared<op::v1::Split>(pattern, axis, 3);
     auto assign = make_shared<op::Assign>(crop, "v0");
 
     auto f = make_shared<Function>(ResultVector({res}), SinkVector({assign}), ParameterVector{arg});
@@ -235,7 +235,7 @@ TEST(build_graph, build_graph_with_sink_output_ctor)
     auto pattern = make_shared<op::Concat>(args, 1);
     auto res = make_shared<op::Result>(pattern);
     const auto axis = op::Constant::create(element::i64, Shape{}, {1});
-    auto crop = make_shared<op::Split>(pattern, axis, 3);
+    auto crop = make_shared<op::v1::Split>(pattern, axis, 3);
     auto assign = make_shared<op::Assign>(crop, "v0");
 
     auto f = make_shared<Function>(
@@ -257,7 +257,7 @@ TEST(build_graph, build_graph_with_add_sink)
     auto pattern = make_shared<op::Concat>(args, 1);
     auto res = make_shared<op::Result>(pattern);
     const auto axis = op::Constant::create(element::i64, Shape{}, {1});
-    auto crop = make_shared<op::Split>(pattern, axis, 3);
+    auto crop = make_shared<op::v1::Split>(pattern, axis, 3);
     auto assign = make_shared<op::Assign>(crop, "v0");
 
     auto f = make_shared<Function>(ResultVector({res}), ParameterVector{arg});
@@ -284,7 +284,7 @@ TEST(build_graph, build_graph_with_wrong_remove_sink)
     auto pattern = make_shared<op::Concat>(args, 1);
     auto res = make_shared<op::Result>(pattern);
     const auto axis = op::Constant::create(element::i64, Shape{}, {1});
-    auto crop = make_shared<op::Split>(pattern, axis, 3);
+    auto crop = make_shared<op::v1::Split>(pattern, axis, 3);
     auto assign = make_shared<op::Assign>(crop, "v0");
 
     auto f = make_shared<Function>(ResultVector({res}), SinkVector({assign}), ParameterVector{arg});
@@ -308,7 +308,7 @@ TEST(build_graph, build_graph_with_remove_sink)
     auto pattern = make_shared<op::Concat>(args, 1);
     auto res = make_shared<op::Result>(pattern);
     const auto axis = op::Constant::create(element::i64, Shape{}, {1});
-    auto crop = make_shared<op::Split>(pattern, axis, 3);
+    auto crop = make_shared<op::v1::Split>(pattern, axis, 3);
     auto assign = make_shared<op::Assign>(crop, "v0");
 
     auto f = make_shared<Function>(ResultVector({res}), SinkVector({assign}), ParameterVector{arg});
@@ -334,7 +334,7 @@ TEST(build_graph, build_graph_with_add_result)
     auto pattern = make_shared<op::Concat>(args, 1);
     auto res = make_shared<op::Result>(pattern);
     const auto axis = op::Constant::create(element::i64, Shape{}, {1});
-    auto crop = make_shared<op::Split>(pattern, axis, 3);
+    auto crop = make_shared<op::v1::Split>(pattern, axis, 3);
     auto res2 = make_shared<op::Result>(crop, "v0");
 
     auto f = make_shared<Function>(ResultVector({res}), ParameterVector{arg});
@@ -361,7 +361,7 @@ TEST(build_graph, build_graph_with_remove_result)
     auto pattern = make_shared<op::Concat>(args, 1);
     auto res = make_shared<op::Result>(pattern);
     const auto axis = op::Constant::create(element::i64, Shape{}, {1});
-    auto crop = make_shared<op::Split>(pattern, axis, 3);
+    auto crop = make_shared<op::v1::Split>(pattern, axis, 3);
     auto res2 = make_shared<op::Result>(crop, "v0");
 
     auto f = make_shared<Function>(ResultVector({res, res2}), ParameterVector{arg});
@@ -376,4 +376,4 @@ TEST(build_graph, build_graph_with_remove_result)
     EXPECT_EQ(results.size(), 1);
     nodes = f->get_ops();
     EXPECT_EQ(nodes.size(), 5);
-}
\ No newline at end of file
+}
diff --git a/ngraph/test/constant_folding.cpp b/ngraph/test/constant_folding.cpp
index dfe150c..ed62bd3 100644 (file)
@@ -1978,13 +1978,17 @@ TEST(constant_folding, const_gather_v1_subgraph_skip_if_not_single_input)
     ASSERT_EQ(count_ops_of_type<op::v1::Gather>(f), 1);
 }
 
-TEST(constant_folding, const_slice)
+TEST(constant_folding, const_strided_slice)
 {
     Shape shape_in{16};
 
     vector<int> values_in{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
     auto constant = make_shared<op::Constant>(element::i32, shape_in, values_in);
-    auto slice = make_shared<op::Slice>(constant, Coordinate{2}, Coordinate{15}, Strides{3});
+    auto begin = op::Constant::create(element::i64, {1}, {2});
+    auto end = op::Constant::create(element::i64, {1}, {15});
+    auto stride = op::Constant::create(element::i64, {1}, {3});
+    auto slice = make_shared<op::v1::StridedSlice>(
+        constant, begin, end, stride, std::vector<int64_t>{0}, std::vector<int64_t>{0});
     slice->set_friendly_name("test");
 
     auto f = make_shared<Function>(slice, ParameterVector{});
@@ -1993,7 +1997,7 @@ TEST(constant_folding, const_slice)
     pass_manager.register_pass<pass::ConstantFolding>();
     pass_manager.run_passes(f);
 
-    ASSERT_EQ(count_ops_of_type<op::Slice>(f), 0);
+    ASSERT_EQ(count_ops_of_type<op::v1::StridedSlice>(f), 0);
     ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
 
     auto new_const =
diff --git a/ngraph/test/copy.cpp b/ngraph/test/copy.cpp
index 0c563c6..fe109c9 100644 (file)
@@ -322,7 +322,7 @@ TEST(copy, sinh)
     ASSERT_TRUE(check_unary<op::Sinh>());
 }
 
-TEST(copy, slice)
+TEST(copy, strided_slice)
 {
     Shape shape_in{2, 3, 4};
     Coordinate lower{0, 0, 0};
@@ -330,18 +330,39 @@ TEST(copy, slice)
     Strides strides{1, 1, 1};
 
     auto arg0 = make_shared<op::Parameter>(element::f32, shape_in);
-    OutputVector new_args{make_shared<op::Parameter>(element::f32, shape_in)};
-
-    auto node = make_shared<op::Slice>(arg0, lower, upper, strides);
+    OutputVector new_args{make_shared<op::Parameter>(element::f32, shape_in),
+                          op::Constant::create(element::u64, {lower.size()}, lower),
+                          op::Constant::create(element::u64, {upper.size()}, upper),
+                          op::Constant::create(element::i64, {strides.size()}, strides)};
+
+    auto begin_node = op::Constant::create(element::i64, {lower.size()}, lower);
+    auto end_node = op::Constant::create(element::i64, {upper.size()}, upper);
+    auto strides_node = op::Constant::create(element::i64, {strides.size()}, strides);
+    auto node = make_shared<op::v1::StridedSlice>(arg0,
+                                                  begin_node,
+                                                  end_node,
+                                                  strides_node,
+                                                  std::vector<int64_t>{0, 0, 1},
+                                                  std::vector<int64_t>{1, 0, 0},
+                                                  std::vector<int64_t>{0, 1, 0},
+                                                  std::vector<int64_t>{0, 0, 1},
+                                                  std::vector<int64_t>{1, 0, 0});
     auto new_node = node->clone_with_new_inputs(new_args);
-    auto node_cast = as_type_ptr<op::Slice>(new_node);
+    auto node_cast = as_type_ptr<op::v1::StridedSlice>(new_node);
     ASSERT_NE(node_cast, nullptr);
 
     ASSERT_TRUE(nullptr != new_node);
     ASSERT_TRUE(new_args == new_node->input_values());
-    ASSERT_TRUE(lower == node_cast->get_lower_bounds());
-    ASSERT_TRUE(upper == node_cast->get_upper_bounds());
-    ASSERT_TRUE(strides == node_cast->get_strides());
+    std::vector<int64_t> expected_begin_mask{0, 0, 1};
+    std::vector<int64_t> expected_end_mask{1, 0, 0};
+    std::vector<int64_t> expected_new_axis_mask{0, 1, 0};
+    std::vector<int64_t> expected_shrink_axis_mask{0, 0, 1};
+    std::vector<int64_t> expected_ellipsis_mask{1, 0, 0};
+    ASSERT_TRUE(expected_begin_mask == node_cast->get_begin_mask());
+    ASSERT_TRUE(expected_end_mask == node_cast->get_end_mask());
+    ASSERT_TRUE(expected_new_axis_mask == node_cast->get_new_axis_mask());
+    ASSERT_TRUE(expected_shrink_axis_mask == node_cast->get_shrink_axis_mask());
+    ASSERT_TRUE(expected_ellipsis_mask == node_cast->get_ellipsis_mask());
 }
 
 TEST(copy, subtract)
diff --git a/ngraph/test/op_is.cpp b/ngraph/test/op_is.cpp
index 6503487..76c0c30 100644 (file)
@@ -758,15 +758,6 @@ namespace
         EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
     }
 
-    void op_is_Slice()
-    {
-        op::Slice node;
-        EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
-        EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
-        EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
-        EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
-    }
-
     void op_is_Softmax()
     {
         op::Softmax node;
@@ -787,7 +778,7 @@ namespace
 
     void op_is_Split()
     {
-        op::Split node;
+        op::v1::Split node;
         EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
         EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
         EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
diff --git a/ngraph/test/runtime/ie/unit_test.manifest b/ngraph/test/runtime/ie/unit_test.manifest
index 0821515..559f684 100644 (file)
@@ -75,6 +75,16 @@ onnx_model_split_equal_parts_2d
 onnx_model_split_variable_parts_2d
 onnx_top_k_opset_10_const_k
 onnx_top_k_opset_11_const_k_smallest
+split_1d
+split_2d_axis_0
+split_2d_axis_1
+split_3d_axis_0
+split_3d_axis_1
+split_3d_axis_2
+split_4d_axis_0
+split_4d_axis_1
+split_4d_axis_2
+split_4d_axis_3
 
 # [NOT_IMPLEMENTED] Input image format BOOL is not supported yet...
 bool_input_or
diff --git a/ngraph/test/runtime/interpreter/int_executable.hpp b/ngraph/test/runtime/interpreter/int_executable.hpp
index 8352aae..081fb32 100644 (file)
@@ -1353,7 +1353,6 @@ protected:
         case OP_TYPEID::Selu:
         case OP_TYPEID::ShuffleChannels:
         case OP_TYPEID::SpaceToDepth:
-        case OP_TYPEID::Split:
         case OP_TYPEID::SquaredDifference:
         case OP_TYPEID::TensorIterator:
         case OP_TYPEID::Tile:
@@ -1389,12 +1388,12 @@ protected:
         case OP_TYPEID::ShapeOf_v3:
         case OP_TYPEID::ShapeOf:
         case OP_TYPEID::Softmax:
+        case OP_TYPEID::Split_v1:
         case OP_TYPEID::Squeeze:
         case OP_TYPEID::Sum:
         case OP_TYPEID::Subtract:
         case OP_TYPEID::Unsqueeze:
         case OP_TYPEID::Xor:
-        case OP_TYPEID::Slice:
             // These ops are handled by op evaluators so nothing to do
             break;
 #if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8)
diff --git a/ngraph/test/runtime/interpreter/opset_int_tbl.hpp b/ngraph/test/runtime/interpreter/opset_int_tbl.hpp
index 9c7711b..5a3bf7f 100644 (file)
@@ -34,6 +34,7 @@ NGRAPH_OP(LogicalXor, op::v1)
 NGRAPH_OP(LogicalNot, op::v1)
 NGRAPH_OP(GatherTree, op::v1)
 NGRAPH_OP(OneHot, op::v1)
+NGRAPH_OP(Split, op::v1)
 NGRAPH_OP(Reshape, op::v1)
 NGRAPH_OP(Reverse, op::v1)
 #undef ID_SUFFIX
diff --git a/ngraph/test/runtime/op/group_conv.cpp b/ngraph/test/runtime/op/group_conv.cpp
index 587a001..8fd9860 100644 (file)
@@ -23,7 +23,6 @@
 #include "ngraph/builder/split.hpp"
 #include "ngraph/op/concat.hpp"
 #include "ngraph/op/convolution.hpp"
-#include "ngraph/op/slice.hpp"
 #include "ngraph/validation_util.hpp"
 
 using namespace std;
@@ -199,9 +198,9 @@ OutputVector op::v0::GroupConvolution::decompose_op() const
     NodeVector convolution_nodes;
 
     // slice data
-    auto sliced_data = builder::split(data, get_groups(), 1);
+    auto sliced_data = builder::opset1::split(data, get_groups(), 1);
     // slice filters
-    auto sliced_filters = builder::split(filters, get_groups(), 0);
+    auto sliced_filters = builder::opset1::split(filters, get_groups(), 0);
     auto shape = Shape(std::next(std::begin(filters_shape), 1), std::end(filters_shape));
     for (std::size_t group{0}; group < get_groups(); ++group)
     {
@@ -306,9 +305,9 @@ OutputVector op::v0::GroupConvolutionBackpropData::decompose_op() const
     // slice data shape
     data_shape[1] /= groups;
     // slice delta
-    auto sliced_delta = builder::split(output_delta, groups, 1);
+    auto sliced_delta = builder::opset1::split(output_delta, groups, 1);
     // slice filters
-    auto sliced_filters = builder::split(filters, groups, 0);
+    auto sliced_filters = builder::opset1::split(filters, groups, 0);
 
     auto num_spatials = get_window_movement_strides().size();
 
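
The group convolution decomposition now slices data and filters with builder::opset1::split, which builds the v1 split ops under the hood instead of the removed v0 ones. A minimal sketch of that helper with illustrative shapes (2 groups, channel axis 1 for data, output-channel axis 0 for filters):

    // Sketch: per-group slicing with the opset1 split builder.
    auto data    = make_shared<op::Parameter>(element::f32, Shape{1, 8, 5, 5});
    auto filters = make_shared<op::Parameter>(element::f32, Shape{8, 4, 3, 3});
    size_t groups = 2;

    OutputVector sliced_data    = builder::opset1::split(data, groups, 1);
    OutputVector sliced_filters = builder::opset1::split(filters, groups, 0);

    // sliced_data[g] and sliced_filters[g] feed one convolution per group;
    // the per-group outputs are concatenated back along the channel axis.
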
index 682532a..2167fe9 100644 (file)
@@ -117,10 +117,8 @@ NGRAPH_OP(Sigmoid, ngraph::op)
 NGRAPH_OP(Sign, ngraph::op)
 NGRAPH_OP(Sin, ngraph::op)
 NGRAPH_OP(Sinh, ngraph::op)
-NGRAPH_OP(Slice, ngraph::op)
 NGRAPH_OP(Softmax, ngraph::op)
 NGRAPH_OP(SpaceToDepth, ngraph::op)
-NGRAPH_OP(Split, ngraph::op)
 NGRAPH_OP(Sqrt, ngraph::op)
 NGRAPH_OP(SquaredDifference, ngraph::op)
 NGRAPH_OP(Squeeze, ngraph::op)
index ac0406d..0f82643 100644 (file)
@@ -20,7 +20,6 @@
 #include "ngraph/builder/reshape.hpp"
 #include "ngraph/op/broadcast.hpp"
 #include "ngraph/op/range.hpp"
-#include "ngraph/op/slice.hpp"
 #include "ngraph/op/transpose.hpp"
 #include "ngraph/pattern/matcher.hpp"
 #include "ngraph/pattern/op/label.hpp"
index e450756..87453ac 100644 (file)
@@ -360,86 +360,6 @@ namespace opset0_downgrade
         return replacement_node;
     }
 
-    shared_ptr<Node> op_cast(shared_ptr<op::v1::StridedSlice> node)
-    {
-        auto convert_mask_to_axes = [](const std::vector<int64_t>& mask) {
-            AxisSet axes{};
-            for (auto i = 0; i < mask.size(); ++i)
-            {
-                if (mask[i] == 1)
-                {
-                    axes.emplace(i);
-                }
-            }
-            return axes;
-        };
-
-        const auto input_data = node->input_value(0);
-        const auto input_data_pshape = input_data.get_partial_shape();
-
-        NGRAPH_CHECK(input_data_pshape.is_static(),
-                     "Unable to convert StridedSlice:v1 to Slice:v0 "
-                     "if input rank is not static. Node: ",
-                     *node);
-
-        const auto begin_const =
-            as_type_ptr<op::Constant>(node->input_value(1).get_node_shared_ptr());
-        const auto end_const =
-            as_type_ptr<op::Constant>(node->input_value(2).get_node_shared_ptr());
-        const auto strides = as_type_ptr<op::Constant>(node->input_value(3).get_node_shared_ptr());
-
-        NGRAPH_CHECK(begin_const && end_const && strides,
-                     "Unable to convert StridedSlice:v1 to Slice:v0 "
-                     "if begin, end or strides are not constant. Node: ",
-                     *node);
-
-        SlicePlan p = make_slice_plan(input_data_pshape.to_shape(),
-                                      begin_const->get_vector<int64_t>(),
-                                      end_const->get_vector<int64_t>(),
-                                      strides->get_vector<int64_t>(),
-                                      convert_mask_to_axes(node->get_begin_mask()),
-                                      convert_mask_to_axes(node->get_end_mask()),
-                                      convert_mask_to_axes(node->get_new_axis_mask()),
-                                      convert_mask_to_axes(node->get_shrink_axis_mask()),
-                                      convert_mask_to_axes(node->get_ellipsis_mask()));
-
-        shared_ptr<Node> replacement_node =
-            make_shared<op::v0::Slice>(input_data,
-                                       Coordinate(p.begins.begin(), p.begins.end()),
-                                       Coordinate(p.ends.begin(), p.ends.end()),
-                                       Strides(p.strides.begin(), p.strides.end()));
-
-        if (p.reshape_in_shape != p.reshape_out_shape)
-        {
-            auto shape_pattern = op::Constant::create(
-                element::u64, {p.reshape_out_shape.size()}, p.reshape_out_shape);
-            replacement_node = make_shared<op::v1::Reshape>(replacement_node, shape_pattern, false);
-        }
-
-        if (!p.reverse_axes.empty())
-        {
-            replacement_node = make_shared<op::v1::Reverse>(
-                replacement_node,
-                op::Constant::create(
-                    element::u64, {p.reverse_axes.size()}, p.reverse_axes.to_vector()),
-                op::v1::Reverse::Mode::INDEX);
-        }
-
-        replace_node(node, replacement_node);
-        return replacement_node;
-    }
-
-    shared_ptr<Node> op_cast(shared_ptr<op::v1::Split> node)
-    {
-        const auto num_splits = node->get_num_splits();
-
-        auto replacement_node =
-            make_shared<op::v0::Split>(node->input_value(0), node->input_value(1), num_splits);
-
-        replace_node(node, replacement_node);
-        return replacement_node;
-    }
-
     shared_ptr<Node> op_cast(shared_ptr<op::v1::Subtract> node)
     {
         return op_cast_binary_elementwise_node<op::v0::Subtract, op::v1::Subtract>(node);
@@ -471,25 +391,6 @@ namespace opset0_downgrade
         return replacement_node;
     }
 
-    shared_ptr<Node> op_cast(shared_ptr<op::v1::VariadicSplit> node)
-    {
-        const auto split_lengths = node->input_value(2).get_node_shared_ptr();
-
-        NGRAPH_CHECK(op::is_constant(split_lengths),
-                     "Unable to convert VariadicSplit:v1 to Split:v0 "
-                     "if 'split_lengths' input is not constant. Node: ",
-                     *node);
-
-        const auto splits = as_type_ptr<op::Constant>(split_lengths)->cast_vector<int64_t>();
-        const std::vector<size_t> splits_unsigned{splits.begin(), splits.end()};
-
-        auto replacement_node =
-            make_shared<op::v0::Split>(node->input_value(0), node->input_value(1), splits_unsigned);
-
-        replace_node(node, replacement_node);
-        return replacement_node;
-    }
-
     using DispatchMap = map<NodeTypeInfo, std::function<bool(shared_ptr<Node> node)>>;
 
     template <typename T>
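
With the StridedSlice-to-Slice and VariadicSplit-to-Split downgrade paths removed, graphs have to stay on the v1 ops. As a point of reference, the v1::StridedSlice equivalent of the old v0::Slice(data, Coordinate{2, 1}, Coordinate{5, 7}, Strides{1, 1}) looks roughly like this (zeroed masks make begin and end literal):

    // Sketch: v1 replacement for a plain v0::Slice on a {6, 8} input.
    auto data    = make_shared<op::Parameter>(element::f32, Shape{6, 8});
    auto begin   = op::Constant::create(element::i64, Shape{2}, {2, 1});
    auto end     = op::Constant::create(element::i64, Shape{2}, {5, 7});
    auto strides = op::Constant::create(element::i64, Shape{2}, {1, 1});
    std::vector<int64_t> no_mask(2, 0);
    auto slice = make_shared<op::v1::StridedSlice>(data, begin, end, strides, no_mask, no_mask);
    // slice->get_output_shape(0) == Shape{3, 6}
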
index 32013c9..0cdf6eb 100644 (file)
@@ -302,57 +302,6 @@ namespace opset1_upgrade
         return replacement_node;
     }
 
-    shared_ptr<Node> op_cast(shared_ptr<op::Slice> node)
-    {
-        const auto data = node->input_value(0);
-        const auto begin = op::Constant::create(
-            element::i64, Shape{node->get_lower_bounds().size()}, node->get_lower_bounds());
-        const auto end = op::Constant::create(
-            element::i64, Shape{node->get_upper_bounds().size()}, node->get_upper_bounds());
-        const auto strides = op::Constant::create(
-            element::i64, Shape{node->get_strides().size()}, node->get_strides());
-        int64_t input_size = node->get_lower_bounds().size();
-
-        auto replacement_node = make_shared<op::v1::StridedSlice>(data,
-                                                                  begin,
-                                                                  end,
-                                                                  strides,
-                                                                  vector<int64_t>(input_size, 0),
-                                                                  vector<int64_t>(input_size, 0));
-
-        replace_node(node, replacement_node);
-        return replacement_node;
-    }
-
-    shared_ptr<Node> op_cast(shared_ptr<op::Split> node)
-    {
-        const auto& splits_vec = node->get_splits();
-        const auto first_elem = splits_vec.front();
-
-        const bool split_evenly =
-            std::all_of(splits_vec.begin(), splits_vec.end(), [first_elem](const size_t split) {
-                return split == first_elem;
-            });
-
-        std::shared_ptr<Node> replacement_node;
-        if (split_evenly)
-        {
-            replacement_node = make_shared<op::v1::Split>(
-                node->input_value(0), node->input_value(1), splits_vec.front());
-        }
-        else
-        {
-            const auto split_lengths =
-                ngraph::op::Constant::create(element::u64, Shape{splits_vec.size()}, splits_vec);
-
-            replacement_node = make_shared<op::v1::VariadicSplit>(
-                node->input_value(0), node->input_value(1), split_lengths);
-        }
-
-        replace_node(node, replacement_node);
-        return replacement_node;
-    }
-
     shared_ptr<Node> op_cast(shared_ptr<op::Subtract> node)
     {
         return op_cast_binary_elementwise_node<op::v0::Subtract, op::v1::Subtract>(node);
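
The deleted upgrade casts document the intended mapping for code that still constructs the old ops: equal parts map to op::v1::Split, unequal parts to op::v1::VariadicSplit with an explicit split_lengths constant. Built by hand, that is roughly:

    // Sketch: direct construction of the v1 split ops on a {2, 6} input, axis 1.
    auto data = make_shared<op::Parameter>(element::f32, Shape{2, 6});
    auto axis = op::Constant::create(element::i64, Shape{}, {1});

    // Equal parts: three outputs of Shape{2, 2}.
    auto even = make_shared<op::v1::Split>(data, axis, 3);

    // Unequal parts: outputs of Shape{2, 4} and Shape{2, 2}.
    auto lengths = op::Constant::create(element::i64, Shape{2}, {4, 2});
    auto uneven  = make_shared<op::v1::VariadicSplit>(data, axis, lengths);
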
diff --git a/ngraph/test/type_prop/slice.cpp b/ngraph/test/type_prop/slice.cpp
deleted file mode 100644 (file)
index b18e8da..0000000
+++ /dev/null
@@ -1,438 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
-#include "util/type_prop.hpp"
-
-NGRAPH_SUPPRESS_DEPRECATED_START
-
-using namespace std;
-using namespace ngraph;
-
-TEST(type_prop, slice_deduce_vector)
-{
-    auto param = make_shared<op::Parameter>(element::f32, Shape{6});
-    auto sl = make_shared<op::Slice>(param, Coordinate{2}, Coordinate{5});
-    ASSERT_EQ(sl->get_element_type(), element::f32);
-    ASSERT_EQ(sl->get_shape(), (Shape{3}));
-}
-
-TEST(type_prop, slice_deduce_matrix)
-{
-    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 8});
-    auto sl = make_shared<op::Slice>(param, Coordinate{2, 1}, Coordinate{5, 7});
-    ASSERT_EQ(sl->get_element_type(), element::f32);
-    ASSERT_EQ(sl->get_shape(), (Shape{3, 6}));
-}
-
-TEST(type_prop, slice_deduce_matrix_strided)
-{
-    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 8});
-    auto sl = make_shared<op::Slice>(param, Coordinate{2, 1}, Coordinate{5, 7}, Strides{3, 2});
-    ASSERT_EQ(sl->get_element_type(), element::f32);
-    ASSERT_EQ(sl->get_shape(), (Shape{1, 3}));
-}
-
-TEST(type_prop, slice_deduce_matrix_strided_uneven)
-{
-    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 8});
-    auto sl = make_shared<op::Slice>(param, Coordinate{2, 1}, Coordinate{5, 7}, Strides{3, 4});
-    ASSERT_EQ(sl->get_element_type(), element::f32);
-    ASSERT_EQ(sl->get_shape(), (Shape{1, 2}));
-}
-
-TEST(type_prop, slice_deduce_vector_edge)
-{
-    auto param = make_shared<op::Parameter>(element::f32, Shape{6});
-    auto sl = make_shared<op::Slice>(param, Coordinate{0}, Coordinate{6});
-    ASSERT_EQ(sl->get_element_type(), element::f32);
-    ASSERT_EQ(sl->get_shape(), (Shape{6}));
-}
-
-TEST(type_prop, slice_deduce_matrix_edge)
-{
-    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 8});
-    auto sl = make_shared<op::Slice>(param, Coordinate{0, 0}, Coordinate{6, 8});
-    ASSERT_EQ(sl->get_element_type(), element::f32);
-    ASSERT_EQ(sl->get_shape(), (Shape{6, 8}));
-}
-
-TEST(type_prop, slice_deduce_matrix_zero_cols)
-{
-    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 8});
-    auto sl = make_shared<op::Slice>(param, Coordinate{0, 0}, Coordinate{6, 0});
-    ASSERT_EQ(sl->get_element_type(), element::f32);
-    ASSERT_EQ(sl->get_shape(), (Shape{6, 0}));
-}
-
-TEST(type_prop, slice_deduce_matrix_zero_zero)
-{
-    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 8});
-    auto sl = make_shared<op::Slice>(param, Coordinate{0, 0}, Coordinate{0, 0});
-    ASSERT_EQ(sl->get_element_type(), element::f32);
-    ASSERT_EQ(sl->get_shape(), (Shape{0, 0}));
-}
-
-TEST(type_prop, slice_deduce_vector_invalid_strides)
-{
-    auto param = make_shared<op::Parameter>(element::f32, Shape{6});
-    try
-    {
-        auto sl = make_shared<op::Slice>(param, Coordinate{0}, Coordinate{7}, Strides{1, 2});
-        // Should have thrown, so fail if it didn't
-        FAIL() << "Invalid slice strides not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(
-            error.what(),
-            std::string("Ranks of lower bounds (Coordinate{0}), upper bounds "
-                        "(Coordinate{7}) and strides (Strides{1, 2}) do not match"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, slice_deduce_vector_edge_upper_oob)
-{
-    auto param = make_shared<op::Parameter>(element::f32, Shape{6});
-    try
-    {
-        auto sl = make_shared<op::Slice>(param, Coordinate{0}, Coordinate{7});
-        // Should have thrown, so fail if it didn't
-        FAIL() << "Upper bound out of range not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             std::string("Upper bound for slice at axis 0 is out of range"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, slice_deduce_matrix_edge_upper_oob)
-{
-    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 8});
-    try
-    {
-        auto sl = make_shared<op::Slice>(param, Coordinate{0, 0}, Coordinate{6, 9});
-        // Should have thrown, so fail if it didn't
-        FAIL() << "Upper bound out of range not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             std::string("Upper bound for slice at axis 1 is out of range"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, slice_deduce_vector_lower_above_upper)
-{
-    auto param = make_shared<op::Parameter>(element::f32, Shape{6});
-    try
-    {
-        auto sl = make_shared<op::Slice>(param, Coordinate{3}, Coordinate{2});
-        // Should have thrown, so fail if it didn't
-        FAIL() << "Lower bound above upper not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(
-            error.what(),
-            std::string("Lower bound for slice is greater than upper bound at axis 0"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, slice_deduce_matrix_lower_above_upper)
-{
-    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 8});
-    try
-    {
-        auto sl = make_shared<op::Slice>(param, Coordinate{0, 5}, Coordinate{6, 4});
-        // Should have thrown, so fail if it didn't
-        FAIL() << "Lower bound above upper not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(
-            error.what(),
-            std::string("Lower bound for slice is greater than upper bound at axis 1"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, slice_deduce_matrix_lower_missing)
-{
-    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 8});
-    try
-    {
-        auto sl = make_shared<op::Slice>(param, Coordinate{0}, Coordinate{5, 5});
-        // Should have thrown, so fail if it didn't
-        FAIL() << "Missing lower bound coordinate not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(
-            error.what(),
-            std::string("Ranks of lower bounds (Coordinate{0}), upper bounds "
-                        "(Coordinate{5, 5}) and strides (Strides{1}) do not match"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, slice_deduce_matrix_upper_missing)
-{
-    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 8});
-    try
-    {
-        auto sl = make_shared<op::Slice>(param, Coordinate{0, 0}, Coordinate{5});
-        // Should have thrown, so fail if it didn't
-        FAIL() << "Missing upper bound coordinate not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(
-            error.what(),
-            std::string("Ranks of lower bounds (Coordinate{0, 0}), upper bounds "
-                        "(Coordinate{5}) and strides (Strides{1, 1}) do not match"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, slice_deduce_matrix_lower_extra)
-{
-    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 8});
-    try
-    {
-        auto sl = make_shared<op::Slice>(param, Coordinate{0, 0, 0}, Coordinate{5, 5});
-        // Should have thrown, so fail if it didn't
-        FAIL() << "Extra lower bound coordinate not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             std::string("Ranks of lower bounds (Coordinate{0, 0, "
-                                         "0}), upper bounds (Coordinate{5, 5}) and "
-                                         "strides (Strides{1, 1, 1}) do not match"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, slice_deduce_matrix_upper_extra)
-{
-    auto param = make_shared<op::Parameter>(element::f32, Shape{6, 8});
-    try
-    {
-        auto sl = make_shared<op::Slice>(param, Coordinate{0, 0}, Coordinate{5, 5, 5});
-        // Should have thrown, so fail if it didn't
-        FAIL() << "Extra upper bound coordinate not detected";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             std::string("Ranks of lower bounds (Coordinate{0, 0}), "
-                                         "upper bounds (Coordinate{5, 5, 5}) and "
-                                         "strides (Strides{1, 1}) do not match"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, slice_partial_arg_input_rank_dynamic_attribs_ok)
-{
-    PartialShape input_shape{PartialShape::dynamic()};
-    Coordinate lower_bounds{1, 2, 3, 4};
-    Coordinate upper_bounds{1, 3, 5, 7};
-    Strides strides{1, 1, 1, 2};
-
-    auto param = make_shared<op::Parameter>(element::f32, input_shape);
-    auto sl = make_shared<op::Slice>(param, lower_bounds, upper_bounds, strides);
-
-    ASSERT_EQ(sl->get_element_type(), element::f32);
-    ASSERT_EQ(sl->get_shape(), (Shape{0, 1, 2, 2}));
-}
-
-TEST(type_prop, slice_partial_arg_rank_dynamic_attribs_rank_mismatch)
-{
-    PartialShape input_shape{PartialShape::dynamic()};
-    Coordinate lower_bounds{1, 2, 3, 4};
-    Coordinate upper_bounds{1, 3, 5};
-    Strides strides{1, 1, 1, 2};
-
-    auto param = make_shared<op::Parameter>(element::f32, input_shape);
-    try
-    {
-        auto sl = make_shared<op::Slice>(param, lower_bounds, upper_bounds, strides);
-        // Should have thrown, so fail if it didn't
-        FAIL() << "Mismatch of lower-bounds/upper-bounds/strides ranks not detected (argument "
-                  "rank-dynamic)";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(
-            error.what(),
-            std::string("Ranks of lower bounds (Coordinate{1, 2, 3, 4}), upper bounds "
-                        "(Coordinate{1, 3, 5}) and strides (Strides{1, 1, 1, 2}) do not match"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, slice_partial_arg_rank_dynamic_attribs_bounds_crossing)
-{
-    PartialShape input_shape{PartialShape::dynamic()};
-    Coordinate lower_bounds{1, 2, 3, 8};
-    Coordinate upper_bounds{1, 3, 5, 7};
-    Strides strides{1, 1, 1, 2};
-
-    auto param = make_shared<op::Parameter>(element::f32, input_shape);
-    try
-    {
-        auto sl = make_shared<op::Slice>(param, lower_bounds, upper_bounds, strides);
-        // Should have thrown, so fail if it didn't
-        FAIL() << "Crossing lower/upper bounds not detected (argument rank-dynamic)";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(
-            error.what(),
-            std::string("Lower bound for slice is greater than upper bound at axis 3 (lower "
-                        "bounds: Coordinate{1, 2, 3, 8}, upper bounds: Coordinate{1, 3, 5, 7})"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, slice_partial_arg_rank_static_dynamic_ok)
-{
-    PartialShape input_shape{
-        Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()};
-    Coordinate lower_bounds{1, 2, 3, 4};
-    Coordinate upper_bounds{1, 3, 5, 7};
-    Strides strides{1, 1, 1, 2};
-
-    auto param = make_shared<op::Parameter>(element::f32, input_shape);
-    auto sl = make_shared<op::Slice>(param, lower_bounds, upper_bounds, strides);
-
-    ASSERT_EQ(sl->get_element_type(), element::f32);
-    ASSERT_EQ(sl->get_shape(), (Shape{0, 1, 2, 2}));
-}
-
-TEST(type_prop, slice_partial_arg_rank_static_dynamic_some_dims_known_ok)
-{
-    PartialShape input_shape{2, 4, 10, Dimension::dynamic()};
-    Coordinate lower_bounds{1, 2, 3, 4};
-    Coordinate upper_bounds{1, 3, 5, 7};
-    Strides strides{1, 1, 1, 2};
-
-    auto param = make_shared<op::Parameter>(element::f32, input_shape);
-    auto sl = make_shared<op::Slice>(param, lower_bounds, upper_bounds, strides);
-
-    ASSERT_EQ(sl->get_element_type(), element::f32);
-    ASSERT_EQ(sl->get_shape(), (Shape{0, 1, 2, 2}));
-}
-
-TEST(type_prop, slice_partial_arg_rank_static_dynamic_attribs_rank_mismatches_arg)
-{
-    PartialShape input_shape{Dimension::dynamic(),
-                             Dimension::dynamic(),
-                             Dimension::dynamic(),
-                             Dimension::dynamic(),
-                             Dimension::dynamic()};
-    Coordinate lower_bounds{1, 2, 3, 4};
-    Coordinate upper_bounds{1, 3, 5, 7};
-    Strides strides{1, 1, 1, 2};
-
-    auto param = make_shared<op::Parameter>(element::f32, input_shape);
-    try
-    {
-        auto sl = make_shared<op::Slice>(param, lower_bounds, upper_bounds, strides);
-        // Should have thrown, so fail if it didn't
-        FAIL() << "Mismatch of attrib ranks with arg ranks not detected (argument rank-static "
-                  "dynamic)";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             std::string("Input rank does not match the "
-                                         "rank of the lower bounds (Coordinate{1, 2, "
-                                         "3, 4}), upper bounds (Coordinate{1, 3, 5, "
-                                         "7}), and strides (Strides{1, 1, 1, 2})"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
-TEST(type_prop, slice_partial_arg_rank_static_dynamic_some_dims_known_upper_bounds_oob)
-{
-    PartialShape input_shape{2, 2, 10, Dimension::dynamic()};
-    Coordinate lower_bounds{1, 2, 3, 4};
-    Coordinate upper_bounds{1, 3, 5, 7};
-    Strides strides{1, 1, 1, 2};
-
-    auto param = make_shared<op::Parameter>(element::f32, input_shape);
-    try
-    {
-        auto sl = make_shared<op::Slice>(param, lower_bounds, upper_bounds, strides);
-        // Should have thrown, so fail if it didn't
-        FAIL() << "Upper bounds out of bounds not detected (argument rank-static dynamic)";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             std::string("Upper bound for slice at axis 1 is out of "
-                                         "range (upper bounds: Coordinate{1, 3, 5, "
-                                         "7}, argument shape: {2,2,10,?})"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
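
The deleted file exercised v0::Slice shape inference. For reference, the slice_deduce_vector case translates to op::v1::StridedSlice roughly as follows (illustrative; not a test added by this patch):

    // Sketch: v1 counterpart of slice_deduce_vector.
    auto param   = make_shared<op::Parameter>(element::f32, Shape{6});
    auto begin   = op::Constant::create(element::i64, Shape{1}, {2});
    auto end     = op::Constant::create(element::i64, Shape{1}, {5});
    auto strides = op::Constant::create(element::i64, Shape{1}, {1});
    std::vector<int64_t> mask{0};
    auto sl = make_shared<op::v1::StridedSlice>(param, begin, end, strides, mask, mask);
    ASSERT_EQ(sl->get_element_type(), element::f32);
    ASSERT_EQ(sl->get_shape(), (Shape{3}));
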
index 70d431b..8abbe59 100644 (file)
@@ -29,22 +29,22 @@ TEST(type_prop, split)
 
     try
     {
-        const std::vector<size_t> splits = {1, 6}; // should sum up to 6
         const auto axis = op::Constant::create(element::i64, Shape{}, {1});
-        const auto split = make_shared<op::Split>(data, axis, splits);
+        const auto split = make_shared<op::v1::Split>(data, axis, 7);
         FAIL() << "Split node was created with incorrect data.";
     }
     catch (const NodeValidationFailure& error)
     {
         EXPECT_HAS_SUBSTRING(
-            error.what(), std::string("has to be equal to the sum of splits passed to the op: 7"));
+            error.what(),
+            std::string("The input tensor's dimension pointed by the 'axis' parameter: 6 has to be "
+                        "a multiple of the 'num_splits' attribute value: 7"));
     }
 
     try
     {
-        const std::vector<size_t> splits = {4, 2};
         const auto axis = op::Constant::create(element::i64, Shape{}, {-5});
-        const auto split = make_shared<op::Split>(data, axis, splits); // invalid axis
+        const auto split = make_shared<op::v1::Split>(data, axis, 4); // invalid axis
         FAIL() << "Split node was created with incorrect data.";
     }
     catch (const ngraph_error& error)
@@ -53,7 +53,7 @@ TEST(type_prop, split)
     }
 
     const auto axis = op::Constant::create(element::i64, Shape{}, {1});
-    const auto split = make_shared<op::Split>(data, axis, 2);
+    const auto split = make_shared<op::v1::Split>(data, axis, 2);
     EXPECT_EQ(split->outputs().size(), 2);
     EXPECT_EQ(split->get_output_shape(0), (Shape{2, 3}));
     EXPECT_EQ(split->get_output_shape(1), (Shape{2, 3}));
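
op::v1::Split only handles equal parts and accepts a negative axis within [-rank, rank - 1], which is why -5 is rejected above for a rank-2 input. For an in-range negative axis the behaviour matches the positive form, as in the illustrative sketch below; uneven splits such as the old {4, 2} case go through op::v1::VariadicSplit instead:

    // Sketch: negative axis normalization; -1 resolves to axis 1 for rank-2 data.
    auto data2    = make_shared<op::Parameter>(element::i32, Shape{2, 6});
    auto neg_axis = op::Constant::create(element::i64, Shape{}, {-1});
    auto by_neg   = make_shared<op::v1::Split>(data2, neg_axis, 2);
    // by_neg->get_output_shape(0) == Shape{2, 3}
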
@@ -64,38 +64,17 @@ TEST(type_prop, split)
 TEST(type_prop, split_axis_must_be_scalar)
 {
     const auto data = make_shared<op::Parameter>(element::i32, Shape{2, 6});
-    const std::vector<size_t> splits = {1, 6};
     const auto axis = op::Constant::create(element::i64, Shape{2}, {0, 1});
 
     try
     {
-        const auto split = make_shared<op::Split>(data, axis, splits);
+        const auto split = make_shared<op::v1::Split>(data, axis, 1);
         FAIL() << "Incorrect axis of Split not detected.";
     }
     catch (const NodeValidationFailure& error)
     {
-        EXPECT_HAS_SUBSTRING(error.what(), std::string("The 'axis' input node must be scalar"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason.";
-    }
-}
-
-TEST(type_prop, split_axis_must_be_constant)
-{
-    const auto data = make_shared<op::Parameter>(element::i32, Shape{2, 6});
-    const std::vector<size_t> splits = {1, 6};
-    const auto axis = make_shared<op::Parameter>(element::i32, Shape{});
-
-    try
-    {
-        const auto split = make_shared<op::Split>(data, axis, splits);
-        FAIL() << "Not constant axis of Split not detected.";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(), std::string("The 'axis' input node must be constant"));
+        EXPECT_HAS_SUBSTRING(error.what(),
+                             std::string("The 'axis' input is expected to be a scalar"));
     }
     catch (...)
     {