v1::Pad evaluator (#1771)
author Tomasz Dołbniak <tomasz.dolbniak@intel.com>
Tue, 18 Aug 2020 10:43:54 +0000 (12:43 +0200)
committer GitHub <noreply@github.com>
Tue, 18 Aug 2020 10:43:54 +0000 (13:43 +0300)
* v1::Pad reference implementation

* UT fix: pad_negative_exterior_1d

* UT fix: pad_negative_exterior_1d_check_limits & pad_edge_1d

* Code formatting

* UT fix: pad_edge_1d_top_neg & pad_edge_1d_top_neg_bigger_than_tensor

* More Pad UT fixes

* Pad UT fixes: REFLECT mode

* Fix all Pad UTs

* Switch Pad evaluation in INT backend

* Non-template solution to v1::Pad::evaluate

* Always create v1::Pad with 4 inputs

* VS compilation error fix

* Python test fix

* Remove the v0::Pad constant folding pass

* Some extra checks in v1::Pad evaluator

* Code formatting

* Remove an obsolete CF test
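
Note: the net effect of this series is that v1::Pad computes its result directly through the new evaluator. A minimal construction sketch (shapes and values are illustrative; the calls mirror the updated tests below):

    #include "ngraph/ngraph.hpp"

    using namespace ngraph;

    // Build a graph whose only op is a v1::Pad with constant paddings
    // and an explicit scalar pad value.
    std::shared_ptr<Function> make_padded_graph()
    {
        const auto data = std::make_shared<op::Parameter>(element::f32, Shape{6});
        const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {2});
        const auto pads_end = op::Constant::create(element::i64, Shape{1}, {3});
        const auto pad_value = op::Constant::create(element::f32, Shape{}, {0});

        const auto pad = std::make_shared<op::v1::Pad>(
            data, pads_begin, pads_end, pad_value, op::PadMode::CONSTANT);
        return std::make_shared<Function>(pad, ParameterVector{data});
    }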

ngraph/core/include/ngraph/op/pad.hpp
ngraph/core/include/ngraph/pass/constant_folding.hpp
ngraph/core/reference/include/ngraph/runtime/reference/pad.hpp
ngraph/core/src/op/pad.cpp
ngraph/core/src/pass/constant_folding_pad.cpp [deleted file]
ngraph/core/src/runtime/reference/pad.cpp [new file with mode: 0644]
ngraph/test/backend/pad.in.cpp
ngraph/test/constant_folding.cpp
ngraph/test/runtime/interpreter/int_executable.hpp
ngraph/test/runtime/pass/opset0_downgrade.cpp
ngraph/test/runtime/pass/opset1_upgrade.cpp

diff --git a/ngraph/core/include/ngraph/op/pad.hpp b/ngraph/core/include/ngraph/op/pad.hpp
index eae8aca..099e1ba 100644
@@ -148,6 +148,9 @@ namespace ngraph
                 /// \return The padding mode.
                 PadMode get_pad_mode() const { return m_pad_mode; }
                 void set_pad_mode(PadMode pad_mode) { m_pad_mode = pad_mode; }
+                bool evaluate(const HostTensorVector& outputs,
+                              const HostTensorVector& inputs) const override;
+
             private:
                 PadMode m_pad_mode;
             };
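
The evaluate hook overridden here is the generic Node virtual: any op that overrides it becomes computable on host tensors (and thus foldable). A schematic override, with MyOp as a hypothetical op, uses the same signature:

    // Hypothetical op illustrating the override pattern added above.
    bool MyOp::evaluate(const HostTensorVector& outputs,
                        const HostTensorVector& inputs) const
    {
        // ... fill outputs[0] from inputs ...
        return true; // signal that evaluation was handled
    }
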
diff --git a/ngraph/core/include/ngraph/pass/constant_folding.hpp b/ngraph/core/include/ngraph/pass/constant_folding.hpp
index b5bca10..8350ddc 100644
@@ -38,7 +38,6 @@ public:
     {
         m_cfmap = cfmap;
         m_enable_shape_inference = true;
-        construct_constant_pad();
         construct_constant_quantize();
         construct_constant_dequantize();
         construct_constant_convert();
@@ -52,7 +51,6 @@ public:
     }
 
 private:
-    void construct_constant_pad();
     void construct_constant_quantize();
     void construct_constant_dequantize();
     void construct_constant_convert();
diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/pad.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/pad.hpp
index f9ada12..dd73a18 100644
@@ -29,176 +29,15 @@ namespace ngraph
     {
         namespace reference
         {
-            template <typename T>
-            void pad(const T* arg0,
-                     const T* arg1,
-                     T* out,
-                     const Shape& arg0_shape,
+            void pad(const char* data,
+                     const char* pad_value,
+                     char* out,
+                     const size_t elem_size,
+                     const Shape& data_shape,
                      const Shape& out_shape,
                      const CoordinateDiff& padding_below,
                      const CoordinateDiff& padding_above,
-                     op::PadMode pad_mode)
-            {
-                Coordinate input_start(arg0_shape.size(), 0); // start at (0,0,...,0)
-                Coordinate input_end = out_shape; // end at (d'0,d'1,...,d'n), the outer corner of
-                                                  // the post-padding shape
-
-                Strides input_strides(arg0_shape.size(), 1);
-
-                AxisVector input_axis_order(arg0_shape.size());
-                for (size_t i = 0; i < arg0_shape.size(); i++)
-                {
-                    input_axis_order[i] = i;
-                }
-
-                CoordinateTransform input_transform(arg0_shape,
-                                                    input_start,
-                                                    input_end,
-                                                    input_strides,
-                                                    input_axis_order,
-                                                    padding_below,
-                                                    padding_above);
-                CoordinateTransform output_transform(out_shape);
-
-                CoordinateTransform::Iterator output_it = output_transform.begin();
-
-                NGRAPH_CHECK(shape_size(input_transform.get_target_shape()) ==
-                             shape_size(output_transform.get_target_shape()));
-
-                for (const Coordinate& in_coord : input_transform)
-                {
-                    const Coordinate& out_coord = *output_it;
-
-                    T v(0);
-
-                    switch (pad_mode)
-                    {
-                    case op::PadMode::CONSTANT:
-                        // If the coordinate is out of bounds, substitute *arg1.
-                        v = input_transform.has_source_coordinate(in_coord)
-                                ? arg0[input_transform.index(in_coord)]
-                                : *arg1;
-                        break;
-                    case op::PadMode::EDGE:
-                    {
-                        Coordinate c = in_coord; // have to copy because in_coord is const
-
-                        // Truncate each out-of-bound dimension.
-                        for (size_t i = 0; i < c.size(); i++)
-                        {
-                            if (static_cast<ptrdiff_t>(c[i]) < padding_below[i])
-                            {
-                                c[i] = padding_below[i];
-                            }
-
-                            if (static_cast<ptrdiff_t>(c[i]) >=
-                                (padding_below[i] + static_cast<ptrdiff_t>(arg0_shape[i])))
-                            {
-                                c[i] = static_cast<size_t>(
-                                    padding_below[i] + static_cast<ptrdiff_t>(arg0_shape[i]) - 1);
-                            }
-                        }
-                        v = arg0[input_transform.index(c)];
-                        break;
-                    }
-                    case op::PadMode::REFLECT:
-                    {
-                        // clang-format off
-                        // The algorithm here is a bit complicated because if the padding is
-                        // bigger than the tensor, we may reflect multiple times.
-                        //
-                        // Example:
-                        //
-                        // Input shape:     [2]
-                        // Padding:         6 below, 6 above
-                        // Output shape:    [14]
-                        //
-                        // Input:                       a b
-                        // Expected output: a b a b a b a b a b a b a b
-                        //
-                        // Computation for coordinate 13 of output:
-                        //
-                        //         . . . . . . a b . . . . .[.] -> (oob above by 6 spaces, so reflection is at top-6)
-                        //         .[.]. . . . a b . . . . . .  -> (oob below by 5 spaces, so reflection is at bottom+5)
-                        //         . . . . . . a b . . .[.]. .  -> (oob above by 4 spaces, so reflection is at top-4)
-                        //         . . .[.]. . a b . . . . . .  -> (oob below by 3 spaces, so reflection is at bottom+3)
-                        //         . . . . . . a b .[.]. . . .  -> (oob above by 2 spaces, so reflection is at top-2)
-                        //         . . . . .[.]a b . . . . . .  -> (oob below by 1 space,  so reflection is at bottom+1)
-                        //         . . . . . . a[b]. . . . . .  -> (no longer oob, so copy from here)
-                        //
-                        // Note that this algorithm works because REFLECT padding only makes sense
-                        // if each dim is >= 2.
-                        // clang-format on
-                        Coordinate c = in_coord; // have to copy because in_coord is const
-
-                        for (size_t i = 0; i < c.size(); i++)
-                        {
-                            ptrdiff_t new_dim = c[i];
-                            bool done_reflecting = false;
-
-                            while (!done_reflecting)
-                            {
-                                if (new_dim < padding_below[i])
-                                {
-                                    ptrdiff_t distance_oob = padding_below[i] - new_dim;
-                                    new_dim = padding_below[i] + distance_oob;
-                                }
-                                else if (new_dim >=
-                                         padding_below[i] + static_cast<ptrdiff_t>(arg0_shape[i]))
-                                {
-                                    ptrdiff_t distance_oob =
-                                        new_dim - padding_below[i] -
-                                        (static_cast<ptrdiff_t>(arg0_shape[i]) - 1);
-                                    new_dim = padding_below[i] +
-                                              static_cast<ptrdiff_t>(arg0_shape[i]) - distance_oob -
-                                              1;
-                                }
-                                else
-                                {
-                                    done_reflecting = true;
-                                }
-                            }
-
-                            c[i] = static_cast<size_t>(new_dim);
-                        }
-                        v = arg0[input_transform.index(c)];
-                        break;
-                    }
-                    case op::PadMode::SYMMETRIC:
-                    {
-                        Coordinate c = in_coord; // have to copy because in_coord is const
-                        for (size_t i = 0; i < c.size(); i++)
-                        {
-                            ptrdiff_t pos = padding_below[i] - (c[i] + 1);
-                            if (pos >= 0)
-                            {
-                                c[i] = static_cast<size_t>(pos + padding_below[i]);
-                            }
-                            else
-                            {
-                                pos = -(pos + 1);
-                                ptrdiff_t src_dim = static_cast<ptrdiff_t>(arg0_shape[i]);
-                                if (pos < src_dim)
-                                {
-                                    c[i] = static_cast<size_t>(pos + padding_below[i]);
-                                }
-                                else
-                                {
-                                    c[i] = static_cast<size_t>(padding_below[i] + src_dim +
-                                                               padding_above[i] - pos);
-                                }
-                            }
-                        }
-                        v = arg0[input_transform.index(c)];
-                        break;
-                    }
-                    }
-
-                    out[output_transform.index(out_coord)] = v;
-
-                    ++output_it;
-                }
-            }
+                     const op::PadMode pad_mode);
         }
     }
 }
diff --git a/ngraph/core/src/op/pad.cpp b/ngraph/core/src/op/pad.cpp
index f653cdc..f50f2d9 100644
@@ -20,6 +20,7 @@
 #include "ngraph/op/broadcast.hpp"
 #include "ngraph/op/constant.hpp"
 #include "ngraph/op/util/op_types.hpp"
+#include "ngraph/runtime/reference/pad.hpp"
 
 using namespace std;
 using namespace ngraph;
@@ -155,7 +156,7 @@ op::v1::Pad::Pad(const Output<Node>& arg,
                  const Output<Node>& pads_begin,
                  const Output<Node>& pads_end,
                  PadMode pad_mode)
-    : Op({arg, pads_begin, pads_end})
+    : Op({arg, pads_begin, pads_end, op::Constant::create(arg.get_element_type(), Shape{}, {0})})
     , m_pad_mode{pad_mode}
 {
     constructor_validate_and_infer_types();
@@ -197,8 +198,7 @@ void op::v1::Pad::validate_and_infer_types()
     const auto& pads_begin_element_type = get_input_element_type(1);
     const auto& pads_end_element_type = get_input_element_type(2);
 
-    const auto arg_pad_value_provided = get_input_size() == 4;
-    if (m_pad_mode == PadMode::CONSTANT && arg_pad_value_provided)
+    if (m_pad_mode == PadMode::CONSTANT && get_input_size() == 4)
     {
         const auto& arg_pad_element_type = get_input_element_type(3);
         const auto& arg_pad_shape = get_input_partial_shape(3);
@@ -310,8 +310,7 @@ void op::v1::Pad::validate_and_infer_types()
 shared_ptr<Node> op::v1::Pad::clone_with_new_inputs(const OutputVector& new_args) const
 {
     check_new_args_count(this, new_args);
-    const auto arg_pad_value_provided = get_input_size() == 4;
-    if (arg_pad_value_provided)
+    if (get_input_size() == 4)
     {
         return make_shared<v1::Pad>(
             new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3), m_pad_mode);
@@ -321,3 +320,33 @@ shared_ptr<Node> op::v1::Pad::clone_with_new_inputs(const OutputVector& new_args
         return make_shared<v1::Pad>(new_args.at(0), new_args.at(1), new_args.at(2), m_pad_mode);
     }
 }
+
+bool op::v1::Pad::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
+{
+    const auto& data = inputs[0];
+    const auto elem_size = data->get_element_type().size();
+
+    const char* pad_value = nullptr;
+    const std::vector<char> pad_zero_value(elem_size, 0);
+    if (get_input_size() == 4)
+    {
+        pad_value = inputs[3]->get_data_ptr<char>();
+    }
+    else
+    {
+        pad_value = pad_zero_value.data();
+    }
+    const auto& out = outputs[0];
+
+    ngraph::runtime::reference::pad(data->get_data_ptr<char>(),
+                                    pad_value,
+                                    out->get_data_ptr<char>(),
+                                    elem_size,
+                                    data->get_shape(),
+                                    out->get_shape(),
+                                    get_pads_begin(),
+                                    get_pads_end(),
+                                    get_pad_mode());
+
+    return true;
+}
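
A sketch of driving the new evaluator directly (hypothetical driver code; the HostTensor constructors follow this revision's API, and the expected output assumes CONSTANT mode with pad value 9):

    #include "ngraph/ngraph.hpp"
    #include "ngraph/runtime/host_tensor.hpp"

    using namespace ngraph;

    void evaluate_pad_example()
    {
        const auto data = std::make_shared<op::Parameter>(element::f32, Shape{2});
        const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {1});
        const auto pads_end = op::Constant::create(element::i64, Shape{1}, {1});
        const auto pad_value = op::Constant::create(element::f32, Shape{}, {9});
        const auto pad = std::make_shared<op::v1::Pad>(
            data, pads_begin, pads_end, pad_value, op::PadMode::CONSTANT);

        std::vector<float> data_vals{1, 2};
        std::vector<int64_t> pads{1};
        float value = 9;

        // One host tensor per op input, in input order.
        const HostTensorVector inputs{
            std::make_shared<runtime::HostTensor>(element::f32, Shape{2}, data_vals.data()),
            std::make_shared<runtime::HostTensor>(element::i64, Shape{1}, pads.data()),
            std::make_shared<runtime::HostTensor>(element::i64, Shape{1}, pads.data()),
            std::make_shared<runtime::HostTensor>(element::f32, Shape{}, &value)};
        const HostTensorVector outputs{
            std::make_shared<runtime::HostTensor>(element::f32, Shape{4})};

        pad->evaluate(outputs, inputs); // outputs[0] now holds {9, 1, 2, 9}
    }
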
diff --git a/ngraph/core/src/pass/constant_folding_pad.cpp b/ngraph/core/src/pass/constant_folding_pad.cpp
deleted file mode 100644
index bc0c150..0000000
+++ /dev/null
@@ -1,158 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#include "constant_folding.hpp"
-#include "ngraph/op/pad.hpp"
-#include "ngraph/runtime/reference/pad.hpp"
-
-using namespace std;
-using namespace ngraph;
-
-template <class T>
-shared_ptr<op::Constant> fold_constant_pad(shared_ptr<op::Constant> constant,
-                                           shared_ptr<op::Pad> pad,
-                                           NodeExecutorTy func)
-{
-    const Shape& out_shape = pad->get_shape();
-    runtime::AlignedBuffer buffer(shape_size(out_shape) * sizeof(T));
-    T* data_ptr = buffer.get_ptr<T>();
-    auto pad_value = std::static_pointer_cast<op::Constant>(pad->get_input_node_shared_ptr(1));
-
-    if (func != nullptr)
-    {
-        vector<void*> inputs;
-        inputs.push_back(const_cast<void*>(constant->get_data_ptr()));
-        inputs.push_back(const_cast<void*>(pad_value->get_data_ptr()));
-
-        vector<void*> outputs;
-        outputs.push_back(data_ptr);
-
-        func(inputs, outputs);
-    }
-    else
-    {
-        runtime::reference::pad<T>(constant->get_data_ptr<T>(),
-                                   pad_value->get_data_ptr<T>(),
-                                   data_ptr,
-                                   constant->get_shape(),
-                                   out_shape,
-                                   pad->get_padding_below(),
-                                   pad->get_padding_above(),
-                                   pad->get_pad_mode());
-    }
-
-    return make_shared<op::Constant>(constant->get_element_type(), out_shape, data_ptr);
-}
-
-void pass::ConstantFolding::construct_constant_pad()
-{
-    auto is_constant = pattern::has_class<op::Constant>();
-    auto constant_label = make_shared<pattern::op::Label>(element::f32, Shape{6}, is_constant);
-
-    auto pad_value_label = make_shared<pattern::op::Label>(element::f32, Shape{}, is_constant);
-
-    CoordinateDiff padding_below{0};
-    CoordinateDiff padding_above{0};
-    op::PadMode pad_mode{op::PadMode::CONSTANT};
-
-    auto pad = make_shared<op::Pad>(
-        constant_label, pad_value_label, padding_below, padding_above, pad_mode);
-
-    auto constant_pad_callback = [&, constant_label](pattern::Matcher& m) {
-        NGRAPH_DEBUG << "In callback for constant_pad_callback against node = "
-                     << m.get_match_root()->get_name();
-
-        auto pattern_map = m.get_pattern_map();
-
-        auto constant_match = static_pointer_cast<op::Constant>(pattern_map[constant_label]);
-        auto pad_match = static_pointer_cast<op::Pad>(m.get_match_root());
-
-        if (cf_is_disabled(pad_match))
-            return false;
-
-        NGRAPH_CHECK(revalidate_and_ensure_static(pad_match));
-
-        NodeExecutorTy func = nullptr;
-        if (!m_cfmap.empty())
-        {
-            auto handler = m_cfmap.find(type_index(typeid(ngraph::op::Pad)));
-            NGRAPH_CHECK(handler != m_cfmap.end(), "constant folding map should have pad entry");
-            func = handler->second(pad_match.get());
-        }
-
-        std::shared_ptr<Node> replacement;
-        auto type = constant_match->get_element_type();
-        switch (type)
-        {
-        case element::Type_t::undefined:
-            NGRAPH_CHECK(false, "Encountered 'undefined' element type in constant_pad_callback");
-            break;
-        case element::Type_t::dynamic:
-            NGRAPH_CHECK(false, "Encountered 'dynamic' element type in constant_pad_callback");
-            break;
-        case element::Type_t::u1:
-            NGRAPH_CHECK(false, "Encountered 'u1' element type in constant_pad_callback");
-            break;
-        case element::Type_t::boolean:
-            replacement = fold_constant_pad<char>(constant_match, pad_match, func);
-            break;
-        case element::Type_t::bf16:
-            replacement = fold_constant_pad<bfloat16>(constant_match, pad_match, func);
-            break;
-        case element::Type_t::f16:
-            replacement = fold_constant_pad<float16>(constant_match, pad_match, func);
-            break;
-        case element::Type_t::f32:
-            replacement = fold_constant_pad<float>(constant_match, pad_match, func);
-            break;
-        case element::Type_t::f64:
-            replacement = fold_constant_pad<double>(constant_match, pad_match, func);
-            break;
-        case element::Type_t::i8:
-            replacement = fold_constant_pad<int8_t>(constant_match, pad_match, func);
-            break;
-        case element::Type_t::i16:
-            replacement = fold_constant_pad<int16_t>(constant_match, pad_match, func);
-            break;
-        case element::Type_t::i32:
-            replacement = fold_constant_pad<int32_t>(constant_match, pad_match, func);
-            break;
-        case element::Type_t::i64:
-            replacement = fold_constant_pad<int64_t>(constant_match, pad_match, func);
-            break;
-        case element::Type_t::u8:
-            replacement = fold_constant_pad<uint8_t>(constant_match, pad_match, func);
-            break;
-        case element::Type_t::u16:
-            replacement = fold_constant_pad<uint16_t>(constant_match, pad_match, func);
-            break;
-        case element::Type_t::u32:
-            replacement = fold_constant_pad<uint32_t>(constant_match, pad_match, func);
-            break;
-        case element::Type_t::u64:
-            replacement = fold_constant_pad<uint64_t>(constant_match, pad_match, func);
-            break;
-        }
-
-        replace_node(m.get_match_root(), replacement);
-        return true;
-    };
-
-    auto pad_matcher = make_shared<pattern::Matcher>(pad, "ConstantFolding.ConstantPad");
-    NGRAPH_SUPPRESS_DEPRECATED_START
-    this->add_matcher(pad_matcher, constant_pad_callback, PassProperty::CHANGE_DYNAMIC_STATE);
-    NGRAPH_SUPPRESS_DEPRECATED_END
-}
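
With the op-specific matcher gone, Pad constants are folded by the generic ConstantFolding pass, which can now rely on v1::Pad::evaluate. A minimal sketch of running the pass (pass registration API as in this revision):

    #include "ngraph/pass/constant_folding.hpp"
    #include "ngraph/pass/manager.hpp"

    void fold_constants(std::shared_ptr<ngraph::Function> f)
    {
        ngraph::pass::Manager manager;
        manager.register_pass<ngraph::pass::ConstantFolding>();
        manager.run_passes(f);
    }
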
diff --git a/ngraph/core/src/runtime/reference/pad.cpp b/ngraph/core/src/runtime/reference/pad.cpp
new file mode 100644
index 0000000..9527432
--- /dev/null
@@ -0,0 +1,211 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#include "ngraph/runtime/reference/pad.hpp"
+
+namespace ngraph
+{
+    namespace runtime
+    {
+        namespace reference
+        {
+            void pad(const char* data,
+                     const char* pad_value,
+                     char* out,
+                     const size_t elem_size,
+                     const Shape& data_shape,
+                     const Shape& out_shape,
+                     const CoordinateDiff& padding_below,
+                     const CoordinateDiff& padding_above,
+                     const op::PadMode pad_mode)
+            {
+                Coordinate input_start(data_shape.size(), 0); // start at (0,0,...,0)
+                Coordinate input_end = out_shape; // end at (d'0,d'1,...,d'n), the outer corner of
+                                                  // the post-padding shape
+
+                Strides input_strides(data_shape.size(), 1);
+
+                AxisVector input_axis_order(data_shape.size());
+                for (size_t i = 0; i < data_shape.size(); i++)
+                {
+                    input_axis_order[i] = i;
+                }
+
+                CoordinateTransform input_transform(data_shape,
+                                                    input_start,
+                                                    input_end,
+                                                    input_strides,
+                                                    input_axis_order,
+                                                    padding_below,
+                                                    padding_above);
+                CoordinateTransform output_transform(out_shape);
+
+                CoordinateTransform::Iterator output_it = output_transform.begin();
+
+                NGRAPH_CHECK(shape_size(input_transform.get_target_shape()) ==
+                             shape_size(output_transform.get_target_shape()));
+
+                // depending on the data tensor element type, allocate enough bytes to fit a
+                // single value of this type
+                std::vector<char> v(elem_size, 0);
+
+                for (const Coordinate& in_coord : input_transform)
+                {
+                    const Coordinate& out_coord = *output_it;
+
+                    std::fill(v.begin(), v.end(), 0);
+
+                    switch (pad_mode)
+                    {
+                    case op::PadMode::CONSTANT:
+                        // If the coordinate is out of bounds, substitute *pad_value.
+                        if (input_transform.has_source_coordinate(in_coord))
+                        {
+                            const auto* offset = data + input_transform.index(in_coord) * elem_size;
+                            std::copy(offset, offset + elem_size, v.begin());
+                        }
+                        else
+                        {
+                            std::copy(pad_value, pad_value + elem_size, v.begin());
+                        }
+                        break;
+                    case op::PadMode::EDGE:
+                    {
+                        Coordinate c = in_coord; // have to copy because in_coord is const
+
+                        // Truncate each out-of-bound dimension.
+                        for (size_t i = 0; i < c.size(); i++)
+                        {
+                            if (static_cast<ptrdiff_t>(c[i]) < padding_below[i])
+                            {
+                                c[i] = padding_below[i];
+                            }
+
+                            if (static_cast<ptrdiff_t>(c[i]) >=
+                                (padding_below[i] + static_cast<ptrdiff_t>(data_shape[i])))
+                            {
+                                c[i] = static_cast<size_t>(
+                                    padding_below[i] + static_cast<ptrdiff_t>(data_shape[i]) - 1);
+                            }
+                        }
+                        const auto* offset = data + input_transform.index(c) * elem_size;
+                        std::copy(offset, offset + elem_size, v.begin());
+                        break;
+                    }
+                    case op::PadMode::REFLECT:
+                    {
+                        // clang-format off
+                        // The algorithm here is a bit complicated because if the padding is
+                        // bigger than the tensor, we may reflect multiple times.
+                        //
+                        // Example:
+                        //
+                        // Input shape:     [2]
+                        // Padding:         6 below, 6 above
+                        // Output shape:    [14]
+                        //
+                        // Input:                       a b
+                        // Expected output: a b a b a b a b a b a b a b
+                        //
+                        // Computation for coordinate 13 of output:
+                        //
+                        //         . . . . . . a b . . . . .[.] -> (oob above by 6 spaces, so reflection is at top-6)
+                        //         .[.]. . . . a b . . . . . .  -> (oob below by 5 spaces, so reflection is at bottom+5)
+                        //         . . . . . . a b . . .[.]. .  -> (oob above by 4 spaces, so reflection is at top-4)
+                        //         . . .[.]. . a b . . . . . .  -> (oob below by 3 spaces, so reflection is at bottom+3)
+                        //         . . . . . . a b .[.]. . . .  -> (oob above by 2 spaces, so reflection is at top-2)
+                        //         . . . . .[.]a b . . . . . .  -> (oob below by 1 space,  so reflection is at bottom+1)
+                        //         . . . . . . a[b]. . . . . .  -> (no longer oob, so copy from here)
+                        //
+                        // Note that this algorithm works because REFLECT padding only makes sense
+                        // if each dim is >= 2.
+                        // clang-format on
+                        Coordinate c = in_coord; // have to copy because in_coord is const
+
+                        for (size_t i = 0; i < c.size(); i++)
+                        {
+                            ptrdiff_t new_dim = c[i];
+                            bool done_reflecting = false;
+
+                            while (!done_reflecting)
+                            {
+                                if (new_dim < padding_below[i])
+                                {
+                                    ptrdiff_t distance_oob = padding_below[i] - new_dim;
+                                    new_dim = padding_below[i] + distance_oob;
+                                }
+                                else if (new_dim >=
+                                         padding_below[i] + static_cast<ptrdiff_t>(data_shape[i]))
+                                {
+                                    ptrdiff_t distance_oob =
+                                        new_dim - padding_below[i] -
+                                        (static_cast<ptrdiff_t>(data_shape[i]) - 1);
+                                    new_dim = padding_below[i] +
+                                              static_cast<ptrdiff_t>(data_shape[i]) - distance_oob -
+                                              1;
+                                }
+                                else
+                                {
+                                    done_reflecting = true;
+                                }
+                            }
+
+                            c[i] = static_cast<size_t>(new_dim);
+                        }
+                        const auto* offset = data + input_transform.index(c) * elem_size;
+                        std::copy(offset, offset + elem_size, v.begin());
+                        break;
+                    }
+                    case op::PadMode::SYMMETRIC:
+                    {
+                        Coordinate c = in_coord; // have to copy because in_coord is const
+                        for (size_t i = 0; i < c.size(); i++)
+                        {
+                            ptrdiff_t pos = padding_below[i] - (c[i] + 1);
+                            if (pos >= 0)
+                            {
+                                c[i] = static_cast<size_t>(pos + padding_below[i]);
+                            }
+                            else
+                            {
+                                pos = -(pos + 1);
+                                ptrdiff_t src_dim = static_cast<ptrdiff_t>(data_shape[i]);
+                                if (pos < src_dim)
+                                {
+                                    c[i] = static_cast<size_t>(pos + padding_below[i]);
+                                }
+                                else
+                                {
+                                    c[i] = static_cast<size_t>(padding_below[i] + src_dim +
+                                                               padding_above[i] - pos);
+                                }
+                            }
+                        }
+                        const auto* offset = data + input_transform.index(c) * elem_size;
+                        std::copy(offset, offset + elem_size, v.begin());
+                        break;
+                    }
+                    }
+
+                    std::copy(
+                        v.begin(), v.end(), out + output_transform.index(out_coord) * elem_size);
+
+                    ++output_it;
+                }
+            }
+        }
+    }
+}
diff --git a/ngraph/test/backend/pad.in.cpp b/ngraph/test/backend/pad.in.cpp
index af2fcf3..7ffbd97 100644
@@ -32,732 +32,672 @@ static string s_manifest = "${MANIFEST}";
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_1d)
 {
-    Shape shape_a{6};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{15};
-    CoordinateDiff padding_below{4};
-    CoordinateDiff padding_above{5};
-    auto f = make_shared<Function>(make_shared<op::Pad>(A, B, padding_below, padding_above),
-                                   ParameterVector{A, B});
+    const Shape data_shape{6};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {4});
+    const auto pads_end = op::Constant::create(element::i64, Shape{1}, {5});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112});
+
+    auto f = make_shared<Function>(
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, test::NDArray<float, 1>({1, 2, 3, 4, 5, 6}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{2112});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto a = backend->create_tensor(element::f32, data_shape);
+    copy_data(a, std::vector<float>({1, 2, 3, 4, 5, 6}));
+    auto result = backend->create_tensor(element::f32, Shape{15});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
-    EXPECT_TRUE(test::all_close_f(
-        (test::NDArray<float, 1>(
-             {2112, 2112, 2112, 2112, 1, 2, 3, 4, 5, 6, 2112, 2112, 2112, 2112, 2112})
-             .get_vector()),
-        read_vector<float>(result),
-        MIN_FLOAT_TOLERANCE_BITS));
+    handle->call_with_validate({result}, {a});
+    EXPECT_TRUE(
+        test::all_close_f({2112, 2112, 2112, 2112, 1, 2, 3, 4, 5, 6, 2112, 2112, 2112, 2112, 2112},
+                          read_vector<float>(result),
+                          MIN_FLOAT_TOLERANCE_BITS));
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_1d)
 {
-    Shape shape_a{6};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{8};
-    CoordinateDiff padding_below{4};
-    CoordinateDiff padding_above{-2};
-    auto f = make_shared<Function>(make_shared<op::Pad>(A, B, padding_below, padding_above),
-                                   ParameterVector{A, B});
+    const Shape data_shape{6};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {4});
+    const auto pads_end = op::Constant::create(element::i64, Shape{1}, {-2});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112});
+
+    auto f = make_shared<Function>(
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, test::NDArray<float, 1>({1, 2, 3, 4, 5, 6}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{2112});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto a = backend->create_tensor(element::f32, data_shape);
+    copy_data(a, std::vector<float>({1, 2, 3, 4, 5, 6}));
+    auto result = backend->create_tensor(element::f32, Shape{8});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
-    EXPECT_TRUE(test::all_close_f(
-        (test::NDArray<float, 1>({2112, 2112, 2112, 2112, 1, 2, 3, 4}).get_vector()),
-        read_vector<float>(result),
-        MIN_FLOAT_TOLERANCE_BITS));
+    handle->call_with_validate({result}, {a});
+    EXPECT_TRUE(test::all_close_f({2112, 2112, 2112, 2112, 1, 2, 3, 4},
+                                  read_vector<float>(result),
+                                  MIN_FLOAT_TOLERANCE_BITS));
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_1d_check_limits)
 {
-    Shape shape_a{6};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{3};
-    CoordinateDiff padding_below{4};
-    CoordinateDiff padding_above{-7};
-    auto f = make_shared<Function>(make_shared<op::Pad>(A, B, padding_below, padding_above),
-                                   ParameterVector{A, B});
+    const Shape data_shape{6};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {4});
+    const auto pads_end = op::Constant::create(element::i64, Shape{1}, {-7});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112});
+
+    auto f = make_shared<Function>(
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, test::NDArray<float, 1>({1, 2, 3, 4, 5, 6}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{2112});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto a = backend->create_tensor(element::f32, data_shape);
+    copy_data(a, std::vector<float>({1, 2, 3, 4, 5, 6}));
+    auto result = backend->create_tensor(element::f32, Shape{3});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
-    EXPECT_TRUE(test::all_close_f((test::NDArray<float, 1>({2112, 2112, 2112}).get_vector()),
-                                  read_vector<float>(result),
-                                  MIN_FLOAT_TOLERANCE_BITS));
+    handle->call_with_validate({result}, {a});
+    EXPECT_TRUE(test::all_close_f(
+        {2112, 2112, 2112}, read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d)
 {
-    Shape shape_a{6};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{11};
-    CoordinateDiff padding_below{2};
-    CoordinateDiff padding_above{3};
+    const Shape data_shape{6};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {2});
+    const auto pads_end = op::Constant::create(element::i64, Shape{1}, {3});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112});
+
     auto f = make_shared<Function>(
-        make_shared<op::Pad>(A, B, padding_below, padding_above, op::PadMode::EDGE),
-        ParameterVector{A, B});
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::EDGE),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, test::NDArray<float, 1>({1, 2, 3, 4, 5, 6}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{2112});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto a = backend->create_tensor(element::f32, data_shape);
+    copy_data(a, std::vector<float>({1, 2, 3, 4, 5, 6}));
+    auto result = backend->create_tensor(element::f32, Shape{11});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
-    EXPECT_TRUE(
-        test::all_close_f((test::NDArray<float, 1>({1, 1, 1, 2, 3, 4, 5, 6, 6, 6, 6}).get_vector()),
-                          read_vector<float>(result),
-                          MIN_FLOAT_TOLERANCE_BITS));
+    handle->call_with_validate({result}, {a});
+    EXPECT_TRUE(test::all_close_f(
+        {1, 1, 1, 2, 3, 4, 5, 6, 6, 6, 6}, read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_top_neg)
 {
-    Shape shape_a{6};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{5};
-    CoordinateDiff padding_below{2};
-    CoordinateDiff padding_above{-3};
+    const Shape data_shape{6};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {2});
+    const auto pads_end = op::Constant::create(element::i64, Shape{1}, {-3});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112});
+
     auto f = make_shared<Function>(
-        make_shared<op::Pad>(A, B, padding_below, padding_above, op::PadMode::EDGE),
-        ParameterVector{A, B});
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::EDGE),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, test::NDArray<float, 1>({1, 2, 3, 4, 5, 6}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{2112});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto a = backend->create_tensor(element::f32, data_shape);
+    copy_data(a, std::vector<float>({1, 2, 3, 4, 5, 6}));
+    auto result = backend->create_tensor(element::f32, Shape{5});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
-    EXPECT_TRUE(test::all_close_f((test::NDArray<float, 1>({1, 1, 1, 2, 3}).get_vector()),
-                                  read_vector<float>(result),
-                                  MIN_FLOAT_TOLERANCE_BITS));
+    handle->call_with_validate({result}, {a});
+    EXPECT_TRUE(
+        test::all_close_f({1, 1, 1, 2, 3}, read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_top_neg_bigger_than_tensor)
 {
-    Shape shape_a{6};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{1};
-    CoordinateDiff padding_below{2};
-    CoordinateDiff padding_above{-7};
+    const Shape data_shape{6};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {2});
+    const auto pads_end = op::Constant::create(element::i64, Shape{1}, {-7});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112});
+
     auto f = make_shared<Function>(
-        make_shared<op::Pad>(A, B, padding_below, padding_above, op::PadMode::EDGE),
-        ParameterVector{A, B});
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::EDGE),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, test::NDArray<float, 1>({1, 2, 3, 4, 5, 6}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{2112});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto a = backend->create_tensor(element::f32, data_shape);
+    copy_data(a, std::vector<float>({1, 2, 3, 4, 5, 6}));
+    auto result = backend->create_tensor(element::f32, Shape{1});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
-    EXPECT_TRUE(test::all_close_f((test::NDArray<float, 1>({1}).get_vector()),
-                                  read_vector<float>(result),
-                                  MIN_FLOAT_TOLERANCE_BITS));
+    handle->call_with_validate({result}, {a});
+    EXPECT_TRUE(test::all_close_f({1}, read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_bottom_neg)
 {
-    Shape shape_a{6};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{7};
-    CoordinateDiff padding_below{-2};
-    CoordinateDiff padding_above{3};
+    const Shape data_shape{6};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {-2});
+    const auto pads_end = op::Constant::create(element::i64, Shape{1}, {3});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112});
+
     auto f = make_shared<Function>(
-        make_shared<op::Pad>(A, B, padding_below, padding_above, op::PadMode::EDGE),
-        ParameterVector{A, B});
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::EDGE),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, test::NDArray<float, 1>({1, 2, 3, 4, 5, 6}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{2112});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto a = backend->create_tensor(element::f32, data_shape);
+    copy_data(a, std::vector<float>({1, 2, 3, 4, 5, 6}));
+    auto result = backend->create_tensor(element::f32, Shape{7});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
-    EXPECT_TRUE(test::all_close_f((test::NDArray<float, 1>({3, 4, 5, 6, 6, 6, 6}).get_vector()),
-                                  read_vector<float>(result),
-                                  MIN_FLOAT_TOLERANCE_BITS));
+    handle->call_with_validate({result}, {a});
+    EXPECT_TRUE(test::all_close_f(
+        {3, 4, 5, 6, 6, 6, 6}, read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_bottom_neg_bigger_than_tensor)
 {
-    Shape shape_a{6};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{2};
-    CoordinateDiff padding_below{-7};
-    CoordinateDiff padding_above{3};
+    const Shape data_shape{6};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {-7});
+    const auto pads_end = op::Constant::create(element::i64, Shape{1}, {3});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112});
+
     auto f = make_shared<Function>(
-        make_shared<op::Pad>(A, B, padding_below, padding_above, op::PadMode::EDGE),
-        ParameterVector{A, B});
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::EDGE),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, test::NDArray<float, 1>({1, 2, 3, 4, 5, 6}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{2112});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto a = backend->create_tensor(element::f32, data_shape);
+    copy_data(a, std::vector<float>({1, 2, 3, 4, 5, 6}));
+    auto result = backend->create_tensor(element::f32, Shape{2});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
-    EXPECT_TRUE(test::all_close_f((test::NDArray<float, 1>({6, 6}).get_vector()),
-                                  read_vector<float>(result),
-                                  MIN_FLOAT_TOLERANCE_BITS));
+    handle->call_with_validate({result}, {a});
+    EXPECT_TRUE(test::all_close_f({6, 6}, read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_edge_2d)
 {
-    Shape shape_a{3, 4};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{6, 9};
-    CoordinateDiff padding_below{2, 3};
-    CoordinateDiff padding_above{1, 2};
+    const Shape data_shape{3, 4};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {2, 3});
+    const auto pads_end = op::Constant::create(element::i64, Shape{2}, {1, 2});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112});
+
     auto f = make_shared<Function>(
-        make_shared<op::Pad>(A, B, padding_below, padding_above, op::PadMode::EDGE),
-        ParameterVector{A, B});
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::EDGE),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a,
-              test::NDArray<float, 2>({{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{2112});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto a = backend->create_tensor(element::f32, data_shape);
+    copy_data(a, std::vector<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}));
+    auto result = backend->create_tensor(element::f32, Shape{6, 9});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
-    EXPECT_TRUE(test::all_close_f((test::NDArray<float, 2>({{1, 1, 1, 1, 2, 3, 4, 4, 4},
-                                                            {1, 1, 1, 1, 2, 3, 4, 4, 4},
-                                                            {1, 1, 1, 1, 2, 3, 4, 4, 4},
-                                                            {5, 5, 5, 5, 6, 7, 8, 8, 8},
-                                                            {9, 9, 9, 9, 10, 11, 12, 12, 12},
-                                                            {9, 9, 9, 9, 10, 11, 12, 12, 12}})
-                                       .get_vector()),
+    handle->call_with_validate({result}, {a});
+    EXPECT_TRUE(test::all_close_f(test::NDArray<float, 2>({{1, 1, 1, 1, 2, 3, 4, 4, 4},
+                                                           {1, 1, 1, 1, 2, 3, 4, 4, 4},
+                                                           {1, 1, 1, 1, 2, 3, 4, 4, 4},
+                                                           {5, 5, 5, 5, 6, 7, 8, 8, 8},
+                                                           {9, 9, 9, 9, 10, 11, 12, 12, 12},
+                                                           {9, 9, 9, 9, 10, 11, 12, 12, 12}})
+                                      .get_vector(),
                                   read_vector<float>(result),
                                   MIN_FLOAT_TOLERANCE_BITS));
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_edge_2d_with_neg)
 {
-    Shape shape_a{3, 4};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{6, 5};
-    CoordinateDiff padding_below{2, -1};
-    CoordinateDiff padding_above{1, 2};
+    const Shape data_shape{3, 4};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {2, -1});
+    const auto pads_end = op::Constant::create(element::i64, Shape{2}, {1, 2});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112});
+
     auto f = make_shared<Function>(
-        make_shared<op::Pad>(A, B, padding_below, padding_above, op::PadMode::EDGE),
-        ParameterVector{A, B});
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::EDGE),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a,
-              test::NDArray<float, 2>({{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{2112});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto a = backend->create_tensor(element::f32, data_shape);
+    copy_data(a, std::vector<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}));
+    auto result = backend->create_tensor(element::f32, Shape{6, 5});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
-    EXPECT_TRUE(test::all_close_f((test::NDArray<float, 2>({{2, 3, 4, 4, 4},
-                                                            {2, 3, 4, 4, 4},
-                                                            {2, 3, 4, 4, 4},
-                                                            {6, 7, 8, 8, 8},
-                                                            {10, 11, 12, 12, 12},
-                                                            {10, 11, 12, 12, 12}})
-                                       .get_vector()),
+    handle->call_with_validate({result}, {a});
+    EXPECT_TRUE(test::all_close_f(test::NDArray<float, 2>({{2, 3, 4, 4, 4},
+                                                           {2, 3, 4, 4, 4},
+                                                           {2, 3, 4, 4, 4},
+                                                           {6, 7, 8, 8, 8},
+                                                           {10, 11, 12, 12, 12},
+                                                           {10, 11, 12, 12, 12}})
+                                      .get_vector(),
                                   read_vector<float>(result),
                                   MIN_FLOAT_TOLERANCE_BITS));
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d)
 {
-    Shape shape_a{6};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{11};
-    CoordinateDiff padding_below{2};
-    CoordinateDiff padding_above{3};
+    const Shape data_shape{6};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {2});
+    const auto pads_end = op::Constant::create(element::i64, Shape{1}, {3});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112});
+
     auto f = make_shared<Function>(
-        make_shared<op::Pad>(A, B, padding_below, padding_above, op::PadMode::REFLECT),
-        ParameterVector{A, B});
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, test::NDArray<float, 1>({1, 2, 3, 4, 5, 6}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{2112});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto a = backend->create_tensor(element::f32, data_shape);
+    copy_data(a, std::vector<float>({1, 2, 3, 4, 5, 6}));
+    auto result = backend->create_tensor(element::f32, Shape{11});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
-    EXPECT_TRUE(
-        test::all_close_f((test::NDArray<float, 1>({3, 2, 1, 2, 3, 4, 5, 6, 5, 4, 3}).get_vector()),
-                          read_vector<float>(result),
-                          MIN_FLOAT_TOLERANCE_BITS));
+    handle->call_with_validate({result}, {a});
+    EXPECT_TRUE(test::all_close_f(std::vector<float>({3, 2, 1, 2, 3, 4, 5, 6, 5, 4, 3}),
+                                  read_vector<float>(result),
+                                  MIN_FLOAT_TOLERANCE_BITS));
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_top_neg)
 {
-    Shape shape_a{6};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{5};
-    CoordinateDiff padding_below{2};
-    CoordinateDiff padding_above{-3};
+    const Shape data_shape{6};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {2});
+    const auto pads_end = op::Constant::create(element::i64, Shape{1}, {-3});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112});
+
     auto f = make_shared<Function>(
-        make_shared<op::Pad>(A, B, padding_below, padding_above, op::PadMode::REFLECT),
-        ParameterVector{A, B});
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, test::NDArray<float, 1>({1, 2, 3, 4, 5, 6}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{2112});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto a = backend->create_tensor(element::f32, data_shape);
+    copy_data(a, std::vector<float>({1, 2, 3, 4, 5, 6}));
+    auto result = backend->create_tensor(element::f32, Shape{5});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
-    EXPECT_TRUE(test::all_close_f((test::NDArray<float, 1>({3, 2, 1, 2, 3}).get_vector()),
-                                  read_vector<float>(result),
-                                  MIN_FLOAT_TOLERANCE_BITS));
+    handle->call_with_validate({result}, {a});
+    EXPECT_TRUE(test::all_close_f(
+        std::vector<float>({3, 2, 1, 2, 3}), read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
 }
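
Negative entries in pads_begin/pads_end crop the corresponding edge instead of
padding it, so each output extent is data_dim + pads_begin + pads_end; here
6 + 2 + (-3) == 5, matching Shape{5}. A small sketch of that shape arithmetic
(the helper name is illustrative):

    // Output shape of Pad: negative pad amounts shrink the axis.
    Shape padded_shape(const Shape& data_shape,
                       const CoordinateDiff& pads_begin,
                       const CoordinateDiff& pads_end)
    {
        Shape out;
        for (size_t i = 0; i < data_shape.size(); ++i)
        {
            out.push_back(static_cast<size_t>(static_cast<std::ptrdiff_t>(data_shape[i]) +
                                              pads_begin[i] + pads_end[i]));
        }
        return out;
    }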
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_top_neg_bigger_than_tensor)
 {
-    Shape shape_a{6};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{1};
-    CoordinateDiff padding_below{2};
-    CoordinateDiff padding_above{-7};
+    const Shape data_shape{6};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {2});
+    const auto pads_end = op::Constant::create(element::i64, Shape{1}, {-7});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112});
+
     auto f = make_shared<Function>(
-        make_shared<op::Pad>(A, B, padding_below, padding_above, op::PadMode::REFLECT),
-        ParameterVector{A, B});
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, test::NDArray<float, 1>({1, 2, 3, 4, 5, 6}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{2112});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto a = backend->create_tensor(element::f32, data_shape);
+    copy_data(a, std::vector<float>({1, 2, 3, 4, 5, 6}));
+    auto result = backend->create_tensor(element::f32, Shape{1});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
-    EXPECT_TRUE(test::all_close_f((test::NDArray<float, 1>({3}).get_vector()),
-                                  read_vector<float>(result),
-                                  MIN_FLOAT_TOLERANCE_BITS));
+    handle->call_with_validate({result}, {a});
+    EXPECT_TRUE(test::all_close_f(
+        std::vector<float>({3}), read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_bottom_neg)
 {
-    Shape shape_a{6};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{7};
-    CoordinateDiff padding_below{-2};
-    CoordinateDiff padding_above{3};
+    const Shape data_shape{6};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {-2});
+    const auto pads_end = op::Constant::create(element::i64, Shape{1}, {3});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112});
+
     auto f = make_shared<Function>(
-        make_shared<op::Pad>(A, B, padding_below, padding_above, op::PadMode::REFLECT),
-        ParameterVector{A, B});
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, test::NDArray<float, 1>({1, 2, 3, 4, 5, 6}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{2112});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto a = backend->create_tensor(element::f32, data_shape);
+    copy_data(a, std::vector<float>({1, 2, 3, 4, 5, 6}));
+    auto result = backend->create_tensor(element::f32, Shape{7});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
-    EXPECT_TRUE(test::all_close_f((test::NDArray<float, 1>({3, 4, 5, 6, 5, 4, 3}).get_vector()),
+    handle->call_with_validate({result}, {a});
+    EXPECT_TRUE(test::all_close_f(std::vector<float>({3, 4, 5, 6, 5, 4, 3}),
                                   read_vector<float>(result),
                                   MIN_FLOAT_TOLERANCE_BITS));
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_bottom_neg_bigger_than_tensor)
 {
-    Shape shape_a{6};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{2};
-    CoordinateDiff padding_below{-7};
-    CoordinateDiff padding_above{3};
+    const Shape data_shape{6};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {-7});
+    const auto pads_end = op::Constant::create(element::i64, Shape{1}, {3});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112});
+
     auto f = make_shared<Function>(
-        make_shared<op::Pad>(A, B, padding_below, padding_above, op::PadMode::REFLECT),
-        ParameterVector{A, B});
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, test::NDArray<float, 1>({1, 2, 3, 4, 5, 6}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{2112});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto a = backend->create_tensor(element::f32, data_shape);
+    copy_data(a, std::vector<float>({1, 2, 3, 4, 5, 6}));
+    auto result = backend->create_tensor(element::f32, Shape{2});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
-    EXPECT_TRUE(test::all_close_f((test::NDArray<float, 1>({4, 3}).get_vector()),
-                                  read_vector<float>(result),
-                                  MIN_FLOAT_TOLERANCE_BITS));
+    handle->call_with_validate({result}, {a});
+    EXPECT_TRUE(test::all_close_f(
+        std::vector<float>({4, 3}), read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_multi_reflect)
 {
-    Shape shape_a{3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{22};
-    CoordinateDiff padding_below{10};
-    CoordinateDiff padding_above{9};
+    const Shape data_shape{3};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {10});
+    const auto pads_end = op::Constant::create(element::i64, Shape{1}, {9});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112});
+
     auto f = make_shared<Function>(
-        make_shared<op::Pad>(A, B, padding_below, padding_above, op::PadMode::REFLECT),
-        ParameterVector{A, B});
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, test::NDArray<float, 1>({1, 2, 3}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{2112});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto a = backend->create_tensor(element::f32, data_shape);
+    copy_data(a, std::vector<float>({1, 2, 3}));
+    auto result = backend->create_tensor(element::f32, Shape{22});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
+    handle->call_with_validate({result}, {a});
     EXPECT_TRUE(test::all_close_f(
-        (test::NDArray<float, 1>({3, 2, 1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3, 2})
-             .get_vector()),
+        std::vector<float>({3, 2, 1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3, 2}),
         read_vector<float>(result),
         MIN_FLOAT_TOLERANCE_BITS));
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_2d)
 {
-    Shape shape_a{3, 4};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{6, 9};
-    CoordinateDiff padding_below{2, 3};
-    CoordinateDiff padding_above{1, 2};
+    const Shape data_shape{3, 4};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {2, 3});
+    const auto pads_end = op::Constant::create(element::i64, Shape{2}, {1, 2});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112});
+
     auto f = make_shared<Function>(
-        make_shared<op::Pad>(A, B, padding_below, padding_above, op::PadMode::REFLECT),
-        ParameterVector{A, B});
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::f32, data_shape);
     copy_data(a,
               test::NDArray<float, 2>({{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{2112});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::f32, Shape{6, 9});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
-    EXPECT_TRUE(test::all_close_f((test::NDArray<float, 2>({{12, 11, 10, 9, 10, 11, 12, 11, 10},
-                                                            {8, 7, 6, 5, 6, 7, 8, 7, 6},
-                                                            {4, 3, 2, 1, 2, 3, 4, 3, 2},
-                                                            {8, 7, 6, 5, 6, 7, 8, 7, 6},
-                                                            {12, 11, 10, 9, 10, 11, 12, 11, 10},
-                                                            {8, 7, 6, 5, 6, 7, 8, 7, 6}})
-                                       .get_vector()),
+    handle->call_with_validate({result}, {a});
+    EXPECT_TRUE(test::all_close_f(test::NDArray<float, 2>({{12, 11, 10, 9, 10, 11, 12, 11, 10},
+                                                           {8, 7, 6, 5, 6, 7, 8, 7, 6},
+                                                           {4, 3, 2, 1, 2, 3, 4, 3, 2},
+                                                           {8, 7, 6, 5, 6, 7, 8, 7, 6},
+                                                           {12, 11, 10, 9, 10, 11, 12, 11, 10},
+                                                           {8, 7, 6, 5, 6, 7, 8, 7, 6}})
+                                      .get_vector(),
                                   read_vector<float>(result),
                                   MIN_FLOAT_TOLERANCE_BITS));
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_2d_with_neg)
 {
-    Shape shape_a{3, 4};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{6, 5};
-    CoordinateDiff padding_below{2, -1};
-    CoordinateDiff padding_above{1, 2};
+    const Shape data_shape{3, 4};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {2, -1});
+    const auto pads_end = op::Constant::create(element::i64, Shape{2}, {1, 2});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112});
+
     auto f = make_shared<Function>(
-        make_shared<op::Pad>(A, B, padding_below, padding_above, op::PadMode::REFLECT),
-        ParameterVector{A, B});
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::f32, data_shape);
     copy_data(a,
               test::NDArray<float, 2>({{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{2112});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::f32, Shape{6, 5});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
-
-    EXPECT_TRUE(test::all_close_f((test::NDArray<float, 2>({{10, 11, 12, 11, 10},
-                                                            {6, 7, 8, 7, 6},
-                                                            {2, 3, 4, 3, 2},
-                                                            {6, 7, 8, 7, 6},
-                                                            {10, 11, 12, 11, 10},
-                                                            {6, 7, 8, 7, 6}})
-                                       .get_vector()),
+    handle->call_with_validate({result}, {a});
+    EXPECT_TRUE(test::all_close_f(test::NDArray<float, 2>({{10, 11, 12, 11, 10},
+                                                           {6, 7, 8, 7, 6},
+                                                           {2, 3, 4, 3, 2},
+                                                           {6, 7, 8, 7, 6},
+                                                           {10, 11, 12, 11, 10},
+                                                           {6, 7, 8, 7, 6}})
+                                      .get_vector(),
                                   read_vector<float>(result),
                                   MIN_FLOAT_TOLERANCE_BITS));
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_2d)
 {
-    Shape shape_a{2, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{5, 2};
-    CoordinateDiff padding_below{1, -1};
-    CoordinateDiff padding_above{2, 0};
-    auto f = make_shared<Function>(make_shared<op::Pad>(A, B, padding_below, padding_above),
-                                   ParameterVector{A, B});
+    const Shape data_shape{2, 3};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {1, -1});
+    const auto pads_end = op::Constant::create(element::i64, Shape{2}, {2, 0});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {9});
+
+    auto f = make_shared<Function>(
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::f32, data_shape);
     copy_data(a, test::NDArray<float, 2>({{1, 2, 3}, {4, 5, 6}}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{9});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::f32, Shape{5, 2});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
+    handle->call_with_validate({result}, {a});
     EXPECT_TRUE(test::all_close_f(
-        (test::NDArray<float, 2>({{9, 9}, {2, 3}, {5, 6}, {9, 9}, {9, 9}}).get_vector()),
+        test::NDArray<float, 2>({{9, 9}, {2, 3}, {5, 6}, {9, 9}, {9, 9}}).get_vector(),
         read_vector<float>(result),
         MIN_FLOAT_TOLERANCE_BITS));
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_2d_all_negative)
 {
-    Shape shape_a{3, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{1, 1};
-    CoordinateDiff padding_below{-1, -1};
-    CoordinateDiff padding_above{-1, -1};
-    auto f = make_shared<Function>(make_shared<op::Pad>(A, B, padding_below, padding_above),
-                                   ParameterVector{A, B});
+    const Shape data_shape{3, 3};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {-1, -1});
+    const auto pads_end = op::Constant::create(element::i64, Shape{2}, {-1, -1});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {9});
+
+    auto f = make_shared<Function>(
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::f32, data_shape);
     copy_data(a, test::NDArray<float, 2>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{9});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::f32, Shape{1, 1});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
-    EXPECT_TRUE(test::all_close_f((test::NDArray<float, 2>({{5}}).get_vector()),
+    handle->call_with_validate({result}, {a});
+    EXPECT_TRUE(test::all_close_f(test::NDArray<float, 2>({{5}}).get_vector(),
                                   read_vector<float>(result),
                                   MIN_FLOAT_TOLERANCE_BITS));
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_2d_0x0)
 {
-    Shape shape_a{0, 0};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{5, 5};
-    CoordinateDiff padding_below{2, 3};
-    CoordinateDiff padding_above{3, 2};
-    auto f = make_shared<Function>(make_shared<op::Pad>(A, B, padding_below, padding_above),
-                                   ParameterVector{A, B});
+    const Shape data_shape{0, 0};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {2, 3});
+    const auto pads_end = op::Constant::create(element::i64, Shape{2}, {3, 2});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112});
+
+    auto f = make_shared<Function>(
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    // copy_data(a, test::NDArray<float, 2>({{}}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{2112});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto a = backend->create_tensor(element::f32, data_shape);
+    auto result = backend->create_tensor(element::f32, Shape{5, 5});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
-    EXPECT_TRUE(test::all_close_f((test::NDArray<float, 2>({{2112, 2112, 2112, 2112, 2112},
-                                                            {2112, 2112, 2112, 2112, 2112},
-                                                            {2112, 2112, 2112, 2112, 2112},
-                                                            {2112, 2112, 2112, 2112, 2112},
-                                                            {2112, 2112, 2112, 2112, 2112}})
-                                       .get_vector()),
+    handle->call_with_validate({result}, {a});
+    EXPECT_TRUE(test::all_close_f(test::NDArray<float, 2>({{2112, 2112, 2112, 2112, 2112},
+                                                           {2112, 2112, 2112, 2112, 2112},
+                                                           {2112, 2112, 2112, 2112, 2112},
+                                                           {2112, 2112, 2112, 2112, 2112},
+                                                           {2112, 2112, 2112, 2112, 2112}})
+                                      .get_vector(),
                                   read_vector<float>(result),
                                   MIN_FLOAT_TOLERANCE_BITS));
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_2d_0x3)
 {
-    Shape shape_a{0, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{5, 5};
-    CoordinateDiff padding_below{2, 1};
-    CoordinateDiff padding_above{3, 1};
-    auto f = make_shared<Function>(make_shared<op::Pad>(A, B, padding_below, padding_above),
-                                   ParameterVector{A, B});
+    const Shape data_shape{0, 3};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {2, 1});
+    const auto pads_end = op::Constant::create(element::i64, Shape{2}, {3, 1});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112});
+
+    auto f = make_shared<Function>(
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    // copy_data(a, test::NDArray<float, 2>({}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{2112});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto a = backend->create_tensor(element::f32, data_shape);
+    auto result = backend->create_tensor(element::f32, Shape{5, 5});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
-    EXPECT_TRUE(test::all_close_f((test::NDArray<float, 2>({{2112, 2112, 2112, 2112, 2112},
-                                                            {2112, 2112, 2112, 2112, 2112},
-                                                            {2112, 2112, 2112, 2112, 2112},
-                                                            {2112, 2112, 2112, 2112, 2112},
-                                                            {2112, 2112, 2112, 2112, 2112}})
-                                       .get_vector()),
+    handle->call_with_validate({result}, {a});
+    EXPECT_TRUE(test::all_close_f(test::NDArray<float, 2>({{2112, 2112, 2112, 2112, 2112},
+                                                           {2112, 2112, 2112, 2112, 2112},
+                                                           {2112, 2112, 2112, 2112, 2112},
+                                                           {2112, 2112, 2112, 2112, 2112},
+                                                           {2112, 2112, 2112, 2112, 2112}})
+                                      .get_vector(),
                                   read_vector<float>(result),
                                   MIN_FLOAT_TOLERANCE_BITS));
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_2d_3x0)
 {
-    Shape shape_a{3, 0};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{5, 5};
-    CoordinateDiff padding_below{1, 3};
-    CoordinateDiff padding_above{1, 2};
-    auto f = make_shared<Function>(make_shared<op::Pad>(A, B, padding_below, padding_above),
-                                   ParameterVector{A, B});
+    const Shape data_shape{3, 0};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {1, 3});
+    const auto pads_end = op::Constant::create(element::i64, Shape{2}, {1, 2});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112});
+
+    auto f = make_shared<Function>(
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    // copy_data(a, test::NDArray<float, 2>({}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{2112});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto a = backend->create_tensor(element::f32, data_shape);
+    auto result = backend->create_tensor(element::f32, Shape{5, 5});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
-    EXPECT_TRUE(test::all_close_f((test::NDArray<float, 2>({{2112, 2112, 2112, 2112, 2112},
-                                                            {2112, 2112, 2112, 2112, 2112},
-                                                            {2112, 2112, 2112, 2112, 2112},
-                                                            {2112, 2112, 2112, 2112, 2112},
-                                                            {2112, 2112, 2112, 2112, 2112}})
-                                       .get_vector()),
+    handle->call_with_validate({result}, {a});
+    EXPECT_TRUE(test::all_close_f(test::NDArray<float, 2>({{2112, 2112, 2112, 2112, 2112},
+                                                           {2112, 2112, 2112, 2112, 2112},
+                                                           {2112, 2112, 2112, 2112, 2112},
+                                                           {2112, 2112, 2112, 2112, 2112},
+                                                           {2112, 2112, 2112, 2112, 2112}})
+                                      .get_vector(),
                                   read_vector<float>(result),
                                   MIN_FLOAT_TOLERANCE_BITS));
 }
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_4d_1x2x2x2)
 {
-    Shape shape_a{1, 2, 2, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{1, 2, 4, 4};
-    CoordinateDiff padding_below{0, 0, 1, 1};
-    CoordinateDiff padding_above{0, 0, 1, 1};
-    auto f = make_shared<Function>(make_shared<op::Pad>(A, B, padding_below, padding_above),
-                                   ParameterVector{A, B});
+    const Shape data_shape{1, 2, 2, 2};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{4}, {0, 0, 1, 1});
+    const auto pads_end = op::Constant::create(element::i64, Shape{4}, {0, 0, 1, 1});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {42});
+
+    auto f = make_shared<Function>(
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::f32, data_shape);
     // clang-format off
     copy_data(a, test::NDArray<float, 4>(
         {
@@ -773,14 +713,10 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_4d_1x2x2x2)
             }
         }).get_vector());
     // clang-format on
-
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{42});
-
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::f32, Shape{1, 2, 4, 4});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
+    handle->call_with_validate({result}, {a});
     // clang-format off
     EXPECT_TRUE(test::all_close_f((test::NDArray<float, 4>(
         {
@@ -805,20 +741,21 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_4d_1x2x2x2)
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_4d)
 {
-    Shape shape_a{1, 3, 2, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{1, 1, 4, 4};
-    CoordinateDiff padding_below{0, -1, 1, 1};
-    CoordinateDiff padding_above{0, -1, 1, 1};
-    auto f = make_shared<Function>(make_shared<op::Pad>(A, B, padding_below, padding_above),
-                                   ParameterVector{A, B});
+    const Shape data_shape{1, 3, 2, 2};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{4}, {0, -1, 1, 1});
+    const auto pads_end = op::Constant::create(element::i64, Shape{4}, {0, -1, 1, 1});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {42});
+
+    auto f = make_shared<Function>(
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::f32, data_shape);
     // clang-format off
     copy_data(a, test::NDArray<float, 4>(
         {
@@ -839,13 +776,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_4d)
         }).get_vector());
     // clang-format on
 
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{42});
-
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::f32, Shape{1, 1, 4, 4});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
+    handle->call_with_validate({result}, {a});
+
     // clang-format off
     EXPECT_TRUE(test::all_close_f((test::NDArray<float, 4>(
         {
@@ -866,21 +801,22 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_4d)
 // A bug previously found on the nvGPU side is now covered by this test
 NGRAPH_TEST(${BACKEND_NAME}, pad_2channel_2image_asym)
 {
-    Shape shape_a{2, 2, 4, 4};
-    auto window_movement_strides = Strides{2, 2};
-    CoordinateDiff padding_below{0, 0, 0, 0};
-    CoordinateDiff padding_above{0, 0, 2, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{2, 2, 6, 6};
-    auto f = make_shared<Function>(make_shared<op::Pad>(A, B, padding_below, padding_above),
-                                   ParameterVector{A, B});
+    const Shape data_shape{2, 2, 4, 4};
+    const auto window_movement_strides = Strides{2, 2};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0});
+    const auto pads_end = op::Constant::create(element::i64, Shape{4}, {0, 0, 2, 2});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {42});
+
+    auto f = make_shared<Function>(
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::f32, data_shape);
     copy_data(a,
               test::NDArray<float, 4>({{{{0, 1, 0, 2}, // img 0 chan 0
                                          {0, 3, 2, 0},
@@ -903,13 +839,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_2channel_2image_asym)
                                          {1, 0, 0, 0}}}})
                   .get_vector());
 
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{42});
-
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::f32, Shape{2, 2, 6, 6});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
+    handle->call_with_validate({result}, {a});
+
     EXPECT_TRUE(test::all_close_f((test::NDArray<float, 4>({{{{0, 1, 0, 2, 42, 42}, // img 0 chan 0
                                                               {0, 3, 2, 0, 42, 42},
                                                               {2, 0, 0, 0, 42, 42},
@@ -944,28 +878,26 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_2channel_2image_asym)
 
 NGRAPH_TEST(${BACKEND_NAME}, pad_symmetric)
 {
-    Shape shape_a{2, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_b{};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
-    Shape shape_r{4, 7};
-    CoordinateDiff padding_below{1, 2};
-    CoordinateDiff padding_above{1, 2};
+    const Shape data_shape{2, 3};
+    const auto data = make_shared<op::Parameter>(element::f32, data_shape);
+
+    const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {1, 2});
+    const auto pads_end = op::Constant::create(element::i64, Shape{2}, {1, 2});
+    const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112});
+
     auto f = make_shared<Function>(
-        make_shared<op::Pad>(A, B, padding_below, padding_above, op::PadMode::SYMMETRIC),
-        ParameterVector{A, B});
+        make_shared<op::v1::Pad>(data, pads_begin, pads_end, pad_val, op::PadMode::SYMMETRIC),
+        ParameterVector{data});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::f32, data_shape);
     copy_data(a, test::NDArray<float, 2>({{1, 2, 3}, {4, 5, 6}}).get_vector());
-    auto b = backend->create_tensor(element::f32, shape_b);
-    copy_data(b, vector<float>{2112});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::f32, Shape{4, 7});
 
     auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a, b});
+    handle->call_with_validate({result}, {a});
     EXPECT_TRUE(test::all_close_f((test::NDArray<float, 2>({{2, 1, 1, 2, 3, 3, 2},
                                                             {2, 1, 1, 2, 3, 3, 2},
                                                             {5, 4, 4, 5, 6, 6, 5},
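
SYMMETRIC mode mirrors about the tensor border rather than about the edge element,
so edge values repeat: the first padded row above starts {2, 1, 1, 2, ...}, where
REFLECT would yield {3, 2, 1, 2, ...}. Same caveats as the reflect_index sketch
earlier:

    // SYMMETRIC index mapping: mirror about the borders, repeating edge elements.
    long symmetric_index(long c, long n)
    {
        while (c < 0 || c >= n)
        {
            if (c < 0)
                c = -c - 1;           // mirror about the border before index 0
            if (c >= n)
                c = 2 * n - c - 1;    // mirror about the border after index n - 1
        }
        return c;
    }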
ngraph/test/constant_folding.cpp
index 5e1bead..2c73bfb 100644 (file)
@@ -352,36 +352,6 @@ TEST(constant_folding, constant_broadcast_v1_numpy)
     ASSERT_EQ(values_expected, values_out);
 }
 
-TEST(constant_folding, constant_pad_exterior)
-{
-    Shape shape_in{2};
-
-    vector<int> values_in{777, 888};
-    auto constant = make_shared<op::Constant>(element::i32, shape_in, values_in);
-    auto pad_value = make_shared<op::Constant>(element::i32, Shape{}, vector<int>{111});
-
-    CoordinateDiff padding_below{1};
-    CoordinateDiff padding_above{2};
-
-    auto broadcast = make_shared<op::Pad>(constant, pad_value, padding_below, padding_above);
-    auto f = make_shared<Function>(broadcast, ParameterVector{});
-
-    pass::Manager pass_manager;
-    pass_manager.register_pass<pass::ConstantFolding>();
-    pass_manager.run_passes(f);
-
-    ASSERT_EQ(count_ops_of_type<op::Pad>(f), 0);
-    ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
-
-    auto new_const =
-        as_type_ptr<op::Constant>(f->get_results().at(0)->input_value(0).get_node_shared_ptr());
-    ASSERT_TRUE(new_const);
-    auto values_out = new_const->get_vector<int>();
-
-    vector<int> padded_values{111, 777, 888, 111, 111};
-    ASSERT_EQ(padded_values, values_out);
-}
-
 TEST(constant_folding, constant_unary_binary)
 {
     vector<int> values_a{1, 2, 3, 4};
@@ -3209,4 +3179,4 @@ TEST(constant_folding, disable_constant_folding)
 
     ASSERT_EQ(count_ops_of_type<op::v1::Reshape>(f), 1);
     ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
-}
\ No newline at end of file
+}
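
The dedicated constant_pad_exterior test (and the construct_constant_pad matcher it
exercised, removed from constant_folding.hpp above) is obsolete: v1::Pad now
implements evaluate(), so generic constant folding can execute an all-Constant Pad
directly on host tensors. A hedged sketch of that mechanism (the HostTensor setup is
abbreviated, and the exact constructor is an assumption):

    // Fold an all-constant v1::Pad by running its evaluator on host tensors.
    auto out = std::make_shared<runtime::HostTensor>(element::i32, Shape{5});
    const bool folded = pad_node->evaluate(
        {out},                                                // outputs
        {data_ht, pads_begin_ht, pads_end_ht, pad_value_ht}); // constant inputs
    // For the removed test's inputs, 'out' would hold {111, 777, 888, 111, 111}.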
ngraph/test/runtime/interpreter/int_executable.hpp
index 3bf6783..f1ffc61 100644 (file)
@@ -737,20 +737,6 @@ protected:
             break;
         }
         case OP_TYPEID::Parameter: break;
-        case OP_TYPEID::Pad:
-        {
-            const op::Pad* pad = static_cast<const op::Pad*>(&node);
-
-            reference::pad(args[0]->get_data_ptr<const T>(),
-                           args[1]->get_data_ptr<const T>(),
-                           out[0]->get_data_ptr<T>(),
-                           node.get_input_shape(0),
-                           node.get_output_shape(0),
-                           pad->get_padding_below(),
-                           pad->get_padding_above(),
-                           pad->get_pad_mode());
-            break;
-        }
         case OP_TYPEID::Quantize:
         {
             const op::Quantize* quantize = static_cast<const op::Quantize*>(&node);
@@ -1302,6 +1288,7 @@ protected:
         case OP_TYPEID::NonZero_v3:
         case OP_TYPEID::NotEqual:
         case OP_TYPEID::Or:
+        case OP_TYPEID::Pad:
         case OP_TYPEID::Power:
         case OP_TYPEID::Product:
         case OP_TYPEID::Range:
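
Moving Pad into this shared case list drops the interpreter's templated dispatch for
it; the op now runs through the evaluate() override, backed by the non-template
reference kernel that operates on raw bytes (char* buffers plus an explicit element
size), so a single implementation serves every element type. A minimal sketch of
that style of element copy (index bookkeeping omitted; names are illustrative):

    // Copy one output element either from the input data or from the pad value,
    // using byte offsets so no knowledge of the element type is required.
    std::memcpy(out + out_index * elem_size,
                in_bounds ? data + in_index * elem_size : pad_value,
                elem_size);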
ngraph/test/runtime/pass/opset0_downgrade.cpp
index f6dd906..dae8374 100644 (file)
@@ -467,26 +467,6 @@ namespace
         return replacement_node;
     }
 
-    shared_ptr<Node> op_cast(shared_ptr<op::v1::Pad> node)
-    {
-        const auto pad_arg = node->input_value(0);
-        Output<Node> pad_value;
-        if (node->get_input_size() == 4)
-        {
-            pad_value = node->input_value(3);
-        }
-        else
-        {
-            pad_value =
-                make_shared<op::Constant>(pad_arg.get_element_type(), Shape{}, vector<float>{0.f});
-        }
-        auto replacement_node = make_shared<op::v0::Pad>(
-            pad_arg, pad_value, node->get_pads_begin(), node->get_pads_end(), node->get_pad_mode());
-
-        replace_node(node, replacement_node);
-        return replacement_node;
-    }
-
     shared_ptr<Node> op_cast(shared_ptr<op::v1::Power> node)
     {
         return op_cast_binary_elementwise_node<op::v0::Power, op::v1::Power>(node);
ngraph/test/runtime/pass/opset1_upgrade.cpp
index db0836e..b1366f4 100644 (file)
@@ -346,25 +346,6 @@ namespace
         return op_cast_binary_elementwise_node<op::v0::Or, op::v1::LogicalOr>(node);
     }
 
-    shared_ptr<Node> op_cast(shared_ptr<op::Pad> node)
-    {
-        auto padding_below = node->get_padding_below();
-        auto pads_begin_node =
-            make_shared<op::Constant>(element::i64, Shape{padding_below.size()}, padding_below);
-        auto padding_above = node->get_padding_above();
-        auto pads_end_node =
-            make_shared<op::Constant>(element::i64, Shape{padding_above.size()}, padding_above);
-
-        auto replacement_node = make_shared<op::v1::Pad>(node->input_value(0),
-                                                         pads_begin_node,
-                                                         pads_end_node,
-                                                         node->input_value(1),
-                                                         node->get_pad_mode());
-
-        replace_node(node, replacement_node);
-        return replacement_node;
-    }
-
     shared_ptr<Node> op_cast(shared_ptr<op::Power> node)
     {
         return op_cast_binary_elementwise_node<op::v0::Power, op::v1::Power>(node);