using namespace std;
using namespace ngraph;
-constexpr NodeTypeInfo op::v0::MaxPool::type_info;
-
-op::v0::MaxPool::MaxPool(const Output<Node>& arg,
- const Shape& window_shape,
- const Strides& window_movement_strides,
- const Shape& padding_below,
- const Shape& padding_above,
- const PadType& pad_type,
- bool ceil_mode)
- : Op({arg})
- , m_window_shape(window_shape)
- , m_window_movement_strides(window_movement_strides)
- , m_padding_below(padding_below)
- , m_padding_above(padding_above)
- , m_pad_type(pad_type)
- , m_ceil_mode(ceil_mode)
-{
- constructor_validate_and_infer_types();
-}
-
-op::v0::MaxPool::MaxPool(const Output<Node>& arg,
- const Shape& window_shape,
- const Strides& window_movement_strides,
- const Shape& padding_below,
- const Shape& padding_above,
- const PadType& pad_type)
- : v0::MaxPool(
- arg, window_shape, window_movement_strides, padding_below, padding_above, pad_type, false)
-{
-}
-
-op::v0::MaxPool::MaxPool(const Output<Node>& arg,
- const Shape& window_shape,
- const Strides& window_movement_strides,
- const Shape& padding_below,
- const Shape& padding_above)
- : v0::MaxPool(arg,
- window_shape,
- window_movement_strides,
- padding_below,
- padding_above,
- PadType::EXPLICIT)
-{
-}
-
-void op::v0::MaxPool::validate_and_infer_types()
-{
- if (0 == m_window_movement_strides.size())
- {
- m_window_movement_strides = Strides(m_window_shape.size(), 1);
- }
-
- if (0 == m_padding_below.size())
- {
- m_padding_below = Shape(m_window_shape.size(), 0);
- }
-
- if (0 == m_padding_above.size())
- {
- m_padding_above = Shape(m_window_shape.size(), 0);
- }
-
- const PartialShape& arg_shape = get_input_partial_shape(0);
-
- update_auto_padding(arg_shape, m_padding_above, m_padding_below);
-
- // infer_batched_forward_pooling wants CoordinateDiffs for these, while the pooling ops for
- // now still take Shape (no negative padding).
- CoordinateDiff padding_below(m_padding_below.begin(), m_padding_below.end());
- CoordinateDiff padding_above(m_padding_above.begin(), m_padding_above.end());
-
- set_output_type(0,
- get_input_element_type(0),
- infer_batched_pooling_forward(this,
- arg_shape,
- padding_below,
- padding_above,
- m_window_shape,
- m_window_movement_strides,
- true,
- m_ceil_mode));
-}
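// A sketch of the shape arithmetic performed by infer_batched_pooling_forward
// above (the standard pooling formula; the helper's body is not shown in this
// diff). Per spatial axis:
//
//   out = floor((in + padding_below + padding_above - window) / stride) + 1  // ceil_mode == false
//   out = ceil((in + padding_below + padding_above - window) / stride) + 1   // ceil_mode == true
//
// For example, in = 14, no padding, window = 3, stride = 1 gives
// floor((14 - 3) / 1) + 1 = 12, as exercised by the backend tests further down.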
-
-void op::v0::MaxPool::update_auto_padding(const PartialShape& in_shape,
- Shape& new_padding_above,
- Shape& new_padding_below)
-{
- if (m_pad_type == PadType::SAME_UPPER || m_pad_type == PadType::SAME_LOWER)
- {
- if (in_shape.is_static())
- {
- CoordinateDiff padding_above, padding_below;
- infer_auto_padding(in_shape.to_shape(),
- m_window_shape,
- m_window_movement_strides,
- Strides(m_window_shape.size(), 1), // No dilation
- m_pad_type,
- padding_above,
- padding_below);
- new_padding_above = Shape(padding_above.begin(), padding_above.end());
- new_padding_below = Shape(padding_below.begin(), padding_below.end());
- }
- }
-}
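// For SAME_UPPER/SAME_LOWER, infer_auto_padding chooses the padding so that
// out = ceil(in / stride) per axis. A sketch of the usual arithmetic, assuming
// the TensorFlow-style convention (an assumption; the helper's body is not
// shown here):
//
//   total = max((ceil(in / stride) - 1) * stride + window - in, 0);
//   // SAME_UPPER places the odd extra element above, SAME_LOWER below.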
-
bool op::v1::MaxPool::update_auto_padding(const PartialShape& in_shape,
Shape& new_pads_end,
Shape& new_pads_begin)
return update_auto_padding_succeed;
}
-op::v0::MaxPool::MaxPool(const Output<Node>& arg,
- const Shape& window_shape,
- const Strides& window_movement_strides)
- : v0::MaxPool(arg, window_shape, window_movement_strides, Shape(), Shape())
-{
-}
-
-op::v0::MaxPool::MaxPool(const Output<Node>& arg, const Shape& window_shape)
- : v0::MaxPool(arg, window_shape, Strides(), Shape(), Shape())
-{
-}
-
-shared_ptr<Node> op::v0::MaxPool::clone_with_new_inputs(const OutputVector& new_args) const
-{
- check_new_args_count(this, new_args);
- return make_shared<v0::MaxPool>(new_args.at(0),
- m_window_shape,
- m_window_movement_strides,
- m_padding_below,
- m_padding_above,
- m_pad_type,
- m_ceil_mode);
-}
-
-shared_ptr<Node> op::v0::MaxPool::get_default_value() const
-{
- return ngraph::make_constant_from_string("0", get_element_type(), get_shape());
-}
-
constexpr NodeTypeInfo op::v1::MaxPool::type_info;
op::v1::MaxPool::MaxPool(const Output<Node>& arg,
}
} // namespace
-bool op::v0::MaxPool::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs)
-{
- auto arg_shape = inputs[0]->get_partial_shape();
- auto padding_below_s = get_padding_below();
- auto padding_above_s = get_padding_above();
- update_auto_padding(arg_shape, padding_above_s, padding_below_s);
- CoordinateDiff padding_below(padding_below_s.begin(), padding_below_s.end());
- CoordinateDiff padding_above(padding_above_s.begin(), padding_above_s.end());
- auto out_shape = infer_batched_pooling_forward(this,
- arg_shape,
- padding_below,
- padding_above,
- get_window_shape(),
- get_window_movement_strides(),
- true,
- get_ceil_mode());
- return evaluate_maxpool(inputs[0],
- outputs[0],
- out_shape.get_shape(),
- get_window_shape(),
- get_window_movement_strides(),
- get_padding_below(),
- get_padding_above());
-}
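// A minimal usage sketch of the dynamic-shape path this enables (mirroring the
// eval test removed further down): the output shape is only computed once a
// concrete input arrives.
//
//   auto A = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
//   auto f = make_shared<Function>(make_shared<op::v0::MaxPool>(A, Shape{3}),
//                                  ParameterVector{A});
//   auto result_tensor = make_shared<HostTensor>();
//   f->evaluate({result_tensor},
//               {make_host_tensor<element::Type_t::f32>(
//                   {1, 1, 14}, {0, 1, 0, 2, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0})});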
-
bool op::v1::MaxPool::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs)
{
auto arg_shape = inputs[0]->get_partial_shape();
{
namespace op
{
- namespace v0
- {
- /// \brief Batched max pooling operation, with optional padding and window stride.
- class NGRAPH_API MaxPool : public Op
- {
- public:
- static constexpr NodeTypeInfo type_info{"MaxPool", 0};
- const NodeTypeInfo& get_type_info() const override { return type_info; }
- /// \brief Constructs a batched max pooling operation.
- MaxPool() = default;
-
- /// \brief Constructs a batched max pooling operation.
- ///
- /// \param arg The node producing the input data batch tensor.
- /// \param window_shape The window shape.
- /// \param window_movement_strides The window movement strides.
- /// \param padding_below The below-padding shape.
- /// \param padding_above The above-padding shape.
- /// \param pad_type The pad type for automatically computing padding sizes
- /// \param ceil_mode Whether to use ceiling while computing output shape.
- MaxPool(const Output<Node>& arg,
- const Shape& window_shape,
- const Strides& window_movement_strides,
- const Shape& padding_below,
- const Shape& padding_above,
- const PadType& pad_type,
- bool ceil_mode);
-
- /// \brief Constructs a batched max pooling operation.
- ///
- /// \param arg The node producing the input data batch tensor.
- /// \param window_shape The window shape.
- /// \param window_movement_strides The window movement strides.
- /// \param padding_below The below-padding shape.
- /// \param padding_above The above-padding shape.
- /// \param pad_type The pad type for automatically computing padding sizes
- MaxPool(const Output<Node>& arg,
- const Shape& window_shape,
- const Strides& window_movement_strides,
- const Shape& padding_below,
- const Shape& padding_above,
- const PadType& pad_type);
-
- /// \brief Constructs a batched max pooling operation.
- ///
- /// \param arg The node producing the input data batch tensor.
- /// \param window_shape The window shape.
- /// \param window_movement_strides The window movement strides.
- /// \param padding_below The below-padding shape.
- /// \param padding_above The above-padding shape.
- MaxPool(const Output<Node>& arg,
- const Shape& window_shape,
- const Strides& window_movement_strides,
- const Shape& padding_below,
- const Shape& padding_above);
-
- void validate_and_infer_types() override;
-
- /// \brief Constructs a batched, unpadded max pooling operation (i.e., all padding
- /// shapes are set to 0).
- ///
- /// \param arg The node producing the input data batch tensor.
- /// \param window_shape The window shape.
- /// \param window_movement_strides The window movement strides.
- MaxPool(const Output<Node>& arg,
- const Shape& window_shape,
- const Strides& window_movement_strides);
-
- /// \brief Constructs an unstrided batched max pooling operation (i.e., all window
- /// movement strides are 1 and all padding shapes are set to 0).
- ///
- /// \param arg The node producing the input data batch tensor.
- /// \param window_shape The window shape.
- MaxPool(const Output<Node>& arg, const Shape& window_shape);
-
- virtual std::shared_ptr<Node>
- clone_with_new_inputs(const OutputVector& new_args) const override;
-
- /// \return The window shape.
- const Shape& get_window_shape() const { return m_window_shape; }
- void set_window_shape(const Shape& window_shape) { m_window_shape = window_shape; }
- /// \return The window movement strides.
- const Strides& get_window_movement_strides() const
- {
- return m_window_movement_strides;
- }
- void set_window_movement_strides(const Strides& window_movement_strides)
- {
- m_window_movement_strides = window_movement_strides;
- }
- /// \return The below-padding shape.
- const Shape& get_padding_below() const { return m_padding_below; }
- void set_padding_below(const Shape& padding_below)
- {
- m_padding_below = padding_below;
- }
- /// \return The above-padding shape.
- const Shape& get_padding_above() const { return m_padding_above; }
- void set_padding_above(const Shape& padding_above)
- {
- m_padding_above = padding_above;
- }
- /// \return The pad type for pooling.
- const PadType& get_pad_type() const { return m_pad_type; }
- void set_pad_type(const PadType& pad_type) { m_pad_type = pad_type; }
- /// \return The ceiling mode being used for output shape computations
- bool get_ceil_mode() const { return m_ceil_mode; }
- void set_ceil_mode(bool ceil_mode) { m_ceil_mode = ceil_mode; }
- /// \return The default value for MaxPool.
- virtual std::shared_ptr<Node> get_default_value() const override;
-
- bool evaluate(const HostTensorVector& outputs,
- const HostTensorVector& inputs) override;
-
- protected:
- Shape m_window_shape;
- Strides m_window_movement_strides;
- Shape m_padding_below;
- Shape m_padding_above;
- PadType m_pad_type;
- bool m_ceil_mode{false};
-
- private:
- void update_auto_padding(const PartialShape& in_shape,
- Shape& new_padding_above,
- Shape& new_padding_below);
- };
- } // namespace v0
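// A construction sketch for the class above; the shapes are illustrative, not
// taken from the source:
//
//   auto data = make_shared<op::Parameter>(element::f32, Shape{1, 3, 224, 224});
//   auto pool = make_shared<op::v0::MaxPool>(data,
//                                            Shape{2, 2},    // window_shape
//                                            Strides{2, 2},  // window_movement_strides
//                                            Shape{0, 0},    // padding_below
//                                            Shape{0, 0});   // padding_above
//   // -> output shape {1, 3, 112, 112}: (224 - 2) / 2 + 1 = 112 per spatial axis.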
-
namespace v1
{
/// \brief Batched max pooling operation.
Shape& new_pads_begin);
};
} // namespace v1
-
- using v0::MaxPool;
- } // namespace op
+ } // namespace op
} // namespace ngraph
NGRAPH_OP(MVN, ngraph::op::v0, 0)
NGRAPH_OP(MatMul, ngraph::op::v0, 0)
NGRAPH_OP(Max, ngraph::op::v0, 0)
-NGRAPH_OP(MaxPool, ngraph::op::v0, 0)
NGRAPH_OP(MaxPool, ngraph::op::v1, 1)
NGRAPH_OP(Maximum, ngraph::op::v0, 0)
NGRAPH_OP(Maximum, ngraph::op::v1, 1)
this->add_matcher(m, callback, PassProperty::REQUIRE_STATIC_SHAPE);
}
-// conv(56w3s1) conv(28w3s2)
-// | |
-// conv(56w1s1) ==> conv(28w1s1)
-// | |
-// elt------------56 elt------------pool(28s2)
-// | | | |
-// conv(28w1s2) conv(28w1s2) conv(28w1s1) conv(28w1s1)
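// Reading the shorthand above (an interpretation; the pass itself does not
// spell it out): "56w3s1" means a 56x56 output from a 3x3 window with stride 1,
// and "elt" is the element-wise Add/Relu pair. The rewrite hoists the stride-2
// downsampling up through the residual block and compensates on the skip path
// with a 1x1, stride-2 MaxPool, which simply subsamples:
//
//   out[n][c][i][j] = in[n][c][2 * i][2 * j]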
-void pass::CoreFusion::construct_optimized_strided_conv()
-{
- Shape win_size_1{1, 1, 1, 1};
- auto is_bc = pattern::has_class<op::Broadcast>();
- auto data_stride3 = make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 128, 128});
- auto weights_stride3 = make_shared<pattern::op::Label>(element::f32, win_size_1);
-
- auto conv_stride3 = make_shared<op::Convolution>(data_stride3, weights_stride3);
-
- auto conv_stride3_label =
- make_shared<pattern::op::Label>(conv_stride3, nullptr, NodeVector{conv_stride3});
-
- auto broadcast_w3_label = make_shared<pattern::op::Label>(conv_stride3_label, is_bc);
- auto add_w3 = make_shared<op::Add>(conv_stride3_label, broadcast_w3_label);
- auto relu_w3 = make_shared<op::Relu>(add_w3);
-
- auto weights_stride1 = make_shared<pattern::op::Label>(element::f32, win_size_1);
- auto conv_stride1 = make_shared<op::Convolution>(relu_w3, weights_stride1);
- auto conv_stride1_label =
- make_shared<pattern::op::Label>(conv_stride1, nullptr, NodeVector{conv_stride1});
- auto broadcast_w1_label = make_shared<pattern::op::Label>(conv_stride1_label, is_bc);
- auto add_w1 = make_shared<op::Add>(conv_stride1_label, broadcast_w1_label);
-
- auto eltwise_arg_label =
- make_shared<pattern::op::Label>(element::f32, conv_stride1->get_shape());
- auto add_two_convs = make_shared<op::Add>(add_w1, eltwise_arg_label);
-
- auto relu_two_convs = make_shared<op::Relu>(add_two_convs);
-
- auto eltwise_label =
- make_shared<pattern::op::Label>(relu_two_convs, nullptr, NodeVector{relu_two_convs});
-
- auto weights_eltwise = make_shared<pattern::op::Label>(element::f32, win_size_1);
- auto eltwise_conv = make_shared<op::Convolution>(eltwise_label, weights_eltwise);
-
- auto callback = [win_size_1,
- eltwise_label,
- conv_stride1_label,
- conv_stride3_label,
- eltwise_arg_label,
- broadcast_w3_label,
- broadcast_w1_label](pattern::Matcher& m) {
- NGRAPH_DEBUG << "In a callback for construct_conv_skip against "
- << m.get_match_root()->get_name();
-
- if (m.get_match_root()->get_users().empty())
- {
- NGRAPH_DEBUG << m.get_match_root()
- << " has already been replaced by a preceding callback";
- return false;
- }
-
- auto pattern_map = m.get_pattern_map();
- auto m_eltwise = pattern_map[eltwise_label];
-
- vector<shared_ptr<Node>> strided_convs;
- for (auto n : m_eltwise->get_users())
- {
- if (is_used(n.get()))
- {
- if (!is_type<op::Convolution>(n))
- {
- NGRAPH_DEBUG << "Not all live users of element wise operation are Convolution";
- return false;
- }
- strided_convs.push_back(n);
- }
- }
-
- if (strided_convs.size() != 2)
- {
- NGRAPH_DEBUG << "Number of live users of element wise operation isn't equal to 2";
- return false;
- }
-
- Shape supported_shapes[] = {Shape{56, 56}, Shape{28, 28}, Shape{14, 14}, Shape{7, 7}};
- Shape shape_1{1, 1};
- Shape shape_3{3, 3};
- Strides stride_2{2, 2};
- Strides stride_1{1, 1};
- CoordinateDiff pad_0{0, 0};
- CoordinateDiff pad_1{1, 1};
- Shape win_size_3{1, 1, 3, 3};
-
- size_t sparse_shape_index = 0;
- NodeVector sconvs;
- for (auto sc : strided_convs)
- {
- if (sc->get_argument(0) != m_eltwise)
- {
- NGRAPH_DEBUG << "element-wise isn't data";
- return false;
- }
- auto sconv = static_pointer_cast<op::Convolution>(sc);
- sparse_shape_index = shape_to_index(sconv->get_shape());
- if (sparse_shape_index == 0)
- {
- NGRAPH_DEBUG << "Unsupported shape of " << sconv->get_name();
- return false;
- }
- if (!are_img_dims_equal(sconv->get_shape(), supported_shapes[sparse_shape_index]) ||
- !are_img_dims_equal(sconv->get_input_shape(1), shape_1) ||
- sconv->get_window_movement_strides() != stride_2 || !is_trivial_convolution(sconv))
- {
- NGRAPH_DEBUG << sconv->get_name() << " and its weights are of the wrong shape (not "
- << vector_to_string(supported_shapes[sparse_shape_index])
- << " and 1x1) and strides (2x2)";
- return false;
- }
- sconvs.push_back(sconv);
- }
-
- const size_t full_shape_index = sparse_shape_index - 1;
-
- auto m_conv_stride1 = static_pointer_cast<op::Convolution>(pattern_map[conv_stride1_label]);
-
- if (!are_img_dims_equal(m_conv_stride1->get_shape(), supported_shapes[full_shape_index]) ||
- !are_img_dims_equal(m_conv_stride1->get_input_shape(1), win_size_1) ||
- m_conv_stride1->get_window_movement_strides() != stride_1 ||
- !is_trivial_convolution(m_conv_stride1))
- {
- NGRAPH_DEBUG << m_conv_stride1->get_name()
- << " and its weights are of the wrong shape (not "
- << vector_to_string(supported_shapes[full_shape_index])
- << " and 1x1) and strides (1x1)";
- return false;
- }
-
- auto m_conv_stride3 = static_pointer_cast<op::Convolution>(pattern_map[conv_stride3_label]);
-
- if (!are_img_dims_equal(m_conv_stride3->get_shape(), supported_shapes[full_shape_index]) ||
- !are_img_dims_equal(m_conv_stride3->get_input_shape(1), shape_3) ||
- m_conv_stride3->get_window_movement_strides() != stride_1 ||
- !is_trivial_convolution(m_conv_stride3, true))
- {
- NGRAPH_DEBUG << m_conv_stride3->get_name()
- << " and its weights are of the wrong shape (not "
- << vector_to_string(supported_shapes[full_shape_index])
- << " and 3x3) and strides (1x1)";
- return false;
- }
-
- auto conv_28w3s2 = make_shared<op::Convolution>(m_conv_stride3->get_argument(0),
- m_conv_stride3->get_argument(1),
- stride_2,
- stride_1,
- pad_1,
- pad_1);
-
- auto new_add_conv_28w3s2 =
- make_shared<op::Add>(conv_28w3s2, reduce_broadcast(pattern_map[broadcast_w3_label]));
- auto new_relu_28w3s2 = make_shared<op::Relu>(new_add_conv_28w3s2);
-
- auto conv_28w1s1 = make_shared<op::Convolution>(
- new_relu_28w3s2, m_conv_stride1->get_argument(1), stride_1, stride_1);
-
- auto new_add_conv28s1 =
- make_shared<op::Add>(conv_28w1s1, reduce_broadcast(pattern_map[broadcast_w1_label]));
-
- auto maxpool =
- make_shared<op::MaxPool>(pattern_map[eltwise_arg_label], Shape{1, 1}, stride_2);
- auto new_add_two_convs = make_shared<op::Add>(new_add_conv28s1, maxpool);
- auto new_relu_two_convs = make_shared<op::Relu>(new_add_two_convs);
-
- for (auto sconv : sconvs)
- {
- auto sconv_28w1s1 = make_shared<op::Convolution>(
- new_relu_two_convs, sconv->get_argument(1), stride_1, stride_1);
- NGRAPH_DEBUG << "Replacing " << sconv->get_name() << " with "
- << sconv_28w1s1->get_name();
- replace_node(sconv, sconv_28w1s1);
- }
- return true;
- };
-
- auto m = make_shared<pattern::Matcher>(eltwise_conv, "CoreFusion.OptimizedStridedConv");
- this->add_matcher(m, callback, PassProperty::REQUIRE_STATIC_SHAPE);
-}
-
void pass::CoreFusion::construct_reshape_softmax_reshape()
{
Shape input_shape{10, 20};
construct_folded_batch_norm();
construct_conv_affine_folding();
construct_sigmoid();
- construct_optimized_strided_conv();
construct_reshape_broadcast();
construct_reshape_softmax_reshape();
construct_zero_padded_reshaped_conv();
void construct_folded_batch_norm();
void construct_conv_affine_folding();
void construct_sigmoid();
- void construct_optimized_strided_conv();
void construct_reshape_broadcast();
void construct_reshape_softmax_reshape();
void construct_zero_padded_reshaped_conv();
node = make_shared<op::Max>(args[0], reduction_axes);
break;
}
- case OP_TYPEID::MaxPool:
- {
- auto window_shape = node_js.at("window_shape").get<vector<size_t>>();
- auto window_movement_strides =
- node_js.at("window_movement_strides").get<vector<size_t>>();
- // For backwards compatibility, both (but not just one) of the padding_ fields may
- // be omitted.
- auto padding_below_maybe = get_or_default(node_js, "padding_below", json{});
- auto padding_above_maybe = get_or_default(node_js, "padding_above", json{});
- op::PadType pad_type = read_pad_type(node_js);
- if (padding_below_maybe.empty() && !padding_above_maybe.empty())
- {
- throw runtime_error(
- "MaxPool: padding_below is absent but padding_above is present");
- }
- else if (!padding_below_maybe.empty() && padding_above_maybe.empty())
- {
- throw runtime_error(
- "MaxPool: padding_below is present but padding_above is absent");
- }
- else if (!padding_below_maybe.empty() && !padding_above_maybe.empty())
- {
- auto padding_below = padding_below_maybe.get<vector<size_t>>();
- auto padding_above = padding_above_maybe.get<vector<size_t>>();
- node = make_shared<op::v0::MaxPool>(args[0],
- window_shape,
- window_movement_strides,
- padding_below,
- padding_above,
- pad_type);
- }
- else
- {
- node = make_shared<op::v0::MaxPool>(args[0], window_shape, window_movement_strides);
- }
-
- break;
- }
case OP_TYPEID::Maximum:
{
node = make_shared<op::v0::Maximum>(
node["reduction_axes"] = serialize_axis_set(tmp->get_reduction_axes());
break;
}
- case OP_TYPEID::MaxPool:
- {
- auto tmp = static_cast<const op::v0::MaxPool*>(&n);
- node["window_shape"] = tmp->get_window_shape();
- node["window_movement_strides"] = tmp->get_window_movement_strides();
- node["padding_below"] = tmp->get_padding_below();
- node["padding_above"] = tmp->get_padding_above();
- node["pad_type"] = tmp->get_pad_type();
- break;
- }
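// For reference, a v0::MaxPool node written by the case above would carry
// fields shaped roughly like this (values illustrative, pad_type encoding
// elided):
//
//   { "window_shape": [2, 3], "window_movement_strides": [1, 1],
//     "padding_below": [0, 0], "padding_above": [0, 0], "pad_type": ... }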
case OP_TYPEID::Maximum:
{
const op::util::BinaryElementwiseArithmetic* tmp = nullptr;
opset_pass/one_hot_opset_pass.cpp
opset_pass/gather_opset_pass.cpp
opset_pass/pad_opset_pass.cpp
- opset_pass/poolings_opset_pass.cpp
opset_pass/reduction_opset_pass.cpp
opset_pass/reverse_opset_pass.cpp
opset_pass/select_opset_pass.cpp
backend/pad.in.cpp
backend/parameter_as_output.in.cpp
backend/partial_slice.in.cpp
- backend/pool.in.cpp
backend/power.in.cpp
backend/product.in.cpp
backend/quantize_dequantize.in.cpp
+++ /dev/null
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#include <algorithm>
-#include <cinttypes>
-#include <cmath>
-#include <cstdlib>
-#include <random>
-#include <string>
-
-#include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
-#include "ngraph/runtime/tensor.hpp"
-#include "runtime/backend.hpp"
-#include "util/all_close.hpp"
-#include "util/all_close_f.hpp"
-#include "util/float_util.hpp"
-#include "util/ndarray.hpp"
-#include "util/random.hpp"
-#include "util/test_control.hpp"
-#include "util/test_tools.hpp"
-
-using namespace std;
-using namespace ngraph;
-
-static string s_manifest = "${MANIFEST}";
-
-NGRAPH_TEST(${BACKEND_NAME}, max_pool_1d_1channel_1image)
-{
- Shape shape_a{1, 1, 14};
- Shape window_shape{3};
- auto A = make_shared<op::Parameter>(element::f32, shape_a);
- Shape shape_r{1, 1, 12};
- auto f = make_shared<Function>(make_shared<op::MaxPool>(A, window_shape), ParameterVector{A});
-
- auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
- // Create some tensors for input/output
- auto a = backend->create_tensor(element::f32, shape_a);
- copy_data(a,
- test::NDArray<float, 3>{{{0, 1, 0, 2, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0}}}.get_vector());
- auto result = backend->create_tensor(element::f32, shape_r);
-
- auto handle = backend->compile(f);
- handle->call_with_validate({result}, {a});
- EXPECT_TRUE(test::all_close_f(
- (test::NDArray<float, 3>({{{1, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 0}}}).get_vector()),
- read_vector<float>(result),
- MIN_FLOAT_TOLERANCE_BITS));
-}
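// Worked check of the expected values above: out[i] = max(in[i], in[i+1], in[i+2])
// over the 14-element row, giving 14 - 3 + 1 = 12 outputs, e.g.
//   out[0] = max(0, 1, 0) = 1, out[1] = max(1, 0, 2) = 2, out[11] = max(0, 0, 0) = 0.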
-
-NGRAPH_TEST(${BACKEND_NAME}, max_pool_uint8)
-{
- vector<uint8_t> a_data = {0, 1, 0, 2, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0, 1};
- Shape shape_a{1, 1, 3, 5};
- Shape window_shape{2, 3};
- auto window_movement_strides = Strides{1, 1};
- Shape padding_below{0, 0};
- Shape padding_above{0, 0};
- Shape shape_r{1, 1, 2, 3};
- auto A = make_shared<op::Parameter>(element::u8, shape_a);
- auto QMP = make_shared<ngraph::op::MaxPool>(
- A, window_shape, window_movement_strides, padding_below, padding_above);
- auto f = make_shared<Function>(NodeVector{QMP}, ParameterVector{A});
- auto backend = runtime::Backend::create("${BACKEND_NAME}");
- // Create some tensors for input/output
- auto a = backend->create_tensor(element::u8, shape_a);
- copy_data(a, a_data);
- auto result = backend->create_tensor(element::u8, shape_r);
- auto handle = backend->compile(f);
- handle->call_with_validate({result}, {a});
- EXPECT_EQ((vector<uint8_t>{3, 3, 2, 3, 3, 2}), read_vector<uint8_t>(result));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, max_pool_int8)
-{
- vector<int8_t> a_data = {0, 1, 0, -2, 1, 0, -3, 2, 0, 0, 2, 0, 0, 0, 1};
- Shape shape_a{1, 1, 3, 5};
- Shape window_shape{2, 3};
- auto window_movement_strides = Strides{1, 1};
- Shape padding_below{0, 0};
- Shape padding_above{0, 0};
- Shape shape_r{1, 1, 2, 3};
- auto A = make_shared<op::Parameter>(element::i8, shape_a);
- auto QMP = make_shared<ngraph::op::MaxPool>(
- A, window_shape, window_movement_strides, padding_below, padding_above);
- auto f = make_shared<Function>(NodeVector{QMP}, ParameterVector{A});
- auto backend = runtime::Backend::create("${BACKEND_NAME}");
- // Create some tensors for input/output
- auto a = backend->create_tensor(element::i8, shape_a);
- copy_data(a, a_data);
- auto result = backend->create_tensor(element::i8, shape_r);
- auto handle = backend->compile(f);
- handle->call_with_validate({result}, {a});
- EXPECT_EQ((vector<int8_t>{2, 2, 2, 2, 2, 2}), read_vector<int8_t>(result));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, max_pool_1d_1channel_2image)
-{
- Shape shape_a{2, 1, 14};
- Shape window_shape{3};
- auto A = make_shared<op::Parameter>(element::f32, shape_a);
- Shape shape_r{2, 1, 12};
- auto f = make_shared<Function>(make_shared<op::MaxPool>(A, window_shape), ParameterVector{A});
-
- auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
- // Create some tensors for input/output
- auto a = backend->create_tensor(element::f32, shape_a);
- copy_data(a,
- test::NDArray<float, 3>({{{0, 1, 0, 2, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0}},
- {{0, 2, 1, 1, 0, 0, 0, 2, 0, 1, 0, 0, 1, 2}}})
- .get_vector());
- auto result = backend->create_tensor(element::f32, shape_r);
-
- auto handle = backend->compile(f);
- handle->call_with_validate({result}, {a});
- EXPECT_TRUE(test::all_close_f((test::NDArray<float, 3>({{{1, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 0}},
- {{2, 2, 1, 1, 0, 2, 2, 2, 1, 1, 1, 2}}})
- .get_vector()),
- read_vector<float>(result),
- MIN_FLOAT_TOLERANCE_BITS));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, max_pool_1d_2channel_2image)
-{
- Shape shape_a{2, 2, 14};
- Shape window_shape{3};
- auto A = make_shared<op::Parameter>(element::f32, shape_a);
- Shape shape_r{2, 2, 12};
- auto f = make_shared<Function>(make_shared<op::MaxPool>(A, window_shape), ParameterVector{A});
-
- auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
- // Create some tensors for input/output
- auto a = backend->create_tensor(element::f32, shape_a);
- copy_data(a,
- test::NDArray<float, 3>({{{0, 1, 0, 2, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0},
- {0, 0, 0, 2, 0, 0, 2, 3, 0, 1, 2, 0, 1, 0}},
-
- {{0, 2, 1, 1, 0, 0, 0, 2, 0, 1, 0, 0, 1, 2},
- {2, 1, 0, 0, 1, 0, 2, 0, 0, 0, 1, 1, 2, 0}}})
- .get_vector());
- auto result = backend->create_tensor(element::f32, shape_r);
-
- auto handle = backend->compile(f);
- handle->call_with_validate({result}, {a});
- EXPECT_TRUE(test::all_close_f(
- (test::NDArray<float, 3>(
- {{{1, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 0}, {0, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 1}},
-
- {{2, 2, 1, 1, 0, 2, 2, 2, 1, 1, 1, 2}, {2, 1, 1, 1, 2, 2, 2, 0, 1, 1, 2, 2}}})
- .get_vector()),
- read_vector<float>(result),
- MIN_FLOAT_TOLERANCE_BITS));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, max_pool_2d_2channel_2image)
-{
- Shape shape_a{2, 2, 5, 5};
- Shape window_shape{2, 3};
- auto A = make_shared<op::Parameter>(element::f32, shape_a);
- Shape shape_r{2, 2, 4, 3};
- auto f = make_shared<Function>(make_shared<op::MaxPool>(A, window_shape), ParameterVector{A});
-
- auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
- // Create some tensors for input/output
- auto a = backend->create_tensor(element::f32, shape_a);
- copy_data(a,
- test::NDArray<float, 4>({{{{0, 1, 0, 2, 1}, // img 0 chan 0
- {0, 3, 2, 0, 0},
- {2, 0, 0, 0, 1},
- {2, 0, 1, 1, 2},
- {0, 2, 1, 0, 0}},
-
- {{0, 0, 0, 2, 0}, // img 0 chan 1
- {0, 2, 3, 0, 1},
- {2, 0, 1, 0, 2},
- {3, 1, 0, 0, 0},
- {2, 0, 0, 0, 0}}},
-
- {{{0, 2, 1, 1, 0}, // img 1 chan 0
- {0, 0, 2, 0, 1},
- {0, 0, 1, 2, 3},
- {2, 0, 0, 3, 0},
- {0, 0, 0, 0, 0}},
-
- {{2, 1, 0, 0, 1}, // img 1 chan 1
- {0, 2, 0, 0, 0},
- {1, 1, 2, 0, 2},
- {1, 1, 1, 0, 1},
- {1, 0, 0, 0, 2}}}})
- .get_vector());
- auto result = backend->create_tensor(element::f32, shape_r);
-
- auto handle = backend->compile(f);
- handle->call_with_validate({result}, {a});
- EXPECT_TRUE(test::all_close_f((test::NDArray<float, 4>({{{{3, 3, 2}, // img 0 chan 0
- {3, 3, 2},
- {2, 1, 2},
- {2, 2, 2}},
-
- {{3, 3, 3}, // img 0 chan 1
- {3, 3, 3},
- {3, 1, 2},
- {3, 1, 0}}},
-
- {{{2, 2, 2}, // img 1 chan 0
- {2, 2, 3},
- {2, 3, 3},
- {2, 3, 3}},
-
- {{2, 2, 1}, // img 1 chan 1
- {2, 2, 2},
- {2, 2, 2},
- {1, 1, 2}}}})
- .get_vector()),
- read_vector<float>(result),
- MIN_FLOAT_TOLERANCE_BITS));
-}
-
-// this test covers the case of multiple images with asymmetric padding;
-// a bug previously found on the GPU side is also covered by this test
-NGRAPH_TEST(${BACKEND_NAME}, max_pool_2d_2channel_2image_asym_pad)
-{
- Shape shape_a{2, 2, 4, 4};
- Shape window_shape{3, 3};
- auto window_movement_strides = Strides{2, 2};
- Shape padding_below{0, 0};
- Shape padding_above{1, 1};
- auto A = make_shared<op::Parameter>(element::f32, shape_a);
- Shape shape_r{2, 2, 2, 2};
- auto f = make_shared<Function>(
- make_shared<op::MaxPool>(
- A, window_shape, window_movement_strides, padding_below, padding_above),
- ParameterVector{A});
-
- auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
- // Create some tensors for input/output
- auto a = backend->create_tensor(element::f32, shape_a);
- copy_data(a,
- test::NDArray<float, 4>({{{{0, 1, 0, 2}, // img 0 chan 0
- {0, 3, 2, 0},
- {2, 0, 0, 0},
- {0, 2, 1, 0}},
-
- {{0, 0, 0, 2}, // img 0 chan 1
- {0, 2, 3, 0},
- {2, 0, 1, 0},
- {2, 0, 0, 0}}},
-
- {{{0, 2, 1, 1}, // img 1 chan 0
- {0, 0, 2, 0},
- {0, 0, 1, 2},
- {0, 0, 0, 0}},
-
- {{2, 1, 0, 0}, // img 1 chan 1
- {0, 2, 0, 0},
- {1, 1, 2, 0},
- {1, 0, 0, 0}}}})
- .get_vector());
- auto result = backend->create_tensor(element::f32, shape_r);
-
- auto handle = backend->compile(f);
- handle->call_with_validate({result}, {a});
- EXPECT_TRUE(test::all_close_f((test::NDArray<float, 4>({{{{3, 2}, // img 0 chan 0
- {2, 1}},
-
- {{3, 3}, // img 0 chan 1
- {2, 1}}},
-
- {{{2, 2}, // img 1 chan 0
- {1, 2}},
-
- {{2, 2}, // img 1 chan 1
- {2, 2}}}})
- .get_vector()),
- read_vector<float>(result),
- MIN_FLOAT_TOLERANCE_BITS));
-}
-
-// MaxPool2D1ChannelTests test fixture for test setup reuse
-class MaxPool2D1ChannelTests : public testing::Test
-{
-public:
- Shape shape_a{1, 1, 5, 5};
- Shape window_shape{2, 3};
- Strides window_movement_strides{1, 1};
-
-protected:
- virtual void SetUp() override {}
-};
-
-NGRAPH_TEST_F(${BACKEND_NAME}, MaxPool2D1ChannelTests, max_pool_2d_1channel_1image_overpadded)
-{
- Shape padding_below{2, 0};
- Shape padding_above{1, 2};
- auto A = make_shared<op::Parameter>(element::f32, shape_a);
- Shape shape_r{1, 1, 7, 5};
- auto f = make_shared<Function>(
- make_shared<op::MaxPool>(
- A, window_shape, window_movement_strides, padding_below, padding_above),
- ParameterVector{A});
-
- auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
- // Create some tensors for input/output
- auto a = backend->create_tensor(element::f32, shape_a);
- copy_data(a,
- test::NDArray<float, 4>({{{{0, 1, 0, 2, 1},
- {0, 3, 2, 0, 0},
- {2, 0, 0, 0, 1},
- {2, 0, 1, 1, 2},
- {0, 2, 1, 0, 0}}}})
- .get_vector());
- auto result = backend->create_tensor(element::f32, shape_r);
-
- auto handle = backend->compile(f);
- handle->call_with_validate({result}, {a});
- auto min = std::numeric_limits<float>::lowest();
- EXPECT_TRUE(test::all_close_f(test::NDArray<float, 4>({{{{min, min, min, min, min},
- {1, 2, 2, 2, 1},
- {3, 3, 2, 2, 1},
- {3, 3, 2, 1, 1},
- {2, 1, 2, 2, 2},
- {2, 2, 2, 2, 2},
- {2, 2, 1, 0, 0}}}})
- .get_vector(),
- read_vector<float>(result),
- MIN_FLOAT_TOLERANCE_BITS));
-}
-
-NGRAPH_TEST_F(${BACKEND_NAME}, MaxPool2D1ChannelTests, max_pool_2d_1channel_1image_padded)
-{
- Shape padding_below{1, 0};
- Shape padding_above{1, 2};
- auto A = make_shared<op::Parameter>(element::f32, shape_a);
- Shape shape_r{1, 1, 6, 5};
- auto f = make_shared<Function>(
- make_shared<op::MaxPool>(
- A, window_shape, window_movement_strides, padding_below, padding_above),
- ParameterVector{A});
-
- auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
- // Create some tensors for input/output
- auto a = backend->create_tensor(element::f32, shape_a);
- copy_data(a,
- test::NDArray<float, 4>({{{{0, 1, 0, 2, 1},
- {0, 3, 2, 0, 0},
- {2, 0, 0, 0, 1},
- {2, 0, 1, 1, 2},
- {0, 2, 1, 0, 0}}}})
- .get_vector());
- auto result = backend->create_tensor(element::f32, shape_r);
-
- auto handle = backend->compile(f);
- handle->call_with_validate({result}, {a});
- EXPECT_TRUE(test::all_close_f((test::NDArray<float, 4>({{{{1, 2, 2, 2, 1},
- {3, 3, 2, 2, 1},
- {3, 3, 2, 1, 1},
- {2, 1, 2, 2, 2},
- {2, 2, 2, 2, 2},
- {2, 2, 1, 0, 0}}}})
- .get_vector()),
- read_vector<float>(result),
- MIN_FLOAT_TOLERANCE_BITS));
-}
-
-// Test to make sure that negative elements and padding are handled properly. Added because
-// mkldnn calls its padding "zero padding", but that is not technically true: negative values
-// still "win" versus out-of-bounds (padding) values, which is the desired behavior.
-NGRAPH_TEST(${BACKEND_NAME}, max_pool_2d_1channel_1image_padded_negative_values)
-{
- auto shape_a = Shape{1, 1, 1, 14}; // 1 image, 1 channel, 1 row, 14 columns (if it's 1D we don't
- // get mkldnn as of this writing)
- Shape window_shape{1, 3};
- auto window_movement_strides = Strides{1, 1};
- Shape padding_below{0, 1};
- Shape padding_above{0, 2};
- auto A = make_shared<op::Parameter>(element::f32, shape_a);
- Shape shape_r{1, 1, 1, 15};
- auto f = make_shared<Function>(
- make_shared<op::MaxPool>(
- A, window_shape, window_movement_strides, padding_below, padding_above),
- ParameterVector{A});
-
- auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
- // Create some tensors for input/output
- auto a = backend->create_tensor(element::f32, shape_a);
- copy_data(a,
- test::NDArray<float, 4>{{{{-1, -2, -3, -3, -2, -1, -3, -2, -2, -2, -2, -3, -4, -5}}}}
- .get_vector());
- auto result = backend->create_tensor(element::f32, shape_r);
-
- auto handle = backend->compile(f);
- handle->call_with_validate({result}, {a});
- EXPECT_TRUE(test::all_close_f(
- (test::NDArray<float, 4>({{{{-1, -1, -2, -2, -1, -1, -1, -2, -2, -2, -2, -2, -3, -4, -5}}}})
- .get_vector()),
- read_vector<float>(result),
- MIN_FLOAT_TOLERANCE_BITS));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, max_pool_2d_1channel_1image_strided)
-{
- Shape shape_a{1, 1, 8, 8};
- Shape window_shape{2, 3};
- auto window_movement_strides = Strides{3, 2};
- auto A = make_shared<op::Parameter>(element::f32, shape_a);
- Shape shape_r{1, 1, 3, 3};
- auto f = make_shared<Function>(
- make_shared<op::MaxPool>(A, window_shape, window_movement_strides), ParameterVector{A});
-
- auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
- // Create some tensors for input/output
- auto a = backend->create_tensor(element::f32, shape_a);
- copy_data(a,
- test::NDArray<float, 4>({{{{0, 1, 0, 2, 1, 2, 0, 0},
- {0, 3, 2, 0, 0, 0, 1, 0},
- {2, 0, 0, 0, 1, 0, 0, 0},
- {2, 0, 1, 1, 2, 2, 3, 0},
- {0, 2, 1, 0, 0, 0, 1, 0},
- {2, 0, 3, 1, 0, 0, 0, 0},
- {1, 2, 0, 0, 0, 1, 2, 0},
- {1, 0, 2, 0, 0, 0, 1, 0}}}})
- .get_vector());
- auto result = backend->create_tensor(element::f32, shape_r);
-
- auto handle = backend->compile(f);
- handle->call_with_validate({result}, {a});
- EXPECT_TRUE(test::all_close_f(
- (test::NDArray<float, 4>({{{{3, 2, 2}, {2, 2, 3}, {2, 2, 2}}}}).get_vector()),
- read_vector<float>(result),
- MIN_FLOAT_TOLERANCE_BITS));
-}
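// Shape check for the strided case above: rows use window 2 with stride 3 and
// columns use window 3 with stride 2 over an 8x8 input, so
//   floor((8 - 2) / 3) + 1 = 3 and floor((8 - 3) / 2) + 1 = 3,
// hence shape_r{1, 1, 3, 3}.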
-
-NGRAPH_TEST(${BACKEND_NAME}, max_pool_3d)
-{
- Shape shape_a{64, 3, 7, 8, 10};
- Shape window_shape{2, 3, 2};
- auto move_strides = Strides{2, 3, 4};
- Shape padding_below{5, 6, 4};
- Shape padding_above{6, 4, 5};
- auto A = make_shared<op::Parameter>(element::f32, shape_a);
- auto B = make_shared<op::Parameter>(element::f32, shape_a);
-
- auto cpu_f = make_shared<Function>(
- make_shared<op::MaxPool>(A, window_shape, move_strides, padding_below, padding_above),
- ParameterVector{A});
- auto int_f = make_shared<Function>(
- make_shared<op::MaxPool>(B, window_shape, move_strides, padding_below, padding_above),
- ParameterVector{B});
- test::Uniform<float> rng(0.0f, 1.0f);
- vector<vector<float>> args;
-
- for (shared_ptr<op::Parameter> param : int_f->get_parameters())
- {
- vector<float> tensor_val(shape_size(param->get_shape()));
- rng.initialize(tensor_val);
- args.push_back(tensor_val);
- }
- auto int_results = execute(int_f, args, "INTERPRETER");
- auto cpu_results = execute(cpu_f, args, "${BACKEND_NAME}");
- for (size_t i = 0; i < cpu_results.size(); i++)
- {
- EXPECT_TRUE(
- test::all_close_f(cpu_results.at(i), int_results.at(i), MIN_FLOAT_TOLERANCE_BITS));
- }
-}
}
}
-TEST(eval, max_pool_v0_dynamic)
-{
- Shape window_shape{3};
- auto A = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
- auto f =
- make_shared<Function>(make_shared<op::v0::MaxPool>(A, window_shape), ParameterVector{A});
- auto result_tensor = make_shared<HostTensor>();
-
- ASSERT_TRUE(f->evaluate({result_tensor},
- {make_host_tensor<element::Type_t::f32>(
- {1, 1, 14}, {0, 1, 0, 2, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0})}));
-
- EXPECT_EQ(result_tensor->get_element_type(), element::f32);
- EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{1, 1, 12}));
- auto cval = read_vector<float>(result_tensor);
- vector<float> out{1, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 0};
- ASSERT_EQ(cval, out);
-}
-
TEST(eval, max_pool_v1_dynamic)
{
Shape window_shape{3};
pass_manager.register_pass<pass::Opset0Downgrade>();
pass_manager.run_passes(function);
- test_provenance_tags<op::v0::TopK>(function, "<ONNX TopK (TOPK -> values, indices)>");
- test_provenance_tags<op::v0::TopK>(function, "<Opset1_Downgrade (v3 TopK)>");
- test_provenance_tags<op::v0::TopK>(function, "<Opset0_Downgrade (v1 TopK)>");
+ test_provenance_tags<ngraph::op::v0::TopK>(function, "<ONNX TopK (TOPK -> values, indices)>");
+ test_provenance_tags<ngraph::op::v0::TopK>(function, "<Opset1_Downgrade (v3 TopK)>");
+ test_provenance_tags<ngraph::op::v0::TopK>(function, "<Opset0_Downgrade (v1 TopK)>");
}
#include "ngraph/ngraph.hpp"
#include "ngraph/validation_util.hpp"
-#include "op/and.hpp"
-#include "op/atan2.hpp"
#include "util/test_tools.hpp"
using namespace ngraph;
EXPECT_FALSE(node.is_binary_elementwise_logical());
}
- void op_is_And()
- {
- op::v0::And node;
- EXPECT_FALSE(node.is_unary_elementwise_arithmetic());
- EXPECT_FALSE(node.is_binary_elementwise_arithmetic());
- EXPECT_FALSE(node.is_binary_elementwise_comparison());
- EXPECT_TRUE(node.is_binary_elementwise_logical());
- }
-
void op_is_Any()
{
op::Any node;
EXPECT_FALSE(node.is_binary_elementwise_logical());
}
- void op_is_Atan2()
- {
- op::v0::Atan2 node;
- EXPECT_FALSE(node.is_unary_elementwise_arithmetic());
- EXPECT_TRUE(node.is_binary_elementwise_arithmetic());
- EXPECT_FALSE(node.is_binary_elementwise_comparison());
- EXPECT_FALSE(node.is_binary_elementwise_logical());
- }
-
void op_is_AvgPool()
{
op::AvgPool node;
EXPECT_FALSE(node.is_binary_elementwise_logical());
}
- void op_is_MaxPool()
- {
- op::MaxPool node;
- EXPECT_FALSE(node.is_unary_elementwise_arithmetic());
- EXPECT_FALSE(node.is_binary_elementwise_arithmetic());
- EXPECT_FALSE(node.is_binary_elementwise_comparison());
- EXPECT_FALSE(node.is_binary_elementwise_logical());
- }
-
void op_is_Min()
{
op::Min node;
+++ /dev/null
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-
-#include "ngraph/ngraph.hpp"
-#include "ngraph/pass/manager.hpp"
-#include "opset0_downgrade.hpp"
-#include "opset1_upgrade.hpp"
-#include "util/test_control.hpp"
-#include "util/type_prop.hpp"
-
-using namespace std;
-using namespace ngraph;
-
-TEST(opset_transform, opset1_maxpool_upgrade_pass_floor)
-{
- auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 3, 6, 9});
- Shape pads_begin{0, 0};
- Shape pads_end{0, 0};
- Strides strides{1, 1};
- Shape kernel_shape{3, 3};
- bool ceil_mode = false;
- op::PadType pad_mode = op::PadType::EXPLICIT;
-
- auto maxpool_v0 = make_shared<op::v0::MaxPool>(
- arg, kernel_shape, strides, pads_begin, pads_end, pad_mode, ceil_mode);
- auto result = make_shared<op::Result>(maxpool_v0);
- auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});
-
- ngraph::pass::Manager pass_manager;
- pass_manager.register_pass<pass::Opset1Upgrade>();
- pass_manager.run_passes(f);
-
- auto maxpool_s1_result = f->get_results().at(0);
- auto node = maxpool_s1_result->get_input_node_shared_ptr(0);
- auto max_pool_v1_node = as_type_ptr<op::v1::MaxPool>(node);
- ASSERT_TRUE(max_pool_v1_node);
-
- EXPECT_EQ(max_pool_v1_node->get_pads_begin(), pads_begin);
- EXPECT_EQ(max_pool_v1_node->get_pads_end(), pads_end);
- EXPECT_EQ(max_pool_v1_node->get_strides(), strides);
- EXPECT_EQ(max_pool_v1_node->get_kernel(), kernel_shape);
- EXPECT_EQ(max_pool_v1_node->get_rounding_type(), op::RoundingType::FLOOR);
- EXPECT_EQ(max_pool_v1_node->get_auto_pad(), pad_mode);
-}
-
-TEST(opset_transform, opset1_maxpool_upgrade_pass_ceil)
-{
- auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 3, 6, 9});
- Shape pads_begin{0, 0};
- Shape pads_end{0, 0};
- Strides strides{1, 1};
- Shape kernel_shape{3, 3};
- bool ceil_mode = true;
- op::PadType pad_mode = op::PadType::EXPLICIT;
-
- auto maxpool_v0 = make_shared<op::v0::MaxPool>(
- arg, kernel_shape, strides, pads_begin, pads_end, pad_mode, ceil_mode);
- auto result = make_shared<op::Result>(maxpool_v0);
- auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});
-
- ngraph::pass::Manager pass_manager;
- pass_manager.register_pass<pass::Opset1Upgrade>();
- pass_manager.run_passes(f);
-
- auto maxpool_s1_result = f->get_results().at(0);
- auto node = maxpool_s1_result->get_input_node_shared_ptr(0);
- auto max_pool_v1_node = as_type_ptr<op::v1::MaxPool>(node);
- ASSERT_TRUE(max_pool_v1_node);
-
- EXPECT_EQ(max_pool_v1_node->get_pads_begin(), pads_begin);
- EXPECT_EQ(max_pool_v1_node->get_pads_end(), pads_end);
- EXPECT_EQ(max_pool_v1_node->get_strides(), strides);
- EXPECT_EQ(max_pool_v1_node->get_kernel(), kernel_shape);
- EXPECT_EQ(max_pool_v1_node->get_rounding_type(), op::RoundingType::CEIL);
- EXPECT_EQ(max_pool_v1_node->get_auto_pad(), pad_mode);
-}
-
-TEST(opset_transform, opset1_maxpool_downgrade_pass)
-{
- auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 3, 6, 9});
- Shape padding_below{1, 0};
- Shape padding_above{0, 1};
- Strides window_movement_strides{1, 1};
- Shape window_shape{3, 3};
- auto rounding_type = op::RoundingType::FLOOR;
- op::PadType pad_type = op::PadType::EXPLICIT;
-
- auto maxpool_v1 = make_shared<op::v1::MaxPool>(arg,
- window_movement_strides,
- padding_below,
- padding_above,
- window_shape,
- rounding_type,
- pad_type);
- auto result = make_shared<op::Result>(maxpool_v1);
- auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});
-
- ngraph::pass::Manager pass_manager;
- pass_manager.register_pass<pass::Opset0Downgrade>();
- pass_manager.run_passes(f);
-
- auto maxpool_s0_result = f->get_results().at(0);
- auto node = maxpool_s0_result->get_input_node_shared_ptr(0);
- auto max_pool_v0_node = as_type_ptr<op::v0::MaxPool>(node);
- ASSERT_TRUE(max_pool_v0_node);
-
- EXPECT_EQ(max_pool_v0_node->get_padding_below(), padding_below);
- EXPECT_EQ(max_pool_v0_node->get_padding_above(), padding_above);
- EXPECT_EQ(max_pool_v0_node->get_window_movement_strides(), window_movement_strides);
- EXPECT_EQ(max_pool_v0_node->get_window_shape(), window_shape);
- EXPECT_EQ(max_pool_v0_node->get_ceil_mode(), false);
- EXPECT_EQ(max_pool_v0_node->get_pad_type(), pad_type);
-}
performance_counter.hpp
dynamic/dynamic_backend.cpp
dynamic/dynamic_backend.hpp
- op/and.cpp
- op/and.hpp
- op/atan2.cpp
- op/atan2.hpp
op/avg_pool.cpp
op/avg_pool.hpp
)
#include "ngraph/pass/manager.hpp"
#include "ngraph/serializer.hpp"
#include "ngraph/util.hpp"
-#include "op/and.hpp"
-#include "op/atan2.hpp"
#include "opset0_downgrade.hpp"
#include "opset1_downgrade.hpp"
args[0]->get_data_ptr<const T>(), out[0]->get_data_ptr<T>(), element_count);
break;
}
- case OP_TYPEID::Atan2:
- {
- size_t element_count = shape_size(node.get_output_shape(0));
- reference::atan2<T>(args[0]->get_data_ptr<const T>(),
- args[1]->get_data_ptr<const T>(),
- out[0]->get_data_ptr<T>(),
- element_count);
- break;
- }
case OP_TYPEID::Elu:
{
const op::Elu* elu_node = static_cast<const op::Elu*>(&node);
case OP_TYPEID::UnknownOp:
throw unsupported_op("Unsupported op '" + node.description() + "'");
case OP_TYPEID::Add:
- case OP_TYPEID::And:
case OP_TYPEID::Broadcast:
case OP_TYPEID::Clamp:
case OP_TYPEID::Concat:
case OP_TYPEID::MatMul:
case OP_TYPEID::Max:
case OP_TYPEID::Maximum:
- case OP_TYPEID::MaxPool:
case OP_TYPEID::Min:
case OP_TYPEID::Minimum:
case OP_TYPEID::Multiply:
+++ /dev/null
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#include "and.hpp"
-#include "ngraph/runtime/host_tensor.hpp"
-#include "ngraph/runtime/reference/and.hpp"
-
-using namespace std;
-using namespace ngraph;
-
-namespace
-{
- template <element::Type_t ET>
- bool evaluate(const HostTensorPtr& arg0,
- const HostTensorPtr& arg1,
- const HostTensorPtr& out,
- const op::AutoBroadcastSpec& broadcast_spec)
- {
- runtime::reference::logical_and(arg0->get_data_ptr<ET>(),
- arg1->get_data_ptr<ET>(),
- out->get_data_ptr<ET>(),
- arg0->get_shape(),
- arg1->get_shape(),
- broadcast_spec);
- return true;
- }
-
- bool evaluate_logand(const HostTensorPtr& arg0,
- const HostTensorPtr& arg1,
- const HostTensorPtr& out,
- const op::AutoBroadcastSpec& broadcast_spec)
- {
- bool rc = true;
- out->set_broadcast(broadcast_spec, arg0, arg1);
- switch (arg0->get_element_type())
- {
- TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec);
- break;
- TYPE_CASE(i32)(arg0, arg1, out, broadcast_spec);
- break;
- TYPE_CASE(i64)(arg0, arg1, out, broadcast_spec);
- break;
- TYPE_CASE(u32)(arg0, arg1, out, broadcast_spec);
- break;
- TYPE_CASE(u64)(arg0, arg1, out, broadcast_spec);
- break;
- TYPE_CASE(f16)(arg0, arg1, out, broadcast_spec);
- break;
- TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec);
- break;
- default: rc = false; break;
- }
- return rc;
- }
-}
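// The TYPE_CASE entries above follow the usual nGraph dispatch pattern (an
// assumption about the macro, which is defined elsewhere): TYPE_CASE(f32)(...)
// expands to roughly
//
//   case element::Type_t::f32:
//       rc = evaluate<element::Type_t::f32>(arg0, arg1, out, broadcast_spec);
//
// followed by the explicit break; after each entry, so the switch selects the
// evaluate<ET> instantiation matching the runtime element type.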
-
-constexpr NodeTypeInfo op::v0::And::type_info;
-
-op::v0::And::And(const Output<Node>& arg0,
- const Output<Node>& arg1,
- const AutoBroadcastSpec& auto_broadcast)
- : BinaryElementwiseLogical(arg0, arg1, auto_broadcast)
-{
- constructor_validate_and_infer_types();
-}
-
-bool op::v0::And::visit_attributes(AttributeVisitor& visitor)
-{
- BinaryElementwiseLogical::visit_attributes(visitor);
- return true;
-}
-
-shared_ptr<Node> op::v0::And::clone_with_new_inputs(const OutputVector& new_args) const
-{
- check_new_args_count(this, new_args);
- return make_shared<v0::And>(new_args.at(0), new_args.at(1), this->get_autob());
-}
-
-bool op::v0::And::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs)
-{
- return evaluate_logand(inputs[0], inputs[1], outputs[0], get_autob());
-}
+++ /dev/null
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#pragma once
-
-#include <memory>
-
-#include "backend_visibility.hpp"
-#include "ngraph/op/util/binary_elementwise_logical.hpp"
-
-namespace ngraph
-{
- namespace op
- {
- namespace v0
- {
- /// \brief Elementwise logical-and operation.
- ///
- class BACKEND_API And : public util::BinaryElementwiseLogical
- {
- public:
- static constexpr NodeTypeInfo type_info{"And", 0};
- const NodeTypeInfo& get_type_info() const override { return type_info; }
- /// \brief Constructs a logical-and operation.
- And() = default;
-
- /// \brief Constructs a logical-and operation.
- ///
- /// \param arg0 Output that produces the first input tensor.<br>
- /// `[d0, ...]`
- /// \param arg1 Output that produces the second input tensor.<br>
- /// `[d0, ...]`
- /// \param auto_broadcast Auto broadcast specification
- ///
- /// Output `[d0, ...]`
- ///
- And(const Output<Node>& arg0,
- const Output<Node>& arg1,
- const AutoBroadcastSpec& auto_broadcast = AutoBroadcastSpec());
-
- std::shared_ptr<Node>
- clone_with_new_inputs(const OutputVector& new_args) const override;
- bool visit_attributes(AttributeVisitor& visitor) override;
- virtual bool is_commutative() const override { return true; }
- bool evaluate(const HostTensorVector& outputs,
- const HostTensorVector& inputs) override;
- };
- }
- }
-}
#pragma once
#include "ngraph/ops.hpp"
-#include "op/and.hpp"
-#include "op/atan2.hpp"
#include "op/avg_pool.hpp"
namespace ngraph
#include "ngraph/slice_plan.hpp"
#include "ngraph/type.hpp"
#include "ngraph/validation_util.hpp"
-#include "op/and.hpp"
#include "op/avg_pool.hpp"
#include "opset0_downgrade.hpp"
return op_cast_binary_elementwise_node<op::v0::LessEq, op::v1::LessEqual>(node);
}
- shared_ptr<Node> op_cast(shared_ptr<op::v1::LogicalAnd> node)
- {
- return op_cast_binary_elementwise_node<op::v0::And, op::v1::LogicalAnd>(node);
- }
-
shared_ptr<Node> op_cast(shared_ptr<op::v1::LogicalNot> node)
{
auto replacement_node = make_shared<op::v0::Not>(node->input_value(0));
return op_cast_binary_elementwise_node<op::v0::Maximum, op::v1::Maximum>(node);
}
- shared_ptr<Node> op_cast(shared_ptr<op::v1::MaxPool> node)
- {
- auto const input_arg = node->input_value(0);
- auto ceil_mode = static_cast<bool>(node->get_rounding_type());
- auto pad_type = node->get_auto_pad();
- auto padding_below = node->get_pads_begin();
- auto padding_above = node->get_pads_end();
- auto window_movement_strides = node->get_strides();
- auto window_shape = node->get_kernel();
-
- auto replacement_node = make_shared<op::v0::MaxPool>(input_arg,
- window_shape,
- window_movement_strides,
- padding_below,
- padding_above,
- pad_type,
- ceil_mode);
- replace_node(node, replacement_node);
- return replacement_node;
- }
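// Note on the ceil_mode conversion above: static_cast<bool>(rounding_type)
// relies on RoundingType::FLOOR having value 0 and CEIL being nonzero (an
// assumption about the enum's declaration order). An explicit comparison says
// the same thing more directly:
//
//   bool ceil_mode = node->get_rounding_type() == op::RoundingType::CEIL;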
-
shared_ptr<Node> op_cast(shared_ptr<op::v1::Minimum> node)
{
return op_cast_binary_elementwise_node<op::v0::Minimum, op::v1::Minimum>(node);
NGRAPH_OP(Abs, ngraph::op)
NGRAPH_OP(Acos, ngraph::op)
NGRAPH_OP(Add, ngraph::op)
-NGRAPH_OP(And, ngraph::op::v0)
NGRAPH_OP(Any, ngraph::op)
NGRAPH_OP(Asin, ngraph::op)
NGRAPH_OP(Atan, ngraph::op)
-NGRAPH_OP(Atan2, ngraph::op::v0)
NGRAPH_OP(AvgPool, ngraph::op::v0)
NGRAPH_OP(BatchNormInference, ngraph::op)
NGRAPH_OP(Broadcast, ngraph::op)
NGRAPH_OP(NormalizeL2, ngraph::op)
NGRAPH_OP(Max, ngraph::op)
NGRAPH_OP(Maximum, ngraph::op)
-NGRAPH_OP(MaxPool, ngraph::op)
NGRAPH_OP(Min, ngraph::op)
NGRAPH_OP(Minimum, ngraph::op)
NGRAPH_OP(Multiply, ngraph::op)
#include "ngraph/graph_util.hpp"
#include "ngraph/ops.hpp"
#include "ngraph/provenance.hpp"
-#include "op/and.hpp"
-#include "op/atan2.hpp"
#include "op/avg_pool.hpp"
using namespace std;
return op_cast_binary_elementwise_node<op::v0::Add, op::v1::Add>(node);
}
- shared_ptr<Node> op_cast(shared_ptr<op::v0::And> node)
- {
- return op_cast_binary_elementwise_node<op::v0::And, op::v1::LogicalAnd>(node);
- }
-
shared_ptr<Node> op_cast(shared_ptr<op::Broadcast> node)
{
auto replacement_node = ngraph::builder::opset1::make_broadcast(
return op_cast_binary_elementwise_node<op::v0::Maximum, op::v1::Maximum>(node);
}
- shared_ptr<Node> op_cast(shared_ptr<op::MaxPool> node)
- {
- auto rounding_type =
- node->get_ceil_mode() ? op::RoundingType::CEIL : op::RoundingType::FLOOR;
- auto auto_pad = node->get_pad_type();
- auto pads_begin = node->get_padding_below();
- auto pads_end = node->get_padding_above();
- auto strides = node->get_window_movement_strides();
- auto kernel = node->get_window_shape();
-
- auto replacement_node = make_shared<op::v1::MaxPool>(
- node->input_value(0), strides, pads_begin, pads_end, kernel, rounding_type, auto_pad);
-#if defined(__clang__) && __clang_major__ == 3
-    // Work around some particularly nasty bugs in clang 3.9
- if (node->get_ceil_mode())
- {
- replacement_node->set_rounding_type(op::RoundingType::CEIL);
- }
- else
- {
- replacement_node->set_rounding_type(op::RoundingType::FLOOR);
- }
-#endif
- replace_node(node, replacement_node);
- return replacement_node;
- }
-
shared_ptr<Node> op_cast(shared_ptr<op::Min> node)
{
bool keep_dims = false;
using namespace std;
using namespace ngraph;
-TEST(type_prop, max_pool_1d_deduce)
-{
- // Deduce type
- auto param = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100});
- Shape window_shape{10};
- auto max_pool = make_shared<op::MaxPool>(param, window_shape);
-
- EXPECT_EQ(max_pool->get_element_type(), element::f32);
- EXPECT_EQ(max_pool->get_shape(), (Shape{64, 3, 91}));
-
- EXPECT_EQ(max_pool->get_window_movement_strides(), Strides{1});
- EXPECT_EQ(max_pool->get_window_shape(), Shape{10});
-}
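// Shape check: unstrided pooling with window 10 over 100 elements yields
// (100 - 10) / 1 + 1 = 91, matching Shape{64, 3, 91} above.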
-
-TEST(type_prop, max_pool_1d_deduce_strided)
-{
- // Deduce type
- auto param = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100});
- Shape window_shape{10};
- auto move_strides = Strides{2};
- auto max_pool = make_shared<op::MaxPool>(param, window_shape, move_strides);
-
- EXPECT_EQ(max_pool->get_element_type(), element::f32);
- EXPECT_EQ(max_pool->get_shape(), (Shape{64, 3, 46}));
-
- EXPECT_EQ(max_pool->get_window_movement_strides(), Strides{2});
- EXPECT_EQ(max_pool->get_window_shape(), Shape{10});
-}
-
-TEST(type_prop, max_pool_1d_deduce_strided_small_uneven)
-{
- // Deduce type
- auto param = make_shared<op::Parameter>(element::f32, Shape{64, 3, 5});
- Shape window_shape{2};
- auto move_strides = Strides{2};
- auto max_pool = make_shared<op::MaxPool>(param, window_shape, move_strides);
-
- EXPECT_EQ(max_pool->get_element_type(), element::f32);
- EXPECT_EQ(max_pool->get_shape(), (Shape{64, 3, 2}));
-
- EXPECT_EQ(max_pool->get_window_movement_strides(), Strides{2});
- EXPECT_EQ(max_pool->get_window_shape(), Shape{2});
-}
-
-TEST(type_prop, max_pool_1d_deduce_strided_small_even)
-{
- // Deduce type
- auto param = make_shared<op::Parameter>(element::f32, Shape{64, 3, 6});
- Shape window_shape{2};
- auto move_strides = Strides{2};
- auto max_pool = make_shared<op::MaxPool>(param, window_shape, move_strides);
-
- EXPECT_EQ(max_pool->get_element_type(), element::f32);
- EXPECT_EQ(max_pool->get_shape(), (Shape{64, 3, 3}));
-
- EXPECT_EQ(max_pool->get_window_movement_strides(), Strides{2});
- EXPECT_EQ(max_pool->get_window_shape(), Shape{2});
-}
-
-TEST(type_prop, max_pool_2d_deduce)
-{
- // Deduce type
- auto param = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100, 150});
- Shape window_shape{10, 20};
- auto max_pool = make_shared<op::MaxPool>(param, window_shape);
-
- EXPECT_EQ(max_pool->get_element_type(), element::f32);
- EXPECT_EQ(max_pool->get_shape(), (Shape{64, 3, 91, 131}));
-
- EXPECT_EQ(max_pool->get_window_movement_strides(), (Strides{1, 1}));
- EXPECT_EQ(max_pool->get_window_shape(), (Shape{10, 20}));
-}
-
-TEST(type_prop, max_pool_2d_deduce_strided)
-{
- // Deduce type
- auto param = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100, 150});
- Shape window_shape{10, 20};
- auto move_strides = Strides{2, 3};
- auto max_pool = make_shared<op::MaxPool>(param, window_shape, move_strides);
-
- EXPECT_EQ(max_pool->get_element_type(), element::f32);
- EXPECT_EQ(max_pool->get_shape(), (Shape{64, 3, 46, 44}));
-
- EXPECT_EQ(max_pool->get_window_movement_strides(), (Strides{2, 3}));
- EXPECT_EQ(max_pool->get_window_shape(), (Shape{10, 20}));
-}
-
-TEST(type_prop, max_pool_3d_deduce_strided_small)
-{
- // Deduce type
- auto param = make_shared<op::Parameter>(element::f32, Shape{64, 3, 7, 8, 10});
- Shape window_shape{2, 3, 2};
- auto move_strides = Strides{2, 3, 4};
- auto max_pool = make_shared<op::MaxPool>(param, window_shape, move_strides);
-
- EXPECT_EQ(max_pool->get_element_type(), element::f32);
- EXPECT_EQ(max_pool->get_shape(), (Shape{64, 3, 3, 2, 3}));
-
- EXPECT_EQ(max_pool->get_window_movement_strides(), (Strides{2, 3, 4}));
- EXPECT_EQ(max_pool->get_window_shape(), (Shape{2, 3, 2}));
-}
-
-TEST(type_prop, max_pool_ceil_mode)
-{
- // Deduce type
- auto param = make_shared<op::Parameter>(element::f32, Shape{64, 3, 10});
- Shape window_shape{2};
- auto move_strides = Strides{4};
- Shape padding_below{4};
- Shape padding_above{5};
- auto max_pool = make_shared<op::MaxPool>(param,
- window_shape,
- move_strides,
- padding_below,
- padding_above,
- op::PadType::EXPLICIT,
- true);
-
- // ceil((10 + 9 - 2)/4) + 1 = ceil(17/4) + 1 = 5 + 1 = 6
- EXPECT_EQ(max_pool->get_shape(), (Shape{64, 3, 6}));
-}
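
// With ceil_mode the quotient is rounded up rather than down; a sketch of
// that variant (again a hypothetical helper, not nGraph code):
static size_t pooled_dim_ceil(
    size_t data_dim, size_t pad_below, size_t pad_above, size_t window, size_t stride)
{
    size_t distance = data_dim + pad_below + pad_above - window;
    return (distance + stride - 1) / stride + 1; // (17 + 3) / 4 + 1 == 6
}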
-
-TEST(type_prop, max_pool_invalid_0d_input)
-{
- // Attempt type deduction; this should fail validation
- auto param = make_shared<op::Parameter>(element::f32, Shape{});
- Shape window_shape{};
- try
- {
- auto max_pool = make_shared<op::MaxPool>(param, window_shape);
-
- // Should have thrown, so fail if it didn't
- FAIL() << "Invalid 0D input not detected";
- }
- catch (const NodeValidationFailure& error)
- {
- EXPECT_HAS_SUBSTRING(error.what(), std::string("Data batch must have rank of at least 3"));
- }
- catch (...)
- {
- FAIL() << "Deduced type check failed for unexpected reason";
- }
-}
-
-TEST(type_prop, max_pool_invalid_1d_input)
-{
- // Attempt type deduction; this should fail validation
- auto param = make_shared<op::Parameter>(element::f32, Shape{2});
- Shape window_shape{};
- try
- {
- auto max_pool = make_shared<op::MaxPool>(param, window_shape);
-
- // Should have thrown, so fail if it didn't
- FAIL() << "Invalid 1D input not detected";
- }
- catch (const NodeValidationFailure& error)
- {
- EXPECT_HAS_SUBSTRING(error.what(), std::string("Data batch must have rank of at least 3"));
- }
- catch (...)
- {
- FAIL() << "Deduced type check failed for unexpected reason";
- }
-}
-
-TEST(type_prop, max_pool_invalid_2d_input)
-{
- // Attempt type deduction; this should fail validation
- auto param = make_shared<op::Parameter>(element::f32, Shape{2, 6});
- Shape window_shape{};
- try
- {
- auto max_pool = make_shared<op::MaxPool>(param, window_shape);
-
- // Should have thrown, so fail if it didn't
- FAIL() << "Invalid 2D input not detected";
- }
- catch (const NodeValidationFailure& error)
- {
- EXPECT_HAS_SUBSTRING(error.what(), std::string("Data batch must have rank of at least 3"));
- }
- catch (...)
- {
- FAIL() << "Deduced type check failed for unexpected reason";
- }
-}
-
-TEST(type_prop, max_pool_invalid_0_batch_size)
-{
- // Attempt type deduction; this should fail validation
- auto param = make_shared<op::Parameter>(element::f32, Shape{0, 6, 1});
- Shape window_shape{1};
- try
- {
- auto max_pool = make_shared<op::MaxPool>(param, window_shape);
-
- // Should have thrown, so fail if it didn't
- FAIL() << "Invalid input with 0 batch size not detected";
- }
- catch (const NodeValidationFailure& error)
- {
- EXPECT_HAS_SUBSTRING(error.what(), std::string("Batch size is zero"));
- }
- catch (...)
- {
- FAIL() << "Deduced type check failed for unexpected reason";
- }
-}
-
-TEST(type_prop, max_pool_invalid_0_channels)
-{
- // Attempt type deduction; this should fail validation
- auto param = make_shared<op::Parameter>(element::f32, Shape{6, 0, 1});
- Shape window_shape{1};
- try
- {
- auto max_pool = make_shared<op::MaxPool>(param, window_shape);
-
- // Should have thrown, so fail if it didn't
- FAIL() << "Invalid input with 0 channels not detected";
- }
- catch (const NodeValidationFailure& error)
- {
- EXPECT_HAS_SUBSTRING(error.what(), std::string("Channel count is zero"));
- }
- catch (...)
- {
- FAIL() << "Deduced type check failed for unexpected reason";
- }
-}
-
-TEST(type_prop, max_pool_invalid_wrong_number_of_window_dimensions_too_many)
-{
- // Attempt type deduction; this should fail validation
- auto param = make_shared<op::Parameter>(element::f32, Shape{6, 2, 10, 10});
- Shape window_shape{3, 3, 3};
- try
- {
- auto max_pool = make_shared<op::MaxPool>(param, window_shape);
-
- // Should have thrown, so fail if it didn't
- FAIL() << "Invalid input with too many window dimensions not detected";
- }
- catch (const NodeValidationFailure& error)
- {
- EXPECT_HAS_SUBSTRING(
- error.what(),
- std::string("Ranks for data item shape (data batch has shape {6,2,10,10}, so data item "
- "rank is 2), padding below (CoordinateDiff{0, 0, 0}), padding above "
- "(CoordinateDiff{0, 0, 0}), window shape ({3,3,3}), and window strides "
- "(Strides{1, 1, 1}) do not match"));
- }
- catch (...)
- {
- FAIL() << "Deduced type check failed for unexpected reason";
- }
-}
-
-TEST(type_prop, max_pool_invalid_wrong_number_of_window_dimensions_too_few)
-{
- // Attempt type deduction; this should fail validation
- auto param = make_shared<op::Parameter>(element::f32, Shape{6, 2, 10, 10});
- Shape window_shape{3};
- try
- {
- auto max_pool = make_shared<op::MaxPool>(param, window_shape);
-
- // Should have thrown, so fail if it didn't
- FAIL() << "Invalid input with too few window dimensions not detected";
- }
- catch (const NodeValidationFailure& error)
- {
- EXPECT_HAS_SUBSTRING(
- error.what(),
- std::string("Ranks for data item shape (data batch has shape {6,2,10,10}, so data item "
- "rank is 2), padding below (CoordinateDiff{0}), padding above "
- "(CoordinateDiff{0}), window shape ({3}), and window strides (Strides{1}) "
- "do not match"));
- }
- catch (...)
- {
- FAIL() << "Deduced type check failed for unexpected reason";
- }
-}
-
-TEST(type_prop, max_pool_invalid_movement_stride_rank)
-{
- // Attempt type deduction; this should fail validation
- auto param = make_shared<op::Parameter>(element::f32, Shape{6, 2, 10, 10});
- Shape window_shape{3, 3};
- auto move_strides = Strides{2, 3, 8};
- try
- {
- auto max_pool = make_shared<op::MaxPool>(param, window_shape, move_strides);
-
- // Should have thrown, so fail if it didn't
- FAIL() << "Invalid input with wrong movement stride rank not detected";
- }
- catch (const NodeValidationFailure& error)
- {
- EXPECT_HAS_SUBSTRING(
- error.what(),
- std::string("Ranks for data item shape (data batch has shape {6,2,10,10}, so data item "
- "rank is 2), padding below (CoordinateDiff{0, 0}), padding above "
- "(CoordinateDiff{0, 0}), window shape ({3,3}), and window strides "
- "(Strides{2, 3, 8}) do not match"));
- }
- catch (...)
- {
- FAIL() << "Deduced type check failed for unexpected reason";
- }
-}
-
-TEST(type_prop, max_pool_invalid_input_data_size_0)
-{
- // Attempt type deduction; this should fail validation
- auto param = make_shared<op::Parameter>(element::f32, Shape{6, 2, 0, 10});
- Shape window_shape{3, 3};
- try
- {
- auto max_pool = make_shared<op::MaxPool>(param, window_shape);
-
- // Should have thrown, so fail if it didn't
- FAIL() << "Invalid input with zero-length spatial axis not detected";
- }
- catch (const NodeValidationFailure& error)
- {
- EXPECT_HAS_SUBSTRING(error.what(),
- std::string("Data shape after padding and dilation has "
- "dimension less than 1 (dim: 0) at axis 0"));
- }
- catch (...)
- {
- FAIL() << "Deduced type check failed for unexpected reason";
- }
-}
-
-TEST(type_prop, max_pool_invalid_window_size_0)
-{
- // Attempt type deduction; this should fail validation
- auto param = make_shared<op::Parameter>(element::f32, Shape{6, 2, 10, 10});
- Shape window_shape{3, 0};
- try
- {
- auto max_pool = make_shared<op::MaxPool>(param, window_shape);
-
- // Should have thrown, so fail if it didn't
- FAIL() << "Invalid input with zero-length window axis not detected";
- }
- catch (const NodeValidationFailure& error)
- {
- EXPECT_HAS_SUBSTRING(
- error.what(),
- std::string("Window after dilation has dimension less than 1 (dim: 0) at axis 1"));
- }
- catch (...)
- {
- FAIL() << "Deduced type check failed for unexpected reason";
- }
-}
-
-TEST(type_prop, max_pool_invalid_dilated_too_large)
-{
- // Attempt type deduction; this should fail validation
- auto param = make_shared<op::Parameter>(element::f32, Shape{6, 2, 8, 8});
- Shape window_shape{9, 9};
- try
- {
- auto max_pool = make_shared<op::MaxPool>(param, window_shape);
-
- // Should have thrown, so fail if it didn't
- FAIL() << "Invalid input with oversized window not detected";
- }
- catch (const NodeValidationFailure& error)
- {
- EXPECT_HAS_SUBSTRING(error.what(),
- std::string("Window after dilation has dimension (dim: 9) larger than "
- "the data shape after padding (dim: 8) at axis 0"));
- }
- catch (...)
- {
- FAIL() << "Deduced type check failed for unexpected reason";
- }
-}
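
// The guard this test trips, sketched as a predicate (hypothetical helper):
// the window, after any dilation, must fit within the padded data extent on
// every axis. With zero padding, a size-9 window cannot fit in 8 elements.
static bool window_fits(size_t data_dim, size_t pad_below, size_t pad_above, size_t window)
{
    return window <= data_dim + pad_below + pad_above; // 9 <= 8 fails here
}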
-
-TEST(type_prop, max_pool_invalid_movement_stride_0)
-{
- // Attempt type deduction; this should fail validation
- auto param = make_shared<op::Parameter>(element::f32, Shape{6, 2, 10, 10});
- Shape window_shape{3, 3};
- auto move_strides = Strides{0, 1};
- try
- {
- auto max_pool = make_shared<op::MaxPool>(param, window_shape, move_strides);
-
- // Should have thrown, so fail if it didn't
-        FAIL() << "Invalid input with zero movement stride not detected";
- }
- catch (const NodeValidationFailure& error)
- {
- EXPECT_HAS_SUBSTRING(
- error.what(),
- std::string("Window strides (Strides{0, 1}) has zero dimension at axis 0"));
- }
- catch (...)
- {
- FAIL() << "Deduced type check failed for unexpected reason";
- }
-}
-
-TEST(type_prop, max_pool_partial_rank_dynamic_ok)
-{
- PartialShape arg_shape{PartialShape::dynamic()};
- Shape window_shape{2, 3, 4, 5};
- Strides window_movement_strides{1, 1, 1, 1};
- Shape padding_below{0, 0, 0, 0};
- Shape padding_above{0, 0, 0, 0};
-
- auto param = make_shared<op::Parameter>(element::f32, arg_shape);
- auto mp = make_shared<op::MaxPool>(
- param, window_shape, window_movement_strides, padding_below, padding_above);
-
- ASSERT_EQ(mp->get_output_element_type(0), element::f32);
- ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(6)));
-}
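
// When the data rank is dynamic, only the output rank can be pinned down:
// batch + channel + one axis per window dimension. A sketch of that rule as
// the tests exhibit it (an illustration, not the actual inference code):
static PartialShape rank_only_result(const Shape& window_shape)
{
    return PartialShape::dynamic(window_shape.size() + 2); // here: dynamic(6)
}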
-
-TEST(type_prop, max_pool_partial_rank_dynamic_attrib_rank_mismatch)
-{
- PartialShape arg_shape{PartialShape::dynamic()};
- Shape window_shape{2, 3, 4, 5};
- Strides window_movement_strides{1, 1, 1, 1, 1};
- Shape padding_below{0, 0, 0, 0};
- Shape padding_above{0, 0, 0, 0};
-
- auto param = make_shared<op::Parameter>(element::f32, arg_shape);
-
- try
- {
- auto mp = make_shared<op::MaxPool>(
- param, window_shape, window_movement_strides, padding_below, padding_above);
- FAIL() << "Mismatch of attribute ranks not detected";
- }
- catch (const NodeValidationFailure& error)
- {
- EXPECT_HAS_SUBSTRING(
- error.what(),
- std::string("Ranks for data item shape (data batch has shape ?, so data item rank is "
- "?), padding below (CoordinateDiff{0, 0, 0, 0}), padding above "
- "(CoordinateDiff{0, 0, 0, 0}), window shape ({2,3,4,5}), and window "
- "strides (Strides{1, 1, 1, 1, 1}) do not match"));
- }
- catch (...)
- {
- FAIL() << "Deduced type check failed for unexpected reason";
- }
-}
-
-TEST(type_prop, max_pool_partial_rank_static_dynamic_ok)
-{
- PartialShape arg_shape{PartialShape::dynamic(6)};
- Shape window_shape{2, 3, 4, 5};
- Strides window_movement_strides{1, 1, 1, 1};
- Shape padding_below{0, 0, 0, 0};
- Shape padding_above{0, 0, 0, 0};
-
- auto param = make_shared<op::Parameter>(element::f32, arg_shape);
- auto mp = make_shared<op::MaxPool>(
- param, window_shape, window_movement_strides, padding_below, padding_above);
-
- ASSERT_EQ(mp->get_output_element_type(0), element::f32);
- ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(6)));
-}
-
-TEST(type_prop, max_pool_partial_rank_static_dynamic_some_dims_known_ok)
-{
- PartialShape arg_shape{5, Dimension::dynamic(), 8, Dimension::dynamic(), 4, 7};
- Shape window_shape{2, 3, 4, 5};
- Strides window_movement_strides{1, 1, 1, 1};
- Shape padding_below{0, 0, 0, 0};
- Shape padding_above{0, 0, 0, 0};
-
- auto param = make_shared<op::Parameter>(element::f32, arg_shape);
- auto mp = make_shared<op::MaxPool>(
- param, window_shape, window_movement_strides, padding_below, padding_above);
-
- ASSERT_EQ(mp->get_output_element_type(0), element::f32);
- ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(
- PartialShape{5, Dimension::dynamic(), 7, Dimension::dynamic(), 1, 3}));
-}
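
// The per-axis rule behind the expected shape, sketched with -1 standing in
// for a dynamic dimension (stride 1, zero padding; hypothetical helper, not
// an nGraph API):
static int64_t pooled_axis(int64_t data_dim, int64_t window)
{
    return data_dim < 0 ? -1                     // dynamic in, dynamic out
                        : data_dim - window + 1; // e.g. 8 - 2 + 1 == 7
}
// Spatial axes {8, ?, 4, 7} with windows {2, 3, 4, 5} give {7, ?, 1, 3};
// the batch (5) and channel (?) axes pass through unchanged.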
-
-TEST(type_prop, max_pool_partial_rank_static_dynamic_attrib_rank_mismatch)
-{
- PartialShape arg_shape{5, Dimension::dynamic(), 8, Dimension::dynamic(), 4, 7};
- Shape window_shape{2, 3, 4, 5, 6};
- Strides window_movement_strides{1, 1, 1, 1};
- Shape padding_below{0, 0, 0, 0};
- Shape padding_above{0, 0, 0, 0};
-
- auto param = make_shared<op::Parameter>(element::f32, arg_shape);
-
- try
- {
- auto mp = make_shared<op::MaxPool>(
- param, window_shape, window_movement_strides, padding_below, padding_above);
- FAIL() << "Mismatch of attribute ranks not detected";
- }
- catch (const NodeValidationFailure& error)
- {
- EXPECT_HAS_SUBSTRING(
- error.what(),
- std::string("Ranks for data item shape (data batch has shape {5,?,8,?,4,7}, so data "
- "item rank is 4), padding below (CoordinateDiff{0, 0, 0, 0}), padding "
- "above (CoordinateDiff{0, 0, 0, 0}), window shape ({2,3,4,5,6}), and "
- "window strides (Strides{1, 1, 1, 1}) do not match"));
- }
- catch (...)
- {
- FAIL() << "Deduced type check failed for unexpected reason";
- }
-}
-
-TEST(type_prop, max_pool_partial_rank_static_dynamic_window_not_too_big)
-{
- PartialShape arg_shape{5, Dimension::dynamic(), 8, Dimension::dynamic(), 4, 7};
- Shape window_shape{9, 3, 4, 5};
- Strides window_movement_strides{1, 1, 1, 1};
- Shape padding_below{0, 0, 0, 0};
- Shape padding_above{0, 0, 0, 0};
-
- auto param = make_shared<op::Parameter>(element::f32, arg_shape);
-
- try
- {
- auto mp = make_shared<op::MaxPool>(
- param, window_shape, window_movement_strides, padding_below, padding_above);
- FAIL() << "Oversized window not detected";
- }
- catch (const NodeValidationFailure& error)
- {
- EXPECT_HAS_SUBSTRING(error.what(),
- std::string("Window after dilation has dimension (dim: 9) larger than "
- "the data shape after padding (dim: 8) at axis 0"));
- }
- catch (...)
- {
- FAIL() << "Deduced type check failed for unexpected reason";
- }
-}
-
-TEST(type_prop, max_pool_partial_rank_static_dynamic_padded_window_not_too_big)
-{
- PartialShape arg_shape{5, Dimension::dynamic(), 8, Dimension::dynamic(), 4, 7};
- Shape window_shape{9, 3, 4, 5};
- Strides window_movement_strides{1, 1, 1, 1};
- Shape padding_below{0, 0, 0, 0};
- Shape padding_above{1, 0, 0, 0};
-
- auto param = make_shared<op::Parameter>(element::f32, arg_shape);
- auto mp = make_shared<op::MaxPool>(
- param, window_shape, window_movement_strides, padding_below, padding_above);
-
- ASSERT_EQ(mp->get_output_element_type(0), element::f32);
- ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(
- PartialShape{5, Dimension::dynamic(), 1, Dimension::dynamic(), 1, 3}));
-}
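
// Contrast with the previous test: one element of padding above extends
// axis 0 from 8 to 9, so the size-9 window now fits exactly:
// pooled_dim(8, 0, 1, 9, 1) == 1, matching {5, ?, 1, ?, 1, 3}.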
-
TEST(type_prop, max_pool_auto_padding)
{
const PartialShape arg_shape{1, 3, 32, 32};