From 186e00fa2a9fd834b5135f767d84f4bcb46f5194 Mon Sep 17 00:00:00 2001
From: Mateusz Tabaka
Date: Wed, 28 Oct 2020 05:12:52 +0100
Subject: [PATCH] Remove obsoleted v0::Product op (#2860)

---
 .../transformations/algebraic_simplification.cpp   |   3 +-
 ngraph/core/builder/src/builder/reshape.cpp        |   1 -
 ngraph/core/include/ngraph/op/op_version_tbl.hpp   |   1 -
 ngraph/core/include/ngraph/op/product.hpp          |  68 ----
 ngraph/core/include/ngraph/ops.hpp                 |   1 -
 ngraph/core/src/op/product.cpp                     |  99 -----
 .../pass/constant_folding_arithmetic_reduction.cpp |  12 +-
 ngraph/test/CMakeLists.txt                         |   1 -
 ngraph/test/backend/dynamic.in.cpp                 |   3 +-
 ngraph/test/backend/product.in.cpp                 | 430 ---------------------
 ngraph/test/constant_folding.cpp                   |  27 --
 ngraph/test/op_is.cpp                              |   4 +-
 ngraph/test/runtime/interpreter/int_executable.hpp |   1 -
 ngraph/test/runtime/opset0_tbl.hpp                 |   1 -
 ngraph/test/runtime/pass/opset0_downgrade.cpp      |   7 -
 ngraph/test/runtime/pass/opset1_upgrade.cpp        |   9 -
 16 files changed, 7 insertions(+), 661 deletions(-)
 delete mode 100644 ngraph/core/include/ngraph/op/product.hpp
 delete mode 100644 ngraph/core/src/op/product.cpp
 delete mode 100644 ngraph/test/backend/product.in.cpp

diff --git a/inference-engine/tests/functional/inference_engine/transformations/algebraic_simplification.cpp b/inference-engine/tests/functional/inference_engine/transformations/algebraic_simplification.cpp
index fdfb659..1e3d5df 100644
--- a/inference-engine/tests/functional/inference_engine/transformations/algebraic_simplification.cpp
+++ b/inference-engine/tests/functional/inference_engine/transformations/algebraic_simplification.cpp
@@ -82,7 +82,8 @@ TEST(algebraic_simplification, multiply_negative_tests) {
 TEST(algebraic_simplification, multiply_prod_negative) {
     auto fconst1 = ngraph::op::Constant::create(element::f64, Shape{2}, {1.0, 1.0});
     auto broadcast = builder::opset1::make_broadcast(fconst1, Shape{2, 5}, AxisSet{1});
-    auto prod_fconst1 = std::make_shared<op::Product>(broadcast, AxisSet{0, 1});
+    auto axes = op::Constant::create(element::i64, {2}, {0, 1});
+    auto prod_fconst1 = std::make_shared<op::v1::ReduceProd>(broadcast, axes);
 
     pass::Manager pass_manager;
     pass_manager.register_pass<pass::AlgebraicSimplification>();
diff --git a/ngraph/core/builder/src/builder/reshape.cpp b/ngraph/core/builder/src/builder/reshape.cpp
index 56521ae..cc52942 100644
--- a/ngraph/core/builder/src/builder/reshape.cpp
+++ b/ngraph/core/builder/src/builder/reshape.cpp
@@ -23,7 +23,6 @@
 #include "ngraph/axis_vector.hpp"
 #include "ngraph/op/concat.hpp"
 #include "ngraph/op/constant.hpp"
-#include "ngraph/op/product.hpp"
 #include "ngraph/op/reduce_prod.hpp"
 #include "ngraph/op/reshape.hpp"
 #include "ngraph/op/shape_of.hpp"
diff --git a/ngraph/core/include/ngraph/op/op_version_tbl.hpp b/ngraph/core/include/ngraph/op/op_version_tbl.hpp
index ab8a903..0b8effb 100644
--- a/ngraph/core/include/ngraph/op/op_version_tbl.hpp
+++ b/ngraph/core/include/ngraph/op/op_version_tbl.hpp
@@ -132,7 +132,6 @@ NGRAPH_OP(Power, ngraph::op::v0, 0)
 NGRAPH_OP(Power, ngraph::op::v1, 1)
 NGRAPH_OP(PriorBox, ngraph::op::v0, 0)
 NGRAPH_OP(PriorBoxClustered, ngraph::op::v0, 0)
-NGRAPH_OP(Product, ngraph::op::v0, 0)
 NGRAPH_OP(Proposal, ngraph::op::v0, 0)
 NGRAPH_OP(Quantize, ngraph::op::v0, 0)
 NGRAPH_OP(QuantizedConvolution, ngraph::op::v0, 0)
diff --git a/ngraph/core/include/ngraph/op/product.hpp b/ngraph/core/include/ngraph/op/product.hpp
deleted file mode 100644
index e560f1d..0000000
--- a/ngraph/core/include/ngraph/op/product.hpp
+++ /dev/null
@@ -1,68 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#pragma once
-
-#include "ngraph/op/util/arithmetic_reduction.hpp"
-
-namespace ngraph
-{
-    namespace op
-    {
-        namespace v0
-        {
-            /// \brief Product reduction operation.
-            ///
-            /// Reduces the tensor, eliminating the specified reduction axes by taking the product.
-            class NGRAPH_DEPRECATED(
-                "This operation is deprecated and will be removed soon. "
-                "Use v1::ReduceProd instead of it.") NGRAPH_API Product
-                : public util::ArithmeticReduction
-            {
-                NGRAPH_SUPPRESS_DEPRECATED_START
-            public:
-                static constexpr NodeTypeInfo type_info{"Product", 0};
-                const NodeTypeInfo& get_type_info() const override { return type_info; }
-                /// \brief Constructs a product reduction operation.
-                Product() = default;
-                /// \brief Constructs a product reduction operation.
-                ///
-                /// \param arg The tensor to be reduced.
-                /// \param reduction_axes The axis positions (0-based) to be eliminated.
-                Product(const Output<Node>& arg, const AxisSet& reduction_axes);
-                /// \brief Constructs a product reduction operation.
-                ///
-                /// \param arg The tensor to be reduced.
-                /// \param reduction_axes The axis positions (0-based) to be eliminated.
-                Product(const Output<Node>& arg, const Output<Node>& reduction_axes);
-
-                /// \return The default value for Product.
-                virtual std::shared_ptr<Node> get_default_value() const override;
-
-                virtual std::shared_ptr<Node>
-                    clone_with_new_inputs(const OutputVector& new_args) const override;
-
-                bool evaluate(const HostTensorVector& outputs,
-                              const HostTensorVector& inputs) const override;
-                NGRAPH_SUPPRESS_DEPRECATED_END
-            };
-        }
-        // default opset version
-        NGRAPH_SUPPRESS_DEPRECATED_START
-        using v0::Product;
-        NGRAPH_SUPPRESS_DEPRECATED_END
-    }
-}
diff --git a/ngraph/core/include/ngraph/ops.hpp b/ngraph/core/include/ngraph/ops.hpp
index 6d3983a..45d69bf 100644
--- a/ngraph/core/include/ngraph/ops.hpp
+++ b/ngraph/core/include/ngraph/ops.hpp
@@ -109,7 +109,6 @@
 #include "ngraph/op/prelu.hpp"
 #include "ngraph/op/prior_box.hpp"
 #include "ngraph/op/prior_box_clustered.hpp"
-#include "ngraph/op/product.hpp"
 #include "ngraph/op/proposal.hpp"
 #include "ngraph/op/psroi_pooling.hpp"
 #include "ngraph/op/quantize.hpp"
diff --git a/ngraph/core/src/op/product.cpp b/ngraph/core/src/op/product.cpp
deleted file mode 100644
index 9dbd3a0..0000000
--- a/ngraph/core/src/op/product.cpp
+++ /dev/null
@@ -1,99 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#include "ngraph/op/product.hpp"
-#include "itt.hpp"
-#include "ngraph/graph_util.hpp"
-#include "ngraph/runtime/host_tensor.hpp"
-#include "ngraph/runtime/reference/product.hpp"
-#include "ngraph/shape_util.hpp"
-
-NGRAPH_SUPPRESS_DEPRECATED_START
-
-using namespace std;
-using namespace ngraph;
-
-constexpr NodeTypeInfo op::v0::Product::type_info;
-
-op::v0::Product::Product(const Output<Node>& arg, const AxisSet& reduction_axes)
-    : ArithmeticReduction(arg, reduction_axes)
-{
-    constructor_validate_and_infer_types();
-}
-
-op::v0::Product::Product(const Output<Node>& arg, const Output<Node>& reduction_axes)
-    : ArithmeticReduction(arg, reduction_axes)
-{
-    constructor_validate_and_infer_types();
-}
-
-shared_ptr<Node> op::v0::Product::clone_with_new_inputs(const OutputVector& new_args) const
-{
-    check_new_args_count(this, new_args);
-    return make_shared<Product>(new_args.at(0), get_reduction_axes());
-}
-
-shared_ptr<Node> op::v0::Product::get_default_value() const
-{
-    return ngraph::make_constant_from_string("1", get_element_type(), get_shape());
-}
-
-namespace product
-{
-    template <element::Type_t ET>
-    bool evaluate(const HostTensorPtr& arg,
-                  const HostTensorPtr& out,
-                  const AxisSet& axes,
-                  bool keep_dims)
-    {
-        out->set_shape(reduce(arg->get_shape(), axes, keep_dims));
-        runtime::reference::product(
-            arg->get_data_ptr<ET>(), out->get_data_ptr<ET>(), arg->get_shape(), axes, keep_dims);
-        return true;
-    }
-
-    bool evaluate_product(const HostTensorPtr& arg,
-                          const HostTensorPtr& out,
-                          const AxisSet& axes,
-                          bool keep_dims)
-    {
-        bool rc = true;
-        switch (arg->get_element_type())
-        {
-            TYPE_CASE(i32)(arg, out, axes, keep_dims);
-            break;
-            TYPE_CASE(i64)(arg, out, axes, keep_dims);
-            break;
-            TYPE_CASE(u32)(arg, out, axes, keep_dims);
-            break;
-            TYPE_CASE(u64)(arg, out, axes, keep_dims);
-            break;
-            TYPE_CASE(f16)(arg, out, axes, keep_dims);
-            break;
-            TYPE_CASE(f32)(arg, out, axes, keep_dims);
-            break;
-        default: rc = false; break;
-        }
-        return rc;
-    }
-}
-
-bool op::v0::Product::evaluate(const HostTensorVector& outputs,
-                               const HostTensorVector& inputs) const
-{
-    OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Product::evaluate");
-    return product::evaluate_product(inputs[0], outputs[0], get_reduction_axes(), false);
-}
diff --git a/ngraph/core/src/pass/constant_folding_arithmetic_reduction.cpp b/ngraph/core/src/pass/constant_folding_arithmetic_reduction.cpp
index 8bab537..8030710 100644
--- a/ngraph/core/src/pass/constant_folding_arithmetic_reduction.cpp
+++ b/ngraph/core/src/pass/constant_folding_arithmetic_reduction.cpp
@@ -19,7 +19,6 @@
 #include "ngraph/op/constant.hpp"
 #include "ngraph/op/max.hpp"
 #include "ngraph/op/min.hpp"
-#include "ngraph/op/product.hpp"
 #include "ngraph/op/reduce_mean.hpp"
 #include "ngraph/op/reduce_prod.hpp"
 #include "ngraph/op/reduce_sum.hpp"
@@ -74,14 +73,6 @@ static shared_ptr<op::Constant>
                                        constant->get_output_shape(0),
                                        reduce_min->get_reduction_axes());
     }
-    else if (auto prod = as_type_ptr<op::Product>(reduction_node))
-    {
-        runtime::reference::product(constant->get_data_ptr<T>(),
-                                    data_ptr,
-                                    constant->get_output_shape(0),
-                                    prod->get_reduction_axes(),
-                                    false);
-    }
     else if (auto reduce_prod = as_type_ptr<op::v1::ReduceProd>(reduction_node))
     {
         runtime::reference::product(constant->get_data_ptr<T>(),
@@ -184,8 +175,7 @@ void pass::ConstantFolding::construct_constant_arithmetic_reduction()
         make_shared<pattern::op::Label>(element::i64, Shape{2}, pattern::has_class<op::Constant>());
     auto is_supported_reduction = [](std::shared_ptr<Node> n) {
         return (pattern::has_class<op::Max>()(n) || pattern::has_class<op::Min>()(n) ||
-                pattern::has_class<op::Product>()(n) || pattern::has_class<op::Sum>()(n) ||
-                pattern::has_class<op::v1::ReduceMax>()(n) ||
+                pattern::has_class<op::Sum>()(n) || pattern::has_class<op::v1::ReduceMax>()(n) ||
                 pattern::has_class<op::v1::ReduceMin>()(n) ||
                 pattern::has_class<op::v1::ReduceProd>()(n) ||
                 pattern::has_class<op::v1::ReduceSum>()(n) ||
diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt
index d7d2a11..a4d5cdb 100644
--- a/ngraph/test/CMakeLists.txt
+++ b/ngraph/test/CMakeLists.txt
@@ -318,7 +318,6 @@ set(MULTI_TEST_SRC
     backend/pad.in.cpp
     backend/parameter_as_output.in.cpp
    backend/power.in.cpp
-    backend/product.in.cpp
    backend/quantize_dequantize.in.cpp
     backend/quantized_convolution.in.cpp
     backend/quantized_dot.in.cpp
diff --git a/ngraph/test/backend/dynamic.in.cpp b/ngraph/test/backend/dynamic.in.cpp
index 906b77b..59cb8de 100644
--- a/ngraph/test/backend/dynamic.in.cpp
+++ b/ngraph/test/backend/dynamic.in.cpp
@@ -182,7 +182,8 @@ static void to_vector_test(const PartialShape& input_pshape, const std::vector<
     auto x = make_shared<op::Parameter>(element::f32, input_pshape);
 
     shared_ptr<Node> x_new_shape = make_shared<op::ShapeOf>(x);
-    x_new_shape = make_shared<op::Product>(x_new_shape, AxisSet{0});
+    auto axes = op::Constant::create(element::i64, {}, {0});
+    x_new_shape = make_shared<op::v1::ReduceProd>(x_new_shape, axes);
     x_new_shape = make_shared<op::Reshape>(x_new_shape, AxisVector{}, Shape{1});
 
     auto x_reshaped = make_shared<op::v1::Reshape>(x, x_new_shape, true);
diff --git a/ngraph/test/backend/product.in.cpp b/ngraph/test/backend/product.in.cpp
deleted file mode 100644
index 350dd91..0000000
--- a/ngraph/test/backend/product.in.cpp
+++ /dev/null
@@ -1,430 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
-#include "util/all_close.hpp"
-#include "util/all_close_f.hpp"
-#include "util/known_element_types.hpp"
-#include "util/ndarray.hpp"
-#include "util/test_control.hpp"
-#include "util/test_tools.hpp"
-
-NGRAPH_SUPPRESS_DEPRECATED_START
-
-using namespace std;
-using namespace ngraph;
-
-static string s_manifest = "${MANIFEST}";
-
-// Trivial case with no reduced axes.
-NGRAPH_TEST(${BACKEND_NAME}, product_trivial)
-{
-    Shape shape{2, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
-    copy_data(a, vector<float>{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::f32, shape);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 2, 3, 4}), read_vector<float>(result)));
-}
-
-// Failure has been reported at 5D for some reason
-NGRAPH_TEST(${BACKEND_NAME}, product_trivial_5d)
-{
-    Shape shape{2, 2, 2, 2, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
-    copy_data(a, vector<float>{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                               1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1});
-    auto result = backend->create_tensor(element::f32, shape);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-                                                 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}),
-                                  read_vector<float>(result)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_to_scalar)
-{
-    Shape shape{2, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{0, 1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
-    copy_data(a, vector<float>{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::f32, Shape{});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{24}), read_vector<float>(result)));
-
-    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
-    // input tensors, so let's do this too.
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 2, 3, 4}), read_vector<float>(a)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_matrix_columns)
-{
-    Shape shape_a{3, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_rt{2};
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{0}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{15, 48}), read_vector<float>(result)));
-
-    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
-    // input tensors, so let's do this too.
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 2, 3, 4, 5, 6}), read_vector<float>(a)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_matrix_rows)
-{
-    Shape shape_a{3, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_rt{3};
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{2, 12, 30}), read_vector<float>(result)));
-
-    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
-    // input tensors, so let's do this too.
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 2, 3, 4, 5, 6}), read_vector<float>(a)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_matrix_rows_zero)
-{
-    Shape shape_a{3, 0};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_rt{3};
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-    copy_data(result, vector<float>({3, 3, 3}));
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 1, 1}), read_vector<float>(result)));
-
-    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
-    // input tensors, so let's do this too.
-    EXPECT_TRUE(test::all_close_f((vector<float>{}), read_vector<float>(a)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_matrix_cols_zero)
-{
-    // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})).
-    Shape shape_a{0, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_rt{2};
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{0}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-    copy_data(result, vector<float>({3, 3}));
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 1}), read_vector<float>(result)));
-
-    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
-    // input tensors, so let's do this too.
-    EXPECT_TRUE(test::all_close_f((vector<float>{}), read_vector<float>(a)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_vector_zero)
-{
-    Shape shape_a{0};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_rt{};
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{0}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-    copy_data(result, vector<float>({3}));
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1}), read_vector<float>(result)));
-
-    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
-    // input tensors, so let's do this too.
-    EXPECT_TRUE(test::all_close_f((vector<float>{}), read_vector<float>(a)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_matrix_to_scalar_zero_by_zero)
-{
-    Shape shape_a{0, 0};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_rt{};
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{0, 1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-    copy_data(result, vector<float>({3}));
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1}), read_vector<float>(result)));
-
-    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
-    // input tensors, so let's do this too.
-    EXPECT_TRUE(test::all_close_f((vector<float>{}), read_vector<float>(a)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_3d_to_matrix_most_sig)
-{
-    Shape shape_a{3, 3, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_rt{3, 3};
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{0}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14,
-                               15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1 * 10 * 19,
-                                                 2 * 11 * 20,
-                                                 3 * 12 * 21,
-                                                 4 * 13 * 22,
-                                                 5 * 14 * 23,
-                                                 6 * 15 * 24,
-                                                 7 * 16 * 25,
-                                                 8 * 17 * 26,
-                                                 9 * 18 * 27}),
-                                  read_vector<float>(result)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_3d_to_matrix_least_sig)
-{
-    Shape shape_a{3, 3, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_rt{3, 3};
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{2}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14,
-                               15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1 * 2 * 3,
-                                                 4 * 5 * 6,
-                                                 7 * 8 * 9,
-                                                 10 * 11 * 12,
-                                                 13 * 14 * 15,
-                                                 16 * 17 * 18,
-                                                 19 * 20 * 21,
-                                                 22 * 23 * 24,
-                                                 25 * 26 * 27}),
-                                  read_vector<float>(result)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_3d_to_vector)
-{
-    Shape shape_a{3, 3, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_rt{3};
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{0, 1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14,
-                               15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f(
-        (vector<float>{1.0f * 10.0f * 19.0f * 4.0f * 13.0f * 22.0f * 7.0f * 16.0f * 25.0f,
-                       2.0f * 11.0f * 20.0f * 5.0f * 14.0f * 23.0f * 8.0f * 17.0f * 26.0f,
-                       3.0f * 12.0f * 21.0f * 6.0f * 15.0f * 24.0f * 9.0f * 18.0f * 27.0f}),
-        read_vector<float>(result)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_3d_to_scalar)
-{
-    Shape shape_a{3, 3, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_rt{};
-    auto f =
-        make_shared<Function>(make_shared<op::Product>(A, AxisSet{0, 1, 2}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14,
-                               13, 12, 11, 10, 9,  8,  7,  6,  5,  4,  3,  2,  1});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f(vector<float>{1.0f * 10.0f * 9.0f * 4.0f * 13.0f * 6.0f * 7.0f *
-                                                12.0f * 3.0f * 2.0f * 11.0f * 8.0f * 5.0f * 14.0f *
-                                                5.0f * 8.0f * 11.0f * 2.0f * 3.0f * 12.0f * 7.0f *
-                                                6.0f * 13.0f * 4.0f * 9.0f * 10.0f * 1.0f},
-                                  read_vector<float>(result)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_3d_eliminate_zero_dim)
-{
-    Shape shape_a{3, 0, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    Shape shape_rt{3, 2};
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
-    copy_data(a, vector<float>{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
-
-    // Overwrite the initial result vector to make sure we're not just coincidentally getting the
-    // right value.
-    copy_data(result, vector<float>{2112, 2112, 2112, 2112, 2112, 2112});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f((vector<float>{1, 1, 1, 1, 1, 1}), read_vector<float>(result)));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_2d_to_scalar_int32)
-{
-    Shape shape_a{3, 3};
-    auto A = make_shared<op::Parameter>(element::i32, shape_a);
-    Shape shape_rt{};
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{0, 1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape_a);
-    copy_data(a, vector<int32_t>{1, 2, 3, 4, 5, 6, 7, 8, 9});
-    auto result = backend->create_tensor(element::i32, shape_rt);
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_EQ(vector<int32_t>{1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9}, read_vector<int32_t>(result));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_to_scalar_int32)
-{
-    Shape shape{2, 2};
-    auto A = make_shared<op::Parameter>(element::i32, shape);
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{0, 1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape);
-    copy_data(a, vector<int32_t>{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::i32, Shape{});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_EQ((vector<int32_t>{24}), read_vector<int32_t>(result));
-
-    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
-    // input tensors, so let's do this too.
-    EXPECT_EQ((vector<int32_t>{1, 2, 3, 4}), read_vector<int32_t>(a));
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, product_to_scalar_int8)
-{
-    Shape shape{2, 2};
-    auto A = make_shared<op::Parameter>(element::i8, shape);
-    auto f = make_shared<Function>(make_shared<op::Product>(A, AxisSet{0, 1}), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i8, shape);
-    copy_data(a, vector<int8_t>{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::i8, Shape{});
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_EQ((vector<int8_t>{24}), read_vector<int8_t>(result));
-
-    // For some reason I'm feeling extra paranoid about making sure reduction doesn't clobber the
-    // input tensors, so let's do this too.
-    EXPECT_EQ((vector<int8_t>{1, 2, 3, 4}), read_vector<int8_t>(a));
-}
diff --git a/ngraph/test/constant_folding.cpp b/ngraph/test/constant_folding.cpp
index 3d49fb2..69aa361 100644
--- a/ngraph/test/constant_folding.cpp
+++ b/ngraph/test/constant_folding.cpp
@@ -827,33 +827,6 @@ TEST(constant_folding, const_reverse)
     ASSERT_EQ(values_expected, values_out);
 }
 
-TEST(constant_folding, const_product)
-{
-    Shape input_shape{3, 3};
-
-    vector<int32_t> values_in{1, 2, 3, 4, 5, 6, 7, 8, 9};
-    auto constant = op::Constant::create(element::i32, input_shape, values_in);
-    auto convert = make_shared<op::Product>(constant, AxisSet{1});
-    convert->set_friendly_name("test");
-    auto f = make_shared<Function>(convert, ParameterVector{});
-
-    pass::Manager pass_manager;
-    pass_manager.register_pass<pass::ConstantFolding>();
-    pass_manager.run_passes(f);
-
-    ASSERT_EQ(count_ops_of_type<op::Product>(f), 0);
-    ASSERT_EQ(count_ops_of_type<op::Constant>(f), 1);
-
-    auto new_const =
-        as_type_ptr<op::Constant>(f->get_results().at(0)->input_value(0).get_node_shared_ptr());
-    ASSERT_TRUE(new_const);
-    ASSERT_EQ(new_const->get_friendly_name(), "test");
-    auto values_out = new_const->get_vector<int32_t>();
-
-    vector<int32_t> values_expected{6, 120, 504};
-    ASSERT_EQ(values_expected, values_out);
-}
-
 TEST(constant_folding, const_reduceprod)
 {
     Shape input_shape{3, 3};
diff --git a/ngraph/test/op_is.cpp b/ngraph/test/op_is.cpp
index c6c589c..8112113 100644
--- a/ngraph/test/op_is.cpp
+++ b/ngraph/test/op_is.cpp
@@ -614,9 +614,9 @@ namespace
         EXPECT_FALSE(op::is_binary_elementwise_logical(&node));
     }
 
-    void op_is_Product()
+    void op_is_ReduceProd()
     {
-        op::Product node;
+        op::v1::ReduceProd node;
         EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node));
         EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node));
         EXPECT_FALSE(op::is_binary_elementwise_comparison(&node));
diff --git a/ngraph/test/runtime/interpreter/int_executable.hpp b/ngraph/test/runtime/interpreter/int_executable.hpp
index ec09f55..39037b6 100644
--- a/ngraph/test/runtime/interpreter/int_executable.hpp
+++ b/ngraph/test/runtime/interpreter/int_executable.hpp
@@ -1445,7 +1445,6 @@ protected:
         case OP_TYPEID::NotEqual:
         case OP_TYPEID::Or:
         case OP_TYPEID::Power:
-        case OP_TYPEID::Product:
         case OP_TYPEID::Range:
         case OP_TYPEID::Reshape:
         case OP_TYPEID::Result:
diff --git a/ngraph/test/runtime/opset0_tbl.hpp b/ngraph/test/runtime/opset0_tbl.hpp
index d9c8767..23b817d 100644
--- a/ngraph/test/runtime/opset0_tbl.hpp
+++ b/ngraph/test/runtime/opset0_tbl.hpp
@@ -108,7 +108,6 @@ NGRAPH_OP(Parameter, ngraph::op)
 NGRAPH_OP(Power, ngraph::op)
 NGRAPH_OP(PRelu, ngraph::op)
 NGRAPH_OP(PriorBox, ngraph::op)
-NGRAPH_OP(Product, ngraph::op)
 NGRAPH_OP(Quantize, ngraph::op)
 NGRAPH_OP(QuantizedConvolution, ngraph::op)
 NGRAPH_OP(QuantizedDot, ngraph::op)
diff --git a/ngraph/test/runtime/pass/opset0_downgrade.cpp b/ngraph/test/runtime/pass/opset0_downgrade.cpp
index 1d7e64e..0396878 100644
--- a/ngraph/test/runtime/pass/opset0_downgrade.cpp
+++ b/ngraph/test/runtime/pass/opset0_downgrade.cpp
@@ -417,13 +417,6 @@ namespace opset0_downgrade
         return replacement_node;
     }
 
-    shared_ptr<Node> op_cast(shared_ptr<op::v1::ReduceProd> node)
-    {
-        auto replacement_node = op_cast_reduction_node<op::v1::ReduceProd, op::v0::Product>(node);
-        replace_node(node, replacement_node);
-        return replacement_node;
-    }
-
     shared_ptr<Node> op_cast(shared_ptr<op::v1::ReduceSum> node)
     {
         auto replacement_node = op_cast_reduction_node<op::v1::ReduceSum, op::v0::Sum>(node);
diff --git a/ngraph/test/runtime/pass/opset1_upgrade.cpp b/ngraph/test/runtime/pass/opset1_upgrade.cpp
index 8d852b0..764079e 100644
--- a/ngraph/test/runtime/pass/opset1_upgrade.cpp
+++ b/ngraph/test/runtime/pass/opset1_upgrade.cpp
@@ -333,15 +333,6 @@ namespace opset1_upgrade
         return op_cast_binary_elementwise_node<op::v0::Power, op::v1::Power>(node);
     }
 
-    shared_ptr<Node> op_cast(shared_ptr<op::Product> node)
-    {
-        bool keep_dims = false;
-        auto replacement_node =
-            make_shared<op::v1::ReduceProd>(node->input_value(0), node->input_value(1), keep_dims);
-        replace_node(node, replacement_node);
-        return replacement_node;
-    }
-
     shared_ptr<Node> op_cast(shared_ptr<op::Reverse> node)
    {
         // creates a Constant node from the v0::Reverse reversed_axes attribute
-- 
2.7.4
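
A minimal sketch of the replacement pattern this patch applies at every call site (the
helper name make_product_reduction is illustrative, not from the patch): a v0::Product
built from an AxisSet attribute becomes a v1::ReduceProd whose reduction axes arrive as
a Constant input, and keep_dims = false reproduces the rank-reducing output shape of the
removed op.

    #include "ngraph/ngraph.hpp"

    using namespace ngraph;

    // Illustrative helper: reduce a tensor over axes {0, 1} by product.
    std::shared_ptr<Node> make_product_reduction(const Output<Node>& data)
    {
        // Reduction axes are now an input node rather than an AxisSet attribute.
        auto axes = op::Constant::create(element::i64, Shape{2}, {0, 1});
        // keep_dims = false drops the reduced axes, matching v0::Product's output.
        return std::make_shared<op::v1::ReduceProd>(data, axes, false);
    }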