From d8133824b31fcb4cdad0e9690a9e8b05f4b2780c Mon Sep 17 00:00:00 2001
From: Ilya Churaev
Date: Fri, 14 Aug 2020 06:27:58 +0300
Subject: [PATCH] Deprecate FusedOp class (#1758)

* Deprecate FusedOps

* Try to fix windows

* Added temp headers
---
 inference-engine/src/cldnn_engine/cldnn_engine.cpp | 2 +-
 .../src/mkldnn_plugin/mkldnn_plugin.cpp | 2 +-
 .../include/transformations/convert_gelu.hpp | 2 +-
 .../convert_gather_to_gather_ie.hpp | 4 +-
 .../src/ngraph_ops/hard_sigmoid_ie.cpp | 2 +-
 .../cnn_network/cnn_ngraph_impl_tests.cpp | 2 +-
 .../ngraph_depth_to_space_transform_test.cpp | 6 +-
 .../transformations/ngraph_fq_transpose_test.cpp | 2 +-
 .../ngraph_mode_decomposition_test.cpp | 2 +-
 .../layer_transformation.cpp | 2 +-
 .../layer_transformation.cpp | 2 +-
 .../depth_to_space_transformation.cpp | 2 +-
 ngraph/core/include/ngraph/op/clamp.hpp | 72 +++++
 ngraph/core/include/ngraph/op/depth_to_space.hpp | 104 ++++++++
 ngraph/core/include/ngraph/op/fake_quantize.hpp | 95 +++++++
 ngraph/core/include/ngraph/op/fused/clamp.hpp | 51 +---
 .../include/ngraph/op/fused/depth_to_space.hpp | 83 +-----
 .../core/include/ngraph/op/fused/fake_quantize.hpp | 74 +----
 ngraph/core/include/ngraph/op/fused/gelu.hpp | 36 +--
 ngraph/core/include/ngraph/op/fused/grn.hpp | 42 +--
 ngraph/core/include/ngraph/op/fused/gru_cell.hpp | 165 +-----------
 .../core/include/ngraph/op/fused/hard_sigmoid.hpp | 40 +--
 ngraph/core/include/ngraph/op/fused/lstm_cell.hpp | 276 +------------------
 .../core/include/ngraph/op/fused/lstm_sequence.hpp | 172 +----------
 ngraph/core/include/ngraph/op/fused/matmul.hpp | 49 +---
 ngraph/core/include/ngraph/op/fused/mod.hpp | 41 +--
 ngraph/core/include/ngraph/op/fused/mvn.hpp | 72 +----
 .../core/include/ngraph/op/fused/normalize_l2.hpp | 55 +---
 ngraph/core/include/ngraph/op/fused/prelu.hpp | 42 +--
 ngraph/core/include/ngraph/op/fused/rnn_cell.hpp | 137 +---------
 ngraph/core/include/ngraph/op/fused/selu.hpp | 37 +--
 .../include/ngraph/op/fused/shuffle_channels.hpp | 59 +---
 .../include/ngraph/op/fused/space_to_depth.hpp | 79 +-----
 .../include/ngraph/op/fused/squared_difference.hpp | 49 +---
 ngraph/core/include/ngraph/op/fused/squeeze.hpp | 35 +--
 ngraph/core/include/ngraph/op/fused/unsqueeze.hpp | 36 +--
 ngraph/core/include/ngraph/op/gelu.hpp | 57 ++++
 .../include/ngraph/op/{fused/stack.hpp => grn.hpp} | 48 ++--
 ngraph/core/include/ngraph/op/group_conv.hpp | 4 +
 ngraph/core/include/ngraph/op/gru_cell.hpp | 186 +++++++++++++
 ngraph/core/include/ngraph/op/hard_sigmoid.hpp | 61 +++++
 ngraph/core/include/ngraph/op/lstm_cell.hpp | 297 +++++++++++++++++++++
 ngraph/core/include/ngraph/op/lstm_sequence.hpp | 193 +++++++++++++
 ngraph/core/include/ngraph/op/matmul.hpp | 70 +++++
 ngraph/core/include/ngraph/op/mod.hpp | 62 +++++
 ngraph/core/include/ngraph/op/mvn.hpp | 93 +++++++
 ngraph/core/include/ngraph/op/normalize_l2.hpp | 76 ++++++
 ngraph/core/include/ngraph/op/prelu.hpp | 63 +++++
 ngraph/core/include/ngraph/op/rnn_cell.hpp | 158 +++++++++++
 ngraph/core/include/ngraph/op/selu.hpp | 58 ++++
 ngraph/core/include/ngraph/op/shuffle_channels.hpp | 80 ++++++
 ngraph/core/include/ngraph/op/space_to_depth.hpp | 100 +++++++
 ngraph/core/include/ngraph/op/split.hpp | 4 +
 .../core/include/ngraph/op/squared_difference.hpp | 70 +++++
 ngraph/core/include/ngraph/op/squeeze.hpp | 56 ++++
 ngraph/core/include/ngraph/op/tensor_iterator.hpp | 4 +
 ngraph/core/include/ngraph/op/unsqueeze.hpp | 57 ++++
 ngraph/core/include/ngraph/op/util/fused_op.hpp | 5 +-
 ngraph/core/include/ngraph/ops.hpp | 42 +--
 ngraph/core/src/builder/reshape.cpp | 2 +-
 ngraph/core/src/op/{fused => }/clamp.cpp | 4 +-
 ngraph/core/src/op/{fused => }/depth_to_space.cpp | 2 +
 ngraph/core/src/op/{fused => }/fake_quantize.cpp | 2 +
 ngraph/core/src/op/fused/stack.cpp | 95 -------
 ngraph/core/src/op/{fused => }/gelu.cpp | 4 +-
 ngraph/core/src/op/{fused => }/grn.cpp | 2 +
 ngraph/core/src/op/group_conv.cpp | 2 +
 ngraph/core/src/op/{fused => }/gru_cell.cpp | 4 +-
 ngraph/core/src/op/{fused => }/hard_sigmoid.cpp | 4 +-
 ngraph/core/src/op/{fused => }/lstm_cell.cpp | 4 +-
 ngraph/core/src/op/{fused => }/lstm_sequence.cpp | 2 +-
 ngraph/core/src/op/{fused => }/matmul.cpp | 2 +
 ngraph/core/src/op/{fused => }/mod.cpp | 4 +-
 ngraph/core/src/op/{fused => }/mvn.cpp | 2 +
 ngraph/core/src/op/{fused => }/normalize_l2.cpp | 4 +-
 ngraph/core/src/op/{fused => }/prelu.cpp | 4 +-
 ngraph/core/src/op/{fused => }/rnn_cell.cpp | 4 +-
 ngraph/core/src/op/{fused => }/selu.cpp | 4 +-
 .../core/src/op/{fused => }/shuffle_channels.cpp | 4 +-
 ngraph/core/src/op/{fused => }/space_to_depth.cpp | 2 +
 ngraph/core/src/op/split.cpp | 2 +
 .../core/src/op/{fused => }/squared_difference.cpp | 4 +-
 ngraph/core/src/op/{fused => }/squeeze.cpp | 4 +-
 ngraph/core/src/op/tensor_iterator.cpp | 2 +
 ngraph/core/src/op/{fused => }/unsqueeze.cpp | 4 +-
 ngraph/core/src/op/util/activation_functions.cpp | 2 +-
 ngraph/core/src/op/util/fused_op.cpp | 2 +
 ngraph/core/src/op/util/op_types.cpp | 2 +
 ngraph/core/src/op/util/rnn_cell_base.cpp | 2 +-
 ngraph/core/src/pass/constant_folding_gather.cpp | 2 +-
 ngraph/core/src/pass/nop_elimination.cpp | 4 +-
 ngraph/core/src/validation_util.cpp | 4 +-
 ngraph/frontend/onnx_import/src/op/gemm.cpp | 2 +-
 ngraph/frontend/onnx_import/src/op/loop.cpp | 2 +
 ngraph/frontend/onnx_import/src/op/lstm.cpp | 2 +-
 .../src/op/mean_variance_normalization.cpp | 2 +-
 ngraph/frontend/onnx_import/src/op/mod.cpp | 2 +-
 ngraph/frontend/onnx_import/src/op/selu.cpp | 2 +-
 ngraph/frontend/onnx_import/src/op/squeeze.cpp | 2 +-
 ngraph/python/tests/__init__.py | 2 +-
 ngraph/test/attributes.cpp | 2 +
 ngraph/test/eval.cpp | 4 +-
 ngraph/test/op_eval/matmul.cpp | 2 +-
 ngraph/test/op_is.cpp | 2 +
 ngraph/test/opset1.cpp | 2 +
 ngraph/test/runtime/op/group_conv.cpp | 2 +
 ngraph/test/runtime/op/group_conv.hpp | 4 +
 107 files changed, 2200 insertions(+), 1800 deletions(-)
 create mode 100644 ngraph/core/include/ngraph/op/clamp.hpp
 create mode 100644 ngraph/core/include/ngraph/op/depth_to_space.hpp
 create mode 100644 ngraph/core/include/ngraph/op/fake_quantize.hpp
 create mode 100644 ngraph/core/include/ngraph/op/gelu.hpp
 rename ngraph/core/include/ngraph/op/{fused/stack.hpp => grn.hpp} (54%)
 create mode 100644 ngraph/core/include/ngraph/op/gru_cell.hpp
 create mode 100644 ngraph/core/include/ngraph/op/hard_sigmoid.hpp
 create mode 100644 ngraph/core/include/ngraph/op/lstm_cell.hpp
 create mode 100644 ngraph/core/include/ngraph/op/lstm_sequence.hpp
 create mode 100644 ngraph/core/include/ngraph/op/matmul.hpp
 create mode 100644 ngraph/core/include/ngraph/op/mod.hpp
 create mode 100644 ngraph/core/include/ngraph/op/mvn.hpp
 create mode 100644 ngraph/core/include/ngraph/op/normalize_l2.hpp
 create mode 100644 ngraph/core/include/ngraph/op/prelu.hpp
 create mode 100644 ngraph/core/include/ngraph/op/rnn_cell.hpp
 create mode 100644 ngraph/core/include/ngraph/op/selu.hpp
 create mode 100644 ngraph/core/include/ngraph/op/shuffle_channels.hpp
 create mode 100644 ngraph/core/include/ngraph/op/space_to_depth.hpp
 create mode 100644 ngraph/core/include/ngraph/op/squared_difference.hpp
 create mode 100644 ngraph/core/include/ngraph/op/squeeze.hpp
 create mode 100644 ngraph/core/include/ngraph/op/unsqueeze.hpp
 rename ngraph/core/src/op/{fused => }/clamp.cpp (99%)
 rename ngraph/core/src/op/{fused => }/depth_to_space.cpp (99%)
 rename ngraph/core/src/op/{fused => }/fake_quantize.cpp (99%)
 delete mode 100644 ngraph/core/src/op/fused/stack.cpp
 rename ngraph/core/src/op/{fused => }/gelu.cpp (97%)
 rename ngraph/core/src/op/{fused => }/grn.cpp (98%)
 rename ngraph/core/src/op/{fused => }/gru_cell.cpp (99%)
 rename ngraph/core/src/op/{fused => }/hard_sigmoid.cpp (98%)
 rename ngraph/core/src/op/{fused => }/lstm_cell.cpp (99%)
 rename ngraph/core/src/op/{fused => }/lstm_sequence.cpp (99%)
 rename ngraph/core/src/op/{fused => }/matmul.cpp (99%)
 rename ngraph/core/src/op/{fused => }/mod.cpp (97%)
 rename ngraph/core/src/op/{fused => }/mvn.cpp (99%)
 rename ngraph/core/src/op/{fused => }/normalize_l2.cpp (98%)
 rename ngraph/core/src/op/{fused => }/prelu.cpp (98%)
 rename ngraph/core/src/op/{fused => }/rnn_cell.cpp (99%)
 rename ngraph/core/src/op/{fused => }/selu.cpp (97%)
 rename ngraph/core/src/op/{fused => }/shuffle_channels.cpp (98%)
 rename ngraph/core/src/op/{fused => }/space_to_depth.cpp (99%)
 rename ngraph/core/src/op/{fused => }/squared_difference.cpp (96%)
 rename ngraph/core/src/op/{fused => }/squeeze.cpp (99%)
 rename ngraph/core/src/op/{fused => }/unsqueeze.cpp (98%)

diff --git a/inference-engine/src/cldnn_engine/cldnn_engine.cpp b/inference-engine/src/cldnn_engine/cldnn_engine.cpp
index 050c948..640cb8b 100644
--- a/inference-engine/src/cldnn_engine/cldnn_engine.cpp
+++ b/inference-engine/src/cldnn_engine/cldnn_engine.cpp
@@ -24,7 +24,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
index 66c9b28..d783081 100644
--- a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
+++ b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
@@ -33,7 +33,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include "ngraph_ops/fully_connected.hpp"
diff --git a/inference-engine/src/transformations/include/transformations/convert_gelu.hpp b/inference-engine/src/transformations/include/transformations/convert_gelu.hpp
index 67f68cb..b4425f4 100644
--- a/inference-engine/src/transformations/include/transformations/convert_gelu.hpp
+++ b/inference-engine/src/transformations/include/transformations/convert_gelu.hpp
@@ -11,7 +11,7 @@
 #include
-#include "ngraph/op/fused/gelu.hpp"
+#include "ngraph/op/gelu.hpp"
 namespace ngraph {
 namespace pass {
diff --git a/inference-engine/src/transformations/include/transformations/convert_opset1_to_legacy/convert_gather_to_gather_ie.hpp b/inference-engine/src/transformations/include/transformations/convert_opset1_to_legacy/convert_gather_to_gather_ie.hpp
index 129f580..67735a2 100644
--- a/inference-engine/src/transformations/include/transformations/convert_opset1_to_legacy/convert_gather_to_gather_ie.hpp
+++ b/inference-engine/src/transformations/include/transformations/convert_opset1_to_legacy/convert_gather_to_gather_ie.hpp
@@ -15,8 +15,8 @@
 #include "ngraph/op/gather.hpp"
 #include "ngraph/op/constant.hpp"
-#include "ngraph/op/fused/squeeze.hpp"
-#include "ngraph/op/fused/unsqueeze.hpp"
+#include "ngraph/op/squeeze.hpp"
+#include "ngraph/op/unsqueeze.hpp"
 namespace ngraph {
diff --git a/inference-engine/src/transformations/src/ngraph_ops/hard_sigmoid_ie.cpp b/inference-engine/src/transformations/src/ngraph_ops/hard_sigmoid_ie.cpp
index e810432..df32511 100644
--- a/inference-engine/src/transformations/src/ngraph_ops/hard_sigmoid_ie.cpp
+++ b/inference-engine/src/transformations/src/ngraph_ops/hard_sigmoid_ie.cpp
@@ -7,7 +7,7 @@
 #include "ngraph_ops/hard_sigmoid_ie.hpp"
-#include "ngraph/op/fused/hard_sigmoid.hpp"
+#include "ngraph/op/hard_sigmoid.hpp"
 #include "ngraph/op/constant.hpp"
 #include "ngraph/op/multiply.hpp"
diff --git a/inference-engine/tests/functional/inference_engine/cnn_network/cnn_ngraph_impl_tests.cpp b/inference-engine/tests/functional/inference_engine/cnn_network/cnn_ngraph_impl_tests.cpp
index 777e2c9..e9aea78 100644
--- a/inference-engine/tests/functional/inference_engine/cnn_network/cnn_ngraph_impl_tests.cpp
+++ b/inference-engine/tests/functional/inference_engine/cnn_network/cnn_ngraph_impl_tests.cpp
@@ -26,7 +26,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include "common_test_utils/file_utils.hpp"
diff --git a/inference-engine/tests/functional/inference_engine/transformations/ngraph_depth_to_space_transform_test.cpp b/inference-engine/tests/functional/inference_engine/transformations/ngraph_depth_to_space_transform_test.cpp
index 691e19d..5132c5d 100644
--- a/inference-engine/tests/functional/inference_engine/transformations/ngraph_depth_to_space_transform_test.cpp
+++ b/inference-engine/tests/functional/inference_engine/transformations/ngraph_depth_to_space_transform_test.cpp
@@ -12,8 +12,8 @@
 #include
 #include
-#include
-#include
+#include
+#include
 #include
 #include
 #include
@@ -180,4 +180,4 @@
 std::vector shape_end_value = shape_end->get_vector();
 std::vector shape_end_value_ref{1, 12 * 4, 1080 / 2, 1616 / 2};
 ASSERT_EQ(shape_end_value, shape_end_value_ref);
-}
\ No newline at end of file
+}
diff --git a/inference-engine/tests/functional/inference_engine/transformations/ngraph_fq_transpose_test.cpp b/inference-engine/tests/functional/inference_engine/transformations/ngraph_fq_transpose_test.cpp
index 5bbe606..a8cf29b 100644
--- a/inference-engine/tests/functional/inference_engine/transformations/ngraph_fq_transpose_test.cpp
+++ b/inference-engine/tests/functional/inference_engine/transformations/ngraph_fq_transpose_test.cpp
@@ -14,7 +14,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
diff --git a/inference-engine/tests/functional/inference_engine/transformations/ngraph_mode_decomposition_test.cpp b/inference-engine/tests/functional/inference_engine/transformations/ngraph_mode_decomposition_test.cpp
index 277b708..81f1d5d 100644
--- a/inference-engine/tests/functional/inference_engine/transformations/ngraph_mode_decomposition_test.cpp
+++ b/inference-engine/tests/functional/inference_engine/transformations/ngraph_mode_decomposition_test.cpp
@@ -13,7 +13,7 @@
 #include
 #include
-#include
+#include
 #include
 #include
 #include
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/layer_transformation.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/layer_transformation.cpp
index 99128d4..31f5f5d 100644
--- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/layer_transformation.cpp
+++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/layer_transformation.cpp
@@ -22,7 +22,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include "ngraph_functions/pass/convert_prc.hpp"
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/layer_transformation.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/layer_transformation.cpp
index d6e5668..b9d608d 100644
--- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/layer_transformation.cpp
+++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/low_precision_transformations/layer_transformation.cpp
@@ -22,7 +22,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include "ngraph_functions/pass/convert_prc.hpp"
diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp
index e2375f4..98a48e2 100644
--- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp
+++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp
@@ -25,7 +25,7 @@
 #include
 #include
 #include
-#include
+#include
 namespace LayerTestsDefinitions {
diff --git a/ngraph/core/include/ngraph/op/clamp.hpp b/ngraph/core/include/ngraph/op/clamp.hpp
new file mode 100644
index 0000000..06d5701
--- /dev/null
+++ b/ngraph/core/include/ngraph/op/clamp.hpp
@@ -0,0 +1,72 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include "ngraph/node.hpp"
+#include "ngraph/op/op.hpp"
+#include "ngraph/op/util/fused_op.hpp"
+
+NGRAPH_SUPPRESS_DEPRECATED_START
+
+namespace ngraph
+{
+    namespace op
+    {
+        namespace v0
+        {
+            /// \brief Performs a clipping operation on all elements of the input node
+            ///
+            /// All input values that are outside of the range are set to 'min' or 'max'
+            /// depending on which side of the range they are. The values that fall into
+            /// this range remain unchanged.
+            class NGRAPH_API Clamp : public ngraph::op::util::FusedOp
+            {
+            public:
+                static constexpr NodeTypeInfo type_info{"Clamp", 0};
+                const NodeTypeInfo& get_type_info() const override { return type_info; }
+                Clamp() = default;
+                /// \brief Constructs a Clamp node.
+                ///
+                /// \param data - Node producing the input tensor
+                /// \param min - the lower bound of the range
+                /// \param max - the upper bound of the range
+                Clamp(const Output<Node>& data, const double min, const double max);
+
+                void pre_validate_and_infer_types() override;
+
+                virtual OutputVector decompose_op() const override;
+
+                virtual std::shared_ptr<Node>
+                    clone_with_new_inputs(const OutputVector& new_args) const override;
+
+                bool visit_attributes(AttributeVisitor& visitor) override;
+
+                double get_min() const { return m_min; }
+                double get_max() const { return m_max; }
+                bool evaluate(const HostTensorVector& outputs,
+                              const HostTensorVector& inputs) const override;
+
+            private:
+                double m_min;
+                double m_max;
+            };
+        }
+        using v0::Clamp;
+    }
+}
+
+NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/core/include/ngraph/op/depth_to_space.hpp b/ngraph/core/include/ngraph/op/depth_to_space.hpp
new file mode 100644
index 0000000..a1a4c9f
--- /dev/null
+++ b/ngraph/core/include/ngraph/op/depth_to_space.hpp
@@ -0,0 +1,104 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include "ngraph/node.hpp"
+#include "ngraph/op/op.hpp"
+#include "ngraph/op/util/attr_types.hpp"
+#include "ngraph/op/util/fused_op.hpp"
+
+NGRAPH_SUPPRESS_DEPRECATED_START
+
+namespace ngraph
+{
+    namespace op
+    {
+        namespace v0
+        {
+            /// \brief DepthToSpace permutes data from the depth dimension of the input blob into
+            ///        spatial dimensions.
+            ///
+            /// \note Values from the depth dimension (assuming NCHW layout) are moved in
+            ///       spatial blocks to the height and width dimensions.
+            ///
+            /// Output node produces a tensor with shape:
+            /// [N, C/(blocksize * blocksize), H * blocksize, W * blocksize]
+            class NGRAPH_API DepthToSpace : public ngraph::op::util::FusedOp
+            {
+            public:
+                static constexpr NodeTypeInfo type_info{"DepthToSpace", 0};
+                const NodeTypeInfo& get_type_info() const override { return type_info; }
+                enum class DepthToSpaceMode
+                {
+                    // The input depth is divided to [block_size, ..., block_size, new_depth]
+                    BLOCKS_FIRST,
+                    // The input depth is divided to [new_depth, block_size, ..., block_size]
+                    DEPTH_FIRST
+                };
+
+                DepthToSpace() = default;
+                /// \brief Constructs a DepthToSpace operation.
+                ///
+                /// \param data Node producing the input tensor
+                /// \param mode Specifies how the input depth dimension is split to block
+                /// coordinates
+                /// \param block_size The size of the block of values to be moved
+                DepthToSpace(const Output<Node>& data,
+                             const DepthToSpaceMode& mode,
+                             std::size_t block_size = 1);
+
+                DepthToSpace(const Output<Node>& data,
+                             const std::string& mode,
+                             std::size_t block_size = 1);
+                bool visit_attributes(AttributeVisitor& visitor) override;
+
+                std::size_t get_block_size() const { return m_blocksize; }
+                DepthToSpaceMode get_mode() const { return m_mode; }
+                virtual OutputVector decompose_op() const override;
+
+                virtual std::shared_ptr<Node>
+                    clone_with_new_inputs(const OutputVector& new_args) const override;
+
+            protected:
+                std::size_t m_blocksize;
+                DepthToSpaceMode m_mode;
+                DepthToSpaceMode mode_from_string(const std::string& mode) const;
+            };
+        }
+        using v0::DepthToSpace;
+    }
+
+    NGRAPH_API
+    std::ostream& operator<<(std::ostream& s, const op::v0::DepthToSpace::DepthToSpaceMode& type);
+
+    template <>
+    class NGRAPH_API AttributeAdapter<op::v0::DepthToSpace::DepthToSpaceMode>
+        : public EnumAttributeAdapterBase<op::v0::DepthToSpace::DepthToSpaceMode>
+    {
+    public:
+        AttributeAdapter(op::v0::DepthToSpace::DepthToSpaceMode& value)
+            : EnumAttributeAdapterBase<op::v0::DepthToSpace::DepthToSpaceMode>(value)
+        {
+        }
+
+        static constexpr DiscreteTypeInfo type_info{
+            "AttributeAdapter<op::v0::DepthToSpace::DepthToSpaceMode>", 0};
+        const DiscreteTypeInfo& get_type_info() const override { return type_info; }
+    };
+}
+
+NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/core/include/ngraph/op/fake_quantize.hpp b/ngraph/core/include/ngraph/op/fake_quantize.hpp
new file mode 100644
index 0000000..081c9ab
--- /dev/null
+++ b/ngraph/core/include/ngraph/op/fake_quantize.hpp
@@ -0,0 +1,95 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include "ngraph/node.hpp"
+#include "ngraph/op/util/attr_types.hpp"
+#include "ngraph/op/util/fused_op.hpp"
+
+NGRAPH_SUPPRESS_DEPRECATED_START
+
+namespace ngraph
+{
+    namespace op
+    {
+        namespace v0
+        {
+            ///
+            /// \brief Class performing element-wise linear quantization.
+            ///
+            /// \note Input floating point values are quantized into a discrete
+            ///       set of floating point values.
+            ///
+            /// \paragraph Implementation This class creates a node which performs the following
+            ///            operation:
+            ///
+            ///            round((data - input_low) / (input_high - input_low) * (levels-1)) /
+            ///                (levels-1) * (output_high - output_low) + output_low
+            ///
+            ///
+            class NGRAPH_API FakeQuantize : public ngraph::op::util::FusedOp
+            {
+            public:
+                NGRAPH_RTTI_DECLARATION;
+
+                FakeQuantize() = default;
+                ///
+                /// \brief Constructs a FakeQuantize operation node.
+                ///
+                /// \param[in] data            The input data tensor.
+                /// \param[in] input_low       The minimum limit for input values.
+                /// \param[in] input_high      The maximum limit for input values.
+                /// \param[in] output_low      The minimum quantized value.
+                /// \param[in] output_high     The maximum quantized value.
+                /// \param[in] levels          The number of quantization levels.
+                /// \param[in] auto_broadcast  AutoBroadcast mode to be used for broadcasting
+                ///                            limit values
+                ///
+                FakeQuantize(const Output<Node>& data,
+                             const Output<Node>& input_low,
+                             const Output<Node>& input_high,
+                             const Output<Node>& output_low,
+                             const Output<Node>& output_high,
+                             std::size_t levels,
+                             const AutoBroadcastSpec& auto_broadcast =
+                                 AutoBroadcastSpec(AutoBroadcastType::NUMPY));
+
+                bool visit_attributes(AttributeVisitor& visitor) override;
+                virtual OutputVector decompose_op() const override;
+                virtual void validate_and_infer_types() override;
+
+                virtual std::shared_ptr<Node>
+                    clone_with_new_inputs(const OutputVector& new_args) const override;
+
+                std::size_t get_levels() const { return m_levels; }
+                void set_levels(std::size_t levels) { m_levels = levels; }
+                const AutoBroadcastSpec& get_auto_broadcast() const { return m_auto_broadcast; }
+                void set_auto_broadcast(const AutoBroadcastSpec& auto_broadcast)
+                {
+                    m_auto_broadcast = auto_broadcast;
+                }
+
+            private:
+                std::size_t m_levels;
+                AutoBroadcastSpec m_auto_broadcast;
+            };
+        }
+        using v0::FakeQuantize;
+    }
+}
+
+NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/core/include/ngraph/op/fused/clamp.hpp b/ngraph/core/include/ngraph/op/fused/clamp.hpp
index 0a1807e..24b1ce7 100644
--- a/ngraph/core/include/ngraph/op/fused/clamp.hpp
+++ b/ngraph/core/include/ngraph/op/fused/clamp.hpp
@@ -16,53 +16,4 @@
 #pragma once
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/fused_op.hpp"
-
-namespace ngraph
-{
-    namespace op
-    {
-        namespace v0
-        {
-            /// \brief Performs a clipping operation on all elements of the input node
-            ///
-            /// All input values that are outside of the range are set to 'min' or 'max'
-            /// depending on which side of the range they are. The values that fall into
-            /// this range remain unchanged.
-            class NGRAPH_API Clamp : public ngraph::op::util::FusedOp
-            {
-            public:
-                static constexpr NodeTypeInfo type_info{"Clamp", 0};
-                const NodeTypeInfo& get_type_info() const override { return type_info; }
-                Clamp() = default;
-                /// \brief Constructs a Clamp node.
-                ///
-                /// \param data - Node producing the input tensor
-                /// \param min - the lower bound of the range
-                /// \param max - the upper bound of the range
-                Clamp(const Output<Node>& data, const double min, const double max);
-
-                void pre_validate_and_infer_types() override;
-
-                virtual OutputVector decompose_op() const override;
-
-                virtual std::shared_ptr<Node>
-                    clone_with_new_inputs(const OutputVector& new_args) const override;
-
-                bool visit_attributes(AttributeVisitor& visitor) override;
-
-                double get_min() const { return m_min; }
-                double get_max() const { return m_max; }
-                bool evaluate(const HostTensorVector& outputs,
-                              const HostTensorVector& inputs) const override;
-
-            private:
-                double m_min;
-                double m_max;
-            };
-        }
-        using v0::Clamp;
-    }
-}
+#include "ngraph/op/clamp.hpp"
diff --git a/ngraph/core/include/ngraph/op/fused/depth_to_space.hpp b/ngraph/core/include/ngraph/op/fused/depth_to_space.hpp
index 74d90c8..f8396b4 100644
--- a/ngraph/core/include/ngraph/op/fused/depth_to_space.hpp
+++ b/ngraph/core/include/ngraph/op/fused/depth_to_space.hpp
@@ -16,85 +16,4 @@
 #pragma once
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "ngraph/op/util/fused_op.hpp"
-
-namespace ngraph
-{
-    namespace op
-    {
-        namespace v0
-        {
-            /// \brief DepthToSpace permutes data from the depth dimension of the input blob into
-            ///        spatial dimensions.
-            ///
-            /// \note Values from the depth dimension (assuming NCHW layout) are moved in
-            ///       spatial blocks to the height and width dimensions.
-            ///
-            /// Output node produces a tensor with shape:
-            /// [N, C/(blocksize * blocksize), H * blocksize, W * blocksize]
-            class NGRAPH_API DepthToSpace : public ngraph::op::util::FusedOp
-            {
-            public:
-                static constexpr NodeTypeInfo type_info{"DepthToSpace", 0};
-                const NodeTypeInfo& get_type_info() const override { return type_info; }
-                enum class DepthToSpaceMode
-                {
-                    // The input depth is divided to [block_size, ..., block_size, new_depth]
-                    BLOCKS_FIRST,
-                    // The input depth is divided to [new_depth, block_size, ..., block_size]
-                    DEPTH_FIRST
-                };
-
-                DepthToSpace() = default;
-                /// \brief Constructs a DepthToSpace operation.
-                ///
-                /// \param data Node producing the input tensor
-                /// \param mode Specifies how the input depth dimension is split to block
-                /// coordinates
-                /// \param block_size The size of the block of values to be moved
-                DepthToSpace(const Output<Node>& data,
-                             const DepthToSpaceMode& mode,
-                             std::size_t block_size = 1);
-
-                DepthToSpace(const Output<Node>& data,
-                             const std::string& mode,
-                             std::size_t block_size = 1);
-                bool visit_attributes(AttributeVisitor& visitor) override;
-
-                std::size_t get_block_size() const { return m_blocksize; }
-                DepthToSpaceMode get_mode() const { return m_mode; }
-                virtual OutputVector decompose_op() const override;
-
-                virtual std::shared_ptr<Node>
-                    clone_with_new_inputs(const OutputVector& new_args) const override;
-
-            protected:
-                std::size_t m_blocksize;
-                DepthToSpaceMode m_mode;
-                DepthToSpaceMode mode_from_string(const std::string& mode) const;
-            };
-        }
-        using v0::DepthToSpace;
-    }
-
-    NGRAPH_API
-    std::ostream& operator<<(std::ostream& s, const op::v0::DepthToSpace::DepthToSpaceMode& type);
-
-    template <>
-    class NGRAPH_API AttributeAdapter<op::v0::DepthToSpace::DepthToSpaceMode>
-        : public EnumAttributeAdapterBase<op::v0::DepthToSpace::DepthToSpaceMode>
-    {
-    public:
-        AttributeAdapter(op::v0::DepthToSpace::DepthToSpaceMode& value)
-            : EnumAttributeAdapterBase<op::v0::DepthToSpace::DepthToSpaceMode>(value)
-        {
-        }
-
-        static constexpr DiscreteTypeInfo type_info{
-            "AttributeAdapter<op::v0::DepthToSpace::DepthToSpaceMode>", 0};
-        const DiscreteTypeInfo& get_type_info() const override { return type_info; }
-    };
-}
+#include "ngraph/op/depth_to_space.hpp"
diff --git a/ngraph/core/include/ngraph/op/fused/fake_quantize.hpp b/ngraph/core/include/ngraph/op/fused/fake_quantize.hpp
index 6f128dd..5d780e0 100644
--- a/ngraph/core/include/ngraph/op/fused/fake_quantize.hpp
+++ b/ngraph/core/include/ngraph/op/fused/fake_quantize.hpp
@@ -16,76 +16,4 @@
 #pragma once
-#include "ngraph/node.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "ngraph/op/util/fused_op.hpp"
-
-namespace ngraph
-{
-    namespace op
-    {
-        namespace v0
-        {
-            ///
-            /// \brief Class performing element-wise linear quantization.
-            ///
-            /// \note Input floating point values are quantized into a discrete
-            ///       set of floating point values.
-            ///
-            /// \paragraph Implementation This class creates a node which performs the following
-            ///            operation:
-            ///
-            ///            round((data - input_low) / (input_high - input_low) * (levels-1)) /
-            ///                (levels-1) * (output_high - output_low) + output_low
-            ///
-            ///
-            class NGRAPH_API FakeQuantize : public ngraph::op::util::FusedOp
-            {
-            public:
-                NGRAPH_RTTI_DECLARATION;
-
-                FakeQuantize() = default;
-                ///
-                /// \brief Constructs a FakeQuantize operation node.
-                ///
-                /// \param[in] data            The input data tensor.
-                /// \param[in] input_low       The minimum limit for input values.
-                /// \param[in] input_high      The maximum limit for input values.
-                /// \param[in] output_low      The minimum quantized value.
-                /// \param[in] output_high     The maximum quantized value.
-                /// \param[in] levels          The number of quantization levels.
-                /// \param[in] auto_broadcast  AutoBroadcast mode to be used for broadcasting
-                ///                            limit values
-                ///
-                FakeQuantize(const Output<Node>& data,
-                             const Output<Node>& input_low,
-                             const Output<Node>& input_high,
-                             const Output<Node>& output_low,
-                             const Output<Node>& output_high,
-                             std::size_t levels,
-                             const AutoBroadcastSpec& auto_broadcast =
-                                 AutoBroadcastSpec(AutoBroadcastType::NUMPY));
-
-                bool visit_attributes(AttributeVisitor& visitor) override;
-                virtual OutputVector decompose_op() const override;
-                virtual void validate_and_infer_types() override;
-
-                virtual std::shared_ptr<Node>
-                    clone_with_new_inputs(const OutputVector& new_args) const override;
-
-                std::size_t get_levels() const { return m_levels; }
-                void set_levels(std::size_t levels) { m_levels = levels; }
-                const AutoBroadcastSpec& get_auto_broadcast() const { return m_auto_broadcast; }
-                void set_auto_broadcast(const AutoBroadcastSpec& auto_broadcast)
-                {
-                    m_auto_broadcast = auto_broadcast;
-                }
-
-            private:
-                std::size_t m_levels;
-                AutoBroadcastSpec m_auto_broadcast;
-            };
-        }
-        using v0::FakeQuantize;
-    }
-}
+#include "ngraph/op/fake_quantize.hpp"
diff --git a/ngraph/core/include/ngraph/op/fused/gelu.hpp b/ngraph/core/include/ngraph/op/fused/gelu.hpp
index ed6a50b..606297d 100644
--- a/ngraph/core/include/ngraph/op/fused/gelu.hpp
+++ b/ngraph/core/include/ngraph/op/fused/gelu.hpp
@@ -16,38 +16,4 @@
 #pragma once
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/fused_op.hpp"
-
-namespace ngraph
-{
-    namespace op
-    {
-        namespace v0
-        {
-            /// \brief Gaussian Error Linear Unit
-            /// f(x) = 0.5 * x * (1 + erf( x / sqrt(2) )
-            class NGRAPH_API Gelu : public ngraph::op::util::FusedOp
-            {
-            public:
-                static constexpr NodeTypeInfo type_info{"Gelu", 0};
-                const NodeTypeInfo& get_type_info() const override { return type_info; }
-                Gelu() = default;
-                /// \brief Constructs an Gelu operation.
-                ///
-                /// \param data Input tensor
-                Gelu(const Output<Node>& data);
-
-                bool visit_attributes(AttributeVisitor& visitor) override;
-                virtual OutputVector decompose_op() const override;
-
-                void pre_validate_and_infer_types() override;
-
-                virtual std::shared_ptr<Node>
-                    clone_with_new_inputs(const OutputVector& new_args) const override;
-            };
-        }
-        using v0::Gelu;
-    }
-}
+#include "ngraph/op/gelu.hpp"
diff --git a/ngraph/core/include/ngraph/op/fused/grn.hpp b/ngraph/core/include/ngraph/op/fused/grn.hpp
index 1d5e3f3..634db49 100644
--- a/ngraph/core/include/ngraph/op/fused/grn.hpp
+++ b/ngraph/core/include/ngraph/op/fused/grn.hpp
@@ -16,44 +16,4 @@
 #pragma once
-#include
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/util/fused_op.hpp"
-
-namespace ngraph
-{
-    namespace op
-    {
-        namespace v0
-        {
-            /// \brief Global Response Normalization with L2 norm (across channels only).
-            ///
-            class NGRAPH_API GRN : public ngraph::op::util::FusedOp
-            {
-            public:
-                static constexpr NodeTypeInfo type_info{"GRN", 0};
-                const NodeTypeInfo& get_type_info() const override { return type_info; }
-                GRN() = default;
-                /// \brief Constructs a GRN operation.
-                ///
-                /// \param data - Node producing the input tensor
-                /// \param bias - The bias added to the variance.
-                ///
-                GRN(const Output<Node>& data, float bias);
-
-                bool visit_attributes(AttributeVisitor& visitor) override;
-                float get_bias() const { return m_bias; }
-                virtual void pre_validate_and_infer_types() override;
-                virtual OutputVector decompose_op() const override;
-
-                virtual std::shared_ptr<Node>
-                    clone_with_new_inputs(const OutputVector& new_args) const override;
-
-            protected:
-                float m_bias = 1.0f;
-            };
-        }
-        using v0::GRN;
-    }
-}
+#include "ngraph/op/grn.hpp"
diff --git a/ngraph/core/include/ngraph/op/fused/gru_cell.hpp b/ngraph/core/include/ngraph/op/fused/gru_cell.hpp
index 969f761..9936854 100644
--- a/ngraph/core/include/ngraph/op/fused/gru_cell.hpp
+++ b/ngraph/core/include/ngraph/op/fused/gru_cell.hpp
@@ -16,167 +16,4 @@
 #pragma once
-#include
-#include
-#include
-#include
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/util/activation_functions.hpp"
-#include "ngraph/op/util/fused_op.hpp"
-#include "ngraph/op/util/rnn_cell_base.hpp"
-
-namespace ngraph
-{
-    namespace op
-    {
-        namespace v3
-        {
-            ///
-            /// \brief Class for GRU cell node.
-            ///
-            /// \note It follows notation and equations defined as in ONNX standard:
-            ///       https://github.com/onnx/onnx/blob/master/docs/Operators.md#GRU
-            ///
-            ///       Note this class represents only single *cell* and not whole GRU *layer*.
-            ///
-            class NGRAPH_API GRUCell : public util::FusedOp, public util::RNNCellBase
-            {
-            public:
-                static constexpr NodeTypeInfo type_info{"GRUCell", 3};
-                const NodeTypeInfo& get_type_info() const override { return type_info; }
-                GRUCell();
-                ///
-                /// \brief Constructs GRUCell node.
-                ///
-                /// \param[in] X                     The input tensor with shape: [batch_size,
-                ///                                  input_size].
-                /// \param[in] initial_hidden_state  The hidden state tensor at current time step
-                ///                                  with shape: [batch_size, hidden_size].
-                /// \param[in] W                     The weight tensor with shape:
-                ///                                  [gates_count * hidden_size, input_size].
-                /// \param[in] R                     The recurrence weight tensor with shape:
-                ///                                  [gates_count * hidden_size, hidden_size].
-                /// \param[in] hidden_size           The number of hidden units for recurrent cell.
-                ///
-                GRUCell(const Output<Node>& X,
-                        const Output<Node>& initial_hidden_state,
-                        const Output<Node>& W,
-                        const Output<Node>& R,
-                        std::size_t hidden_size);
-
-                ///
-                /// \brief Constructs GRUCell node.
-                ///
-                /// \param[in] X                     The input tensor with shape: [batch_size,
-                ///                                  input_size].
-                /// \param[in] initial_hidden_state  The hidden state tensor at current time step
-                ///                                  with shape: [batch_size, hidden_size].
-                /// \param[in] W                     The weight tensor with shape:
-                ///                                  [gates_count * hidden_size, input_size].
-                /// \param[in] R                     The recurrence weight tensor with shape:
-                ///                                  [gates_count * hidden_size, hidden_size].
-                /// \param[in] hidden_size           The number of hidden units for recurrent cell.
-                /// \param[in] activations           The vector of activation functions used inside
-                ///                                  recurrent cell.
-                /// \param[in] activations_alpha     The vector of alpha parameters for activation
-                ///                                  functions in order respective to activation
-                ///                                  list.
-                /// \param[in] activations_beta      The vector of beta parameters for activation
-                ///                                  functions in order respective to activation
-                ///                                  list.
-                /// \param[in] clip                  The value defining clipping range [-clip,
-                ///                                  clip] on input of activation functions.
-                ///
-                GRUCell(const Output<Node>& X,
-                        const Output<Node>& initial_hidden_state,
-                        const Output<Node>& W,
-                        const Output<Node>& R,
-                        std::size_t hidden_size,
-                        const std::vector<std::string>& activations,
-                        const std::vector<float>& activations_alpha,
-                        const std::vector<float>& activations_beta,
-                        float clip,
-                        bool linear_before_reset);
-
-                ///
-                /// \brief Constructs GRUCell node.
-                ///
-                /// \param[in] X                     The input tensor with shape: [batch_size,
-                ///                                  input_size].
-                /// \param[in] initial_hidden_state  The hidden state tensor at current time step
-                ///                                  with shape: [batch_size, hidden_size].
-                /// \param[in] W                     The weight tensor with shape: [gates_count *
-                ///                                  hidden_size, input_size].
-                /// \param[in] R                     The recurrence weight tensor with shape:
-                ///                                  [gates_count * hidden_size, hidden_size].
-                /// \param[in] hidden_size           The number of hidden units for recurrent cell.
-                /// \param[in] B                     The sum of biases (weight and recurrence) for
-                ///                                  update, reset and hidden gates.
-                ///                                  If linear_before_reset := true then biases for
-                ///                                  hidden gates are
-                ///                                  placed separately (weight and recurrence).
-                ///                                  Shape: [gates_count * hidden_size] if
-                ///                                  linear_before_reset := false
-                ///                                  Shape: [(gates_count + 1) * hidden_size] if
-                ///                                  linear_before_reset := true
-                /// \param[in] activations           The vector of activation functions used inside
-                ///                                  recurrent cell.
-                /// \param[in] activations_alpha     The vector of alpha parameters for activation
-                ///                                  functions in order respective to activation
-                ///                                  list.
-                /// \param[in] activations_beta      The vector of beta parameters for activation
-                ///                                  functions in order respective to activation
-                ///                                  list.
-                /// \param[in] clip                  The value defining clipping range [-clip,
-                ///                                  clip] on input of activation functions.
-                /// \param[in] linear_before_reset   Whether or not to apply the linear
-                ///                                  transformation before multiplying by the
-                ///                                  output of the reset gate.
-                ///
-                GRUCell(const Output<Node>& X,
-                        const Output<Node>& initial_hidden_state,
-                        const Output<Node>& W,
-                        const Output<Node>& R,
-                        const Output<Node>& B,
-                        std::size_t hidden_size,
-                        const std::vector<std::string>& activations =
-                            std::vector<std::string>{"sigmoid", "tanh"},
-                        const std::vector<float>& activations_alpha = {},
-                        const std::vector<float>& activations_beta = {},
-                        float clip = 0.f,
-                        bool linear_before_reset = false);
-
-                bool visit_attributes(AttributeVisitor& visitor) override;
-                virtual void pre_validate_and_infer_types() override;
-                virtual OutputVector decompose_op() const override;
-                virtual std::shared_ptr<Node>
-                    clone_with_new_inputs(const OutputVector& new_args) const override;
-
-                bool get_linear_before_reset() const { return m_linear_before_reset; }
-            private:
-                /// brief Add and initialize bias input to all zeros.
-                void add_default_bias_input();
-
-                ///
-                /// \brief The Activation function f.
-                ///
-                util::ActivationFunction m_activation_f;
-                ///
-                /// \brief The Activation function g.
-                ///
-                util::ActivationFunction m_activation_g;
-
-                static constexpr std::size_t s_gates_count{3};
-                ///
-                /// \brief Control whether or not apply the linear transformation.
-                ///
-                /// \note The linear transformation may be applied when computing the output of
-                ///       hidden gate. It's done before multiplying by the output of the reset gate.
-                ///
-                bool m_linear_before_reset;
-            };
-        }
-        using v3::GRUCell;
-    }
-}
+#include "ngraph/op/gru_cell.hpp"
diff --git a/ngraph/core/include/ngraph/op/fused/hard_sigmoid.hpp b/ngraph/core/include/ngraph/op/fused/hard_sigmoid.hpp
index f81d1c7..1cf8c9c 100644
--- a/ngraph/core/include/ngraph/op/fused/hard_sigmoid.hpp
+++ b/ngraph/core/include/ngraph/op/fused/hard_sigmoid.hpp
@@ -16,42 +16,4 @@
 #pragma once
-#include "ngraph/node.hpp"
-#include "ngraph/op/util/fused_op.hpp"
-
-namespace ngraph
-{
-    namespace op
-    {
-        namespace v0
-        {
-            /// \brief Parameterized, bounded sigmoid-like, piecewise linear
-            ///        function. min(max(alpha*x + beta, 0), 1)
-            ///
-            class NGRAPH_API HardSigmoid : public ngraph::op::util::FusedOp
-            {
-            public:
-                static constexpr NodeTypeInfo type_info{"HardSigmoid", 0};
-                const NodeTypeInfo& get_type_info() const override { return type_info; }
-                HardSigmoid() = default;
-
-                /// \brief Constructs a HardSigmoid operation.
-                ///
-                /// \param data       Input tensor.
-                /// \param[in] alpha  A scalar value representing the alpha parameter.
-                /// \param[in] beta   A scalar value representing the beta parameter.
-                ///
-                HardSigmoid(const Output<Node>& data,
-                            const Output<Node>& alpha,
-                            const Output<Node>& beta);
-
-                bool visit_attributes(AttributeVisitor& visitor) override;
-                virtual void pre_validate_and_infer_types() override;
-                virtual OutputVector decompose_op() const override;
-                virtual std::shared_ptr<Node>
-                    clone_with_new_inputs(const OutputVector& new_args) const override;
-            };
-        }
-        using v0::HardSigmoid;
-    }
-}
+#include "ngraph/op/hard_sigmoid.hpp"
diff --git a/ngraph/core/include/ngraph/op/fused/lstm_cell.hpp b/ngraph/core/include/ngraph/op/fused/lstm_cell.hpp
index 0b9151f..10c5edb 100644
--- a/ngraph/core/include/ngraph/op/fused/lstm_cell.hpp
+++ b/ngraph/core/include/ngraph/op/fused/lstm_cell.hpp
@@ -16,278 +16,4 @@
 #pragma once
-#include
-#include
-#include
-#include
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/util/activation_functions.hpp"
-#include "ngraph/op/util/fused_op.hpp"
-#include "ngraph/op/util/rnn_cell_base.hpp"
-
-namespace ngraph
-{
-    namespace op
-    {
-        enum class LSTMWeightsFormat
-        {
-            FICO, // IE
-            ICOF, // PyTorch
-            IFCO, // DNNL, TF, MxNet
-            IFOC, // Caffe
-            IOFC, // ONNX
-        };
-
-        namespace v0
-        {
-            ///
-            /// \brief Class for single lstm cell node.
-            ///
-            /// \note Following implementation supports:
-            ///       \li \c peepholes Gers & Schmidhuber (2000)
-            ///       https://ieeexplore.ieee.org/document/861302
-            ///       \li Coupling input and forget gates.
-            ///
-            /// \note It calculates following equations:
-            ///
-            ///       it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi)
-            ///       ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf)
-            ///       ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc)
-            ///       Ct = ft (.) Ct-1 + it (.) ct
-            ///       ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo)
-            ///       Ht = ot (.) h(Ct)
-            ///
-            ///       * - Is a dot product,
-            ///       (.) - is a Hadamard product (element-wise),
-            ///       f, g, h - are activation functions.
-            ///
-            /// \note This class represents only single *cell* (for current time step) and not
-            ///       the whole LSTM Sequence layer
-            ///
-            /// \sa LSTMSequence, RNNCell, GRUCell
-            ///
-            class NGRAPH_API LSTMCell : public util::FusedOp, public util::RNNCellBase
-            {
-            public:
-                static constexpr NodeTypeInfo type_info{"LSTMCell", 0};
-                const NodeTypeInfo& get_type_info() const override { return type_info; }
-                LSTMCell();
-                ///
-                /// \brief Constructs LSTMCell node.
-                ///
-                /// \param[in] X                     The input tensor with shape: [batch_size,
-                ///                                  input_size].
-                /// \param[in] initial_hidden_state  The hidden state tensor at current time step
-                ///                                  with shape: [batch_size, hidden_size].
-                /// \param[in] initial_cell_state    The cell state tensor at current time step
-                ///                                  with shape: [batch_size, hidden_size].
-                /// \param[in] W                     The gate weights tensor with shape:
-                ///                                  [4*hidden_size, input_size].
-                /// \param[in] R                     The recurrence weights tensor with shape:
-                ///                                  [4*hidden_size, hidden_size].
-                /// \param[in] hidden_size           The number of hidden units for recurrent cell.
-                /// \param[in] weights_format        The order of gates in weights tensors. The
-                ///                                  default format is IFCO since it is used by
-                ///                                  DNNL.
-                /// \param[in] activations           The vector of activation functions used inside
-                ///                                  recurrent cell.
-                /// \param[in] activations_alpha     The vector of alpha parameters for activation
-                ///                                  functions in order respective to activation
-                ///                                  list.
-                /// \param[in] activations_beta      The vector of beta parameters for activation
-                ///                                  functions in order respective to activation
-                ///                                  list.
-                /// \param[in] clip                  The value defining clipping range [-clip,
-                ///                                  clip] on input of activation functions.
-                /// \param[in] input_forget          Controls coupling input and forget gates.
-                ///
-                LSTMCell(const Output<Node>& X,
-                         const Output<Node>& initial_hidden_state,
-                         const Output<Node>& initial_cell_state,
-                         const Output<Node>& W,
-                         const Output<Node>& R,
-                         std::size_t hidden_size,
-                         LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO,
-                         const std::vector<std::string>& activations =
-                             std::vector<std::string>{"sigmoid", "tanh", "tanh"},
-                         const std::vector<float>& activations_alpha = {},
-                         const std::vector<float>& activations_beta = {},
-                         float clip = 0.f,
-                         bool input_forget = false);
-
-                ///
-                /// \brief Constructs LSTMCell node.
-                ///
-                /// \param[in] X                     The input tensor with shape: [batch_size,
-                ///                                  input_size].
-                /// \param[in] initial_hidden_state  The hidden state tensor at current time step
-                ///                                  with shape: [batch_size, hidden_size].
-                /// \param[in] initial_cell_state    The cell state tensor at current time step
-                ///                                  with shape: [batch_size, hidden_size].
-                /// \param[in] W                     The weight tensor with shape: [4*hidden_size,
-                ///                                  input_size].
-                /// \param[in] R                     The recurrence weight tensor with shape:
-                ///                                  [4*hidden_size, hidden_size].
-                /// \param[in] B                     The bias tensor for gates with shape:
-                ///                                  [4*hidden_size].
-                /// \param[in] hidden_size           The number of hidden units for recurrent cell.
-                /// \param[in] weights_format        The order of gates in weights tensors. The
-                ///                                  default format is IFCO since it is used by
-                ///                                  DNNL.
-                /// \param[in] activations           The vector of activation functions used inside
-                ///                                  recurrent cell.
-                /// \param[in] activations_alpha     The vector of alpha parameters for activation
-                ///                                  functions in order respective to activation
-                ///                                  list.
-                /// \param[in] activations_beta      The vector of beta parameters for activation
-                ///                                  functions in order respective to activation
-                ///                                  list.
-                /// \param[in] clip                  The value defining clipping range [-clip,
-                ///                                  clip] on input of activation functions.
-                /// \param[in] input_forget          Controls coupling input and forget gates.
-                ///
-                LSTMCell(const Output<Node>& X,
-                         const Output<Node>& initial_hidden_state,
-                         const Output<Node>& initial_cell_state,
-                         const Output<Node>& W,
-                         const Output<Node>& R,
-                         const Output<Node>& B,
-                         std::size_t hidden_size,
-                         LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO,
-                         const std::vector<std::string>& activations =
-                             std::vector<std::string>{"sigmoid", "tanh", "tanh"},
-                         const std::vector<float>& activations_alpha = {},
-                         const std::vector<float>& activations_beta = {},
-                         float clip = 0.f,
-                         bool input_forget = false);
-
-                ///
-                /// \brief Constructs LSTMCell node.
-                ///
-                /// \param[in] X                     The input tensor with shape: [batch_size,
-                ///                                  input_size].
-                /// \param[in] initial_hidden_state  The hidden state tensor at current time step
-                ///                                  with shape: [batch_size, hidden_size].
-                /// \param[in] initial_cell_state    The cell state tensor at current time step
-                ///                                  with shape: [batch_size, hidden_size].
-                /// \param[in] W                     The weight tensor with shape: [4*hidden_size,
-                ///                                  input_size].
-                /// \param[in] R                     The recurrence weight tensor with shape:
-                ///                                  [4*hidden_size, hidden_size].
-                /// \param[in] B                     The bias tensor for gates with shape:
-                ///                                  [4*hidden_size].
-                /// \param[in] P                     The weight tensor for peepholes with shape:
-                ///                                  [3*hidden_size] - 3 equals to only iof gates.
-                ///                                  The order is: input, output, forget gates.
-                /// \param[in] hidden_size           The number of hidden units for recurrent cell.
-                /// \param[in] weights_format        The order of gates in weights tensors. The
-                ///                                  default format is IFCO since it is used by
-                ///                                  DNNL.
-                /// \param[in] activations           The vector of activation functions used inside
-                ///                                  recurrent cell.
-                /// \param[in] activations_alpha     The vector of alpha parameters for activation
-                ///                                  functions in order respective to activation
-                ///                                  list.
-                /// \param[in] activations_beta      The vector of beta parameters for activation
-                ///                                  functions in order respective to activation
-                ///                                  list.
-                /// \param[in] clip                  The value defining clipping range [-clip,
-                ///                                  clip] on input of activation functions.
-                /// \param[in] input_forget          Controls coupling input and forget gates.
-                ///
-                LSTMCell(const Output<Node>& X,
-                         const Output<Node>& initial_hidden_state,
-                         const Output<Node>& initial_cell_state,
-                         const Output<Node>& W,
-                         const Output<Node>& R,
-                         const Output<Node>& B,
-                         const Output<Node>& P,
-                         std::size_t hidden_size,
-                         LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO,
-                         const std::vector<std::string>& activations =
-                             std::vector<std::string>{"sigmoid", "tanh", "tanh"},
-                         const std::vector<float>& activations_alpha = {},
-                         const std::vector<float>& activations_beta = {},
-                         float clip = 0.f,
-                         bool input_forget = false);
-
-                bool visit_attributes(AttributeVisitor& visitor) override;
-                virtual void pre_validate_and_infer_types() override;
-                virtual OutputVector decompose_op() const override;
-                virtual std::shared_ptr<Node>
-                    clone_with_new_inputs(const OutputVector& new_args) const override;
-
-                bool get_input_forget() const { return m_input_forget; }
-                LSTMWeightsFormat get_weights_format() const { return m_weights_format; }
-                ///
-                /// \brief Change data format of provided node into IFCO.
-                ///
-                /// \node The IFCO format was chosen because it's default DNNL format.
-                ///
-                /// \param[in] node The input node to be permuted.
-                ///
-                /// \return Node representing reshaped tensor according to IFCO weights format.
-                ///
-                std::shared_ptr<Node> convert_node_format(const Output<Node>& node) const;
-
-            private:
-                ///
-                /// \brief Creates the default bias input initialized with zeros.
-                ///
-                /// \return The object of Output class.
-                ///
-                Output<Node> get_default_bias_input() const;
-
-                ///
-                /// \brief Creates the default peepholes input initialized with zeros.
-                ///
-                /// \return The object of Output class.
-                ///
-                Output<Node> get_default_peepholes_input() const;
-                ///
-                /// \brief The Activation function f.
-                ///
-                util::ActivationFunction m_activation_f;
-                ///
-                /// \brief The Activation function g.
-                ///
-                util::ActivationFunction m_activation_g;
-                ///
-                /// \brief The Activation function h.
-                ///
-                util::ActivationFunction m_activation_h;
-                ///
-                /// \brief Controls whether to couple input and forget gates.
-                ///
-                bool m_input_forget = false;
-
-                ///
-                /// \brief The order of gates in weights tensors.
-                ///
-                LSTMWeightsFormat m_weights_format;
-
-                static constexpr std::size_t s_gates_count{4};
-                static constexpr std::size_t s_peepholes_count{3};
-            };
-        }
-        using v0::LSTMCell;
-    } // namespace op
-
-    NGRAPH_API
-    std::ostream& operator<<(std::ostream& s, const op::LSTMWeightsFormat& type);
-
-    template <>
-    class NGRAPH_API AttributeAdapter<op::LSTMWeightsFormat>
-        : public EnumAttributeAdapterBase<op::LSTMWeightsFormat>
-    {
-    public:
-        AttributeAdapter(op::LSTMWeightsFormat& value)
-            : EnumAttributeAdapterBase<op::LSTMWeightsFormat>(value)
-        {
-        }
-
-        static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::LSTMWeightsFormat>", 1};
-        const DiscreteTypeInfo& get_type_info() const override { return type_info; }
-    };
-} // namespace ngraph
+#include "ngraph/op/lstm_cell.hpp"
diff --git a/ngraph/core/include/ngraph/op/fused/lstm_sequence.hpp b/ngraph/core/include/ngraph/op/fused/lstm_sequence.hpp
index b121a06..8456b88 100644
--- a/ngraph/core/include/ngraph/op/fused/lstm_sequence.hpp
+++ b/ngraph/core/include/ngraph/op/fused/lstm_sequence.hpp
@@ -16,174 +16,4 @@
 #pragma once
-#include
-#include
-#include
-#include
-#include
-
-#include "ngraph/node.hpp"
-#include "ngraph/op/constant.hpp"
-#include "ngraph/op/fused/lstm_cell.hpp"
-#include "ngraph/op/util/attr_types.hpp"
-#include "ngraph/op/util/fused_op.hpp"
-
-namespace ngraph
-{
-    namespace op
-    {
-        namespace v0
-        {
-            ///
-            /// \brief Class for lstm sequence node.
-            ///
-            /// \note It follows notation and equations defined as in ONNX standard:
-            ///       https://github.com/onnx/onnx/blob/master/docs/Operators.md#LSTM
-            ///
-            /// \sa LSTMCell, RNNCell, GRUCell
-            ///
-            ///
-            class NGRAPH_API LSTMSequence : public util::FusedOp
-            {
-            public:
-                static constexpr NodeTypeInfo type_info{"LSTMSequence", 0};
-                const NodeTypeInfo& get_type_info() const override { return type_info; }
-                LSTMSequence() = default;
-
-                using direction = RecurrentSequenceDirection;
-
-                size_t get_default_output_index() const override { return no_default_index(); }
-                explicit LSTMSequence(const Output<Node>& X,
-                                      const Output<Node>& initial_hidden_state,
-                                      const Output<Node>& initial_cell_state,
-                                      const Output<Node>& sequence_lengths,
-                                      const Output<Node>& W,
-                                      const Output<Node>& R,
-                                      const Output<Node>& B,
-                                      const Output<Node>& P,
-                                      const std::int64_t hidden_size,
-                                      const direction lstm_direction,
-                                      LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO,
-                                      const std::vector<float> activations_alpha = {},
-                                      const std::vector<float> activations_beta = {},
-                                      const std::vector<std::string> activations = {"sigmoid",
-                                                                                    "tanh",
-                                                                                    "tanh"},
-                                      const float clip_threshold = 0,
-                                      const bool input_forget = false)
-                    : FusedOp({X,
-                               initial_hidden_state,
-                               initial_cell_state,
-                               sequence_lengths,
-                               W,
-                               R,
-                               B,
-                               P})
-                    , m_activations_alpha(activations_alpha)
-                    , m_activations_beta(activations_beta)
-                    , m_activations(activations)
-                    , m_clip_threshold(clip_threshold)
-                    , m_direction(lstm_direction)
-                    , m_hidden_size(hidden_size)
-                    , m_input_forget(input_forget)
-                    , m_weights_format(weights_format)
-                {
-                    constructor_validate_and_infer_types();
-                }
-
-                explicit LSTMSequence(const Output<Node>& X,
-                                      const Output<Node>& initial_hidden_state,
-                                      const Output<Node>& initial_cell_state,
-                                      const Output<Node>& sequence_lengths,
-                                      const Output<Node>& W,
-                                      const Output<Node>& R,
-                                      const Output<Node>& B,
-                                      const std::int64_t hidden_size,
-                                      const direction lstm_direction,
-                                      LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO,
-                                      const std::vector<float> activations_alpha = {},
-                                      const std::vector<float> activations_beta = {},
-                                      const std::vector<std::string> activations = {"sigmoid",
-                                                                                    "tanh",
-                                                                                    "tanh"},
-                                      const float clip_threshold = 0,
-                                      const bool input_forget = false)
-                    : LSTMSequence(
-                          X,
-                          initial_hidden_state,
-                          initial_cell_state,
-                          sequence_lengths,
-                          W,
-                          R,
-                          B,
-                          Constant::create(
-                              element::f32,
-                              Shape{(lstm_direction == direction::BIDIRECTIONAL ? 2UL : 1UL),
-                                    3UL * static_cast<size_t>(hidden_size)},
-                              std::vector<float>{0.f}),
-                          hidden_size,
-                          lstm_direction,
-                          weights_format,
-                          activations_alpha,
-                          activations_beta,
-                          activations,
-                          clip_threshold,
-                          input_forget)
-                {
-                }
-
-                bool visit_attributes(AttributeVisitor& visitor) override;
-                virtual OutputVector decompose_op() const override;
-
-                virtual std::shared_ptr<Node>
-                    clone_with_new_inputs(const OutputVector& new_args) const override;
-
-                std::vector<float> get_activations_alpha() const { return m_activations_alpha; }
-                std::vector<float> get_activations_beta() const { return m_activations_beta; }
-                std::vector<std::string> get_activations() const { return m_activations; }
-                float get_clip_threshold() const { return m_clip_threshold; }
-                direction get_direction() const { return m_direction; }
-                std::int64_t get_hidden_size() const { return m_hidden_size; }
-                bool get_input_forget() const { return m_input_forget; }
-                LSTMWeightsFormat get_weights_format() const { return m_weights_format; }
-            private:
-                ///
-                /// \brief Gets the masked value according to sequence lenght in a batch.
-                ///
-                /// \note Zeros out values or sets them to default value for inputs with
-                ///       sequence lenght shorter than currently procssed time step.
-                ///
-                /// \param[in] data           The input value.
-                /// \param[in] time_step      The current time step denoting sequence lenght.
-                /// \param[in] batch_axis     The batch axis index of data tensor.
-                /// \param[in] default_value  The default value for masked elements.
-                ///
-                /// \return The masked value.
-                ///
-                std::shared_ptr<Node>
-                    get_masked_node(const Output<Node>& data,
-                                    std::int32_t time_step,
-                                    std::size_t batch_axis = 0,
-                                    const Output<Node>& default_value = Output<Node>()) const;
-
-                OutputVector lstm_pass(bool is_reverse = false) const;
-
-                // Split(bi-directional) and squeeze input data to remove 'num_direction' dimension.
-                std::shared_ptr<Node> prepare_input(Output<Node> node,
-                                                    bool is_reverse,
-                                                    size_t num_direction_axis = 0) const;
-
-                std::vector<float> m_activations_alpha;
-                std::vector<float> m_activations_beta;
-                std::vector<std::string> m_activations;
-                float m_clip_threshold;
-                direction m_direction;
-                std::int64_t m_hidden_size;
-                bool m_input_forget;
-                LSTMWeightsFormat m_weights_format;
-            };
-        }
-        using v0::LSTMSequence;
-    } // namespace op
-
-} // namespace ngraph
+#include "ngraph/op/lstm_sequence.hpp"
diff --git a/ngraph/core/include/ngraph/op/fused/matmul.hpp b/ngraph/core/include/ngraph/op/fused/matmul.hpp
index 8691359..683d7ad 100644
--- a/ngraph/core/include/ngraph/op/fused/matmul.hpp
+++ b/ngraph/core/include/ngraph/op/fused/matmul.hpp
@@ -16,51 +16,4 @@
 #pragma once
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/fused_op.hpp"
-
-namespace ngraph
-{
-    namespace op
-    {
-        namespace v0
-        {
-            /// \brief Operator performing Matrix Multiplication.
-            class NGRAPH_API MatMul : public ngraph::op::util::FusedOp
-            {
-            public:
-                NGRAPH_RTTI_DECLARATION;
-                MatMul() = default;
-                /// \brief Constructs an Matrix Multiplication operation.
-                ///
-                /// \param A Matrix A
-                /// \param B Matrix B
-                /// \param transpose_a If matrix A should be transposed.
-                /// \param transpose_b If matrix B should be transposed.
-                MatMul(const Output<Node>& A,
-                       const Output<Node>& B,
-                       const bool& transpose_a = 0,
-                       const bool& transpose_b = 0);
-
-                bool visit_attributes(AttributeVisitor& visitor) override;
-                virtual void pre_validate_and_infer_types() override;
-
-                virtual OutputVector decompose_op() const override;
-
-                virtual std::shared_ptr<Node>
-                    clone_with_new_inputs(const OutputVector& new_args) const override;
-
-                bool evaluate(const HostTensorVector& outputs,
-                              const HostTensorVector& inputs) const override;
-
-                bool get_transpose_a() const { return m_transpose_a; }
-                bool get_transpose_b() const { return m_transpose_b; }
-            private:
-                bool m_transpose_a;
-                bool m_transpose_b;
-            };
-        }
-        using v0::MatMul;
-    } // namespace op
-} // namespace ngraph
+#include "ngraph/op/matmul.hpp"
diff --git a/ngraph/core/include/ngraph/op/fused/mod.hpp b/ngraph/core/include/ngraph/op/fused/mod.hpp
index f359781..b4e2c13 100644
--- a/ngraph/core/include/ngraph/op/fused/mod.hpp
+++ b/ngraph/core/include/ngraph/op/fused/mod.hpp
@@ -16,43 +16,4 @@
 #pragma once
-#include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
-#include "ngraph/op/util/fused_op.hpp"
-
-namespace ngraph
-{
-    namespace op
-    {
-        namespace v1
-        {
-            /// \brief Mod returns an element-wise division reminder with two given tensors applying
-            ///        multi-directional broadcast rules.
-            class NGRAPH_API Mod : public ngraph::op::util::FusedOp
-            {
-            public:
-                static constexpr NodeTypeInfo type_info{"Mod", 0};
-                const NodeTypeInfo& get_type_info() const override { return type_info; }
-                Mod() = default;
-                /// \brief Constructs a Mod node.
- /// - /// \param A - Dividend tensor - /// \param B - Divisor tensor - /// \param auto_broadcast Auto broadcast specification - Mod(const Output& A, - const Output& B, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastType::NUMPY); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual OutputVector decompose_op() const override; - - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - - const AutoBroadcastSpec& get_auto_broadcast() const { return m_auto_broadcast; } - private: - AutoBroadcastSpec m_auto_broadcast; - }; - } - } -} +#include "ngraph/op/mod.hpp" diff --git a/ngraph/core/include/ngraph/op/fused/mvn.hpp b/ngraph/core/include/ngraph/op/fused/mvn.hpp index 6e87eb5..627e1a3 100644 --- a/ngraph/core/include/ngraph/op/fused/mvn.hpp +++ b/ngraph/core/include/ngraph/op/fused/mvn.hpp @@ -16,74 +16,4 @@ #pragma once -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/fused_op.hpp" - -namespace ngraph -{ - namespace op - { - namespace v0 - { - /// \brief Operator performing Mean Variance Normalization - /// - class NGRAPH_API MVN : public ngraph::op::util::FusedOp - { - public: - static constexpr NodeTypeInfo type_info{"MVN", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } - MVN() = default; - /// \brief Constructs an MVN operation. - /// - /// \param data Input tensor with data - /// \param normalize_variance flag that denotes whether to perform variance - /// normalization. - /// \param across_channels flag that denotes if mean values are shared across - /// channels. - /// \param eps the number to be added to the variance to avoid division by zero when - /// normalizing the value - /// - MVN(const Output& data, - bool across_channels = true, - bool normalize_variance = true, - double eps = 1e-9); - - /// \brief Constructs an MVN operation. - /// - /// \param data Input tensor with data - /// \param reduction_axes A list of axes, along which to reduce. - /// \param normalize_variance flag that denotes whether to perform variance - /// normalization. 
- /// \param eps the number to be added to the variance to avoid division by zero when - /// normalizing the value - /// - MVN(const Output& data, - AxisSet reduction_axes, - bool normalize_variance = true, - double eps = 1e-9); - - virtual OutputVector decompose_op() const override; - - virtual void validate_and_infer_types() override; - - virtual bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - - double get_eps() const { return m_eps; } - bool get_across_channels() const { return m_across_channels; } - bool get_normalize_variance() const { return m_normalize_variance; } - AxisSet get_reduction_axes() const { return m_reduction_axes; } - void set_reduction_axes(AxisSet axes) { m_reduction_axes = axes; } - private: - double m_eps = 1e-9; - bool m_across_channels; - bool m_normalize_variance; - AxisSet m_reduction_axes; - }; - } - using v0::MVN; - } // namespace op -} // namespace ngraph +#include "ngraph/op/mvn.hpp" diff --git a/ngraph/core/include/ngraph/op/fused/normalize_l2.hpp b/ngraph/core/include/ngraph/op/fused/normalize_l2.hpp index 938c974..8bcb99b 100644 --- a/ngraph/core/include/ngraph/op/fused/normalize_l2.hpp +++ b/ngraph/core/include/ngraph/op/fused/normalize_l2.hpp @@ -16,57 +16,4 @@ #pragma once -#include - -#include "ngraph/node.hpp" -#include "ngraph/op/util/attr_types.hpp" -#include "ngraph/op/util/fused_op.hpp" - -namespace ngraph -{ - namespace op - { - namespace v0 - { - /// \brief Normalization input tensor with L2 norm. - /// - class NGRAPH_API NormalizeL2 : public ngraph::op::util::FusedOp - { - public: - static constexpr NodeTypeInfo type_info{"NormalizeL2", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } - NormalizeL2() = default; - /// - /// \brief Constructs a Normalize operation. - /// - /// \param data - Node producing the input tensor - /// \param axes - Node indicating axes along which reduction is - /// calculated - /// \param eps - The epsilon added to L2 norm. 
- /// \param eps_mode - Specifies how eps is combined with L2 value - /// calculated - /// before division - /// - NormalizeL2(const Output& data, - const Output& axes, - float eps, - EpsMode eps_mode); - - bool visit_attributes(AttributeVisitor& visitor) override; - float get_eps() const { return m_eps; } - EpsMode get_eps_mode() const { return m_eps_mode; } - virtual OutputVector decompose_op() const override; - virtual void pre_validate_and_infer_types() override; - AxisSet get_reduction_axes() const; - - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - - protected: - float m_eps; - EpsMode m_eps_mode; - }; - } - using v0::NormalizeL2; - } -} +#include "ngraph/op/normalize_l2.hpp" diff --git a/ngraph/core/include/ngraph/op/fused/prelu.hpp b/ngraph/core/include/ngraph/op/fused/prelu.hpp index e0986d2..dc31526 100644 --- a/ngraph/core/include/ngraph/op/fused/prelu.hpp +++ b/ngraph/core/include/ngraph/op/fused/prelu.hpp @@ -16,44 +16,4 @@ #pragma once -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/fused_op.hpp" - -namespace ngraph -{ - namespace op - { - namespace v0 - { - /// \brief Parametrized Relu - /// x < 0 => f(x) = x * slope - /// x >= 0 => f(x) = x - /// - class NGRAPH_API PRelu : public ngraph::op::util::FusedOp - { - public: - static constexpr NodeTypeInfo type_info{"PRelu", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } - PRelu() = default; - /// \brief Constructs a PRelu operation. - /// - /// \param data Input tensor - /// \param slope Multipliers for negative values - PRelu(const Output& data, const Output& slope); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual OutputVector decompose_op() const override; - - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - - void pre_validate_and_infer_types() override; - - bool evaluate(const HostTensorVector& outputs, - const HostTensorVector& inputs) const override; - }; - } - using v0::PRelu; - } -} +#include "ngraph/op/prelu.hpp" diff --git a/ngraph/core/include/ngraph/op/fused/rnn_cell.hpp b/ngraph/core/include/ngraph/op/fused/rnn_cell.hpp index a0e3dab..7e1b464 100644 --- a/ngraph/core/include/ngraph/op/fused/rnn_cell.hpp +++ b/ngraph/core/include/ngraph/op/fused/rnn_cell.hpp @@ -16,139 +16,4 @@ #pragma once -#include -#include -#include -#include - -#include "ngraph/node.hpp" -#include "ngraph/op/util/activation_functions.hpp" -#include "ngraph/op/util/fused_op.hpp" -#include "ngraph/op/util/rnn_cell_base.hpp" - -namespace ngraph -{ - namespace op - { - namespace v0 - { - /// - /// \brief Class for single RNN cell node. - /// - /// \note It follows notation and equations defined as in ONNX standard: - /// https://github.com/onnx/onnx/blob/master/docs/Operators.md#RNN - /// - /// \note It calculates following equations: - /// - /// Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi) - /// - /// * - Is a dot product, - /// f - is activation functions. - /// - /// \note This class represents only single *cell* (for current time step) - /// and not the whole RNN Sequence layer - /// - /// \sa LSTMSequence, LSTMCell, GRUCell - /// - class NGRAPH_API RNNCell : public util::FusedOp, public util::RNNCellBase - { - public: - static constexpr NodeTypeInfo type_info{"RNNCell", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } - RNNCell(); - /// - /// \brief Constructs RNNCell node. 
- /// - /// \param[in] X The input tensor with shape: [batch_size, - /// input_size]. - /// \param[in] initial_hidden_state The hidden state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] W The weight tensor with shape: [hidden_size, - /// input_size]. - /// \param[in] R The recurrence weight tensor with shape: - /// [hidden_size, hidden_size]. - /// \param[in] hidden_size The number of hidden units for recurrent cell. - /// \param[in] activations The vector of activation functions used inside - /// recurrent cell. - /// \param[in] activations_alpha The vector of alpha parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] activations_beta The vector of beta parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] clip The value defining clipping range [-clip, - /// clip] on input of activation functions. - /// - RNNCell( - const Output& X, - const Output& initial_hidden_state, - const Output& W, - const Output& R, - std::size_t hidden_size, - const std::vector& activations = std::vector{"tanh"}, - const std::vector& activations_alpha = {}, - const std::vector& activations_beta = {}, - float clip = 0.f); - - /// - /// \brief Constructs RNNCell node. - /// - /// \param[in] X The input tensor with shape: [batch_size, - /// input_size]. - /// \param[in] initial_hidden_state The hidden state tensor at current time step - /// with shape: [batch_size, hidden_size]. - /// \param[in] W The weight tensor with shape: [hidden_size, - /// input_size]. - /// \param[in] R The recurrence weight tensor with shape: - /// [hidden_size, hidden_size]. - /// \param[in] B The bias tensor for input gate with shape: - /// [hidden_size]. - /// \param[in] hidden_size The number of hidden units for recurrent cell. - /// \param[in] activations The vector of activation functions used inside - /// recurrent cell. - /// \param[in] activations_alpha The vector of alpha parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] activations_beta The vector of beta parameters for activation - /// functions in order respective to activation - /// list. - /// \param[in] clip The value defining clipping range [-clip, - /// clip] on input of activation functions. - /// - RNNCell( - const Output& X, - const Output& initial_hidden_state, - const Output& W, - const Output& R, - const Output& B, - std::size_t hidden_size, - const std::vector& activations = std::vector{"tanh"}, - const std::vector& activations_alpha = {}, - const std::vector& activations_beta = {}, - float clip = 0.f); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual void pre_validate_and_infer_types() override; - virtual OutputVector decompose_op() const override; - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - - private: - /// - /// \brief Creates the default bias input initialized with zeros. - /// - /// \return The object of Output class. - /// - Output get_default_bias_input() const; - - /// - /// \brief The Activation function f. 
- /// - util::ActivationFunction m_activation_f; - - static constexpr std::size_t s_gates_count{1}; - }; - } - using v0::RNNCell; - } // namespace op -} // namespace ngraph +#include "ngraph/op/rnn_cell.hpp" diff --git a/ngraph/core/include/ngraph/op/fused/selu.hpp b/ngraph/core/include/ngraph/op/fused/selu.hpp index 4cdd6f9..804fb39 100644 --- a/ngraph/core/include/ngraph/op/fused/selu.hpp +++ b/ngraph/core/include/ngraph/op/fused/selu.hpp @@ -16,39 +16,4 @@ #pragma once -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/fused_op.hpp" - -namespace ngraph -{ - namespace op - { - namespace v0 - { - /// \brief Performs a SELU activation function on all elements of the input node - class NGRAPH_API Selu : public ngraph::op::util::FusedOp - { - public: - static constexpr NodeTypeInfo type_info{"Selu", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } - Selu() = default; - /// \brief Constructs a Selu node. - /// - /// \param data - Node producing the input tensor - /// \param alpha - Alpha coefficient of SELU operation - /// \param lambda - Lambda coefficient of SELU operation - Selu(const Output& data, - const Output& alpha, - const Output& lambda); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual OutputVector decompose_op() const override; - - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - }; - } - using v0::Selu; - } // namespace op -} // namespace ngraph +#include "ngraph/op/selu.hpp" diff --git a/ngraph/core/include/ngraph/op/fused/shuffle_channels.hpp b/ngraph/core/include/ngraph/op/fused/shuffle_channels.hpp index 14720ea..c1988c0 100644 --- a/ngraph/core/include/ngraph/op/fused/shuffle_channels.hpp +++ b/ngraph/core/include/ngraph/op/fused/shuffle_channels.hpp @@ -16,61 +16,4 @@ #pragma once -#include - -#include "ngraph/node.hpp" -#include "ngraph/op/util/fused_op.hpp" - -namespace ngraph -{ - namespace op - { - namespace v0 - { - /// \brief Permutes data in the channel dimension of the input - class NGRAPH_API ShuffleChannels : public ngraph::op::util::FusedOp - { - public: - static constexpr NodeTypeInfo type_info{"ShuffleChannels", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } - ShuffleChannels() = default; - /// \brief Constructs a ShuffleChannels node. - /// - /// \param data - Node producing the input tensor - /// \param axis - channel dimension index in the data tensor. A negative value means - /// that the index should be calculated from the back of the input - /// data - /// shape. 
- /// \param group - number of group the channel dimension specified by axis should - /// be - /// split into - ShuffleChannels(const Output& data, - const int64_t axis = 1, - const int64_t group = 1); - - bool visit_attributes(AttributeVisitor& visitor) override; - size_t get_zero_based_axis() const; - - virtual void pre_validate_and_infer_types() override; - - virtual OutputVector decompose_op() const override; - - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - - int64_t get_axis() const { return m_axis; } - int64_t get_group() const { return m_group; } - private: - /// \brief Generates a shape required to permute the data - /// - /// \param data_shape - Shape of the original input data tensor - /// \return A 4D tensor to be used to reshape the input data before shuffling it - Shape get_pre_shuffle_shape(const Shape& data_shape) const; - - int64_t m_axis; - int64_t m_group; - }; - } - using v0::ShuffleChannels; - } -} +#include "ngraph/op/shuffle_channels.hpp" diff --git a/ngraph/core/include/ngraph/op/fused/space_to_depth.hpp b/ngraph/core/include/ngraph/op/fused/space_to_depth.hpp index e71a1b8..e72b787 100644 --- a/ngraph/core/include/ngraph/op/fused/space_to_depth.hpp +++ b/ngraph/core/include/ngraph/op/fused/space_to_depth.hpp @@ -16,81 +16,4 @@ #pragma once -#include "ngraph/node.hpp" -#include "ngraph/op/util/fused_op.hpp" - -namespace ngraph -{ - namespace op - { - namespace v0 - { - /// \brief SpaceToDepth permutes input tensor blocks of spatial data into depth - /// dimension. - /// - /// \note Values from the height and width dimensions are moved to the depth dimension. - /// - /// Output node produces a tensor with shape: - /// [N, C * blocksize * blocksize, H / blocksize, W / blocksize] - class NGRAPH_API SpaceToDepth : public ngraph::op::util::FusedOp - { - public: - static constexpr NodeTypeInfo type_info{"SpaceToDepth", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } - enum class SpaceToDepthMode - { - // The output depth is gathered from [block_size, ..., block_size, C] - BLOCKS_FIRST, - // The output depth is gathered from [C, block_size, ..., block_size] - DEPTH_FIRST - }; - - SpaceToDepth() = default; - /// \brief Constructs a SpaceToDepth operation. - /// - /// \param data - Node producing the input tensor - /// \param mode Specifies how the output depth dimension is gathered - /// from block coordinates and the old depth dimension. 
- /// \param block_size - the size of the block of values to be moved - SpaceToDepth(const Output& data, - const SpaceToDepthMode& mode, - std::size_t block_size = 1); - - SpaceToDepth(const Output& data, - const std::string& mode, - std::size_t block_size = 1); - - bool visit_attributes(AttributeVisitor& visitor) override; - std::size_t get_block_size() const { return m_blocksize; } - SpaceToDepthMode get_mode() const { return m_mode; } - virtual OutputVector decompose_op() const override; - - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - - protected: - std::size_t m_blocksize; - SpaceToDepthMode m_mode; - }; - } - using v0::SpaceToDepth; - } // namespace op - - NGRAPH_API - std::ostream& operator<<(std::ostream& s, const op::v0::SpaceToDepth::SpaceToDepthMode& type); - - template <> - class NGRAPH_API AttributeAdapter - : public EnumAttributeAdapterBase - { - public: - AttributeAdapter(op::v0::SpaceToDepth::SpaceToDepthMode& value) - : EnumAttributeAdapterBase(value) - { - } - - static constexpr DiscreteTypeInfo type_info{ - "AttributeAdapter", 0}; - const DiscreteTypeInfo& get_type_info() const override { return type_info; } - }; -} // namespace ngraph +#include "ngraph/op/space_to_depth.hpp" diff --git a/ngraph/core/include/ngraph/op/fused/squared_difference.hpp b/ngraph/core/include/ngraph/op/fused/squared_difference.hpp index 9ab6409..89f4c09 100644 --- a/ngraph/core/include/ngraph/op/fused/squared_difference.hpp +++ b/ngraph/core/include/ngraph/op/fused/squared_difference.hpp @@ -16,51 +16,4 @@ #pragma once -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/fused_op.hpp" - -namespace ngraph -{ - namespace op - { - namespace v0 - { - /// \brief Calculates an element-wise squared difference between two tensors - /// - /// y[i] = (x1[i] - x2[i])^2 - class NGRAPH_API SquaredDifference : public ngraph::op::util::FusedOp - { - public: - static constexpr NodeTypeInfo type_info{"SquaredDifference", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } - SquaredDifference() = default; - /// \brief Constructs the squared difference operation. 
- /// - /// \param x1 First input tensor - /// \param x2 Second input tensor - /// \param auto_broadcast Auto broadcast specification - SquaredDifference( - const Output& x1, - const Output& x2, - const AutoBroadcastSpec& auto_broadcast = AutoBroadcastType::NUMPY); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual OutputVector decompose_op() const override; - - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - - const AutoBroadcastSpec& get_autob() const override { return m_autobroadcast; } - void set_autob(const AutoBroadcastSpec& auto_broadcast) - { - m_autobroadcast = auto_broadcast; - } - - private: - AutoBroadcastSpec m_autobroadcast; - }; - } - using v0::SquaredDifference; - } // namespace op -} // namespace ngraph +#include "ngraph/op/squared_difference.hpp" diff --git a/ngraph/core/include/ngraph/op/fused/squeeze.hpp b/ngraph/core/include/ngraph/op/fused/squeeze.hpp index 117a5ca..70c8b44 100644 --- a/ngraph/core/include/ngraph/op/fused/squeeze.hpp +++ b/ngraph/core/include/ngraph/op/fused/squeeze.hpp @@ -16,37 +16,4 @@ #pragma once -#include - -#include "ngraph/axis_vector.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/fused_op.hpp" - -namespace ngraph -{ - namespace op - { - namespace v0 - { - class NGRAPH_API Squeeze : public ngraph::op::util::FusedOp - { - public: - static constexpr NodeTypeInfo type_info{"Squeeze", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } - Squeeze() = default; - Squeeze(const Output& data, const Output& axes); - - bool visit_attributes(AttributeVisitor& visitor) override; - virtual OutputVector decompose_op() const override; - virtual void pre_validate_and_infer_types() override; - bool evaluate(const HostTensorVector& outputs, - const HostTensorVector& inputs) const override; - - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - }; - } - using v0::Squeeze; - } -} +#include "ngraph/op/squeeze.hpp" diff --git a/ngraph/core/include/ngraph/op/fused/unsqueeze.hpp b/ngraph/core/include/ngraph/op/fused/unsqueeze.hpp index 85ae319..3d2bc69 100644 --- a/ngraph/core/include/ngraph/op/fused/unsqueeze.hpp +++ b/ngraph/core/include/ngraph/op/fused/unsqueeze.hpp @@ -16,38 +16,4 @@ #pragma once -#include - -#include "ngraph/axis_vector.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/op.hpp" -#include "ngraph/op/util/fused_op.hpp" - -namespace ngraph -{ - namespace op - { - namespace v0 - { - class NGRAPH_API Unsqueeze : public ngraph::op::util::FusedOp - { - public: - static constexpr NodeTypeInfo type_info{"Unsqueeze", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } - Unsqueeze() = default; - Unsqueeze(const Output& data, const Output& axes); - - virtual void pre_validate_and_infer_types() override; - virtual OutputVector decompose_op() const override; - - bool visit_attributes(AttributeVisitor& visitor) override; - bool evaluate(const HostTensorVector& outputs, - const HostTensorVector& inputs) const override; - - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - }; - } - using v0::Unsqueeze; - } -} +#include "ngraph/op/unsqueeze.hpp" diff --git a/ngraph/core/include/ngraph/op/gelu.hpp b/ngraph/core/include/ngraph/op/gelu.hpp new file mode 100644 index 0000000..8bd75c4 --- /dev/null +++ b/ngraph/core/include/ngraph/op/gelu.hpp @@ -0,0 +1,57 @@ 
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include "ngraph/node.hpp"
+#include "ngraph/op/op.hpp"
+#include "ngraph/op/util/fused_op.hpp"
+
+NGRAPH_SUPPRESS_DEPRECATED_START
+
+namespace ngraph
+{
+ namespace op
+ {
+ namespace v0
+ {
+ /// \brief Gaussian Error Linear Unit
+ /// f(x) = 0.5 * x * (1 + erf( x / sqrt(2) ))
+ class NGRAPH_API Gelu : public ngraph::op::util::FusedOp
+ {
+ public:
+ static constexpr NodeTypeInfo type_info{"Gelu", 0};
+ const NodeTypeInfo& get_type_info() const override { return type_info; }
+ Gelu() = default;
+ /// \brief Constructs a Gelu operation.
+ ///
+ /// \param data Input tensor
+ Gelu(const Output<Node>& data);
+
+ bool visit_attributes(AttributeVisitor& visitor) override;
+ virtual OutputVector decompose_op() const override;
+
+ void pre_validate_and_infer_types() override;
+
+ virtual std::shared_ptr<Node>
+ clone_with_new_inputs(const OutputVector& new_args) const override;
+ };
+ }
+ using v0::Gelu;
+ }
+}
+
+NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/core/include/ngraph/op/fused/stack.hpp b/ngraph/core/include/ngraph/op/grn.hpp
similarity index 54%
rename from ngraph/core/include/ngraph/op/fused/stack.hpp
rename to ngraph/core/include/ngraph/op/grn.hpp
index 1c307d0..a56200d 100644
--- a/ngraph/core/include/ngraph/op/fused/stack.hpp
+++ b/ngraph/core/include/ngraph/op/grn.hpp
@@ -16,52 +16,48 @@
 #pragma once
+#include
+
 #include "ngraph/node.hpp"
-#include "ngraph/op/op.hpp"
 #include "ngraph/op/util/fused_op.hpp"
+NGRAPH_SUPPRESS_DEPRECATED_START
+
 namespace ngraph
 {
 namespace op
 {
 namespace v0
 {
- /// \brief Operator performing Stack.
- class NGRAPH_API Stack : public ngraph::op::util::FusedOp
+ /// \brief Global Response Normalization with L2 norm (across channels only).
+ ///
+ class NGRAPH_API GRN : public ngraph::op::util::FusedOp
 {
 public:
- static constexpr NodeTypeInfo type_info{"Stack", 0};
+ static constexpr NodeTypeInfo type_info{"GRN", 0};
 const NodeTypeInfo& get_type_info() const override { return type_info; }
- Stack() = default;
-
- /// \brief Constructs a stack operation.
+ GRN() = default;
+ /// \brief Constructs a GRN operation.
 ///
- /// \param args The outputs producing the input tensors.
- /// \param axis The axis in the result array along which the input arrays are
- /// stacked.
- Stack(const OutputVector& args, int64_t axis);
-
- /// \brief Constructs a stack operation.
+ /// \param data - Node producing the input tensor
+ /// \param bias - The bias added to the variance.
 ///
- /// \param args The nodes producing the input tensors.
- /// \param axis The axis in the result array along which the input arrays are
- /// stacked.
- Stack(const NodeVector& args, int64_t axis); + GRN(const Output& data, float bias); + bool visit_attributes(AttributeVisitor& visitor) override; + float get_bias() const { return m_bias; } virtual void pre_validate_and_infer_types() override; - virtual OutputVector decompose_op() const override; virtual std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - /// \return The stack axis - int64_t get_axis() const { return m_axis; } - void set_axis(int64_t axis) { m_axis = axis; } - private: - int64_t m_axis; + protected: + float m_bias = 1.0f; }; } - using v0::Stack; - } // namespace op -} // namespace ngraph + using v0::GRN; + } +} + +NGRAPH_SUPPRESS_DEPRECATED_END diff --git a/ngraph/core/include/ngraph/op/group_conv.hpp b/ngraph/core/include/ngraph/op/group_conv.hpp index 3efabc1..2dd1bc2 100644 --- a/ngraph/core/include/ngraph/op/group_conv.hpp +++ b/ngraph/core/include/ngraph/op/group_conv.hpp @@ -21,6 +21,8 @@ #include "ngraph/op/util/attr_types.hpp" #include "ngraph/op/util/fused_op.hpp" +NGRAPH_SUPPRESS_DEPRECATED_START + namespace ngraph { namespace op @@ -250,3 +252,5 @@ namespace ngraph } // namespace v1 } // namespace op } // namespace ngraph + +NGRAPH_SUPPRESS_DEPRECATED_END diff --git a/ngraph/core/include/ngraph/op/gru_cell.hpp b/ngraph/core/include/ngraph/op/gru_cell.hpp new file mode 100644 index 0000000..1c9094a --- /dev/null +++ b/ngraph/core/include/ngraph/op/gru_cell.hpp @@ -0,0 +1,186 @@ +//***************************************************************************** +// Copyright 2017-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** + +#pragma once + +#include +#include +#include +#include + +#include "ngraph/node.hpp" +#include "ngraph/op/util/activation_functions.hpp" +#include "ngraph/op/util/fused_op.hpp" +#include "ngraph/op/util/rnn_cell_base.hpp" + +NGRAPH_SUPPRESS_DEPRECATED_START + +namespace ngraph +{ + namespace op + { + namespace v3 + { + /// + /// \brief Class for GRU cell node. + /// + /// \note It follows notation and equations defined as in ONNX standard: + /// https://github.com/onnx/onnx/blob/master/docs/Operators.md#GRU + /// + /// Note this class represents only single *cell* and not whole GRU *layer*. + /// + class NGRAPH_API GRUCell : public util::FusedOp, public util::RNNCellBase + { + public: + static constexpr NodeTypeInfo type_info{"GRUCell", 3}; + const NodeTypeInfo& get_type_info() const override { return type_info; } + GRUCell(); + /// + /// \brief Constructs GRUCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The weight tensor with shape: + /// [gates_count * hidden_size, input_size]. + /// \param[in] R The recurrence weight tensor with shape: + /// [gates_count * hidden_size, hidden_size]. 
+ /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// + GRUCell(const Output& X, + const Output& initial_hidden_state, + const Output& W, + const Output& R, + std::size_t hidden_size); + + /// + /// \brief Constructs GRUCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The weight tensor with shape: + /// [gates_count * hidden_size, input_size]. + /// \param[in] R The recurrence weight tensor with shape: + /// [gates_count * hidden_size, hidden_size]. + /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// \param[in] activations The vector of activation functions used inside + /// recurrent cell. + /// \param[in] activations_alpha The vector of alpha parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] activations_beta The vector of beta parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] clip The value defining clipping range [-clip, + /// clip] on input of activation functions. + /// + GRUCell(const Output& X, + const Output& initial_hidden_state, + const Output& W, + const Output& R, + std::size_t hidden_size, + const std::vector& activations, + const std::vector& activations_alpha, + const std::vector& activations_beta, + float clip, + bool linear_before_reset); + + /// + /// \brief Constructs GRUCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The weight tensor with shape: [gates_count * + /// hidden_size, input_size]. + /// \param[in] R The recurrence weight tensor with shape: + /// [gates_count * hidden_size, hidden_size]. + /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// \param[in] B The sum of biases (weight and recurrence) for + /// update, reset and hidden gates. + /// If linear_before_reset := true then biases for + /// hidden gates are + /// placed separately (weight and recurrence). + /// Shape: [gates_count * hidden_size] if + /// linear_before_reset := false + /// Shape: [(gates_count + 1) * hidden_size] if + /// linear_before_reset := true + /// \param[in] activations The vector of activation functions used inside + /// recurrent cell. + /// \param[in] activations_alpha The vector of alpha parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] activations_beta The vector of beta parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] clip The value defining clipping range [-clip, + /// clip] on input of activation functions. + /// \param[in] linear_before_reset Whether or not to apply the linear + /// transformation before multiplying by the + /// output of the reset gate. 
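+ ///
+ /// \note A minimal construction sketch for this overload (the sizes below
+ /// are illustrative assumptions, not requirements of this class):
+ /// \code
+ /// std::size_t batch = 1, input_size = 16, hidden_size = 128;
+ /// auto X = std::make_shared<op::Parameter>(element::f32, Shape{batch, input_size});
+ /// auto H = std::make_shared<op::Parameter>(element::f32, Shape{batch, hidden_size});
+ /// auto W = std::make_shared<op::Parameter>(element::f32, Shape{3 * hidden_size, input_size});
+ /// auto R = std::make_shared<op::Parameter>(element::f32, Shape{3 * hidden_size, hidden_size});
+ /// // B: [3 * hidden_size] when linear_before_reset == false (the default)
+ /// auto B = std::make_shared<op::Parameter>(element::f32, Shape{3 * hidden_size});
+ /// auto cell = std::make_shared<op::v3::GRUCell>(X, H, W, R, B, hidden_size);
+ /// \endcode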
+ ///
+ GRUCell(const Output<Node>& X,
+ const Output<Node>& initial_hidden_state,
+ const Output<Node>& W,
+ const Output<Node>& R,
+ const Output<Node>& B,
+ std::size_t hidden_size,
+ const std::vector<std::string>& activations =
+ std::vector<std::string>{"sigmoid", "tanh"},
+ const std::vector<float>& activations_alpha = {},
+ const std::vector<float>& activations_beta = {},
+ float clip = 0.f,
+ bool linear_before_reset = false);
+
+ bool visit_attributes(AttributeVisitor& visitor) override;
+ virtual void pre_validate_and_infer_types() override;
+ virtual OutputVector decompose_op() const override;
+ virtual std::shared_ptr<Node>
+ clone_with_new_inputs(const OutputVector& new_args) const override;
+
+ bool get_linear_before_reset() const { return m_linear_before_reset; }
+ private:
+ /// \brief Add and initialize bias input to all zeros.
+ void add_default_bias_input();
+
+ ///
+ /// \brief The Activation function f.
+ ///
+ util::ActivationFunction m_activation_f;
+ ///
+ /// \brief The Activation function g.
+ ///
+ util::ActivationFunction m_activation_g;
+
+ static constexpr std::size_t s_gates_count{3};
+ ///
+ /// \brief Controls whether or not to apply the linear transformation.
+ ///
+ /// \note The linear transformation may be applied when computing the output of
+ /// the hidden gate. It's done before multiplying by the output of the
+ /// reset gate.
+ ///
+ bool m_linear_before_reset;
+ };
+ }
+ using v3::GRUCell;
+ }
+}
+
+NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/core/include/ngraph/op/hard_sigmoid.hpp b/ngraph/core/include/ngraph/op/hard_sigmoid.hpp
new file mode 100644
index 0000000..89ccaa3
--- /dev/null
+++ b/ngraph/core/include/ngraph/op/hard_sigmoid.hpp
@@ -0,0 +1,61 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include "ngraph/node.hpp"
+#include "ngraph/op/util/fused_op.hpp"
+
+NGRAPH_SUPPRESS_DEPRECATED_START
+
+namespace ngraph
+{
+ namespace op
+ {
+ namespace v0
+ {
+ /// \brief Parameterized, bounded sigmoid-like, piecewise linear
+ /// function. min(max(alpha*x + beta, 0), 1)
+ ///
+ class NGRAPH_API HardSigmoid : public ngraph::op::util::FusedOp
+ {
+ public:
+ static constexpr NodeTypeInfo type_info{"HardSigmoid", 0};
+ const NodeTypeInfo& get_type_info() const override { return type_info; }
+ HardSigmoid() = default;
+
+ /// \brief Constructs a HardSigmoid operation.
+ ///
+ /// \param data Input tensor.
+ /// \param[in] alpha A scalar value representing the alpha parameter.
+ /// \param[in] beta A scalar value representing the beta parameter.
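+ ///
+ /// \note A minimal construction sketch (the shape and the coefficient
+ /// values are illustrative assumptions, not requirements of this class):
+ /// \code
+ /// auto data = std::make_shared<op::Parameter>(element::f32, Shape{2, 4});
+ /// auto alpha = op::Constant::create(element::f32, Shape{}, {0.2f});
+ /// auto beta = op::Constant::create(element::f32, Shape{}, {0.5f});
+ /// // y = min(max(alpha * x + beta, 0), 1), applied element-wise
+ /// auto node = std::make_shared<op::v0::HardSigmoid>(data, alpha, beta);
+ /// \endcode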
+ /// + HardSigmoid(const Output& data, + const Output& alpha, + const Output& beta); + + bool visit_attributes(AttributeVisitor& visitor) override; + virtual void pre_validate_and_infer_types() override; + virtual OutputVector decompose_op() const override; + virtual std::shared_ptr + clone_with_new_inputs(const OutputVector& new_args) const override; + }; + } + using v0::HardSigmoid; + } +} + +NGRAPH_SUPPRESS_DEPRECATED_END diff --git a/ngraph/core/include/ngraph/op/lstm_cell.hpp b/ngraph/core/include/ngraph/op/lstm_cell.hpp new file mode 100644 index 0000000..391d2c5 --- /dev/null +++ b/ngraph/core/include/ngraph/op/lstm_cell.hpp @@ -0,0 +1,297 @@ +//***************************************************************************** +// Copyright 2017-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** + +#pragma once + +#include +#include +#include +#include + +#include "ngraph/node.hpp" +#include "ngraph/op/util/activation_functions.hpp" +#include "ngraph/op/util/fused_op.hpp" +#include "ngraph/op/util/rnn_cell_base.hpp" + +NGRAPH_SUPPRESS_DEPRECATED_START + +namespace ngraph +{ + namespace op + { + enum class LSTMWeightsFormat + { + FICO, // IE + ICOF, // PyTorch + IFCO, // DNNL, TF, MxNet + IFOC, // Caffe + IOFC, // ONNX + }; + + namespace v0 + { + /// + /// \brief Class for single lstm cell node. + /// + /// \note Following implementation supports: + /// \li \c peepholes Gers & Schmidhuber (2000) + /// https://ieeexplore.ieee.org/document/861302 + /// \li Coupling input and forget gates. + /// + /// \note It calculates following equations: + /// + /// it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi) + /// ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf) + /// ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc) + /// Ct = ft (.) Ct-1 + it (.) ct + /// ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo) + /// Ht = ot (.) h(Ct) + /// + /// * - Is a dot product, + /// (.) - is a Hadamard product (element-wise), + /// f, g, h - are activation functions. + /// + /// \note This class represents only single *cell* (for current time step) and not + /// the whole LSTM Sequence layer + /// + /// \sa LSTMSequence, RNNCell, GRUCell + /// + class NGRAPH_API LSTMCell : public util::FusedOp, public util::RNNCellBase + { + public: + static constexpr NodeTypeInfo type_info{"LSTMCell", 0}; + const NodeTypeInfo& get_type_info() const override { return type_info; } + LSTMCell(); + /// + /// \brief Constructs LSTMCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] initial_cell_state The cell state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The gate weights tensor with shape: + /// [4*hidden_size, input_size]. 
+ /// \param[in] R The recurrence weights tensor with shape: + /// [4*hidden_size, hidden_size]. + /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// \param[in] weights_format The order of gates in weights tensors. The + /// default format is IFCO since it is used by + /// DNNL. + /// \param[in] activations The vector of activation functions used inside + /// recurrent cell. + /// \param[in] activations_alpha The vector of alpha parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] activations_beta The vector of beta parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] clip The value defining clipping range [-clip, + /// clip] on input of activation functions. + /// \param[in] input_forget Controls coupling input and forget gates. + /// + LSTMCell(const Output& X, + const Output& initial_hidden_state, + const Output& initial_cell_state, + const Output& W, + const Output& R, + std::size_t hidden_size, + LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO, + const std::vector& activations = + std::vector{"sigmoid", "tanh", "tanh"}, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + float clip = 0.f, + bool input_forget = false); + + /// + /// \brief Constructs LSTMCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] initial_cell_state The cell state tensor at current time step + /// with shape: [batch_size, hidden_size]. + /// \param[in] W The weight tensor with shape: [4*hidden_size, + /// input_size]. + /// \param[in] R The recurrence weight tensor with shape: + /// [4*hidden_size, hidden_size]. + /// \param[in] B The bias tensor for gates with shape: + /// [4*hidden_size]. + /// \param[in] hidden_size The number of hidden units for recurrent cell. + /// \param[in] weights_format The order of gates in weights tensors. The + /// default format is IFCO since it is used by + /// DNNL. + /// \param[in] activations The vector of activation functions used inside + /// recurrent cell. + /// \param[in] activations_alpha The vector of alpha parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] activations_beta The vector of beta parameters for activation + /// functions in order respective to activation + /// list. + /// \param[in] clip The value defining clipping range [-clip, + /// clip] on input of activation functions. + /// \param[in] input_forget Controls coupling input and forget gates. + /// + LSTMCell(const Output& X, + const Output& initial_hidden_state, + const Output& initial_cell_state, + const Output& W, + const Output& R, + const Output& B, + std::size_t hidden_size, + LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO, + const std::vector& activations = + std::vector{"sigmoid", "tanh", "tanh"}, + const std::vector& activations_alpha = {}, + const std::vector& activations_beta = {}, + float clip = 0.f, + bool input_forget = false); + + /// + /// \brief Constructs LSTMCell node. + /// + /// \param[in] X The input tensor with shape: [batch_size, + /// input_size]. + /// \param[in] initial_hidden_state The hidden state tensor at current time step + /// with shape: [batch_size, hidden_size]. 
+ /// \param[in] initial_cell_state The cell state tensor at current time step
+ /// with shape: [batch_size, hidden_size].
+ /// \param[in] W The weight tensor with shape: [4*hidden_size,
+ /// input_size].
+ /// \param[in] R The recurrence weight tensor with shape:
+ /// [4*hidden_size, hidden_size].
+ /// \param[in] B The bias tensor for gates with shape:
+ /// [4*hidden_size].
+ /// \param[in] P The weight tensor for peepholes with shape:
+ /// [3*hidden_size] - 3 corresponds to the i, o, f
+ /// gates only. The order is: input, output,
+ /// forget gates.
+ /// \param[in] hidden_size The number of hidden units for recurrent cell.
+ /// \param[in] weights_format The order of gates in weights tensors. The
+ /// default format is IFCO since it is used by
+ /// DNNL.
+ /// \param[in] activations The vector of activation functions used inside
+ /// recurrent cell.
+ /// \param[in] activations_alpha The vector of alpha parameters for activation
+ /// functions in order respective to activation
+ /// list.
+ /// \param[in] activations_beta The vector of beta parameters for activation
+ /// functions in order respective to activation
+ /// list.
+ /// \param[in] clip The value defining clipping range [-clip,
+ /// clip] on input of activation functions.
+ /// \param[in] input_forget Controls coupling input and forget gates.
+ ///
+ LSTMCell(const Output<Node>& X,
+ const Output<Node>& initial_hidden_state,
+ const Output<Node>& initial_cell_state,
+ const Output<Node>& W,
+ const Output<Node>& R,
+ const Output<Node>& B,
+ const Output<Node>& P,
+ std::size_t hidden_size,
+ LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO,
+ const std::vector<std::string>& activations =
+ std::vector<std::string>{"sigmoid", "tanh", "tanh"},
+ const std::vector<float>& activations_alpha = {},
+ const std::vector<float>& activations_beta = {},
+ float clip = 0.f,
+ bool input_forget = false);
+
+ bool visit_attributes(AttributeVisitor& visitor) override;
+ virtual void pre_validate_and_infer_types() override;
+ virtual OutputVector decompose_op() const override;
+ virtual std::shared_ptr<Node>
+ clone_with_new_inputs(const OutputVector& new_args) const override;
+
+ bool get_input_forget() const { return m_input_forget; }
+ LSTMWeightsFormat get_weights_format() const { return m_weights_format; }
+ ///
+ /// \brief Change data format of provided node into IFCO.
+ ///
+ /// \note The IFCO format was chosen because it's the default DNNL format.
+ ///
+ /// \param[in] node The input node to be permuted.
+ ///
+ /// \return Node representing reshaped tensor according to IFCO weights format.
+ ///
+ std::shared_ptr<Node> convert_node_format(const Output<Node>& node) const;
+
+ private:
+ ///
+ /// \brief Creates the default bias input initialized with zeros.
+ ///
+ /// \return The object of Output class.
+ ///
+ Output<Node> get_default_bias_input() const;
+
+ ///
+ /// \brief Creates the default peepholes input initialized with zeros.
+ ///
+ /// \return The object of Output class.
+ ///
+ Output<Node> get_default_peepholes_input() const;
+ ///
+ /// \brief The Activation function f.
+ ///
+ util::ActivationFunction m_activation_f;
+ ///
+ /// \brief The Activation function g.
+ ///
+ util::ActivationFunction m_activation_g;
+ ///
+ /// \brief The Activation function h.
+ ///
+ util::ActivationFunction m_activation_h;
+ ///
+ /// \brief Controls whether to couple input and forget gates.
+ ///
+ bool m_input_forget = false;
+
+ ///
+ /// \brief The order of gates in weights tensors.
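+ ///
+ /// \note The gate letters stand for: i - input, f - forget, c - cell,
+ /// o - output; e.g. IFCO stores the input, forget, cell and output
+ /// gate weights in that order.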
+ ///
+ LSTMWeightsFormat m_weights_format;
+
+ static constexpr std::size_t s_gates_count{4};
+ static constexpr std::size_t s_peepholes_count{3};
+ };
+ }
+ using v0::LSTMCell;
+ } // namespace op
+
+ NGRAPH_API
+ std::ostream& operator<<(std::ostream& s, const op::LSTMWeightsFormat& type);
+
+ template <>
+ class NGRAPH_API AttributeAdapter<op::LSTMWeightsFormat>
+ : public EnumAttributeAdapterBase<op::LSTMWeightsFormat>
+ {
+ public:
+ AttributeAdapter(op::LSTMWeightsFormat& value)
+ : EnumAttributeAdapterBase<op::LSTMWeightsFormat>(value)
+ {
+ }
+
+ static constexpr DiscreteTypeInfo type_info{"AttributeAdapter<op::LSTMWeightsFormat>", 1};
+ const DiscreteTypeInfo& get_type_info() const override { return type_info; }
+ };
+} // namespace ngraph
+
+NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/core/include/ngraph/op/lstm_sequence.hpp b/ngraph/core/include/ngraph/op/lstm_sequence.hpp
new file mode 100644
index 0000000..f5ce715
--- /dev/null
+++ b/ngraph/core/include/ngraph/op/lstm_sequence.hpp
@@ -0,0 +1,193 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+
+#include "ngraph/node.hpp"
+#include "ngraph/op/constant.hpp"
+#include "ngraph/op/lstm_cell.hpp"
+#include "ngraph/op/util/attr_types.hpp"
+#include "ngraph/op/util/fused_op.hpp"
+
+NGRAPH_SUPPRESS_DEPRECATED_START
+
+namespace ngraph
+{
+ namespace op
+ {
+ namespace v0
+ {
+ ///
+ /// \brief Class for lstm sequence node.
+ ///
+ /// \note It follows notation and equations defined as in ONNX standard:
+ /// https://github.com/onnx/onnx/blob/master/docs/Operators.md#LSTM
+ ///
+ /// \sa LSTMCell, RNNCell, GRUCell
+ ///
+ ///
+ class NGRAPH_API LSTMSequence : public util::FusedOp
+ {
+ public:
+ static constexpr NodeTypeInfo type_info{"LSTMSequence", 0};
+ const NodeTypeInfo& get_type_info() const override { return type_info; }
+ LSTMSequence() = default;
+
+ using direction = RecurrentSequenceDirection;
+
+ size_t get_default_output_index() const override { return no_default_index(); }
+ explicit LSTMSequence(const Output<Node>& X,
+ const Output<Node>& initial_hidden_state,
+ const Output<Node>& initial_cell_state,
+ const Output<Node>& sequence_lengths,
+ const Output<Node>& W,
+ const Output<Node>& R,
+ const Output<Node>& B,
+ const Output<Node>& P,
+ const std::int64_t hidden_size,
+ const direction lstm_direction,
+ LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO,
+ const std::vector<float> activations_alpha = {},
+ const std::vector<float> activations_beta = {},
+ const std::vector<std::string> activations = {"sigmoid",
+ "tanh",
+ "tanh"},
+ const float clip_threshold = 0,
+ const bool input_forget = false)
+ : FusedOp({X,
+ initial_hidden_state,
+ initial_cell_state,
+ sequence_lengths,
+ W,
+ R,
+ B,
+ P})
+ , m_activations_alpha(activations_alpha)
+ , m_activations_beta(activations_beta)
+ , m_activations(activations)
+ , m_clip_threshold(clip_threshold)
+ , m_direction(lstm_direction)
+ , m_hidden_size(hidden_size)
+ , m_input_forget(input_forget)
+ , m_weights_format(weights_format)
+ {
+ constructor_validate_and_infer_types();
+ }
+
+ explicit LSTMSequence(const Output<Node>& X,
+ const Output<Node>& initial_hidden_state,
+ const Output<Node>& initial_cell_state,
+ const Output<Node>& sequence_lengths,
+ const Output<Node>& W,
+ const Output<Node>& R,
+ const Output<Node>& B,
+ const std::int64_t hidden_size,
+ const direction lstm_direction,
+ LSTMWeightsFormat weights_format = LSTMWeightsFormat::IFCO,
+ const std::vector<float> activations_alpha = {},
+ const std::vector<float> activations_beta = {},
+ const std::vector<std::string> activations = {"sigmoid",
+ "tanh",
+ "tanh"},
+ const float clip_threshold = 0,
+ const bool input_forget = false)
+ : LSTMSequence(
+ X,
+ initial_hidden_state,
+ initial_cell_state,
+ sequence_lengths,
+ W,
+ R,
+ B,
+ Constant::create(
+ element::f32,
+ Shape{(lstm_direction == direction::BIDIRECTIONAL ? 2UL : 1UL),
+ 3UL * static_cast<size_t>(hidden_size)},
+ std::vector<float>{0.f}),
+ hidden_size,
+ lstm_direction,
+ weights_format,
+ activations_alpha,
+ activations_beta,
+ activations,
+ clip_threshold,
+ input_forget)
+ {
+ }
+
+ bool visit_attributes(AttributeVisitor& visitor) override;
+ virtual OutputVector decompose_op() const override;
+
+ virtual std::shared_ptr<Node>
+ clone_with_new_inputs(const OutputVector& new_args) const override;
+
+ std::vector<float> get_activations_alpha() const { return m_activations_alpha; }
+ std::vector<float> get_activations_beta() const { return m_activations_beta; }
+ std::vector<std::string> get_activations() const { return m_activations; }
+ float get_clip_threshold() const { return m_clip_threshold; }
+ direction get_direction() const { return m_direction; }
+ std::int64_t get_hidden_size() const { return m_hidden_size; }
+ bool get_input_forget() const { return m_input_forget; }
+ LSTMWeightsFormat get_weights_format() const { return m_weights_format; }
+ private:
+ ///
+ /// \brief Gets the masked value according to sequence length in a batch.
+ ///
+ /// \note Zeros out values or sets them to the default value for inputs with
+ /// sequence length shorter than the currently processed time step.
+ ///
+ /// \param[in] data The input value.
+ /// \param[in] time_step The current time step denoting sequence length.
+ /// \param[in] batch_axis The batch axis index of data tensor.
+ /// \param[in] default_value The default value for masked elements.
+ ///
+ /// \return The masked value.
+ ///
+ std::shared_ptr<Node>
+ get_masked_node(const Output<Node>& data,
+ std::int32_t time_step,
+ std::size_t batch_axis = 0,
+ const Output<Node>& default_value = Output<Node>()) const;
+
+ OutputVector lstm_pass(bool is_reverse = false) const;
+
+ // Split(bi-directional) and squeeze input data to remove 'num_direction' dimension.
+ std::shared_ptr<Node> prepare_input(Output<Node> node,
+ bool is_reverse,
+ size_t num_direction_axis = 0) const;
+
+ std::vector<float> m_activations_alpha;
+ std::vector<float> m_activations_beta;
+ std::vector<std::string> m_activations;
+ float m_clip_threshold;
+ direction m_direction;
+ std::int64_t m_hidden_size;
+ bool m_input_forget;
+ LSTMWeightsFormat m_weights_format;
+ };
+ }
+ using v0::LSTMSequence;
+ } // namespace op
+
+} // namespace ngraph
+
+NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/core/include/ngraph/op/matmul.hpp b/ngraph/core/include/ngraph/op/matmul.hpp
new file mode 100644
index 0000000..a920fab
--- /dev/null
+++ b/ngraph/core/include/ngraph/op/matmul.hpp
@@ -0,0 +1,70 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include "ngraph/node.hpp"
+#include "ngraph/op/op.hpp"
+#include "ngraph/op/util/fused_op.hpp"
+
+NGRAPH_SUPPRESS_DEPRECATED_START
+
+namespace ngraph
+{
+ namespace op
+ {
+ namespace v0
+ {
+ /// \brief Operator performing Matrix Multiplication.
+ class NGRAPH_API MatMul : public ngraph::op::util::FusedOp
+ {
+ public:
+ NGRAPH_RTTI_DECLARATION;
+ MatMul() = default;
+ /// \brief Constructs a Matrix Multiplication operation.
+ ///
+ /// \param A Matrix A
+ /// \param B Matrix B
+ /// \param transpose_a If matrix A should be transposed.
+ /// \param transpose_b If matrix B should be transposed.
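+ ///
+ /// \note A minimal construction sketch (shapes are illustrative
+ /// assumptions):
+ /// \code
+ /// auto A = std::make_shared<op::Parameter>(element::f32, Shape{2, 3});
+ /// auto B = std::make_shared<op::Parameter>(element::f32, Shape{3, 4});
+ /// // With both transpose flags at their defaults, the result shape is {2, 4}.
+ /// auto mm = std::make_shared<op::v0::MatMul>(A, B);
+ /// \endcode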
+ MatMul(const Output<Node>& A,
+ const Output<Node>& B,
+ const bool& transpose_a = 0,
+ const bool& transpose_b = 0);
+
+ bool visit_attributes(AttributeVisitor& visitor) override;
+ virtual void pre_validate_and_infer_types() override;
+
+ virtual OutputVector decompose_op() const override;
+
+ virtual std::shared_ptr<Node>
+ clone_with_new_inputs(const OutputVector& new_args) const override;
+
+ bool evaluate(const HostTensorVector& outputs,
+ const HostTensorVector& inputs) const override;
+
+ bool get_transpose_a() const { return m_transpose_a; }
+ bool get_transpose_b() const { return m_transpose_b; }
+ private:
+ bool m_transpose_a;
+ bool m_transpose_b;
+ };
+ }
+ using v0::MatMul;
+ } // namespace op
+} // namespace ngraph
+
+NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/core/include/ngraph/op/mod.hpp b/ngraph/core/include/ngraph/op/mod.hpp
new file mode 100644
index 0000000..b022780
--- /dev/null
+++ b/ngraph/core/include/ngraph/op/mod.hpp
@@ -0,0 +1,62 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include "ngraph/node.hpp"
+#include "ngraph/op/op.hpp"
+#include "ngraph/op/util/fused_op.hpp"
+
+NGRAPH_SUPPRESS_DEPRECATED_START
+
+namespace ngraph
+{
+ namespace op
+ {
+ namespace v1
+ {
+ /// \brief Mod returns an element-wise division remainder with two given tensors applying
+ /// multi-directional broadcast rules.
+ class NGRAPH_API Mod : public ngraph::op::util::FusedOp
+ {
+ public:
+ static constexpr NodeTypeInfo type_info{"Mod", 0};
+ const NodeTypeInfo& get_type_info() const override { return type_info; }
+ Mod() = default;
+ /// \brief Constructs a Mod node.
+ ///
+ /// \param A - Dividend tensor
+ /// \param B - Divisor tensor
+ /// \param auto_broadcast Auto broadcast specification
+ Mod(const Output<Node>& A,
+ const Output<Node>& B,
+ const AutoBroadcastSpec& auto_broadcast = AutoBroadcastType::NUMPY);
+
+ bool visit_attributes(AttributeVisitor& visitor) override;
+ virtual OutputVector decompose_op() const override;
+
+ virtual std::shared_ptr<Node>
+ clone_with_new_inputs(const OutputVector& new_args) const override;
+
+ const AutoBroadcastSpec& get_auto_broadcast() const { return m_auto_broadcast; }
+ private:
+ AutoBroadcastSpec m_auto_broadcast;
+ };
+ }
+ }
+}
+
+NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/core/include/ngraph/op/mvn.hpp b/ngraph/core/include/ngraph/op/mvn.hpp
new file mode 100644
index 0000000..6b48109
--- /dev/null
+++ b/ngraph/core/include/ngraph/op/mvn.hpp
@@ -0,0 +1,93 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include "ngraph/node.hpp"
+#include "ngraph/op/op.hpp"
+#include "ngraph/op/util/fused_op.hpp"
+
+NGRAPH_SUPPRESS_DEPRECATED_START
+
+namespace ngraph
+{
+ namespace op
+ {
+ namespace v0
+ {
+ /// \brief Operator performing Mean Variance Normalization
+ ///
+ class NGRAPH_API MVN : public ngraph::op::util::FusedOp
+ {
+ public:
+ static constexpr NodeTypeInfo type_info{"MVN", 0};
+ const NodeTypeInfo& get_type_info() const override { return type_info; }
+ MVN() = default;
+ /// \brief Constructs an MVN operation.
+ ///
+ /// \param data Input tensor with data
+ /// \param normalize_variance flag that denotes whether to perform variance normalization.
+ /// \param across_channels flag that denotes if mean values are shared across channels.
+ /// \param eps the number to be added to the variance to avoid division by zero when normalizing the value
+ ///
+ MVN(const Output<Node>& data,
+ bool across_channels = true,
+ bool normalize_variance = true,
+ double eps = 1e-9);
+
+ /// \brief Constructs an MVN operation.
+ ///
+ /// \param data Input tensor with data
+ /// \param reduction_axes A list of axes, along which to reduce.
+ /// \param normalize_variance flag that denotes whether to perform variance normalization.
+ /// \param eps the number to be added to the variance to avoid division by zero when normalizing the value
+ ///
+ MVN(const Output<Node>& data,
+ AxisSet reduction_axes,
+ bool normalize_variance = true,
+ double eps = 1e-9);
+
+ virtual OutputVector decompose_op() const override;
+
+ virtual void validate_and_infer_types() override;
+
+ virtual bool visit_attributes(AttributeVisitor& visitor) override;
+
+ virtual std::shared_ptr<Node>
+ clone_with_new_inputs(const OutputVector& new_args) const override;
+
+ double get_eps() const { return m_eps; }
+ bool get_across_channels() const { return m_across_channels; }
+ bool get_normalize_variance() const { return m_normalize_variance; }
+ AxisSet get_reduction_axes() const { return m_reduction_axes; }
+ void set_reduction_axes(AxisSet axes) { m_reduction_axes = axes; }
+ private:
+ double m_eps = 1e-9;
+ bool m_across_channels;
+ bool m_normalize_variance;
+ AxisSet m_reduction_axes;
+ };
+ }
+ using v0::MVN;
+ } // namespace op
+} // namespace ngraph
+
+NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/core/include/ngraph/op/normalize_l2.hpp b/ngraph/core/include/ngraph/op/normalize_l2.hpp
new file mode 100644
index 0000000..d8f3a25
--- /dev/null
+++ b/ngraph/core/include/ngraph/op/normalize_l2.hpp
@@ -0,0 +1,76 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include <string>
+
+#include "ngraph/node.hpp"
+#include "ngraph/op/util/attr_types.hpp"
+#include "ngraph/op/util/fused_op.hpp"
+
+NGRAPH_SUPPRESS_DEPRECATED_START
+
+namespace ngraph
+{
+ namespace op
+ {
+ namespace v0
+ {
+ /// \brief Normalizes the input tensor with the L2 norm.
+ ///
+ class NGRAPH_API NormalizeL2 : public ngraph::op::util::FusedOp
+ {
+ public:
+ static constexpr NodeTypeInfo type_info{"NormalizeL2", 0};
+ const NodeTypeInfo& get_type_info() const override { return type_info; }
+ NormalizeL2() = default;
+ ///
+ /// \brief Constructs a Normalize operation.
+ ///
+ /// \param data - Node producing the input tensor
+ /// \param axes - Node indicating axes along which reduction is calculated
+ /// \param eps - The epsilon added to L2 norm.
+ /// \param eps_mode - Specifies how eps is combined with the L2 value calculated
+ /// before division
+ ///
+ NormalizeL2(const Output<Node>& data,
+ const Output<Node>& axes,
+ float eps,
+ EpsMode eps_mode);
+
+ bool visit_attributes(AttributeVisitor& visitor) override;
+ float get_eps() const { return m_eps; }
+ EpsMode get_eps_mode() const { return m_eps_mode; }
+ virtual OutputVector decompose_op() const override;
+ virtual void pre_validate_and_infer_types() override;
+ AxisSet get_reduction_axes() const;
+
+ virtual std::shared_ptr<Node>
+ clone_with_new_inputs(const OutputVector& new_args) const override;
+
+ protected:
+ float m_eps;
+ EpsMode m_eps_mode;
+ };
+ }
+ using v0::NormalizeL2;
+ }
+}
+
+NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/core/include/ngraph/op/prelu.hpp b/ngraph/core/include/ngraph/op/prelu.hpp
new file mode 100644
index 0000000..9cf682c
--- /dev/null
+++ b/ngraph/core/include/ngraph/op/prelu.hpp
@@ -0,0 +1,63 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include "ngraph/node.hpp"
+#include "ngraph/op/op.hpp"
+#include "ngraph/op/util/fused_op.hpp"
+
+NGRAPH_SUPPRESS_DEPRECATED_START
+
+namespace ngraph
+{
+ namespace op
+ {
+ namespace v0
+ {
+ /// \brief Parametrized Relu
+ /// x < 0 => f(x) = x * slope
+ /// x >= 0 => f(x) = x
+ ///
+ class NGRAPH_API PRelu : public ngraph::op::util::FusedOp
+ {
+ public:
+ static constexpr NodeTypeInfo type_info{"PRelu", 0};
+ const NodeTypeInfo& get_type_info() const override { return type_info; }
+ PRelu() = default;
+ /// \brief Constructs a PRelu operation.
+ ///
+ /// \param data Input tensor
+ /// \param slope Multipliers for negative values
+ PRelu(const Output<Node>& data, const Output<Node>& slope);
+
+ bool visit_attributes(AttributeVisitor& visitor) override;
+ virtual OutputVector decompose_op() const override;
+
+ virtual std::shared_ptr<Node>
+ clone_with_new_inputs(const OutputVector& new_args) const override;
+
+ void pre_validate_and_infer_types() override;
+
+ bool evaluate(const HostTensorVector& outputs,
+ const HostTensorVector& inputs) const override;
+ };
+ }
+ using v0::PRelu;
+ }
+}
+
+NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/core/include/ngraph/op/rnn_cell.hpp b/ngraph/core/include/ngraph/op/rnn_cell.hpp
new file mode 100644
index 0000000..3dc4264
--- /dev/null
+++ b/ngraph/core/include/ngraph/op/rnn_cell.hpp
@@ -0,0 +1,158 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include <cstddef>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "ngraph/node.hpp"
+#include "ngraph/op/util/activation_functions.hpp"
+#include "ngraph/op/util/fused_op.hpp"
+#include "ngraph/op/util/rnn_cell_base.hpp"
+
+NGRAPH_SUPPRESS_DEPRECATED_START
+
+namespace ngraph
+{
+ namespace op
+ {
+ namespace v0
+ {
+ ///
+ /// \brief Class for single RNN cell node.
+ ///
+ /// \note It follows notation and equations defined in the ONNX standard:
+ /// https://github.com/onnx/onnx/blob/master/docs/Operators.md#RNN
+ ///
+ /// \note It calculates the following equations:
+ ///
+ /// Ht = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Wbi + Rbi)
+ ///
+ /// * - Is a dot product,
+ /// f - is an activation function.
+ ///
+ /// \note This class represents only a single *cell* (for the current time step)
+ /// and not the whole RNN sequence layer
+ ///
+ /// \sa LSTMSequence, LSTMCell, GRUCell
+ ///
+ class NGRAPH_API RNNCell : public util::FusedOp, public util::RNNCellBase
+ {
+ public:
+ static constexpr NodeTypeInfo type_info{"RNNCell", 0};
+ const NodeTypeInfo& get_type_info() const override { return type_info; }
+ RNNCell();
+ ///
+ /// \brief Constructs RNNCell node.
+ ///
+ /// \param[in] X The input tensor with shape: [batch_size, input_size].
+ /// \param[in] initial_hidden_state The hidden state tensor at current time step
+ /// with shape: [batch_size, hidden_size].
+ /// \param[in] W The weight tensor with shape: [hidden_size, input_size].
+ /// \param[in] R The recurrence weight tensor with shape:
+ /// [hidden_size, hidden_size].
+ /// \param[in] hidden_size The number of hidden units for recurrent cell.
+ /// \param[in] activations The vector of activation functions used inside
+ /// recurrent cell.
+ /// \param[in] activations_alpha The vector of alpha parameters for activation
+ /// functions in order respective to activation list.
+ /// \param[in] activations_beta The vector of beta parameters for activation
+ /// functions in order respective to activation list.
+ /// \param[in] clip The value defining clipping range [-clip, clip]
+ /// on input of activation functions.
+ ///
+ RNNCell(
+ const Output<Node>& X,
+ const Output<Node>& initial_hidden_state,
+ const Output<Node>& W,
+ const Output<Node>& R,
+ std::size_t hidden_size,
+ const std::vector<std::string>& activations = std::vector<std::string>{"tanh"},
+ const std::vector<float>& activations_alpha = {},
+ const std::vector<float>& activations_beta = {},
+ float clip = 0.f);
+
+ ///
+ /// \brief Constructs RNNCell node.
+ ///
+ /// \param[in] X The input tensor with shape: [batch_size, input_size].
+ /// \param[in] initial_hidden_state The hidden state tensor at current time step
+ /// with shape: [batch_size, hidden_size].
+ /// \param[in] W The weight tensor with shape: [hidden_size, input_size].
+ /// \param[in] R The recurrence weight tensor with shape:
+ /// [hidden_size, hidden_size].
+ /// \param[in] B The bias tensor for input gate with shape: [hidden_size].
+ /// \param[in] hidden_size The number of hidden units for recurrent cell.
+ /// \param[in] activations The vector of activation functions used inside
+ /// recurrent cell.
+ /// \param[in] activations_alpha The vector of alpha parameters for activation
+ /// functions in order respective to activation list.
+ /// \param[in] activations_beta The vector of beta parameters for activation
+ /// functions in order respective to activation list.
+ /// \param[in] clip The value defining clipping range [-clip, clip]
+ /// on input of activation functions.
+ ///
+ RNNCell(
+ const Output<Node>& X,
+ const Output<Node>& initial_hidden_state,
+ const Output<Node>& W,
+ const Output<Node>& R,
+ const Output<Node>& B,
+ std::size_t hidden_size,
+ const std::vector<std::string>& activations = std::vector<std::string>{"tanh"},
+ const std::vector<float>& activations_alpha = {},
+ const std::vector<float>& activations_beta = {},
+ float clip = 0.f);
+
+ bool visit_attributes(AttributeVisitor& visitor) override;
+ virtual void pre_validate_and_infer_types() override;
+ virtual OutputVector decompose_op() const override;
+ virtual std::shared_ptr<Node>
+ clone_with_new_inputs(const OutputVector& new_args) const override;
+
+ private:
+ ///
+ /// \brief Creates the default bias input initialized with zeros.
+ ///
+ /// \return The object of Output class.
+ ///
+ Output<Node> get_default_bias_input() const;
+
+ ///
+ /// \brief The Activation function f.
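+ /// (Editor's note, not part of this patch: with the default constructor
+ /// arguments above this resolves to "tanh".)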
+ ///
+ util::ActivationFunction m_activation_f;
+
+ static constexpr std::size_t s_gates_count{1};
+ };
+ }
+ using v0::RNNCell;
+ } // namespace op
+} // namespace ngraph
+
+NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/core/include/ngraph/op/selu.hpp b/ngraph/core/include/ngraph/op/selu.hpp
new file mode 100644
index 0000000..25af4b8
--- /dev/null
+++ b/ngraph/core/include/ngraph/op/selu.hpp
@@ -0,0 +1,58 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include "ngraph/node.hpp"
+#include "ngraph/op/op.hpp"
+#include "ngraph/op/util/fused_op.hpp"
+
+NGRAPH_SUPPRESS_DEPRECATED_START
+
+namespace ngraph
+{
+ namespace op
+ {
+ namespace v0
+ {
+ /// \brief Performs a SELU activation function on all elements of the input node
+ class NGRAPH_API Selu : public ngraph::op::util::FusedOp
+ {
+ public:
+ static constexpr NodeTypeInfo type_info{"Selu", 0};
+ const NodeTypeInfo& get_type_info() const override { return type_info; }
+ Selu() = default;
+ /// \brief Constructs a Selu node.
+ ///
+ /// \param data - Node producing the input tensor
+ /// \param alpha - Alpha coefficient of SELU operation
+ /// \param lambda - Lambda coefficient of SELU operation
+ Selu(const Output<Node>& data,
+ const Output<Node>& alpha,
+ const Output<Node>& lambda);
+
+ bool visit_attributes(AttributeVisitor& visitor) override;
+ virtual OutputVector decompose_op() const override;
+
+ virtual std::shared_ptr<Node>
+ clone_with_new_inputs(const OutputVector& new_args) const override;
+ };
+ }
+ using v0::Selu;
+ } // namespace op
+} // namespace ngraph
+
+NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/core/include/ngraph/op/shuffle_channels.hpp b/ngraph/core/include/ngraph/op/shuffle_channels.hpp
new file mode 100644
index 0000000..aa7daf7
--- /dev/null
+++ b/ngraph/core/include/ngraph/op/shuffle_channels.hpp
@@ -0,0 +1,80 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include <memory>
+
+#include "ngraph/node.hpp"
+#include "ngraph/op/util/fused_op.hpp"
+
+NGRAPH_SUPPRESS_DEPRECATED_START
+
+namespace ngraph
+{
+ namespace op
+ {
+ namespace v0
+ {
+ /// \brief Permutes data in the channel dimension of the input
+ class NGRAPH_API ShuffleChannels : public ngraph::op::util::FusedOp
+ {
+ public:
+ static constexpr NodeTypeInfo type_info{"ShuffleChannels", 0};
+ const NodeTypeInfo& get_type_info() const override { return type_info; }
+ ShuffleChannels() = default;
+ /// \brief Constructs a ShuffleChannels node.
+ ///
+ /// \param data - Node producing the input tensor
+ /// \param axis - channel dimension index in the data tensor. A negative value
+ /// means that the index should be calculated from the back of the
+ /// input data shape.
+ /// \param group - number of groups the channel dimension specified by axis should
+ /// be split into
+ ShuffleChannels(const Output<Node>& data,
+ const int64_t axis = 1,
+ const int64_t group = 1);
+
+ bool visit_attributes(AttributeVisitor& visitor) override;
+ size_t get_zero_based_axis() const;
+
+ virtual void pre_validate_and_infer_types() override;
+
+ virtual OutputVector decompose_op() const override;
+
+ virtual std::shared_ptr<Node>
+ clone_with_new_inputs(const OutputVector& new_args) const override;
+
+ int64_t get_axis() const { return m_axis; }
+ int64_t get_group() const { return m_group; }
+ private:
+ /// \brief Generates a shape required to permute the data
+ ///
+ /// \param data_shape - Shape of the original input data tensor
+ /// \return A 4D shape to be used to reshape the input data before shuffling it
+ Shape get_pre_shuffle_shape(const Shape& data_shape) const;
+
+ int64_t m_axis;
+ int64_t m_group;
+ };
+ }
+ using v0::ShuffleChannels;
+ }
+}
+
+NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/core/include/ngraph/op/space_to_depth.hpp b/ngraph/core/include/ngraph/op/space_to_depth.hpp
new file mode 100644
index 0000000..2a35d83
--- /dev/null
+++ b/ngraph/core/include/ngraph/op/space_to_depth.hpp
@@ -0,0 +1,100 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include "ngraph/node.hpp"
+#include "ngraph/op/util/fused_op.hpp"
+
+NGRAPH_SUPPRESS_DEPRECATED_START
+
+namespace ngraph
+{
+ namespace op
+ {
+ namespace v0
+ {
+ /// \brief SpaceToDepth permutes input tensor blocks of spatial data into depth
+ /// dimension.
+ ///
+ /// \note Values from the height and width dimensions are moved to the depth dimension.
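+ /// (Editor's example, not part of this patch: with BLOCKS_FIRST mode and
+ /// block_size = 2, a [1, 1, 4, 4] input yields a [1, 4, 2, 2] output.)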
+ ///
+ /// Output node produces a tensor with shape:
+ /// [N, C * blocksize * blocksize, H / blocksize, W / blocksize]
+ class NGRAPH_API SpaceToDepth : public ngraph::op::util::FusedOp
+ {
+ public:
+ static constexpr NodeTypeInfo type_info{"SpaceToDepth", 0};
+ const NodeTypeInfo& get_type_info() const override { return type_info; }
+ enum class SpaceToDepthMode
+ {
+ // The output depth is gathered from [block_size, ..., block_size, C]
+ BLOCKS_FIRST,
+ // The output depth is gathered from [C, block_size, ..., block_size]
+ DEPTH_FIRST
+ };
+
+ SpaceToDepth() = default;
+ /// \brief Constructs a SpaceToDepth operation.
+ ///
+ /// \param data - Node producing the input tensor
+ /// \param mode Specifies how the output depth dimension is gathered
+ /// from block coordinates and the old depth dimension.
+ /// \param block_size - the size of the block of values to be moved
+ SpaceToDepth(const Output<Node>& data,
+ const SpaceToDepthMode& mode,
+ std::size_t block_size = 1);
+
+ SpaceToDepth(const Output<Node>& data,
+ const std::string& mode,
+ std::size_t block_size = 1);
+
+ bool visit_attributes(AttributeVisitor& visitor) override;
+ std::size_t get_block_size() const { return m_blocksize; }
+ SpaceToDepthMode get_mode() const { return m_mode; }
+ virtual OutputVector decompose_op() const override;
+
+ virtual std::shared_ptr<Node>
+ clone_with_new_inputs(const OutputVector& new_args) const override;
+
+ protected:
+ std::size_t m_blocksize;
+ SpaceToDepthMode m_mode;
+ };
+ }
+ using v0::SpaceToDepth;
+ } // namespace op
+
+ NGRAPH_API
+ std::ostream& operator<<(std::ostream& s, const op::v0::SpaceToDepth::SpaceToDepthMode& type);
+
+ template <>
+ class NGRAPH_API AttributeAdapter<op::v0::SpaceToDepth::SpaceToDepthMode>
+ : public EnumAttributeAdapterBase<op::v0::SpaceToDepth::SpaceToDepthMode>
+ {
+ public:
+ AttributeAdapter(op::v0::SpaceToDepth::SpaceToDepthMode& value)
+ : EnumAttributeAdapterBase<op::v0::SpaceToDepth::SpaceToDepthMode>(value)
+ {
+ }
+
+ static constexpr DiscreteTypeInfo type_info{
+ "AttributeAdapter<op::v0::SpaceToDepth::SpaceToDepthMode>", 0};
+ const DiscreteTypeInfo& get_type_info() const override { return type_info; }
+ };
+} // namespace ngraph
+
+NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/core/include/ngraph/op/split.hpp b/ngraph/core/include/ngraph/op/split.hpp
index 2322720..7387987 100644
--- a/ngraph/core/include/ngraph/op/split.hpp
+++ b/ngraph/core/include/ngraph/op/split.hpp
@@ -22,6 +22,8 @@
 #include "ngraph/node.hpp"
 #include "ngraph/op/util/fused_op.hpp"
 
+NGRAPH_SUPPRESS_DEPRECATED_START
+
 namespace ngraph
 {
 namespace op
@@ -113,3 +115,5 @@ namespace ngraph
 using v0::Split;
 }
 }
+
+NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/core/include/ngraph/op/squared_difference.hpp b/ngraph/core/include/ngraph/op/squared_difference.hpp
new file mode 100644
index 0000000..ec6335d
--- /dev/null
+++ b/ngraph/core/include/ngraph/op/squared_difference.hpp
@@ -0,0 +1,70 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include "ngraph/node.hpp"
+#include "ngraph/op/op.hpp"
+#include "ngraph/op/util/fused_op.hpp"
+
+NGRAPH_SUPPRESS_DEPRECATED_START
+
+namespace ngraph
+{
+ namespace op
+ {
+ namespace v0
+ {
+ /// \brief Calculates an element-wise squared difference between two tensors
+ ///
+ /// y[i] = (x1[i] - x2[i])^2
+ class NGRAPH_API SquaredDifference : public ngraph::op::util::FusedOp
+ {
+ public:
+ static constexpr NodeTypeInfo type_info{"SquaredDifference", 0};
+ const NodeTypeInfo& get_type_info() const override { return type_info; }
+ SquaredDifference() = default;
+ /// \brief Constructs the squared difference operation.
+ ///
+ /// \param x1 First input tensor
+ /// \param x2 Second input tensor
+ /// \param auto_broadcast Auto broadcast specification
+ SquaredDifference(
+ const Output<Node>& x1,
+ const Output<Node>& x2,
+ const AutoBroadcastSpec& auto_broadcast = AutoBroadcastType::NUMPY);
+
+ bool visit_attributes(AttributeVisitor& visitor) override;
+ virtual OutputVector decompose_op() const override;
+
+ virtual std::shared_ptr<Node>
+ clone_with_new_inputs(const OutputVector& new_args) const override;
+
+ const AutoBroadcastSpec& get_autob() const override { return m_autobroadcast; }
+ void set_autob(const AutoBroadcastSpec& auto_broadcast)
+ {
+ m_autobroadcast = auto_broadcast;
+ }
+
+ private:
+ AutoBroadcastSpec m_autobroadcast;
+ };
+ }
+ using v0::SquaredDifference;
+ } // namespace op
+} // namespace ngraph
+
+NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/core/include/ngraph/op/squeeze.hpp b/ngraph/core/include/ngraph/op/squeeze.hpp
new file mode 100644
index 0000000..e08460f
--- /dev/null
+++ b/ngraph/core/include/ngraph/op/squeeze.hpp
@@ -0,0 +1,56 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include <memory>
+
+#include "ngraph/axis_vector.hpp"
+#include "ngraph/node.hpp"
+#include "ngraph/op/op.hpp"
+#include "ngraph/op/util/fused_op.hpp"
+
+NGRAPH_SUPPRESS_DEPRECATED_START
+
+namespace ngraph
+{
+ namespace op
+ {
+ namespace v0
+ {
+ class NGRAPH_API Squeeze : public ngraph::op::util::FusedOp
+ {
+ public:
+ static constexpr NodeTypeInfo type_info{"Squeeze", 0};
+ const NodeTypeInfo& get_type_info() const override { return type_info; }
+ Squeeze() = default;
+ Squeeze(const Output<Node>& data, const Output<Node>& axes);
+
+ bool visit_attributes(AttributeVisitor& visitor) override;
+ virtual OutputVector decompose_op() const override;
+ virtual void pre_validate_and_infer_types() override;
+ bool evaluate(const HostTensorVector& outputs,
+ const HostTensorVector& inputs) const override;
+
+ virtual std::shared_ptr<Node>
+ clone_with_new_inputs(const OutputVector& new_args) const override;
+ };
+ }
+ using v0::Squeeze;
+ }
+}
+
+NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/core/include/ngraph/op/tensor_iterator.hpp b/ngraph/core/include/ngraph/op/tensor_iterator.hpp
index a6e110f..2debfc6 100644
--- a/ngraph/core/include/ngraph/op/tensor_iterator.hpp
+++ b/ngraph/core/include/ngraph/op/tensor_iterator.hpp
@@ -24,6 +24,8 @@
 #include "ngraph/op/parameter.hpp"
 #include "ngraph/op/util/fused_op.hpp"
 
+NGRAPH_SUPPRESS_DEPRECATED_START
+
 namespace ngraph
 {
 namespace op
@@ -447,3 +449,5 @@ namespace ngraph
 std::vector<std::shared_ptr<op::TensorIterator::OutputDescription>>& m_ref;
 };
 }
+
+NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/core/include/ngraph/op/unsqueeze.hpp b/ngraph/core/include/ngraph/op/unsqueeze.hpp
new file mode 100644
index 0000000..8180d60
--- /dev/null
+++ b/ngraph/core/include/ngraph/op/unsqueeze.hpp
@@ -0,0 +1,57 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include <memory>
+
+#include "ngraph/axis_vector.hpp"
+#include "ngraph/node.hpp"
+#include "ngraph/op/op.hpp"
+#include "ngraph/op/util/fused_op.hpp"
+
+NGRAPH_SUPPRESS_DEPRECATED_START
+
+namespace ngraph
+{
+ namespace op
+ {
+ namespace v0
+ {
+ class NGRAPH_API Unsqueeze : public ngraph::op::util::FusedOp
+ {
+ public:
+ static constexpr NodeTypeInfo type_info{"Unsqueeze", 0};
+ const NodeTypeInfo& get_type_info() const override { return type_info; }
+ Unsqueeze() = default;
+ Unsqueeze(const Output<Node>& data, const Output<Node>& axes);
+
+ virtual void pre_validate_and_infer_types() override;
+ virtual OutputVector decompose_op() const override;
+
+ bool visit_attributes(AttributeVisitor& visitor) override;
+ bool evaluate(const HostTensorVector& outputs,
+ const HostTensorVector& inputs) const override;
+
+ virtual std::shared_ptr<Node>
+ clone_with_new_inputs(const OutputVector& new_args) const override;
+ };
+ }
+ using v0::Unsqueeze;
+ }
+}
+
+NGRAPH_SUPPRESS_DEPRECATED_END
diff --git a/ngraph/core/include/ngraph/op/util/fused_op.hpp b/ngraph/core/include/ngraph/op/util/fused_op.hpp
index c5fa628..7520fb9 100644
--- a/ngraph/core/include/ngraph/op/util/fused_op.hpp
+++ b/ngraph/core/include/ngraph/op/util/fused_op.hpp
@@ -27,7 +27,10 @@ namespace ngraph
 /// \brief Abstract base class for fused ops, i.e ops that can be broken down into core
 /// ngraph ops
 ///
-    class NGRAPH_API FusedOp : public Op
+    class NGRAPH_DEPRECATED(
+        "FusedOp approach was deprecated! "
+        "Please use inheritance from usual Op instead of FusedOp") NGRAPH_API FusedOp
+        : public Op
 {
 public:
 // Fused op decomposition can be performed in the presence of
diff --git a/ngraph/core/include/ngraph/ops.hpp b/ngraph/core/include/ngraph/ops.hpp
index 46a4bea..40bc285 100644
--- a/ngraph/core/include/ngraph/ops.hpp
+++ b/ngraph/core/include/ngraph/ops.hpp
@@ -36,6 +36,7 @@
 #include "ngraph/op/broadcast.hpp"
 #include "ngraph/op/bucketize.hpp"
 #include "ngraph/op/ceiling.hpp"
+#include "ngraph/op/clamp.hpp"
 #include "ngraph/op/concat.hpp"
 #include "ngraph/op/constant.hpp"
 #include "ngraph/op/convert.hpp"
@@ -48,6 +49,7 @@
 #include "ngraph/op/cum_sum.hpp"
 #include "ngraph/op/deformable_convolution.hpp"
 #include "ngraph/op/deformable_psroi_pooling.hpp"
+#include "ngraph/op/depth_to_space.hpp"
 #include "ngraph/op/dequantize.hpp"
 #include "ngraph/op/detection_output.hpp"
 #include "ngraph/op/divide.hpp"
@@ -60,50 +62,40 @@
 #include "ngraph/op/erf.hpp"
 #include "ngraph/op/exp.hpp"
 #include "ngraph/op/extractimagepatches.hpp"
+#include "ngraph/op/fake_quantize.hpp"
 #include "ngraph/op/floor.hpp"
 #include "ngraph/op/floor_mod.hpp"
-#include "ngraph/op/fused/clamp.hpp"
-#include "ngraph/op/fused/depth_to_space.hpp"
-#include "ngraph/op/fused/fake_quantize.hpp"
-#include "ngraph/op/fused/gelu.hpp"
-#include "ngraph/op/fused/grn.hpp"
-#include "ngraph/op/fused/gru_cell.hpp"
-#include "ngraph/op/fused/hard_sigmoid.hpp"
-#include "ngraph/op/fused/lstm_cell.hpp"
-#include "ngraph/op/fused/lstm_sequence.hpp"
-#include "ngraph/op/fused/matmul.hpp"
-#include "ngraph/op/fused/mod.hpp"
-#include "ngraph/op/fused/mvn.hpp"
-#include "ngraph/op/fused/normalize_l2.hpp"
-#include "ngraph/op/fused/prelu.hpp"
-#include "ngraph/op/fused/rnn_cell.hpp"
-#include "ngraph/op/fused/selu.hpp"
-#include "ngraph/op/fused/shuffle_channels.hpp"
-#include "ngraph/op/fused/space_to_depth.hpp"
-#include "ngraph/op/fused/squared_difference.hpp"
-#include "ngraph/op/fused/squeeze.hpp"
-#include "ngraph/op/fused/unsqueeze.hpp" #include "ngraph/op/gather.hpp" #include "ngraph/op/gather_nd.hpp" #include "ngraph/op/gather_tree.hpp" +#include "ngraph/op/gelu.hpp" #include "ngraph/op/greater.hpp" #include "ngraph/op/greater_eq.hpp" +#include "ngraph/op/grn.hpp" #include "ngraph/op/group_conv.hpp" +#include "ngraph/op/gru_cell.hpp" +#include "ngraph/op/hard_sigmoid.hpp" #include "ngraph/op/interpolate.hpp" #include "ngraph/op/less.hpp" #include "ngraph/op/less_eq.hpp" #include "ngraph/op/log.hpp" #include "ngraph/op/lrn.hpp" +#include "ngraph/op/lstm_cell.hpp" +#include "ngraph/op/lstm_sequence.hpp" +#include "ngraph/op/matmul.hpp" #include "ngraph/op/max.hpp" #include "ngraph/op/max_pool.hpp" #include "ngraph/op/maximum.hpp" #include "ngraph/op/min.hpp" #include "ngraph/op/minimum.hpp" #include "ngraph/op/mish.hpp" +#include "ngraph/op/mod.hpp" #include "ngraph/op/multiply.hpp" +#include "ngraph/op/mvn.hpp" #include "ngraph/op/negative.hpp" #include "ngraph/op/non_max_suppression.hpp" #include "ngraph/op/non_zero.hpp" +#include "ngraph/op/normalize_l2.hpp" #include "ngraph/op/not.hpp" #include "ngraph/op/not_equal.hpp" #include "ngraph/op/one_hot.hpp" @@ -112,6 +104,7 @@ #include "ngraph/op/parameter.hpp" #include "ngraph/op/passthrough.hpp" #include "ngraph/op/power.hpp" +#include "ngraph/op/prelu.hpp" #include "ngraph/op/prior_box.hpp" #include "ngraph/op/prior_box_clustered.hpp" #include "ngraph/op/product.hpp" @@ -135,6 +128,7 @@ #include "ngraph/op/result.hpp" #include "ngraph/op/reverse.hpp" #include "ngraph/op/reverse_sequence.hpp" +#include "ngraph/op/rnn_cell.hpp" #include "ngraph/op/roi_align.hpp" #include "ngraph/op/roi_pooling.hpp" #include "ngraph/op/round.hpp" @@ -142,7 +136,9 @@ #include "ngraph/op/scatter_nd_update.hpp" #include "ngraph/op/scatter_update.hpp" #include "ngraph/op/select.hpp" +#include "ngraph/op/selu.hpp" #include "ngraph/op/shape_of.hpp" +#include "ngraph/op/shuffle_channels.hpp" #include "ngraph/op/sigmoid.hpp" #include "ngraph/op/sign.hpp" #include "ngraph/op/sin.hpp" @@ -150,8 +146,11 @@ #include "ngraph/op/slice.hpp" #include "ngraph/op/softmax.hpp" #include "ngraph/op/space_to_batch.hpp" +#include "ngraph/op/space_to_depth.hpp" #include "ngraph/op/split.hpp" #include "ngraph/op/sqrt.hpp" +#include "ngraph/op/squared_difference.hpp" +#include "ngraph/op/squeeze.hpp" #include "ngraph/op/stop_gradient.hpp" #include "ngraph/op/strided_slice.hpp" #include "ngraph/op/subtract.hpp" @@ -163,6 +162,7 @@ #include "ngraph/op/tile.hpp" #include "ngraph/op/topk.hpp" #include "ngraph/op/transpose.hpp" +#include "ngraph/op/unsqueeze.hpp" #include "ngraph/op/util/attr_types.hpp" #include "ngraph/op/util/op_types.hpp" #include "ngraph/op/variadic_split.hpp" diff --git a/ngraph/core/src/builder/reshape.cpp b/ngraph/core/src/builder/reshape.cpp index 26ce7bf..99995e2 100644 --- a/ngraph/core/src/builder/reshape.cpp +++ b/ngraph/core/src/builder/reshape.cpp @@ -23,11 +23,11 @@ #include "ngraph/builder/reshape.hpp" #include "ngraph/op/concat.hpp" #include "ngraph/op/constant.hpp" -#include "ngraph/op/fused/squeeze.hpp" #include "ngraph/op/product.hpp" #include "ngraph/op/reduce_prod.hpp" #include "ngraph/op/reshape.hpp" #include "ngraph/op/shape_of.hpp" +#include "ngraph/op/squeeze.hpp" #include "ngraph/op/transpose.hpp" #include "ngraph/op/variadic_split.hpp" #include "ngraph/opsets/opset1.hpp" diff --git a/ngraph/core/src/op/fused/clamp.cpp b/ngraph/core/src/op/clamp.cpp similarity index 99% rename from ngraph/core/src/op/fused/clamp.cpp rename to 
ngraph/core/src/op/clamp.cpp index d5d0571..154f7ce 100644 --- a/ngraph/core/src/op/fused/clamp.cpp +++ b/ngraph/core/src/op/clamp.cpp @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. //***************************************************************************** -#include "ngraph/op/fused/clamp.hpp" +#include "ngraph/op/clamp.hpp" #include "ngraph/builder/make_constant.hpp" #include "ngraph/itt.hpp" @@ -25,6 +25,8 @@ using namespace std; using namespace ngraph; +NGRAPH_SUPPRESS_DEPRECATED_START + constexpr NodeTypeInfo op::Clamp::type_info; namespace diff --git a/ngraph/core/src/op/fused/depth_to_space.cpp b/ngraph/core/src/op/depth_to_space.cpp similarity index 99% rename from ngraph/core/src/op/fused/depth_to_space.cpp rename to ngraph/core/src/op/depth_to_space.cpp index e4eb0be..3b31d40 100644 --- a/ngraph/core/src/op/fused/depth_to_space.cpp +++ b/ngraph/core/src/op/depth_to_space.cpp @@ -25,6 +25,8 @@ using namespace std; using namespace ngraph; +NGRAPH_SUPPRESS_DEPRECATED_START + constexpr NodeTypeInfo op::DepthToSpace::type_info; op::DepthToSpace::DepthToSpace(const Output& data, diff --git a/ngraph/core/src/op/fused/fake_quantize.cpp b/ngraph/core/src/op/fake_quantize.cpp similarity index 99% rename from ngraph/core/src/op/fused/fake_quantize.cpp rename to ngraph/core/src/op/fake_quantize.cpp index 3e3542a..5646920 100644 --- a/ngraph/core/src/op/fused/fake_quantize.cpp +++ b/ngraph/core/src/op/fake_quantize.cpp @@ -37,6 +37,8 @@ using namespace std; using namespace ngraph; +NGRAPH_SUPPRESS_DEPRECATED_START + NGRAPH_RTTI_DEFINITION(op::FakeQuantize, "FakeQuantize", 0); op::FakeQuantize::FakeQuantize(const Output& data, diff --git a/ngraph/core/src/op/fused/stack.cpp b/ngraph/core/src/op/fused/stack.cpp deleted file mode 100644 index b5f2caa..0000000 --- a/ngraph/core/src/op/fused/stack.cpp +++ /dev/null @@ -1,95 +0,0 @@ -//***************************************************************************** -// Copyright 2017-2020 Intel Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-//*****************************************************************************
-#include <memory>
-#include <numeric>
-
-#include "matmul.hpp"
-#include "ngraph/builder/reshape.hpp"
-#include "ngraph/op/concat.hpp"
-#include "ngraph/op/fused/stack.hpp"
-#include "ngraph/op/reshape.hpp"
-
-using namespace std;
-using namespace ngraph;
-
-constexpr NodeTypeInfo op::Stack::type_info;
-
-op::Stack::Stack(const OutputVector& args, int64_t axis)
- : FusedOp(OutputVector{args})
- , m_axis(axis)
-{
- constructor_validate_and_infer_types();
-}
-
-op::Stack::Stack(const NodeVector& args, int64_t axis)
- : Stack(as_output_vector(args), axis)
-{
-}
-
-shared_ptr<Node> op::Stack::clone_with_new_inputs(const OutputVector& new_args) const
-{
- return make_shared<Stack>(new_args, m_axis);
-}
-
-void op::Stack::pre_validate_and_infer_types()
-{
- bool is_input_dynamic = false;
-
- for (size_t i = 0; i < get_input_size(); ++i)
- {
- if (get_input_partial_shape(i).is_dynamic())
- {
- is_input_dynamic = true;
- break;
- }
- }
-
- if (is_input_dynamic)
- {
- set_output_type(0, get_input_element_type(0), PartialShape::dynamic());
- }
-}
-
-OutputVector op::Stack::decompose_op() const
-{
- auto axis = get_axis();
- std::vector<std::shared_ptr<Node>> args;
- PartialShape inputs_shape_scheme{PartialShape::dynamic()};
- for (size_t i = 0; i < get_input_size(); ++i)
- {
- PartialShape this_input_shape = get_input_partial_shape(i);
- NODE_VALIDATION_CHECK(
- this,
- PartialShape::merge_into(inputs_shape_scheme, this_input_shape),
- "Argument shapes are inconsistent; they must have the same rank, and must have ",
- "equal dimension everywhere except on the concatenation axis (axis ",
- axis,
- ").");
- }
-
- for (size_t i = 0; i < get_input_size(); ++i)
- {
- auto data = input_value(i);
- auto data_shape = data.get_shape();
- axis = (axis < 0) ? axis + data_shape.size() + 1 : axis;
- data_shape.insert(data_shape.begin() + axis, 1);
- std::vector<size_t> input_order(data_shape.size() - 1);
- std::iota(std::begin(input_order), std::end(input_order), 0);
- args.push_back(std::make_shared<op::Reshape>(data, AxisVector(input_order), data_shape));
- }
- auto concat = std::make_shared<op::Concat>(args, axis);
- return {concat};
-}
diff --git a/ngraph/core/src/op/fused/gelu.cpp b/ngraph/core/src/op/gelu.cpp
similarity index 97%
rename from ngraph/core/src/op/fused/gelu.cpp
rename to ngraph/core/src/op/gelu.cpp
index 9ef04d0..786f124 100644
--- a/ngraph/core/src/op/fused/gelu.cpp
+++ b/ngraph/core/src/op/gelu.cpp
@@ -21,7 +21,7 @@
 #include "ngraph/op/divide.hpp"
 #include "ngraph/op/erf.hpp"
 #include "ngraph/op/exp.hpp"
-#include "ngraph/op/fused/gelu.hpp"
+#include "ngraph/op/gelu.hpp"
 #include "ngraph/op/multiply.hpp"
 #include "ngraph/op/negative.hpp"
 #include "ngraph/op/subtract.hpp"
@@ -29,6 +29,8 @@
 using namespace std;
 using namespace ngraph;
 
+NGRAPH_SUPPRESS_DEPRECATED_START
+
 constexpr NodeTypeInfo op::Gelu::type_info;
 
 op::Gelu::Gelu(const Output<Node>& data)
diff --git a/ngraph/core/src/op/fused/grn.cpp b/ngraph/core/src/op/grn.cpp
similarity index 98%
rename from ngraph/core/src/op/fused/grn.cpp
rename to ngraph/core/src/op/grn.cpp
index 27a2875..b176dfe 100644
--- a/ngraph/core/src/op/fused/grn.cpp
+++ b/ngraph/core/src/op/grn.cpp
@@ -29,6 +29,8 @@
 using namespace std;
 using namespace ngraph;
 
+NGRAPH_SUPPRESS_DEPRECATED_START
+
 constexpr NodeTypeInfo op::GRN::type_info;
 
 op::GRN::GRN(const Output<Node>& data, float bias)
diff --git a/ngraph/core/src/op/group_conv.cpp b/ngraph/core/src/op/group_conv.cpp
index 7ab5a56..68ef2ff 100644
--- a/ngraph/core/src/op/group_conv.cpp
+++ b/ngraph/core/src/op/group_conv.cpp
@@ -29,6 +29,8 @@
 using namespace std;
 using namespace ngraph;
 
+NGRAPH_SUPPRESS_DEPRECATED_START
+
 //------------------------------------------------------------------------------
 //                        v1::GroupConvolution
 //------------------------------------------------------------------------------
diff --git a/ngraph/core/src/op/fused/gru_cell.cpp b/ngraph/core/src/op/gru_cell.cpp
similarity index 99%
rename from ngraph/core/src/op/fused/gru_cell.cpp
rename to ngraph/core/src/op/gru_cell.cpp
index 3bad7cc..fb1a327 100644
--- a/ngraph/core/src/op/fused/gru_cell.cpp
+++ b/ngraph/core/src/op/gru_cell.cpp
@@ -21,13 +21,15 @@
 #include "ngraph/builder/split.hpp"
 #include "ngraph/op/constant.hpp"
 #include "ngraph/op/dot.hpp"
-#include "ngraph/op/fused/gru_cell.hpp"
+#include "ngraph/op/gru_cell.hpp"
 #include "ngraph/shape.hpp"
 #include "ngraph/type/element_type.hpp"
 
 using namespace std;
 using namespace ngraph;
 
+NGRAPH_SUPPRESS_DEPRECATED_START
+
 constexpr NodeTypeInfo op::v3::GRUCell::type_info;
 
 op::v3::GRUCell::GRUCell()
diff --git a/ngraph/core/src/op/fused/hard_sigmoid.cpp b/ngraph/core/src/op/hard_sigmoid.cpp
similarity index 98%
rename from ngraph/core/src/op/fused/hard_sigmoid.cpp
rename to ngraph/core/src/op/hard_sigmoid.cpp
index 4988f1c..89d3e30 100644
--- a/ngraph/core/src/op/fused/hard_sigmoid.cpp
+++ b/ngraph/core/src/op/hard_sigmoid.cpp
@@ -18,7 +18,7 @@
 
 #include "ngraph/op/add.hpp"
 #include "ngraph/op/constant.hpp"
-#include "ngraph/op/fused/hard_sigmoid.hpp"
+#include "ngraph/op/hard_sigmoid.hpp"
 #include "ngraph/op/maximum.hpp"
 #include "ngraph/op/minimum.hpp"
 #include "ngraph/op/multiply.hpp"
@@ -27,6 +27,8 @@
 using namespace std;
 using namespace ngraph;
 
+NGRAPH_SUPPRESS_DEPRECATED_START
+
 constexpr NodeTypeInfo op::HardSigmoid::type_info;
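 // Editor's note (illustrative, not part of this patch): HardSigmoid computes
 // y = max(0, min(1, alpha * x + beta)) elementwise, with alpha and beta
 // supplied as the op's second and third inputs.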
op::HardSigmoid::HardSigmoid(const Output& data, diff --git a/ngraph/core/src/op/fused/lstm_cell.cpp b/ngraph/core/src/op/lstm_cell.cpp similarity index 99% rename from ngraph/core/src/op/fused/lstm_cell.cpp rename to ngraph/core/src/op/lstm_cell.cpp index f90de39..2e1fb3b 100644 --- a/ngraph/core/src/op/fused/lstm_cell.cpp +++ b/ngraph/core/src/op/lstm_cell.cpp @@ -24,13 +24,15 @@ #include "ngraph/op/concat.hpp" #include "ngraph/op/constant.hpp" #include "ngraph/op/dot.hpp" -#include "ngraph/op/fused/lstm_cell.hpp" +#include "ngraph/op/lstm_cell.hpp" #include "ngraph/shape.hpp" #include "ngraph/type/element_type.hpp" using namespace std; using namespace ngraph; +NGRAPH_SUPPRESS_DEPRECATED_START + constexpr NodeTypeInfo op::LSTMCell::type_info; op::LSTMCell::LSTMCell() diff --git a/ngraph/core/src/op/fused/lstm_sequence.cpp b/ngraph/core/src/op/lstm_sequence.cpp similarity index 99% rename from ngraph/core/src/op/fused/lstm_sequence.cpp rename to ngraph/core/src/op/lstm_sequence.cpp index 94daf30..34f7744 100644 --- a/ngraph/core/src/op/fused/lstm_sequence.cpp +++ b/ngraph/core/src/op/lstm_sequence.cpp @@ -14,7 +14,7 @@ // limitations under the License. //***************************************************************************** -#include "ngraph/op/fused/lstm_sequence.hpp" +#include "ngraph/op/lstm_sequence.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/builder/autobroadcast.hpp" diff --git a/ngraph/core/src/op/fused/matmul.cpp b/ngraph/core/src/op/matmul.cpp similarity index 99% rename from ngraph/core/src/op/fused/matmul.cpp rename to ngraph/core/src/op/matmul.cpp index 239231c..941b114 100644 --- a/ngraph/core/src/op/fused/matmul.cpp +++ b/ngraph/core/src/op/matmul.cpp @@ -27,6 +27,8 @@ using namespace std; using namespace ngraph; +NGRAPH_SUPPRESS_DEPRECATED_START + NGRAPH_RTTI_DEFINITION(op::MatMul, "MatMul", 0); op::MatMul::MatMul(const Output& A, diff --git a/ngraph/core/src/op/fused/mod.cpp b/ngraph/core/src/op/mod.cpp similarity index 97% rename from ngraph/core/src/op/fused/mod.cpp rename to ngraph/core/src/op/mod.cpp index 3593b5b..3028453 100644 --- a/ngraph/core/src/op/fused/mod.cpp +++ b/ngraph/core/src/op/mod.cpp @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
//***************************************************************************** -#include "ngraph/op/fused/mod.hpp" +#include "ngraph/op/mod.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/builder/make_constant.hpp" #include "ngraph/op/abs.hpp" @@ -26,6 +26,8 @@ using namespace std; using namespace ngraph; +NGRAPH_SUPPRESS_DEPRECATED_START + constexpr NodeTypeInfo op::v1::Mod::type_info; op::v1::Mod::Mod(const Output& A, diff --git a/ngraph/core/src/op/fused/mvn.cpp b/ngraph/core/src/op/mvn.cpp similarity index 99% rename from ngraph/core/src/op/fused/mvn.cpp rename to ngraph/core/src/op/mvn.cpp index dc327a9..27c5914 100644 --- a/ngraph/core/src/op/fused/mvn.cpp +++ b/ngraph/core/src/op/mvn.cpp @@ -27,6 +27,8 @@ using namespace std; using namespace ngraph; +NGRAPH_SUPPRESS_DEPRECATED_START + constexpr NodeTypeInfo op::MVN::type_info; op::MVN::MVN(const Output& data, bool across_channels, bool normalize_variance, double eps) diff --git a/ngraph/core/src/op/fused/normalize_l2.cpp b/ngraph/core/src/op/normalize_l2.cpp similarity index 98% rename from ngraph/core/src/op/fused/normalize_l2.cpp rename to ngraph/core/src/op/normalize_l2.cpp index 8e5b3a7..9951049 100644 --- a/ngraph/core/src/op/fused/normalize_l2.cpp +++ b/ngraph/core/src/op/normalize_l2.cpp @@ -21,13 +21,15 @@ #include "ngraph/builder/reshape.hpp" #include "ngraph/op/constant.hpp" #include "ngraph/op/divide.hpp" -#include "ngraph/op/fused/normalize_l2.hpp" #include "ngraph/op/multiply.hpp" +#include "ngraph/op/normalize_l2.hpp" #include "ngraph/op/util/op_types.hpp" using namespace std; using namespace ngraph; +NGRAPH_SUPPRESS_DEPRECATED_START + constexpr NodeTypeInfo op::NormalizeL2::type_info; op::NormalizeL2::NormalizeL2(const Output& data, diff --git a/ngraph/core/src/op/fused/prelu.cpp b/ngraph/core/src/op/prelu.cpp similarity index 98% rename from ngraph/core/src/op/fused/prelu.cpp rename to ngraph/core/src/op/prelu.cpp index 979a5e7..4b142c2 100644 --- a/ngraph/core/src/op/fused/prelu.cpp +++ b/ngraph/core/src/op/prelu.cpp @@ -14,7 +14,7 @@ // limitations under the License. 
//***************************************************************************** -#include "ngraph/op/fused/prelu.hpp" +#include "ngraph/op/prelu.hpp" #include #include "ngraph/itt.hpp" @@ -30,6 +30,8 @@ using namespace std; using namespace ngraph; +NGRAPH_SUPPRESS_DEPRECATED_START + constexpr NodeTypeInfo op::PRelu::type_info; op::PRelu::PRelu(const Output& data, const Output& slope) diff --git a/ngraph/core/src/op/fused/rnn_cell.cpp b/ngraph/core/src/op/rnn_cell.cpp similarity index 99% rename from ngraph/core/src/op/fused/rnn_cell.cpp rename to ngraph/core/src/op/rnn_cell.cpp index 0579e95..65ab2ba 100644 --- a/ngraph/core/src/op/fused/rnn_cell.cpp +++ b/ngraph/core/src/op/rnn_cell.cpp @@ -22,13 +22,15 @@ #include "ngraph/op/add.hpp" #include "ngraph/op/constant.hpp" #include "ngraph/op/dot.hpp" -#include "ngraph/op/fused/rnn_cell.hpp" +#include "ngraph/op/rnn_cell.hpp" #include "ngraph/shape.hpp" #include "ngraph/type/element_type.hpp" using namespace std; using namespace ngraph; +NGRAPH_SUPPRESS_DEPRECATED_START + constexpr NodeTypeInfo op::RNNCell::type_info; op::RNNCell::RNNCell() diff --git a/ngraph/core/src/op/fused/selu.cpp b/ngraph/core/src/op/selu.cpp similarity index 97% rename from ngraph/core/src/op/fused/selu.cpp rename to ngraph/core/src/op/selu.cpp index 372282f..2b8eddc 100644 --- a/ngraph/core/src/op/fused/selu.cpp +++ b/ngraph/core/src/op/selu.cpp @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. //***************************************************************************** -#include "ngraph/op/fused/selu.hpp" +#include "ngraph/op/selu.hpp" #include "ngraph/op/add.hpp" #include "ngraph/op/constant.hpp" @@ -26,6 +26,8 @@ using namespace std; using namespace ngraph; +NGRAPH_SUPPRESS_DEPRECATED_START + constexpr NodeTypeInfo op::v0::Selu::type_info; op::v0::Selu::Selu(const Output& data, const Output& alpha, const Output& lambda) diff --git a/ngraph/core/src/op/fused/shuffle_channels.cpp b/ngraph/core/src/op/shuffle_channels.cpp similarity index 98% rename from ngraph/core/src/op/fused/shuffle_channels.cpp rename to ngraph/core/src/op/shuffle_channels.cpp index ade7c81..9b9a23c 100644 --- a/ngraph/core/src/op/fused/shuffle_channels.cpp +++ b/ngraph/core/src/op/shuffle_channels.cpp @@ -14,13 +14,15 @@ // limitations under the License. 
//***************************************************************************** -#include "ngraph/op/fused/shuffle_channels.hpp" +#include "ngraph/op/shuffle_channels.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/builder/reshape.hpp" using namespace std; using namespace ngraph; +NGRAPH_SUPPRESS_DEPRECATED_START + constexpr NodeTypeInfo op::ShuffleChannels::type_info; op::ShuffleChannels::ShuffleChannels(const Output& data, diff --git a/ngraph/core/src/op/fused/space_to_depth.cpp b/ngraph/core/src/op/space_to_depth.cpp similarity index 99% rename from ngraph/core/src/op/fused/space_to_depth.cpp rename to ngraph/core/src/op/space_to_depth.cpp index 4a3f9ab..26a0736 100644 --- a/ngraph/core/src/op/fused/space_to_depth.cpp +++ b/ngraph/core/src/op/space_to_depth.cpp @@ -25,6 +25,8 @@ using namespace std; using namespace ngraph; +NGRAPH_SUPPRESS_DEPRECATED_START + constexpr NodeTypeInfo op::SpaceToDepth::type_info; op::SpaceToDepth::SpaceToDepth(const Output& data, diff --git a/ngraph/core/src/op/split.cpp b/ngraph/core/src/op/split.cpp index 05861a3..103707d 100644 --- a/ngraph/core/src/op/split.cpp +++ b/ngraph/core/src/op/split.cpp @@ -28,6 +28,8 @@ using namespace std; using namespace ngraph; +NGRAPH_SUPPRESS_DEPRECATED_START + constexpr NodeTypeInfo op::v0::Split::type_info; op::v0::Split::Split(const Output& data, const Output& axis, const size_t num_split) diff --git a/ngraph/core/src/op/fused/squared_difference.cpp b/ngraph/core/src/op/squared_difference.cpp similarity index 96% rename from ngraph/core/src/op/fused/squared_difference.cpp rename to ngraph/core/src/op/squared_difference.cpp index afe70f3..0e9410e 100644 --- a/ngraph/core/src/op/fused/squared_difference.cpp +++ b/ngraph/core/src/op/squared_difference.cpp @@ -14,7 +14,7 @@ // limitations under the License. 
//***************************************************************************** -#include "ngraph/op/fused/squared_difference.hpp" +#include "ngraph/op/squared_difference.hpp" #include "ngraph/attribute_visitor.hpp" #include "ngraph/node.hpp" #include "ngraph/op/multiply.hpp" @@ -24,6 +24,8 @@ using namespace std; using namespace ngraph; +NGRAPH_SUPPRESS_DEPRECATED_START + constexpr NodeTypeInfo op::SquaredDifference::type_info; op::SquaredDifference::SquaredDifference(const Output& x1, diff --git a/ngraph/core/src/op/fused/squeeze.cpp b/ngraph/core/src/op/squeeze.cpp similarity index 99% rename from ngraph/core/src/op/fused/squeeze.cpp rename to ngraph/core/src/op/squeeze.cpp index 9e8ba99..57b765a 100644 --- a/ngraph/core/src/op/fused/squeeze.cpp +++ b/ngraph/core/src/op/squeeze.cpp @@ -20,14 +20,16 @@ #include "ngraph/itt.hpp" #include "ngraph/op/constant.hpp" -#include "ngraph/op/fused/squeeze.hpp" #include "ngraph/op/reshape.hpp" +#include "ngraph/op/squeeze.hpp" #include "ngraph/runtime/reference/copy.hpp" #include "ngraph/validation_util.hpp" using namespace std; using namespace ngraph; +NGRAPH_SUPPRESS_DEPRECATED_START + constexpr NodeTypeInfo op::Squeeze::type_info; op::Squeeze::Squeeze(const Output& data, const Output& axes) diff --git a/ngraph/core/src/op/tensor_iterator.cpp b/ngraph/core/src/op/tensor_iterator.cpp index 03a5d9b..e411df3 100644 --- a/ngraph/core/src/op/tensor_iterator.cpp +++ b/ngraph/core/src/op/tensor_iterator.cpp @@ -22,6 +22,8 @@ using namespace std; using namespace ngraph; +NGRAPH_SUPPRESS_DEPRECATED_START + constexpr NodeTypeInfo op::v0::TensorIterator::type_info; constexpr DiscreteTypeInfo op::v0::TensorIterator::SliceInputDescription::type_info; diff --git a/ngraph/core/src/op/fused/unsqueeze.cpp b/ngraph/core/src/op/unsqueeze.cpp similarity index 98% rename from ngraph/core/src/op/fused/unsqueeze.cpp rename to ngraph/core/src/op/unsqueeze.cpp index 4711f37..892fac2 100644 --- a/ngraph/core/src/op/fused/unsqueeze.cpp +++ b/ngraph/core/src/op/unsqueeze.cpp @@ -19,8 +19,8 @@ #include "ngraph/itt.hpp" #include "ngraph/op/constant.hpp" -#include "ngraph/op/fused/unsqueeze.hpp" #include "ngraph/op/reshape.hpp" +#include "ngraph/op/unsqueeze.hpp" #include "ngraph/op/util/op_types.hpp" #include "ngraph/runtime/reference/copy.hpp" #include "ngraph/validation_util.hpp" @@ -28,6 +28,8 @@ using namespace std; using namespace ngraph; +NGRAPH_SUPPRESS_DEPRECATED_START + constexpr NodeTypeInfo op::Unsqueeze::type_info; op::Unsqueeze::Unsqueeze(const Output& data, const Output& axes) diff --git a/ngraph/core/src/op/util/activation_functions.cpp b/ngraph/core/src/op/util/activation_functions.cpp index 495ad73..bb88f10 100644 --- a/ngraph/core/src/op/util/activation_functions.cpp +++ b/ngraph/core/src/op/util/activation_functions.cpp @@ -21,7 +21,7 @@ #include "activation_functions.hpp" #include "ngraph/op/constant.hpp" -#include "ngraph/op/fused/hard_sigmoid.hpp" +#include "ngraph/op/hard_sigmoid.hpp" #include "ngraph/op/relu.hpp" #include "ngraph/op/sigmoid.hpp" #include "ngraph/op/tanh.hpp" diff --git a/ngraph/core/src/op/util/fused_op.cpp b/ngraph/core/src/op/util/fused_op.cpp index e3bdb2a..349091d 100644 --- a/ngraph/core/src/op/util/fused_op.cpp +++ b/ngraph/core/src/op/util/fused_op.cpp @@ -20,6 +20,8 @@ using namespace ngraph; +NGRAPH_SUPPRESS_DEPRECATED_START + op::util::FusedOp::FusedOp() : Op() { diff --git a/ngraph/core/src/op/util/op_types.cpp b/ngraph/core/src/op/util/op_types.cpp index 8a159e6..5a3d219 100644 --- a/ngraph/core/src/op/util/op_types.cpp +++ 
b/ngraph/core/src/op/util/op_types.cpp
@@ -65,7 +65,9 @@ bool ngraph::op::supports_auto_broadcast(const ngraph::Node* node)
 
 bool ngraph::op::supports_decompose(const ngraph::Node* node)
 {
+    NGRAPH_SUPPRESS_DEPRECATED_START
     return dynamic_cast<const ngraph::op::util::FusedOp*>(node) != nullptr;
+    NGRAPH_SUPPRESS_DEPRECATED_END
 }
 
 bool ngraph::op::is_op(const ngraph::Node* node)
diff --git a/ngraph/core/src/op/util/rnn_cell_base.cpp b/ngraph/core/src/op/util/rnn_cell_base.cpp
index 43fed86..c149251 100644
--- a/ngraph/core/src/op/util/rnn_cell_base.cpp
+++ b/ngraph/core/src/op/util/rnn_cell_base.cpp
@@ -20,7 +20,7 @@
 
 #include "ngraph/attribute_visitor.hpp"
 #include "ngraph/op/add.hpp"
-#include "ngraph/op/fused/clamp.hpp"
+#include "ngraph/op/clamp.hpp"
 #include "ngraph/op/multiply.hpp"
 #include "ngraph/op/subtract.hpp"
 #include "ngraph/op/util/rnn_cell_base.hpp"
diff --git a/ngraph/core/src/pass/constant_folding_gather.cpp b/ngraph/core/src/pass/constant_folding_gather.cpp
index 2d154ce..b30bd85 100644
--- a/ngraph/core/src/pass/constant_folding_gather.cpp
+++ b/ngraph/core/src/pass/constant_folding_gather.cpp
@@ -16,8 +16,8 @@
 
 #include "constant_folding.hpp"
 #include "ngraph/op/concat.hpp"
-#include "ngraph/op/fused/squeeze.hpp"
 #include "ngraph/op/gather.hpp"
+#include "ngraph/op/squeeze.hpp"
 #include "ngraph/runtime/reference/gather.hpp"
 
 using namespace std;
diff --git a/ngraph/core/src/pass/nop_elimination.cpp b/ngraph/core/src/pass/nop_elimination.cpp
index 78d9fac..37d3a64 100644
--- a/ngraph/core/src/pass/nop_elimination.cpp
+++ b/ngraph/core/src/pass/nop_elimination.cpp
@@ -26,15 +26,15 @@
 #include "ngraph/op/concat.hpp"
 #include "ngraph/op/constant.hpp"
 #include "ngraph/op/convert.hpp"
-#include "ngraph/op/fused/squeeze.hpp"
-#include "ngraph/op/fused/unsqueeze.hpp"
 #include "ngraph/op/non_zero.hpp"
 #include "ngraph/op/pad.hpp"
 #include "ngraph/op/reshape.hpp"
 #include "ngraph/op/shape_of.hpp"
 #include "ngraph/op/slice.hpp"
+#include "ngraph/op/squeeze.hpp"
 #include "ngraph/op/stop_gradient.hpp"
 #include "ngraph/op/sum.hpp"
+#include "ngraph/op/unsqueeze.hpp"
 #include "ngraph/op/util/op_types.hpp"
 #include "ngraph/opsets/opset3.hpp"
 #include "ngraph/util.hpp"
diff --git a/ngraph/core/src/validation_util.cpp b/ngraph/core/src/validation_util.cpp
index 828841f..bb78025 100644
--- a/ngraph/core/src/validation_util.cpp
+++ b/ngraph/core/src/validation_util.cpp
@@ -19,10 +19,10 @@
 #include "ngraph/evaluator.hpp"
 #include "ngraph/op/concat.hpp"
 #include "ngraph/op/convert.hpp"
-#include "ngraph/op/fused/squeeze.hpp"
-#include "ngraph/op/fused/unsqueeze.hpp"
 #include "ngraph/op/min.hpp"
 #include "ngraph/op/minimum.hpp"
+#include "ngraph/op/squeeze.hpp"
+#include "ngraph/op/unsqueeze.hpp"
 #include "ngraph/runtime/host_tensor.hpp"
 #include "ngraph/shape.hpp"
 #include "ngraph/type/element_type_traits.hpp"
diff --git a/ngraph/frontend/onnx_import/src/op/gemm.cpp b/ngraph/frontend/onnx_import/src/op/gemm.cpp
index 05739bb..0243af2 100644
--- a/ngraph/frontend/onnx_import/src/op/gemm.cpp
+++ b/ngraph/frontend/onnx_import/src/op/gemm.cpp
@@ -20,7 +20,7 @@
 #include "ngraph/builder/reshape.hpp"
 #include "ngraph/op/add.hpp"
 #include "ngraph/op/constant.hpp"
-#include "ngraph/op/fused/matmul.hpp"
+#include "ngraph/op/matmul.hpp"
 #include "ngraph/op/multiply.hpp"
 #include "onnx_import/default_opset.hpp"
 
diff --git a/ngraph/frontend/onnx_import/src/op/loop.cpp b/ngraph/frontend/onnx_import/src/op/loop.cpp
index 865b985..8c1ba80 100644
--- a/ngraph/frontend/onnx_import/src/op/loop.cpp
+++ b/ngraph/frontend/onnx_import/src/op/loop.cpp
@@
-26,6 +26,8 @@ #include "onnx_import/exceptions.hpp" #include "onnx_import/utils/reshape.hpp" +NGRAPH_SUPPRESS_DEPRECATED_START + namespace ngraph { namespace onnx_import diff --git a/ngraph/frontend/onnx_import/src/op/lstm.cpp b/ngraph/frontend/onnx_import/src/op/lstm.cpp index 8a78824..83a2d82 100644 --- a/ngraph/frontend/onnx_import/src/op/lstm.cpp +++ b/ngraph/frontend/onnx_import/src/op/lstm.cpp @@ -27,7 +27,7 @@ #include "ngraph/enum_names.hpp" #include "ngraph/op/add.hpp" #include "ngraph/op/constant.hpp" -#include "ngraph/op/fused/lstm_sequence.hpp" +#include "ngraph/op/lstm_sequence.hpp" #include "ngraph/op/util/attr_types.hpp" #include "ngraph/shape.hpp" #include "ngraph/type/element_type.hpp" diff --git a/ngraph/frontend/onnx_import/src/op/mean_variance_normalization.cpp b/ngraph/frontend/onnx_import/src/op/mean_variance_normalization.cpp index 57f7bf5..943bbdb 100644 --- a/ngraph/frontend/onnx_import/src/op/mean_variance_normalization.cpp +++ b/ngraph/frontend/onnx_import/src/op/mean_variance_normalization.cpp @@ -18,7 +18,7 @@ #include "mean_variance_normalization.hpp" #include "ngraph/axis_set.hpp" -#include "ngraph/op/fused/mvn.hpp" +#include "ngraph/op/mvn.hpp" #include "ngraph/validation_util.hpp" #include "onnx_import/default_opset.hpp" diff --git a/ngraph/frontend/onnx_import/src/op/mod.cpp b/ngraph/frontend/onnx_import/src/op/mod.cpp index ec37081..1b6fe4a 100644 --- a/ngraph/frontend/onnx_import/src/op/mod.cpp +++ b/ngraph/frontend/onnx_import/src/op/mod.cpp @@ -18,7 +18,7 @@ #include "mod.hpp" #include "ngraph/op/abs.hpp" -#include "ngraph/op/fused/mod.hpp" +#include "ngraph/op/mod.hpp" #include "ngraph/op/util/attr_types.hpp" #include "onnx_import/default_opset.hpp" #include "onnx_import/exceptions.hpp" diff --git a/ngraph/frontend/onnx_import/src/op/selu.cpp b/ngraph/frontend/onnx_import/src/op/selu.cpp index 98c2efc..76f27dc 100644 --- a/ngraph/frontend/onnx_import/src/op/selu.cpp +++ b/ngraph/frontend/onnx_import/src/op/selu.cpp @@ -18,7 +18,7 @@ #include #include "ngraph/op/constant.hpp" -#include "ngraph/op/fused/selu.hpp" +#include "ngraph/op/selu.hpp" #include "onnx_import/default_opset.hpp" #include "selu.hpp" diff --git a/ngraph/frontend/onnx_import/src/op/squeeze.cpp b/ngraph/frontend/onnx_import/src/op/squeeze.cpp index 63921e6..035f590 100644 --- a/ngraph/frontend/onnx_import/src/op/squeeze.cpp +++ b/ngraph/frontend/onnx_import/src/op/squeeze.cpp @@ -14,7 +14,7 @@ // limitations under the License. 
 //*****************************************************************************
 
-#include "ngraph/op/fused/squeeze.hpp"
+#include "ngraph/op/squeeze.hpp"
 #include "ngraph/op/constant.hpp"
 #include "ngraph/validation_util.hpp"
 #include "onnx_import/default_opset.hpp"
diff --git a/ngraph/python/tests/__init__.py b/ngraph/python/tests/__init__.py
index 37fbf8d..2853a4e 100644
--- a/ngraph/python/tests/__init__.py
+++ b/ngraph/python/tests/__init__.py
@@ -76,7 +76,7 @@ xfail_issue_36481 = xfail_test(reason="TypeError: _get_node_factory() takes from
 xfail_issue_36483 = xfail_test(reason="RuntimeError: Unsupported primitive of type: "
                                "Ceiling name: Ceiling_22669")
 xfail_issue_36485 = xfail_test(reason="RuntimeError: Check 'm_group >= 1' failed at "
-                               "/openvino/ngraph/src/ngraph/op/fused/shuffle_channels.cpp:77:")
+                               "/openvino/ngraph/core/src/op/shuffle_channels.cpp:77:")
 xfail_issue_36486 = xfail_test(reason="RuntimeError: HardSigmoid operation should be converted "
                                "to HardSigmoid_IE")
 xfail_issue_36487 = xfail_test(reason="Assertion error - mvn operator computation mismatch")
diff --git a/ngraph/test/attributes.cpp b/ngraph/test/attributes.cpp
index 08bc882..7d0afd7 100644
--- a/ngraph/test/attributes.cpp
+++ b/ngraph/test/attributes.cpp
@@ -28,6 +28,8 @@ using namespace ngraph;
 using ngraph::test::NodeBuilder;
 using ngraph::test::ValueMap;
 
+NGRAPH_SUPPRESS_DEPRECATED_START
+
 TEST(attributes, value_map)
 {
     ValueMap value_map;
diff --git a/ngraph/test/eval.cpp b/ngraph/test/eval.cpp
index f9c7a6b..bd67c6e 100644
--- a/ngraph/test/eval.cpp
+++ b/ngraph/test/eval.cpp
@@ -39,8 +39,6 @@
 #include "ngraph/op/erf.hpp"
 #include "ngraph/op/exp.hpp"
 #include "ngraph/op/floor.hpp"
-#include "ngraph/op/fused/squeeze.hpp"
-#include "ngraph/op/fused/unsqueeze.hpp"
 #include "ngraph/op/gather.hpp"
 #include "ngraph/op/log.hpp"
 #include "ngraph/op/max_pool.hpp"
@@ -62,11 +60,13 @@
 #include "ngraph/op/sin.hpp"
 #include "ngraph/op/sinh.hpp"
 #include "ngraph/op/sqrt.hpp"
+#include "ngraph/op/squeeze.hpp"
 #include "ngraph/op/stop_gradient.hpp"
 #include "ngraph/op/tan.hpp"
 #include "ngraph/op/tanh.hpp"
 #include "ngraph/op/topk.hpp"
 #include "ngraph/op/transpose.hpp"
+#include "ngraph/op/unsqueeze.hpp"
 #include "ngraph/runtime/host_tensor.hpp"
 #include "ngraph/validation_util.hpp"
 #include "util/all_close_f.hpp"
diff --git a/ngraph/test/op_eval/matmul.cpp b/ngraph/test/op_eval/matmul.cpp
index 3cd60db..49cabb9 100644
--- a/ngraph/test/op_eval/matmul.cpp
+++ b/ngraph/test/op_eval/matmul.cpp
@@ -19,7 +19,7 @@
 
 #include "gtest/gtest.h"
 
-#include "ngraph/op/fused/matmul.hpp"
+#include "ngraph/op/matmul.hpp"
 #include "ngraph/runtime/host_tensor.hpp"
 #include "util/all_close_f.hpp"
 
diff --git a/ngraph/test/op_is.cpp b/ngraph/test/op_is.cpp
index 2b199ec..bf35547 100644
--- a/ngraph/test/op_is.cpp
+++ b/ngraph/test/op_is.cpp
@@ -25,6 +25,8 @@
 
 using namespace ngraph;
 
+NGRAPH_SUPPRESS_DEPRECATED_START
+
 namespace
 {
     void op_is_Abs()
diff --git a/ngraph/test/opset1.cpp b/ngraph/test/opset1.cpp
index 0393c11..eb9bade 100644
--- a/ngraph/test/opset1.cpp
+++ b/ngraph/test/opset1.cpp
@@ -28,6 +28,8 @@
 using namespace std;
 using namespace ngraph;
 
+NGRAPH_SUPPRESS_DEPRECATED_START
+
 namespace
 {
     string capitulate(string name)
diff --git a/ngraph/test/runtime/op/group_conv.cpp b/ngraph/test/runtime/op/group_conv.cpp
index cd39ce5..cd14a8c 100644
--- a/ngraph/test/runtime/op/group_conv.cpp
+++ b/ngraph/test/runtime/op/group_conv.cpp
@@ -30,6 +30,8 @@
 using namespace std;
 using namespace ngraph;
 
+NGRAPH_SUPPRESS_DEPRECATED_START
+
 //------------------------------------------------------------------------------
 // v0::GroupConvolution
 //------------------------------------------------------------------------------
diff --git a/ngraph/test/runtime/op/group_conv.hpp b/ngraph/test/runtime/op/group_conv.hpp
index 6226f10..bc6cb33 100644
--- a/ngraph/test/runtime/op/group_conv.hpp
+++ b/ngraph/test/runtime/op/group_conv.hpp
@@ -22,6 +22,8 @@
 #include "ngraph/op/util/attr_types.hpp"
 #include "ngraph/op/util/fused_op.hpp"
 
+NGRAPH_SUPPRESS_DEPRECATED_START
+
 namespace ngraph
 {
     namespace op
@@ -136,3 +138,5 @@ namespace ngraph
     }
 } // namespace op
 } // namespace ngraph
+
+NGRAPH_SUPPRESS_DEPRECATED_END
-- 
2.7.4
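
Note on the NGRAPH_SUPPRESS_DEPRECATED_START/NGRAPH_SUPPRESS_DEPRECATED_END pairs
added throughout this patch: they bracket the call sites that must still touch the
now-deprecated FusedOp machinery, so the tree keeps building cleanly with
deprecation warnings enabled. The authoritative definitions live in nGraph's
deprecated.hpp header; the sketch below only illustrates the usual pragma-based
mechanism behind such macros and may not match the real spelling exactly.

    // Hedged sketch of START/END deprecation-suppression macros. The real
    // nGraph macros are defined in ngraph/core/include/ngraph/deprecated.hpp;
    // the exact pragmas used there are an assumption on this sketch's part.
    #if defined(__GNUC__) || defined(__clang__)
    // GCC/Clang: save the diagnostic state, silence -Wdeprecated-declarations,
    // and restore the saved state when the guarded region ends.
    #define NGRAPH_SUPPRESS_DEPRECATED_START                                     \
        _Pragma("GCC diagnostic push")                                           \
        _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
    #define NGRAPH_SUPPRESS_DEPRECATED_END _Pragma("GCC diagnostic pop")
    #elif defined(_MSC_VER)
    // MSVC: C4996 is the "was declared deprecated" warning.
    #define NGRAPH_SUPPRESS_DEPRECATED_START                                     \
        __pragma(warning(push)) __pragma(warning(disable : 4996))
    #define NGRAPH_SUPPRESS_DEPRECATED_END __pragma(warning(pop))
    #else
    // Unknown compiler: the guards compile away to nothing.
    #define NGRAPH_SUPPRESS_DEPRECATED_START
    #define NGRAPH_SUPPRESS_DEPRECATED_END
    #endif

This is why, for example, supports_decompose() in op_types.cpp can keep its
dynamic_cast to the deprecated FusedOp base: the warning is silenced only for
that one statement, while every other use of the class still warns. Downstream
code migrates by replacing "ngraph/op/fused/xxx.hpp" includes with
"ngraph/op/xxx.hpp"; judging by the shrunken fused/*.hpp files in the diffstat
and the "Added temp headers" note in the commit message, the old fused/ paths
appear to remain for now as thin deprecated forwarding headers.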