From 41da44ec0716117f80f6d4653a18160dfb069c6c Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Fri, 31 Jul 2020 13:00:28 +0300 Subject: [PATCH] Removed v0 convolution and group convolution (#1512) --- ngraph/src/ngraph/builder/split.hpp | 1 + ngraph/src/ngraph/op/convolution.cpp | 639 ---------------- ngraph/src/ngraph/op/convolution.hpp | 311 -------- ngraph/src/ngraph/op/group_conv.cpp | 302 -------- ngraph/src/ngraph/op/group_conv.hpp | 114 +-- ngraph/src/ngraph/op/op_version_tbl.hpp | 4 - ngraph/src/ngraph/serializer.cpp | 172 ----- ngraph/src/ngraph/validation_util.hpp | 2 + ngraph/test/CMakeLists.txt | 1 - ngraph/test/backend/convolution.in.cpp | 57 +- ngraph/test/backend/fused_op.in.cpp | 143 ++-- ngraph/test/backend/group_convolution.in.cpp | 3 +- ngraph/test/build_graph.cpp | 17 - ngraph/test/op_is.cpp | 10 +- ngraph/test/opset_pass/convolution_opset_pass.cpp | 2 + ngraph/test/runtime/CMakeLists.txt | 4 + ngraph/test/runtime/interpreter/int_executable.hpp | 8 +- ngraph/test/runtime/op/convolution.cpp | 356 +++++++++ ngraph/test/runtime/op/convolution.hpp | 314 ++++++++ ngraph/test/runtime/op/group_conv.cpp | 333 ++++++++ ngraph/test/runtime/op/group_conv.hpp | 138 ++++ ngraph/test/runtime/opset0.hpp | 2 + ngraph/test/runtime/opset0_downgrade.cpp | 18 +- ngraph/test/runtime/opset0_tbl.hpp | 8 +- ngraph/test/runtime/opset1_upgrade.cpp | 6 +- ngraph/test/type_prop/convolution.cpp | 834 +++++++++++---------- ngraph/test/type_prop/group_convolution.cpp | 107 --- ngraph/test/type_prop_benchmark.cpp | 75 -- 28 files changed, 1703 insertions(+), 2278 deletions(-) create mode 100644 ngraph/test/runtime/op/convolution.cpp create mode 100644 ngraph/test/runtime/op/convolution.hpp create mode 100644 ngraph/test/runtime/op/group_conv.cpp create mode 100644 ngraph/test/runtime/op/group_conv.hpp delete mode 100644 ngraph/test/type_prop_benchmark.cpp diff --git a/ngraph/src/ngraph/builder/split.hpp b/ngraph/src/ngraph/builder/split.hpp index 08ae980..e2bce38 100644 --- a/ngraph/src/ngraph/builder/split.hpp +++ b/ngraph/src/ngraph/builder/split.hpp @@ -49,6 +49,7 @@ namespace ngraph /// /// \return The vector containing multiple outputs we split input node into. 
/// + NGRAPH_API OutputVector split(const Output& value, size_t split_parts, int axis = 0); namespace opset1 diff --git a/ngraph/src/ngraph/op/convolution.cpp b/ngraph/src/ngraph/op/convolution.cpp index afd6670..2df1851 100644 --- a/ngraph/src/ngraph/op/convolution.cpp +++ b/ngraph/src/ngraph/op/convolution.cpp @@ -458,642 +458,3 @@ shared_ptr m_output_padding); } } - -// *** Convolution OP SET 0 *** -constexpr NodeTypeInfo op::v0::Convolution::type_info; - -op::v0::Convolution::Convolution(const Output& data_batch, - const Output& filters, - const Strides& window_movement_strides, - const Strides& window_dilation_strides, - const CoordinateDiff& padding_below, - const CoordinateDiff& padding_above, - const Strides& data_dilation_strides, - const PadType& pad_type) - : Op({data_batch, filters}) - , m_window_movement_strides(window_movement_strides) - , m_window_dilation_strides(window_dilation_strides) - , m_padding_below(padding_below) - , m_padding_above(padding_above) - , m_data_dilation_strides(data_dilation_strides) - , m_pad_type(pad_type) -{ - constructor_validate_and_infer_types(); -} - -bool op::v0::Convolution::visit_attributes(AttributeVisitor& visitor) -{ - visitor.on_attribute("window_movement_strides", m_window_movement_strides); - visitor.on_attribute("window_dilation_strides", m_window_dilation_strides); - visitor.on_attribute("data_dilation_strides", m_data_dilation_strides); - visitor.on_attribute("padding_below", m_padding_below); - visitor.on_attribute("padding_above", m_padding_above); - visitor.on_attribute("pad_type", m_pad_type); - return true; -} - -void op::v0::Convolution::validate_and_infer_types() -{ - const PartialShape& data_batch_shape = get_input_partial_shape(0); - element::Type data_batch_et = get_input_element_type(0); - const PartialShape& filters_shape = get_input_partial_shape(1); - element::Type filters_et = get_input_element_type(1); - - if (m_data_dilation_strides.size() == 0) - { - m_data_dilation_strides = conv_default_strides(this, data_batch_shape, filters_shape); - } - - if (m_window_movement_strides.size() == 0) - { - m_window_movement_strides = conv_default_strides(this, data_batch_shape, filters_shape); - } - - if (m_window_dilation_strides.size() == 0) - { - m_window_dilation_strides = conv_default_strides(this, data_batch_shape, filters_shape); - } - - if (m_padding_below.size() == 0) - { - m_padding_below = conv_default_padding(this, data_batch_shape, filters_shape); - } - - if (m_padding_above.size() == 0) - { - m_padding_above = conv_default_padding(this, data_batch_shape, filters_shape); - } - - if (m_pad_type == PadType::SAME_UPPER || m_pad_type == PadType::SAME_LOWER) - { - if (data_batch_shape.is_static() && filters_shape.is_static()) - { - // TODO: data dilation - m_padding_below.clear(); - m_padding_above.clear(); - auto filter_shape = filters_shape.to_shape(); - filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2); // Remove {O,I} - infer_auto_padding(data_batch_shape.to_shape(), - filter_shape, - m_window_movement_strides, - m_window_dilation_strides, - m_pad_type, - m_padding_above, - m_padding_below); - } - } - - element::Type result_et; - PartialShape result_shape; - - NODE_VALIDATION_CHECK( - this, - element::Type::merge(result_et, data_batch_et, filters_et), - "Element types for data batch and filters do not match (data batch element type: ", - data_batch_et, - ", filters element type: ", - filters_et, - ")."); - - result_shape = infer_convolution_forward(this, - data_batch_shape, - m_data_dilation_strides, 
- m_padding_below, - m_padding_above, - filters_shape, - m_window_movement_strides, - m_window_dilation_strides); - - set_output_type(0, result_et, result_shape); -} - -op::v0::Convolution::Convolution(const Output& data_batch, - const Output& filters, - const Strides& window_movement_strides, - const Strides& window_dilation_strides, - const CoordinateDiff& padding_below, - const CoordinateDiff& padding_above) - : Convolution(data_batch, - filters, - window_movement_strides, - window_dilation_strides, - padding_below, - padding_above, - Strides()) -{ -} - -op::v0::Convolution::Convolution(const Output& data_batch, - const Output& filters, - const Strides& window_movement_strides, - const Strides& window_dilation_strides) - : Convolution(data_batch, - filters, - window_movement_strides, - window_dilation_strides, - CoordinateDiff(), - CoordinateDiff()) -{ -} - -op::v0::Convolution::Convolution(const Output& data_batch, - const Output& filters, - const Strides& window_movement_strides) - : Convolution(data_batch, - filters, - window_movement_strides, - Strides(), - CoordinateDiff(), - CoordinateDiff()) -{ -} - -op::v0::Convolution::Convolution(const Output& data_batch, const Output& filters) - : Convolution(data_batch, filters, Strides(), Strides(), CoordinateDiff(), CoordinateDiff()) -{ -} - -shared_ptr op::v0::Convolution::clone_with_new_inputs(const OutputVector& new_args) const -{ - check_new_args_count(this, new_args); - return make_shared(new_args.at(0), - new_args.at(1), - m_window_movement_strides, - m_window_dilation_strides, - m_padding_below, - m_padding_above, - m_data_dilation_strides, - m_pad_type); -} - -constexpr NodeTypeInfo op::v0::ConvolutionBackpropData::type_info; -shared_ptr op::v0::Convolution::get_default_value() const -{ - return ngraph::make_constant_from_string("0", get_element_type(), get_shape()); -} - -op::v0::ConvolutionBackpropData::ConvolutionBackpropData( - const Shape& data_batch_shape, - const Output& filters, - const Output& output_delta, - const Strides& window_movement_strides_forward, - const Strides& window_dilation_strides_forward, - const CoordinateDiff& padding_below_forward, - const CoordinateDiff& padding_above_forward, - const Strides& data_dilation_strides_forward) - : Op({filters, output_delta}) - , m_data_batch_shape(data_batch_shape) - , m_window_movement_strides_forward(window_movement_strides_forward) - , m_window_dilation_strides_forward(window_dilation_strides_forward) - , m_padding_below_forward(padding_below_forward) - , m_padding_above_forward(padding_above_forward) - , m_data_dilation_strides_forward(data_dilation_strides_forward) -{ - constructor_validate_and_infer_types(); -} - -bool op::v0::ConvolutionBackpropData::visit_attributes(AttributeVisitor& visitor) -{ - visitor.on_attribute("data_batch_shape", m_data_batch_shape); - visitor.on_attribute("window_movement_strides_forward", m_window_movement_strides_forward); - visitor.on_attribute("window_dilation_strides_forward", m_window_dilation_strides_forward); - visitor.on_attribute("padding_below_forward", m_padding_below_forward); - visitor.on_attribute("padding_above_forward", m_padding_above_forward); - visitor.on_attribute("data_dilation_strides_forward", m_data_dilation_strides_forward); - return true; -} - -void op::v0::ConvolutionBackpropData::validate_and_infer_types() -{ - // Backprop to data is itself convolution, with inputs/outputs/attributes transmogrified as - // follows. 
- // - // Forward Backward - // "N" axis for data batch 0 0 - // "C" axis for data batch 1 1 - // "Co" axis for filters 0 0 - // "Ci" axis for filters 1 1 - // "N" axis for output 0 0 - // "C" axis for output 1 1 - // Data batch x delta - // Data batch shape S_x S_o - // Filters f reverse(f) [on spatial axes] - // Filters shape S_f S_f - // Window movement strides q_x p_x - // Window dilation strides p_f p_f - // Padding below a_x (S_f - 1)p_f - a_x - // Padding above b_x (S_f - 1)p_f + - // + ((a_x + (S_x - 1)p_x + b_x - (S_f - 1)p_f) - // % q_x) - // - b_x - // Data dilation strides p_x q_x - // Output shape S_o S_x - // - // To _validate_, we simply need to check/infer the output shape of the forward convolution, - // then check to make sure that the incoming delta has the same shape as the forward output. - const PartialShape& filters_shape = get_input_partial_shape(0); - element::Type filters_et = get_input_element_type(0); - const PartialShape& delta_shape = get_input_partial_shape(1); - element::Type delta_et = get_input_element_type(1); - - element::Type forward_result_et; - PartialShape forward_result_shape; - - NODE_VALIDATION_CHECK( - this, - element::Type::merge(forward_result_et, delta_et, filters_et), - "Element types for data batch and filters do not match (data batch element type: ", - delta_et, - ", filters element type: ", - filters_et, - ")."); - - forward_result_shape = infer_convolution_forward(this, - m_data_batch_shape, - m_data_dilation_strides_forward, - m_padding_below_forward, - m_padding_above_forward, - filters_shape, - m_window_movement_strides_forward, - m_window_dilation_strides_forward); - - NODE_VALIDATION_CHECK(this, - forward_result_shape.compatible(delta_shape), - "Inferred forward output shape (", - forward_result_shape, - ") does not match shape of ", - "delta (", - delta_shape, - ")."); - - set_output_type(0, forward_result_et, m_data_batch_shape); -} - -shared_ptr - op::v0::ConvolutionBackpropData::clone_with_new_inputs(const OutputVector& new_args) const -{ - check_new_args_count(this, new_args); - return make_shared(m_data_batch_shape, - new_args.at(0), - new_args.at(1), - m_window_movement_strides_forward, - m_window_dilation_strides_forward, - m_padding_below_forward, - m_padding_above_forward, - m_data_dilation_strides_forward); -} - -CoordinateDiff op::v0::ConvolutionBackpropData::compute_backward_delta_out_pad_below() const -{ - auto& in_shape = get_data_batch_shape(); - auto& filter_dilation = get_window_dilation_strides_forward(); - auto& filter_shape = get_input_shape(0); - auto& in_pad_below = get_padding_below_forward(); - size_t spatial_dim_count = static_cast(in_shape.size()) - 2; - - CoordinateDiff backward_delta_out_pad_below; - backward_delta_out_pad_below.resize(spatial_dim_count); - - for (size_t i = 0; i < spatial_dim_count; i++) - { - backward_delta_out_pad_below[i] = - (static_cast(filter_shape[i + 2]) - 1) * filter_dilation[i] - - in_pad_below[i]; - } - return backward_delta_out_pad_below; -} - -CoordinateDiff op::v0::ConvolutionBackpropData::compute_backward_delta_out_pad_above() const -{ - auto& in_shape = get_data_batch_shape(); - auto& filter_dilation = get_window_dilation_strides_forward(); - auto& filter_shape = get_input_shape(0); - auto& in_pad_below = get_padding_below_forward(); - auto& in_pad_above = get_padding_above_forward(); - auto& in_dilation = get_data_dilation_strides_forward(); - auto& stride = get_window_movement_strides_forward(); - size_t spatial_dim_count = static_cast(in_shape.size()) - 2; - - 
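// Editorial worked example (not part of the patch): the two padding formulas in the
// table above can be checked on a small 1-D case. Take S_x = 5, S_f = 3, filter
// dilation p_f = 1, data dilation p_x = 1, pads a_x = b_x = 1, stride q_x = 2, so the
// forward output is S_o = 3. Then the backward pads are
//   below = (S_f - 1) * p_f - a_x = 2 - 1 = 1
//   above = (S_f - 1) * p_f + ((a_x + (S_x - 1) * p_x + b_x - (S_f - 1) * p_f) % q_x) - b_x
//         = 2 + (4 % 2) - 1 = 1
// and indeed dilating the delta by q_x gives (3 - 1) * 2 + 1 = 5, padding by 1 + 1 gives 7,
// and a stride-1 convolution with the reversed size-3 filter recovers S_x = 5.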
CoordinateDiff backward_delta_out_pad_above; - backward_delta_out_pad_above.resize(spatial_dim_count); - - for (size_t i = 0; i < spatial_dim_count; i++) - { - backward_delta_out_pad_above[i] = - (static_cast(filter_shape[i + 2]) - 1) * filter_dilation[i] + - ((in_pad_below[i] + ((in_shape[i + 2]) - 1) * in_dilation[i] + in_pad_above[i] - - (static_cast(filter_shape[i + 2]) - 1) * filter_dilation[i]) % - stride[i]) - - in_pad_above[i]; - } - return backward_delta_out_pad_above; -} - -// -// This is a legacy function, retained because the CPU backend uses it for now. -// TODO(amprocte): Update CPU backend to use the new stuff in validation_util.hpp, and remove this -// function. -// -Shape op::util::infer_convolution_output_shape(const Node* node, - const Shape& data_batch_shape, - const Shape& filters_shape, - const Strides& window_movement_strides, - const Strides& window_dilation_strides, - const CoordinateDiff& padding_below, - const CoordinateDiff& padding_above, - const Strides& data_dilation_strides, - size_t batch_axis_data, - size_t input_channel_axis_data, - size_t input_channel_axis_filters, - size_t output_channel_axis_filters, - size_t batch_axis_result, - size_t output_channel_axis_result) -{ - NODE_VALIDATION_CHECK(node, batch_axis_data <= 1, "(This is an internal nGraph error)"); - NODE_VALIDATION_CHECK(node, input_channel_axis_data <= 1, "(This is an internal nGraph error)"); - NODE_VALIDATION_CHECK( - node, input_channel_axis_filters <= 1, "(This is an internal nGraph error)"); - NODE_VALIDATION_CHECK( - node, output_channel_axis_filters <= 1, "(This is an internal nGraph error)"); - NODE_VALIDATION_CHECK(node, batch_axis_result <= 1, "(This is an internal nGraph error)"); - NODE_VALIDATION_CHECK( - node, output_channel_axis_result <= 1, "(This is an internal nGraph error)"); - - // - // Make sure data_batch: NCiDi for some Di of rank>0, N != 0, Ci != 0. - // - NODE_VALIDATION_CHECK(node, - data_batch_shape.size() >= 3, - "Data batch input must have rank of at least 3 (one batch axis, ", - "one input-channel axis, and at least one spatial dimension) ", - "(data batch shape: ", - data_batch_shape, - ")."); - - size_t batch_size = data_batch_shape[batch_axis_data]; - NODE_VALIDATION_CHECK(node, - batch_size != 0, - "Data batch size is zero (data batch shape: ", - data_batch_shape, - ", ", - "batch axis is axis ", - batch_axis_data, - ")."); - - size_t input_channel_count = data_batch_shape[input_channel_axis_data]; - NODE_VALIDATION_CHECK(node, - input_channel_count != 0, - "Input channel count is zero (data batch shape: ", - data_batch_shape, - ", ", - "channel axis is axis ", - input_channel_axis_data, - ")."); - - size_t spatial_dimension_count = data_batch_shape.size() - 2; - - // - // Make sure filters: CoCiWv for some Co>0, rank of W = rank of Di. 
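// Editorial note (illustration only): for a 2-D convolution the data batch is
// [N, Ci, H, W] (rank 4), so the filters must also be rank 4, i.e. [Co, Ci, Fh, Fw].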
- // - NODE_VALIDATION_CHECK( - node, - filters_shape.size() == 2 + spatial_dimension_count, - "Filter input must have rank equal to the data batch (one axis for output ", - "channels, one axis for input channels, and the same number of spatial ", - "dimensions as the data batch (filter input shape: ", - filters_shape, - ", ", - "data batch shape: ", - data_batch_shape, - ")."); - - size_t output_channel_count = filters_shape[output_channel_axis_filters]; - NODE_VALIDATION_CHECK(node, - output_channel_count != 0, - "Output channel count for filters is zero (filters shape: ", - filters_shape, - ", ", - "output channels on axis ", - output_channel_axis_filters, - ")."); - - NODE_VALIDATION_CHECK(node, - filters_shape[input_channel_axis_filters] == input_channel_count, - "Input channel count for filters (", - filters_shape[input_channel_axis_filters], - ") ", - "does not match the number of channels in the data batch (", - input_channel_count, - ") ", - "(filter input shape: ", - filters_shape, - ", filter input channels on axis ", - input_channel_axis_filters, - "; data batch shape: ", - data_batch_shape, - ", data batch channels on axis ", - batch_axis_data, - ")."); - - // - // Make sure window movement strides, window dilation strides, and data dilation strides - // have same rank as Di. - // - NODE_VALIDATION_CHECK( - node, - window_movement_strides.size() == spatial_dimension_count, - "Rank of window movement strides does not match the number of spatial dimensions (", - spatial_dimension_count, - ") in the data batch (window movement strides: ", - window_movement_strides, - ", data batch shape: ", - data_batch_shape, - ")."); - - NODE_VALIDATION_CHECK( - node, - window_dilation_strides.size() == spatial_dimension_count, - "Rank of window dilation strides does not match the number of spatial dimensions (", - spatial_dimension_count, - ") in the data batch (window dilation strides: ", - window_dilation_strides, - ", data batch shape: ", - data_batch_shape, - ")."); - - NODE_VALIDATION_CHECK( - node, - data_dilation_strides.size() == spatial_dimension_count, - "Rank of data dilation strides does not match the number of spatial dimensions (", - spatial_dimension_count, - ") in the data batch (data dilation strides: ", - data_dilation_strides, - ", data batch shape: ", - data_batch_shape, - ")."); - - // - // Make sure padding-below and padding-above shapes have same rank as Di. - // - NODE_VALIDATION_CHECK( - node, - padding_below.size() == spatial_dimension_count, - "Rank of the padding below does not match the number of spatial dimensions (", - spatial_dimension_count, - ") in the data batch (padding below: ", - padding_below, - ", data batch shape: ", - data_batch_shape, - ")."); - - NODE_VALIDATION_CHECK( - node, - padding_above.size() == spatial_dimension_count, - "Rank of the padding above does not match the number of spatial dimensions (", - spatial_dimension_count, - ") in the data batch (padding above: ", - padding_above, - ", data batch shape: ", - data_batch_shape, - ")."); - - // - // Extract input item shape Di and make sure all dimensions are larger than 0 after padding and - // dilation. 
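// Editorial worked example (not from the patch): a spatial dimension of size 4 with
// data dilation stride 2 becomes (4 - 1) * 2 + 1 = 7 after dilation; with padding
// below 1 and above 1, the "virtual" size checked below is 1 + 7 + 1 = 9.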
- // - std::vector input_item_virtual_shape_signed; - - for (size_t i = 0; i < spatial_dimension_count; i++) - { - NODE_VALIDATION_CHECK(node, - data_dilation_strides[i] != 0, - "Data dilation stride at spatial dimension ", - i, - " is zero ", - "(data dilation strides: ", - data_dilation_strides, - ")."); - - size_t dim_size = data_batch_shape[1 + 1 + i]; - size_t dilated_dim_size = (dim_size - 1) * data_dilation_strides[i] + 1; - - ptrdiff_t padded_dilated_dim_size = padding_below[i] + dilated_dim_size + padding_above[i]; - - input_item_virtual_shape_signed.push_back(padded_dilated_dim_size); - } - - Shape input_item_virtual_shape; - - for (size_t i = 0; i < spatial_dimension_count; i++) - { - NODE_VALIDATION_CHECK(node, - input_item_virtual_shape_signed[i] > 0, - "Input dimension after padding and dilation is non-positive ", - "at spatial axis ", - i, - " (post-padding/dilation input item shape: ", - input_item_virtual_shape, - ", data batch shape: ", - data_batch_shape, - ", data dilation strides: ", - data_dilation_strides, - ", padding below: ", - padding_below, - ", padding above: ", - padding_above, - ")."); - - input_item_virtual_shape.push_back(size_t(input_item_virtual_shape_signed[i])); - } - - // - // Extract the physical shape Wp of the convolution window, *not* including dilation, from the - // filter dimensions. At the same time, make sure window shape dimensions are all larger than - // 0. - // - Shape window_physical_shape; - - for (size_t i = 0; i < spatial_dimension_count; i++) - { - window_physical_shape.push_back(filters_shape[1 + 1 + i]); - NODE_VALIDATION_CHECK(node, - window_physical_shape[i] != 0, - "Filters shape at spatial dimension ", - i, - " is zero ", - "(filters shape: ", - filters_shape, - ")."); - } - - // - // Compute virtual shape Wp of the convolution window, *including* dilation. At the same time, - // make sure all window dilation strides are larger than 0, and that the dilated filter fits - // within the spatial dimensions. - // - Shape window_virtual_shape; - - for (size_t i = 0; i < spatial_dimension_count; i++) - { - NODE_VALIDATION_CHECK(node, - window_dilation_strides[i] != 0, - "Window dilation stride at spatial dimension ", - i, - " is zero ", - "(window dilation strides: ", - window_dilation_strides, - ")."); - - window_virtual_shape.push_back((window_physical_shape[i] - 1) * window_dilation_strides[i] + - 1); - - NODE_VALIDATION_CHECK( - node, - window_virtual_shape[i] <= input_item_virtual_shape[i], - "Post-dilation window shape is smaller than the post-padding/dilation ", - "input item shape at spatial dimension ", - i, - " (post-padding/dilation ", - "input item shape: ", - input_item_virtual_shape, - ", data batch shape: ", - data_batch_shape, - ", data dilation strides: ", - data_dilation_strides, - ", padding below: ", - padding_below, - ", padding above: ", - padding_above, - ", post-dilation window shape: ", - window_virtual_shape, - ", filters shape: ", - filters_shape, - ", window dilation strides: ", - window_dilation_strides); - } - - // - // Construct result shape: NCoDo or CoNDo (depending on *_axis_result), checking at the same - // time that all window movement strides are larger than 0. 
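// Editorial worked example (continuing the numbers above, not from the patch): with a
// virtual input item size of 9, a virtual (dilated) window size of 3 and window
// movement stride 2, the spatial output size computed below is
// ceil_div(9 - 3 + 1, 2) = ceil_div(7, 2) = 4.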
- // - Shape result_shape(spatial_dimension_count + 2); - result_shape[batch_axis_result] = batch_size; - result_shape[output_channel_axis_result] = output_channel_count; - - for (size_t i = 0; i < spatial_dimension_count; i++) - { - NODE_VALIDATION_CHECK(node, - window_movement_strides[i] != 0, - "Window movement stride at spatial dimension ", - i, - " is zero ", - "(window movement strides: ", - window_movement_strides, - ")."); - - result_shape[i + 2] = ceil_div(input_item_virtual_shape[i] - window_virtual_shape[i] + 1, - window_movement_strides[i]); - } - - return result_shape; -} diff --git a/ngraph/src/ngraph/op/convolution.hpp b/ngraph/src/ngraph/op/convolution.hpp index 63e85f1..e476415 100644 --- a/ngraph/src/ngraph/op/convolution.hpp +++ b/ngraph/src/ngraph/op/convolution.hpp @@ -217,316 +217,5 @@ namespace ngraph CoordinateDiff m_output_padding; }; } // namespace v1 - - namespace v0 - { - /// \brief Batched convolution operation, with optional window dilation and stride. - /// - class NGRAPH_API Convolution : public Op - { - public: - static constexpr NodeTypeInfo type_info{"Convolution", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } - /// \brief Constructs a batched convolution operation. - Convolution() = default; - /// \brief Constructs a batched convolution operation. - /// - /// \param data_batch The node producing the input data batch tensor.
-            /// `[N, C_IN, D1, ... Df]`
-            /// \param filters The node producing the filters tensor.
-            /// `[C_OUT, C_IN, F1, ... Ff]`
-            /// \param window_movement_strides The window movement strides.
-            /// `[f]`
-            /// \param window_dilation_strides The window dilation strides.
-            /// `[f]`
-            /// \param padding_below The padding-below sizes.
-            /// `[f]`
-            /// \param padding_above The padding-above sizes.
-            /// `[f]`
-            /// \param data_dilation_strides The data dilation strides.
-            /// `[f]`
-            /// \param pad_type The pad type for automatically computing padding sizes.
-            /// `[f]`
-            ///
-            /// Output `[N, C_OUT, R1, ... Rf]`
-            ///
-            Convolution(const Output<Node>& data_batch,
-                        const Output<Node>& filters,
-                        const Strides& window_movement_strides,
-                        const Strides& window_dilation_strides,
-                        const CoordinateDiff& padding_below,
-                        const CoordinateDiff& padding_above,
-                        const Strides& data_dilation_strides,
-                        const PadType& pad_type = PadType::EXPLICIT);
-
-            /// \brief Constructs a batched convolution operation with no data dilation (i.e.,
-            ///        all data dilation strides are 1).
-            ///
-            /// \param data_batch The node producing the input data batch tensor.
-            /// `[N, C_IN, D1, ... Df]`
-            /// \param filters The node producing the filters tensor.
-            /// `[C_OUT, C_IN, F1, ... Ff]`
-            /// \param window_movement_strides The window movement strides.
-            /// `[f]`
-            /// \param window_dilation_strides The window dilation strides.
-            /// `[f]`
-            /// \param padding_below The padding-below sizes.
-            /// `[f]`
-            /// \param padding_above The padding-above sizes.
-            /// `[f]`
-            ///
-            /// Output `[N, C_OUT, R1, ... Rf]`
-            ///
-            Convolution(const Output<Node>& data_batch,
-                        const Output<Node>& filters,
-                        const Strides& window_movement_strides,
-                        const Strides& window_dilation_strides,
-                        const CoordinateDiff& padding_below,
-                        const CoordinateDiff& padding_above);
-
-            /// \brief Constructs a batched convolution operation with no padding or data
-            ///        dilation (i.e., padding above and below are 0 everywhere, and all data
-            ///        dilation strides are 1).
-            ///
-            /// \param data_batch The node producing the input data batch tensor.
-            /// `[N, C_IN, D1, ... Df]`
-            /// \param filters The node producing the filters tensor.
-            /// `[C_OUT, C_IN, F1, ... Ff]`
-            /// \param window_movement_strides The window movement strides.
-            /// `[f]`
-            /// \param window_dilation_strides The window dilation strides.
-            /// `[f]`
-            ///
-            /// Output `[N, C_OUT, R1, ... Rf]`
-            ///
-            Convolution(const Output<Node>& data_batch,
-                        const Output<Node>& filters,
-                        const Strides& window_movement_strides,
-                        const Strides& window_dilation_strides);
-
-            /// \brief Constructs a batched convolution operation with no window dilation,
-            ///        padding, or data dilation (i.e., padding above and below are 0
-            ///        everywhere, and all window/data dilation strides are 1).
-            ///
-            /// \param data_batch The node producing the input data batch tensor.
-            /// `[N, C_IN, D1, ... Df]`
-            /// \param filters The node producing the filters tensor.
-            /// `[C_OUT, C_IN, F1, ... Ff]`
-            /// \param window_movement_strides The window movement strides.
-            /// `[f]`
-            ///
-            /// Output `[N, C_OUT, R1, ... Rf]`
-            ///
-            Convolution(const Output<Node>& data_batch,
-                        const Output<Node>& filters,
-                        const Strides& window_movement_strides);
-
-            /// \brief Constructs a batched convolution operation with no window dilation or
-            ///        movement stride (i.e., padding above and below are 0 everywhere, and all
-            ///        window/data dilation strides and window movement strides are 1).
-            ///
-            /// \param data_batch The node producing the input data batch tensor.
-            /// `[N, C_IN, D1, ... Df]`
-            /// \param filters The node producing the filters tensor.
- /// `[C_OUT, C_IN, F1, ... Ff]` - /// - /// Output `[N, C_OUT, R1, ... Rf]` - /// - Convolution(const Output& data_batch, const Output& filters); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \return The window movement strides. - const Strides& get_window_movement_strides() const - { - return m_window_movement_strides; - } - void set_window_movement_strides(const Strides& window_movement_strides) - { - m_window_movement_strides = window_movement_strides; - } - /// \return The window dilation strides. - const Strides& get_window_dilation_strides() const - { - return m_window_dilation_strides; - } - void set_window_dilation_strides(const Strides& window_dilation_strides) - { - m_window_dilation_strides = window_dilation_strides; - } - /// \return The padding-below sizes (possibly negative). - const CoordinateDiff& get_padding_below() const { return m_padding_below; } - void set_padding_below(const CoordinateDiff& padding_below) - { - m_padding_below = padding_below; - } - /// \return The padding-above sizes (possibly negative). - const CoordinateDiff& get_padding_above() const { return m_padding_above; } - void set_adding_above(const CoordinateDiff& padding_above) - { - m_padding_above = padding_above; - } - /// \return The input data dilation strides. - const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; } - void set_data_dilation_strides(const Strides& data_dilation_strides) - { - m_data_dilation_strides = data_dilation_strides; - } - /// \return The pad type for convolution. - const PadType& get_pad_type() const { return m_pad_type; } - void set_pad_type(const PadType& pad_type) { m_pad_type = pad_type; } - /// \return The default value for Convolution. - virtual std::shared_ptr get_default_value() const override; - - protected: - Strides m_window_movement_strides; - Strides m_window_dilation_strides; - CoordinateDiff m_padding_below; - CoordinateDiff m_padding_above; - Strides m_data_dilation_strides; - PadType m_pad_type; - }; - - /// \brief Data batch backprop for batched convolution operation. - class NGRAPH_API ConvolutionBackpropData : public Op - { - public: - static constexpr NodeTypeInfo type_info{"ConvolutionBackpropData", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } - /// \brief Constructs a batched-convolution data batch-backprop operation. - ConvolutionBackpropData() = default; - /// - /// \brief Constructs a batched-convolution data batch-backprop operation. - /// - /// \param data_batch_shape The shape of the data batch from - /// forward-prop. - /// \param filters The node producing the filters from - /// forward-prop. - /// \param data The node producing output delta. - /// \param window_movement_strides_forward The window movement strides from - /// forward-prop. - /// \param window_dilation_strides_forward The window dilation strides from - /// forward-prop. - /// \param padding_below_forward The padding-below sizes from - /// forward-prop. - /// \param padding_above_forward The padding-above sizes from - /// forward-prop. - /// \param data_dilation_strides_forward The data dilation strides from - /// forward-prop. 
- /// - ConvolutionBackpropData(const Shape& data_batch_shape, - const Output& filters, - const Output& data, - const Strides& window_movement_strides_forward, - const Strides& window_dilation_strides_forward, - const CoordinateDiff& padding_below_forward, - const CoordinateDiff& padding_above_forward, - const Strides& data_dilation_strides_forward); - - void validate_and_infer_types() override; - bool visit_attributes(AttributeVisitor& visitor) override; - - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - - /// \return The data batch shape. - const Shape& get_data_batch_shape() const { return m_data_batch_shape; } - void set_data_batch_shape(const Shape& data_batch_shape) - { - m_data_batch_shape = data_batch_shape; - } - /// \return The window movement strides from the forward prop. - const Strides& get_window_movement_strides_forward() const - { - return m_window_movement_strides_forward; - } - void set_window_movement_strides_forward( - const Strides& window_movement_strides_forward) - { - m_window_movement_strides_forward = window_movement_strides_forward; - } - /// \return The window dilation strides from the forward prop. - const Strides& get_window_dilation_strides_forward() const - { - return m_window_dilation_strides_forward; - } - void set_window_dilation_strides_forward( - const Strides& window_dilation_strides_forward) - { - m_window_dilation_strides_forward = window_dilation_strides_forward; - } - /// \return The padding-below sizes (possibly negative) from the forward prop. - const CoordinateDiff& get_padding_below_forward() const - { - return m_padding_below_forward; - } - void set_padding_below_forward(const CoordinateDiff& padding_below_forward) - { - m_padding_below_forward = padding_below_forward; - } - /// \return The padding-above sizes (possibly negative) from the forward prop. - const CoordinateDiff& get_padding_above_forward() const - { - return m_padding_above_forward; - } - void set_padding_above_forward(const CoordinateDiff& padding_above_forward) - { - m_padding_above_forward = padding_above_forward; - } - /// \return The input data dilation strides from the forward prop. - const Strides& get_data_dilation_strides_forward() const - { - return m_data_dilation_strides_forward; - } - void set_data_dilation_strides_forward(const Strides& data_dilation_strides_forward) - { - m_data_dilation_strides_forward = data_dilation_strides_forward; - } - - // Compute the pad_above values to be used if in a convolution - CoordinateDiff compute_backward_delta_out_pad_above() const; - CoordinateDiff compute_backward_delta_out_pad_below() const; - - protected: - Shape m_data_batch_shape; - Strides m_window_movement_strides_forward; - Strides m_window_dilation_strides_forward; - CoordinateDiff m_padding_below_forward; - CoordinateDiff m_padding_above_forward; - Strides m_data_dilation_strides_forward; - }; - } // namespace v0 - - namespace util - { - // This is a legacy function, retained because the CPU backend uses it for now. - // TODO: Update CPU backend to use the new stuff in validation_util.hpp, and remove this - // function. 
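// Editorial note (not part of the patch): for the NCHW layout used throughout this
// patch, the axis arguments below are batch_axis_data = 0, input_channel_axis_data = 1,
// input_channel_axis_filters = 1, output_channel_axis_filters = 0, batch_axis_result = 0
// and output_channel_axis_result = 1; this is why the implementation earlier in the
// patch only accepts axis values of 0 or 1.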
- NGRAPH_API - Shape infer_convolution_output_shape(const Node* node, - const Shape& data_batch_shape, - const Shape& filters_shape, - const Strides& window_movement_strides, - const Strides& window_dilation_strides, - const CoordinateDiff& padding_below, - const CoordinateDiff& padding_above, - const Strides& data_dilation_strides, - size_t batch_axis_data, - size_t input_channel_axis_data, - size_t input_channel_axis_filters, - size_t output_channel_axis_filters, - size_t batch_axis_result, - size_t output_channel_axis_result); - } // namespace util - - using v0::Convolution; - using v0::ConvolutionBackpropData; } // namespace op } // namespace ngraph diff --git a/ngraph/src/ngraph/op/group_conv.cpp b/ngraph/src/ngraph/op/group_conv.cpp index 3744e43..3f2b642 100644 --- a/ngraph/src/ngraph/op/group_conv.cpp +++ b/ngraph/src/ngraph/op/group_conv.cpp @@ -576,305 +576,3 @@ shared_ptr m_output_padding); } } - -//------------------------------------------------------------------------------ -// v0::GroupConvolution -//------------------------------------------------------------------------------ - -constexpr NodeTypeInfo op::v0::GroupConvolution::type_info; - -op::v0::GroupConvolution::GroupConvolution(const Output& data_batch, - const Output& filters, - const Strides& window_movement_strides, - const Strides& window_dilation_strides, - const CoordinateDiff& padding_below, - const CoordinateDiff& padding_above, - const Strides& data_dilation_strides, - const size_t groups, - const PadType& pad_type) - : FusedOp({data_batch, filters}) - , m_window_movement_strides(window_movement_strides) - , m_window_dilation_strides(window_dilation_strides) - , m_padding_below(padding_below) - , m_padding_above(padding_above) - , m_data_dilation_strides(data_dilation_strides) - , m_groups(groups) - , m_pad_type(pad_type) - , m_groups_in_filters(false) -{ - constructor_validate_and_infer_types(); -} - -op::v0::GroupConvolution::GroupConvolution(const Output& data_batch, - const Output& filters, - const Strides& window_movement_strides, - const Strides& window_dilation_strides, - const CoordinateDiff& padding_below, - const CoordinateDiff& padding_above, - const Strides& data_dilation_strides, - const PadType& pad_type) - : FusedOp({data_batch, filters}) - , m_window_movement_strides(window_movement_strides) - , m_window_dilation_strides(window_dilation_strides) - , m_padding_below(padding_below) - , m_padding_above(padding_above) - , m_data_dilation_strides(data_dilation_strides) - , m_groups(0) - , m_pad_type(pad_type) - , m_groups_in_filters(true) -{ - constructor_validate_and_infer_types(); -} - -void op::v0::GroupConvolution::pre_validate_and_infer_types() -{ - auto data_shape = get_input_partial_shape(0); - auto filters_shape = get_input_partial_shape(1); - - if (data_shape.is_static() && filters_shape.is_static()) - { - // Update groups - if (m_groups_in_filters) - { - m_groups = get_input_partial_shape(1)[0].get_length(); - } - - // Data channels - NODE_VALIDATION_CHECK(this, - data_shape.to_shape()[1] % get_groups() == 0, - "Data channels not a multiple of group size"); - // Output channels - NODE_VALIDATION_CHECK(this, - filters_shape.to_shape()[0] % get_groups() == 0, - "# Filters not a multiple of group size"); - - // Input Filters - NODE_VALIDATION_CHECK(this, - (filters_shape.to_shape()[m_groups_in_filters ? 
2 : 1] * - get_groups()) == data_shape.to_shape()[1], - "Incorrect number of channels per filter"); - } - else - { - set_output_type(0, get_input_element_type(0), PartialShape::dynamic()); - } -} - -void op::v0::GroupConvolution::post_validate_and_infer_types() -{ - auto data_shape = get_input_partial_shape(0); - auto filters_shape = get_input_partial_shape(1); - if (data_shape.is_static() && filters_shape.is_static()) - { - if (m_pad_type == PadType::SAME_UPPER || m_pad_type == PadType::SAME_LOWER) - { - m_padding_below.clear(); - m_padding_above.clear(); - auto filter_shape = filters_shape.to_shape(); - filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2); // Remove {O,I} - infer_auto_padding(data_shape.to_shape(), - filter_shape, - m_window_movement_strides, - m_window_dilation_strides, - m_pad_type, - m_padding_above, - m_padding_below); - } - } -} - -Shape op::v0::GroupConvolution::get_weights_dimensions() const -{ - auto data_shape = get_input_shape(0); - auto weights_shape = get_input_shape(1); - // check if weights already includes groups - if (m_groups_in_filters) - { - return weights_shape; - } - // reshape weights into 5d tensors that includes groups - const size_t OC = 0; - const size_t OC_IN_OUTPUT = 1; - const size_t IC = 1; - Shape weights_shape_groups{weights_shape}; - // adjust output and channel given a number of groups - - weights_shape_groups.at(OC) = get_shape().at(OC_IN_OUTPUT) / get_groups(); - weights_shape_groups.at(IC) = data_shape.at(IC) / get_groups(); - // push_front the number of groups - weights_shape_groups.insert(weights_shape_groups.begin(), get_groups()); - return weights_shape_groups; -} - -shared_ptr op::v0::GroupConvolution::clone_with_new_inputs(const OutputVector& new_args) const -{ - check_new_args_count(this, new_args); - - if (m_groups_in_filters) - { - return make_shared(new_args.at(0), - new_args.at(1), - get_window_movement_strides(), - get_window_dilation_strides(), - get_padding_below(), - get_padding_above(), - get_data_dilation_strides(), - get_pad_type()); - } - else - { - return make_shared(new_args.at(0), - new_args.at(1), - get_window_movement_strides(), - get_window_dilation_strides(), - get_padding_below(), - get_padding_above(), - get_data_dilation_strides(), - get_groups(), - get_pad_type()); - } -} - -OutputVector op::v0::GroupConvolution::decompose_op() const -{ - auto data = input_value(0); - auto filters = input_value(1); - auto filters_shape = get_input_shape(1); - // Split one convolution op to N ops where N is the number of groups - // and concat results after computation. 
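// Editorial shape walk-through (illustration only, matching the tests later in this
// patch): with data [1, 4, 2, 2], filters [2, 2, 1, 1] and groups = 2, the data is
// split on axis 1 into two [1, 2, 2, 2] slices and the filters on axis 0 into two
// [1, 2, 1, 1] slices; each pair feeds a plain v0::Convolution producing [1, 1, 2, 2],
// and the two results are concatenated on axis 1 into [1, 2, 2, 2].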
- NodeVector convolution_nodes; - - // slice data - auto sliced_data = builder::split(data, get_groups(), 1); - // slice filters - auto sliced_filters = builder::split(filters, get_groups(), 0); - for (std::size_t group{0}; group < get_groups(); ++group) - { - auto sliced_filter = sliced_filters[group]; - if (m_groups_in_filters) - { - // Remove group dimmension after slicing - sliced_filter = make_shared( - sliced_filters[group], - get_default_order(sliced_filters[group].get_shape().size()), - Shape(std::next(std::begin(filters_shape), 1), std::end(filters_shape))); - } - convolution_nodes.push_back( - std::make_shared(sliced_data[group], - sliced_filter, - m_window_movement_strides, - m_window_dilation_strides, - m_padding_below, - m_padding_above, - m_data_dilation_strides, - m_pad_type)); - } - std::size_t concatenation_axis = 1; - return {std::make_shared(convolution_nodes, concatenation_axis)}; -} - -//------------------------------------------------------------------------------ -// v0::GroupConvolutionBackpropData -//------------------------------------------------------------------------------ - -constexpr NodeTypeInfo op::v0::GroupConvolutionBackpropData::type_info; - -op::v0::GroupConvolutionBackpropData::GroupConvolutionBackpropData( - const Output& data_batch, - const Output& filters, - const Output& output_delta, - const Strides& window_movement_strides, - const Strides& window_dilation_strides, - const CoordinateDiff& padding_below, - const CoordinateDiff& padding_above, - const size_t groups) - : FusedOp({data_batch, filters, output_delta}) - , m_window_movement_strides(window_movement_strides) - , m_window_dilation_strides(window_dilation_strides) - , m_padding_below(padding_below) - , m_padding_above(padding_above) - , m_groups(groups) -{ - constructor_validate_and_infer_types(); -} - -void op::v0::GroupConvolutionBackpropData::pre_validate_and_infer_types() -{ - element::Type data_element_type = get_input_element_type(2); - element::Type filters_elem_type = get_input_element_type(1); - - NODE_VALIDATION_CHECK(this, - data_element_type.is_dynamic() || data_element_type.is_real(), - "Output delta element type must be f16, bf16, f32, f64 or dynamic (got ", - data_element_type, - ")."); - NODE_VALIDATION_CHECK(this, - filters_elem_type.is_dynamic() || filters_elem_type.is_real(), - "Filters element type must be f16, bf16, f32, f64 or dynamic (got ", - filters_elem_type, - ")."); - - PartialShape data_pshape = get_input_partial_shape(0); - PartialShape filters_pshape = get_input_partial_shape(1); - PartialShape delta_pshape = get_input_partial_shape(2); - - if (data_pshape.is_dynamic() || filters_pshape.is_dynamic() || delta_pshape.is_dynamic()) - { - set_output_type(0, data_element_type, PartialShape::dynamic()); - } -} - -shared_ptr - op::v0::GroupConvolutionBackpropData::clone_with_new_inputs(const OutputVector& new_args) const -{ - if (new_args.size() != 3) - { - throw ngraph_error("Incorrect number of new arguments"); - } - - return make_shared(new_args.at(0), - new_args.at(1), - new_args.at(2), - get_window_movement_strides(), - get_window_dilation_strides(), - get_padding_below(), - get_padding_above(), - get_groups()); -} - -OutputVector op::v0::GroupConvolutionBackpropData::decompose_op() const -{ - auto filters = input_value(1); - auto output_delta = input_value(2); - auto data_shape = get_input_shape(0); - - NodeVector sliced_inputs; - - auto groups = get_groups(); - // slice data shape - data_shape[1] /= groups; - // slice delta - auto sliced_delta = 
builder::split(output_delta, groups, 1); - // slice filters - auto sliced_filters = builder::split(filters, groups, 0); - - auto num_spatials = get_window_movement_strides().size(); - - for (size_t i = 0; i < groups; ++i) - { - auto sliced_conv = std::make_shared( - data_shape, - sliced_filters[i], - sliced_delta[i], - get_window_movement_strides(), - get_window_dilation_strides(), - get_padding_below(), - get_padding_above(), - Strides(num_spatials, 1)); // default data dilation strides - - sliced_inputs.push_back(sliced_conv); - } - - size_t concatenation_axis = 1; - return {std::make_shared(sliced_inputs, concatenation_axis)}; -} diff --git a/ngraph/src/ngraph/op/group_conv.hpp b/ngraph/src/ngraph/op/group_conv.hpp index bd2cb15..51d34dc 100644 --- a/ngraph/src/ngraph/op/group_conv.hpp +++ b/ngraph/src/ngraph/op/group_conv.hpp @@ -248,117 +248,5 @@ namespace ngraph CoordinateDiff m_output_padding; }; } // namespace v1 - - namespace v0 - { - /// \brief Group Convolution - class NGRAPH_API GroupConvolution : public ngraph::op::util::FusedOp - { - public: - static constexpr NodeTypeInfo type_info{"GroupConvolution", 0}; - const NodeTypeInfo& get_type_info() const override { return type_info; } - GroupConvolution() = default; - GroupConvolution(const Output& data_batch, - const Output& filters, - const Strides& window_movement_strides, - const Strides& window_dilation_strides, - const CoordinateDiff& padding_below, - const CoordinateDiff& padding_above, - const Strides& data_dilation_strides, - const size_t groups, - const PadType& pad_type = PadType::EXPLICIT); - - // constructor which accept groups included in filters shape. - GroupConvolution(const Output& data_batch, - const Output& filters, - const Strides& window_movement_strides, - const Strides& window_dilation_strides, - const CoordinateDiff& padding_below, - const CoordinateDiff& padding_above, - const Strides& data_dilation_strides, - const PadType& pad_type = PadType::EXPLICIT); - Shape get_weights_dimensions() const; - const Strides& get_window_movement_strides() const - { - return m_window_movement_strides; - } - const Strides& get_window_dilation_strides() const - { - return m_window_dilation_strides; - } - const CoordinateDiff& get_padding_below() const { return m_padding_below; } - const CoordinateDiff& get_padding_above() const { return m_padding_above; } - const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; } - Output get_filters() { return input_value(1); } - Output get_data_batch() { return input_value(0); } - size_t get_groups() const { return m_groups; }; - const PadType& get_pad_type() const { return m_pad_type; } - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - - virtual OutputVector decompose_op() const override; - - virtual void pre_validate_and_infer_types() override; - virtual void post_validate_and_infer_types() override; - - bool has_groups_in_filters() const { return m_groups_in_filters; } - protected: - Strides m_window_movement_strides; - Strides m_window_dilation_strides; - CoordinateDiff m_padding_below; - CoordinateDiff m_padding_above; - Strides m_data_dilation_strides; - size_t m_groups; - PadType m_pad_type{PadType::NOTSET}; - - private: - bool m_groups_in_filters; - }; - - /// \brief Group Convolution data batch backprop - class NGRAPH_API GroupConvolutionBackpropData : public ngraph::op::util::FusedOp - { - public: - static constexpr NodeTypeInfo type_info{"GroupConvolutionBackpropData", 0}; - const NodeTypeInfo& 
get_type_info() const override { return type_info; } - GroupConvolutionBackpropData() = default; - GroupConvolutionBackpropData(const Output& data_batch, - const Output& filters, - const Output& output_delta, - const Strides& window_movement_strides, - const Strides& window_dilation_strides, - const CoordinateDiff& padding_below, - const CoordinateDiff& padding_above, - const size_t groups); - - const Strides& get_window_movement_strides() const - { - return m_window_movement_strides; - } - const Strides& get_window_dilation_strides() const - { - return m_window_dilation_strides; - } - const CoordinateDiff& get_padding_below() const { return m_padding_below; } - const CoordinateDiff& get_padding_above() const { return m_padding_above; } - size_t get_groups() const { return m_groups; }; - virtual std::shared_ptr - clone_with_new_inputs(const OutputVector& new_args) const override; - - virtual OutputVector decompose_op() const override; - - virtual void pre_validate_and_infer_types() override; - - protected: - Strides m_window_movement_strides; - Strides m_window_dilation_strides; - CoordinateDiff m_padding_below; - CoordinateDiff m_padding_above; - size_t m_groups; - }; - } - - using v0::GroupConvolution; - using v0::GroupConvolutionBackpropData; - } // namespace op + } // namespace op } // namespace ngraph diff --git a/ngraph/src/ngraph/op/op_version_tbl.hpp b/ngraph/src/ngraph/op/op_version_tbl.hpp index 5b5136c..79e6d0f 100644 --- a/ngraph/src/ngraph/op/op_version_tbl.hpp +++ b/ngraph/src/ngraph/op/op_version_tbl.hpp @@ -52,9 +52,7 @@ NGRAPH_OP(Concat, ngraph::op::v0, 0) NGRAPH_OP(Constant, ngraph::op::v0, 0) NGRAPH_OP(Convert, ngraph::op::v0, 0) NGRAPH_OP(ConvertLike, ngraph::op::v1, 1) -NGRAPH_OP(Convolution, ngraph::op::v0, 0) NGRAPH_OP(Convolution, ngraph::op::v1, 1) -NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v0, 0) NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v1, 1) NGRAPH_OP(Cos, ngraph::op::v0, 0) NGRAPH_OP(Cosh, ngraph::op::v0, 0) @@ -91,9 +89,7 @@ NGRAPH_OP(Greater, ngraph::op::v0, 0) NGRAPH_OP(Greater, ngraph::op::v1, 1) NGRAPH_OP(GreaterEq, ngraph::op::v0, 0) NGRAPH_OP(GreaterEqual, ngraph::op::v1, 1) -NGRAPH_OP(GroupConvolution, ngraph::op::v0, 0) NGRAPH_OP(GroupConvolution, ngraph::op::v1, 1) -NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op::v0, 0) NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op::v1, 1) NGRAPH_OP(HardSigmoid, ngraph::op::v0, 0) NGRAPH_OP(Interpolate, ngraph::op::v0, 0) diff --git a/ngraph/src/ngraph/serializer.cpp b/ngraph/src/ngraph/serializer.cpp index c0f005a..f385b16 100644 --- a/ngraph/src/ngraph/serializer.cpp +++ b/ngraph/src/ngraph/serializer.cpp @@ -1030,75 +1030,6 @@ shared_ptr JSONDeserializer::deserialize_node(json node_js) node = make_shared(args[0], target_type); break; } - case OP_TYPEID::Convolution: - { - auto window_movement_strides = - node_js.at("window_movement_strides").get>(); - auto window_dilation_strides = - node_js.at("window_dilation_strides").get>(); - auto padding_below = node_js.at("padding_below").get>(); - auto padding_above = node_js.at("padding_above").get>(); - - // For backwards compatibility, we accept "image_dilation_strides" in place of - // "data_dilation_strides", and we also allow it to be omitted altogether. 
- json data_dilation_strides; - if (has_key(node_js, "data_dilation_strides")) - { - data_dilation_strides = node_js["data_dilation_strides"]; - } - else if (has_key(node_js, "image_dilation_strides")) - { - data_dilation_strides = node_js["image_dilation_strides"]; - } - - op::PadType pad_type = read_pad_type(node_js); - - if (data_dilation_strides.empty()) - { - node = make_shared(args[0], - args[1], - window_movement_strides, - window_dilation_strides, - padding_below, - padding_above); - } - else - { - node = make_shared( - args[0], - args[1], - window_movement_strides, - window_dilation_strides, - padding_below, - padding_above, - data_dilation_strides.get>(), - pad_type); - } - break; - } - case OP_TYPEID::ConvolutionBackpropData: - { - auto data_batch_shape = node_js.at("data_batch_shape").get>(); - auto window_movement_strides_forward = - node_js.at("window_movement_strides_forward").get>(); - auto window_dilation_strides_forward = - node_js.at("window_dilation_strides_forward").get>(); - auto padding_below_forward = - node_js.at("padding_below_forward").get>(); - auto padding_above_forward = - node_js.at("padding_above_forward").get>(); - auto data_dilation_strides_forward = - node_js.at("data_dilation_strides_forward").get>(); - node = make_shared(data_batch_shape, - args[0], - args[1], - window_movement_strides_forward, - window_dilation_strides_forward, - padding_below_forward, - padding_above_forward, - data_dilation_strides_forward); - break; - } case OP_TYPEID::Cos: { node = make_shared(args[0]); @@ -1253,62 +1184,6 @@ shared_ptr JSONDeserializer::deserialize_node(json node_js) node = make_shared(args[0], bias); break; } - case OP_TYPEID::GroupConvolution: - { - auto window_movement_strides = - node_js.at("window_movement_strides").get>(); - auto window_dilation_strides = - node_js.at("window_dilation_strides").get>(); - auto padding_below = node_js.at("padding_below").get>(); - auto padding_above = node_js.at("padding_above").get>(); - auto data_dilation_strides = node_js.at("data_dilation_strides").get>(); - op::PadType pad_type = read_pad_type(node_js); - if (has_key(node_js, "groups")) - { - auto groups = node_js.at("groups").get(); - node = make_shared(args[0], - args[1], - window_movement_strides, - window_dilation_strides, - padding_below, - padding_above, - data_dilation_strides, - groups, - pad_type); - } - else - { - node = make_shared(args[0], - args[1], - window_movement_strides, - window_dilation_strides, - padding_below, - padding_above, - data_dilation_strides, - pad_type); - } - break; - } - case OP_TYPEID::GroupConvolutionBackpropData: - { - auto window_movement_strides = - node_js.at("window_movement_strides").get>(); - auto window_dilation_strides = - node_js.at("window_dilation_strides").get>(); - auto padding_below = node_js.at("padding_below").get>(); - auto padding_above = node_js.at("padding_above").get>(); - auto groups = node_js.at("groups").get(); - - node = make_shared(args[0], - args[1], - args[2], - window_movement_strides, - window_dilation_strides, - padding_below, - padding_above, - groups); - break; - } case OP_TYPEID::HardSigmoid: { node = make_shared(args[0], args[1], args[2]); @@ -2252,28 +2127,6 @@ json JSONSerializer::serialize_node(const Node& n) node["target_type"] = write_element_type(tmp->get_convert_element_type()); break; } - case OP_TYPEID::Convolution: - { - auto tmp = static_cast(&n); - node["window_movement_strides"] = tmp->get_window_movement_strides(); - node["window_dilation_strides"] = tmp->get_window_dilation_strides(); - 
node["padding_below"] = tmp->get_padding_below(); - node["padding_above"] = tmp->get_padding_above(); - node["data_dilation_strides"] = tmp->get_data_dilation_strides(); - node["pad_type"] = tmp->get_pad_type(); - break; - } - case OP_TYPEID::ConvolutionBackpropData: - { - auto tmp = static_cast(&n); - node["data_batch_shape"] = tmp->get_data_batch_shape(); - node["window_movement_strides_forward"] = tmp->get_window_movement_strides_forward(); - node["window_dilation_strides_forward"] = tmp->get_window_dilation_strides_forward(); - node["padding_below_forward"] = tmp->get_padding_below_forward(); - node["padding_above_forward"] = tmp->get_padding_above_forward(); - node["data_dilation_strides_forward"] = tmp->get_data_dilation_strides_forward(); - break; - } case OP_TYPEID::Cos: { break; } case OP_TYPEID::Cosh: { break; @@ -2412,31 +2265,6 @@ json JSONSerializer::serialize_node(const Node& n) node["bias"] = tmp->get_bias(); break; } - case OP_TYPEID::GroupConvolution: - { - auto tmp = static_cast(&n); - node["window_movement_strides"] = tmp->get_window_movement_strides(); - node["window_dilation_strides"] = tmp->get_window_dilation_strides(); - node["padding_below"] = tmp->get_padding_below(); - node["padding_above"] = tmp->get_padding_above(); - node["data_dilation_strides"] = tmp->get_data_dilation_strides(); - if (!tmp->has_groups_in_filters()) - { - node["groups"] = tmp->get_groups(); - } - node["pad_type"] = tmp->get_pad_type(); - break; - } - case OP_TYPEID::GroupConvolutionBackpropData: - { - auto tmp = static_cast(&n); - node["window_movement_strides"] = tmp->get_window_movement_strides(); - node["window_dilation_strides"] = tmp->get_window_dilation_strides(); - node["padding_below"] = tmp->get_padding_below(); - node["padding_above"] = tmp->get_padding_above(); - node["groups"] = tmp->get_groups(); - break; - } case OP_TYPEID::HardSigmoid: { break; } case OP_TYPEID::Less: diff --git a/ngraph/src/ngraph/validation_util.hpp b/ngraph/src/ngraph/validation_util.hpp index e71b74c..a9415a5 100644 --- a/ngraph/src/ngraph/validation_util.hpp +++ b/ngraph/src/ngraph/validation_util.hpp @@ -25,10 +25,12 @@ namespace ngraph { + NGRAPH_API Strides conv_default_strides(const Node* node, const PartialShape& data_batch_shape, const PartialShape& filters_shape); + NGRAPH_API CoordinateDiff conv_default_padding(const Node* node, const PartialShape& data_batch_shape, const PartialShape& filters_shape); diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt index e89a076..d1a54cf 100644 --- a/ngraph/test/CMakeLists.txt +++ b/ngraph/test/CMakeLists.txt @@ -185,7 +185,6 @@ set(SRC type_prop/unary_elementwise.cpp type_prop/unsqueeze.cpp type_prop/variadic_split.cpp - type_prop_benchmark.cpp type_prop_layers.cpp util.cpp ) diff --git a/ngraph/test/backend/convolution.in.cpp b/ngraph/test/backend/convolution.in.cpp index 47ce09e..ab2b593 100644 --- a/ngraph/test/backend/convolution.in.cpp +++ b/ngraph/test/backend/convolution.in.cpp @@ -17,6 +17,7 @@ #include "gtest/gtest.h" #include "ngraph/ngraph.hpp" #include "ngraph/runtime/tensor.hpp" +#include "op/convolution.hpp" #include "runtime/backend.hpp" #include "util/all_close.hpp" #include "util/all_close_f.hpp" @@ -37,20 +38,20 @@ NGRAPH_TEST(${BACKEND_NAME}, convolution_outlining) Shape shape_b{2, 2, 1, 1}; auto B = make_shared(element::f32, shape_b); Shape shape_r{1, 2, 2, 2}; - auto conv1 = make_shared(A, - B, - Strides{1, 1}, - Strides{1, 1}, - CoordinateDiff{0, 0}, - CoordinateDiff{0, 0}, - Strides{1, 1}); - auto conv2 = 
make_shared(conv1, - B, - Strides{1, 1}, - Strides{1, 1}, - CoordinateDiff{0, 0}, - CoordinateDiff{0, 0}, - Strides{1, 1}); + auto conv1 = make_shared(A, + B, + Strides{1, 1}, + Strides{1, 1}, + CoordinateDiff{0, 0}, + CoordinateDiff{0, 0}, + Strides{1, 1}); + auto conv2 = make_shared(conv1, + B, + Strides{1, 1}, + Strides{1, 1}, + CoordinateDiff{0, 0}, + CoordinateDiff{0, 0}, + Strides{1, 1}); auto f = make_shared(conv2, ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); @@ -76,13 +77,13 @@ NGRAPH_TEST(${BACKEND_NAME}, convolution_simple) Shape shape_b{2, 2, 1, 1}; auto B = make_shared(element::f32, shape_b); Shape shape_r{1, 2, 2, 2}; - auto conv1 = make_shared(A, - B, - Strides{1, 1}, - Strides{1, 1}, - CoordinateDiff{0, 0}, - CoordinateDiff{0, 0}, - Strides{1, 1}); + auto conv1 = make_shared(A, + B, + Strides{1, 1}, + Strides{1, 1}, + CoordinateDiff{0, 0}, + CoordinateDiff{0, 0}, + Strides{1, 1}); auto f = make_shared(conv1, ParameterVector{A, B}); @@ -109,13 +110,13 @@ NGRAPH_TEST(${BACKEND_NAME}, convolution_simple_padding) Shape shape_b{1, 1, 1, 1}; auto B = make_shared(element::f32, shape_b); Shape shape_r{1, 1, 5, 5}; - auto conv1 = make_shared(A, - B, - Strides{1, 1}, - Strides{1, 1}, - CoordinateDiff{1, 1}, - CoordinateDiff{2, 2}, - Strides{1, 1}); + auto conv1 = make_shared(A, + B, + Strides{1, 1}, + Strides{1, 1}, + CoordinateDiff{1, 1}, + CoordinateDiff{2, 2}, + Strides{1, 1}); auto f = make_shared(conv1, ParameterVector{A, B}); diff --git a/ngraph/test/backend/fused_op.in.cpp b/ngraph/test/backend/fused_op.in.cpp index 6e3f4f4..d33fb8d 100644 --- a/ngraph/test/backend/fused_op.in.cpp +++ b/ngraph/test/backend/fused_op.in.cpp @@ -34,6 +34,7 @@ #include "ngraph/check.hpp" #include "ngraph/ngraph.hpp" #include "ngraph/op/util/attr_types.hpp" +#include "op/group_conv.hpp" #include "util/all_close.hpp" #include "util/all_close_f.hpp" #include "util/engine/test_engines.hpp" @@ -194,14 +195,14 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv) { auto data = make_shared(element::f32, Shape{1, 4, 2, 2}); auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); - auto group_conv = make_shared(data, - filters, - Strides{1, 1}, - Strides{1, 1}, - CoordinateDiff{0, 0}, - CoordinateDiff{0, 0}, - Strides{1, 1}, - 2); + auto group_conv = make_shared(data, + filters, + Strides{1, 1}, + Strides{1, 1}, + CoordinateDiff{0, 0}, + CoordinateDiff{0, 0}, + Strides{1, 1}, + 2); auto f0 = make_shared(NodeVector{group_conv}, ParameterVector{data, filters}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); @@ -222,14 +223,14 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_striding) { auto data = make_shared(element::f32, Shape{1, 4, 2, 2}); auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); - auto group_conv = make_shared(data, - filters, - Strides{2, 2}, - Strides{1, 1}, - CoordinateDiff{0, 0}, - CoordinateDiff{0, 0}, - Strides{1, 1}, - 2); + auto group_conv = make_shared(data, + filters, + Strides{2, 2}, + Strides{1, 1}, + CoordinateDiff{0, 0}, + CoordinateDiff{0, 0}, + Strides{1, 1}, + 2); auto f0 = make_shared(NodeVector{group_conv}, ParameterVector{data, filters}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); @@ -250,14 +251,14 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_window_dilation) { auto data = make_shared(element::f32, Shape{1, 4, 2, 2}); auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); - auto group_conv = make_shared(data, - filters, - Strides{1, 1}, - Strides{2, 2}, - CoordinateDiff{0, 0}, - CoordinateDiff{0, 0}, - 
Strides{1, 1}, - 2); + auto group_conv = make_shared(data, + filters, + Strides{1, 1}, + Strides{2, 2}, + CoordinateDiff{0, 0}, + CoordinateDiff{0, 0}, + Strides{1, 1}, + 2); auto f0 = make_shared(NodeVector{group_conv}, ParameterVector{data, filters}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); @@ -278,14 +279,14 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_data_dilation) { auto data = make_shared(element::f32, Shape{1, 4, 2, 2}); auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); - auto group_conv = make_shared(data, - filters, - Strides{1, 1}, - Strides{1, 1}, - CoordinateDiff{0, 0}, - CoordinateDiff{0, 0}, - Strides{2, 2}, - 2); + auto group_conv = make_shared(data, + filters, + Strides{1, 1}, + Strides{1, 1}, + CoordinateDiff{0, 0}, + CoordinateDiff{0, 0}, + Strides{2, 2}, + 2); auto f0 = make_shared(NodeVector{group_conv}, ParameterVector{data, filters}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); @@ -306,14 +307,14 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_padding) { auto data = make_shared(element::f32, Shape{1, 4, 2, 2}); auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); - auto group_conv = make_shared(data, - filters, - Strides{1, 1}, - Strides{1, 1}, - CoordinateDiff{1, 0}, - CoordinateDiff{0, 1}, - Strides{1, 1}, - 2); + auto group_conv = make_shared(data, + filters, + Strides{1, 1}, + Strides{1, 1}, + CoordinateDiff{1, 0}, + CoordinateDiff{0, 1}, + Strides{1, 1}, + 2); auto f0 = make_shared(NodeVector{group_conv}, ParameterVector{data, filters}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); @@ -334,14 +335,14 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_padding_and_window_dilation) { auto data = make_shared(element::f32, Shape{1, 4, 2, 2}); auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); - auto group_conv = make_shared(data, - filters, - Strides{1, 1}, - Strides{2, 2}, - CoordinateDiff{1, 0}, - CoordinateDiff{0, 1}, - Strides{1, 1}, - 2); + auto group_conv = make_shared(data, + filters, + Strides{1, 1}, + Strides{2, 2}, + CoordinateDiff{1, 0}, + CoordinateDiff{0, 1}, + Strides{1, 1}, + 2); auto f0 = make_shared(NodeVector{group_conv}, ParameterVector{data, filters}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); @@ -362,14 +363,14 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_input_shape_variation) { auto data = make_shared(element::f32, Shape{1, 4, 4, 1}); auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); - auto group_conv = make_shared(data, - filters, - Strides{1, 1}, - Strides{2, 2}, - CoordinateDiff{1, 0}, - CoordinateDiff{0, 1}, - Strides{1, 1}, - 2); + auto group_conv = make_shared(data, + filters, + Strides{1, 1}, + Strides{2, 2}, + CoordinateDiff{1, 0}, + CoordinateDiff{0, 1}, + Strides{1, 1}, + 2); auto f0 = make_shared(NodeVector{group_conv}, ParameterVector{data, filters}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); @@ -390,14 +391,14 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_input_data_variation) { auto data = make_shared(element::f32, Shape{1, 4, 3, 3}); auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); - auto group_conv = make_shared(data, - filters, - Strides{1, 1}, - Strides{2, 2}, - CoordinateDiff{1, 0}, - CoordinateDiff{0, 1}, - Strides{1, 1}, - 2); + auto group_conv = make_shared(data, + filters, + Strides{1, 1}, + Strides{2, 2}, + CoordinateDiff{1, 0}, + CoordinateDiff{0, 1}, + Strides{1, 1}, + 2); auto f0 = make_shared(NodeVector{group_conv}, ParameterVector{data, filters}); auto backend = 
runtime::Backend::create("${BACKEND_NAME}"); @@ -421,13 +422,13 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_groups_included_in_shape) { auto data = make_shared(element::f32, Shape{1, 4, 2, 2}); auto filters = make_shared(element::f32, Shape{2, 1, 2, 1, 1}); - auto group_conv = make_shared(data, - filters, - Strides{1, 1}, - Strides{1, 1}, - CoordinateDiff{0, 0}, - CoordinateDiff{0, 0}, - Strides{1, 1}); + auto group_conv = make_shared(data, + filters, + Strides{1, 1}, + Strides{1, 1}, + CoordinateDiff{0, 0}, + CoordinateDiff{0, 0}, + Strides{1, 1}); auto f0 = make_shared(NodeVector{group_conv}, ParameterVector{data, filters}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); diff --git a/ngraph/test/backend/group_convolution.in.cpp b/ngraph/test/backend/group_convolution.in.cpp index d286f2d..8db4e90 100644 --- a/ngraph/test/backend/group_convolution.in.cpp +++ b/ngraph/test/backend/group_convolution.in.cpp @@ -17,6 +17,7 @@ #include "gtest/gtest.h" #include "ngraph/ngraph.hpp" #include "ngraph/runtime/tensor.hpp" +#include "op/group_conv.hpp" #include "runtime/backend.hpp" #include "util/all_close.hpp" #include "util/all_close_f.hpp" @@ -48,7 +49,7 @@ NGRAPH_TEST(${BACKEND_NAME}, dyn_group_convolution_backprop_data) auto padding_end = CoordinateDiff{0, 0}; size_t groups = 3; - auto conv_bprop_data = make_shared( + auto conv_bprop_data = make_shared( data_batch, filters, deltas, strides, dilations, padding_begin, padding_end, groups); auto f = make_shared(conv_bprop_data, ParameterVector{data_batch, filters, deltas}); diff --git a/ngraph/test/build_graph.cpp b/ngraph/test/build_graph.cpp index 0144629..3fc0bff 100644 --- a/ngraph/test/build_graph.cpp +++ b/ngraph/test/build_graph.cpp @@ -151,23 +151,6 @@ TEST(build_graph, no_arg_construction) ASSERT_EQ(add1->get_output_shape(0), Shape{7}); } -TEST(build_graph, multi_output_split) -{ - const auto data = make_shared(element::f32, Shape{64, 8, 100, 150}); - auto filters = make_shared(element::f32, Shape{128, 2, 10, 20}); - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); - const auto split = make_shared(data, axis, 2); - auto conv = make_shared(split->output(1), - filters, - Strides{1, 1}, - Strides{1, 1}, - CoordinateDiff{0, 0}, - CoordinateDiff{0, 0}, - Strides{1, 1}, - 2); - EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 91, 131})); -} - TEST(build_graph, multi_output_split_dynamic) { const auto data = make_shared(element::f32, PartialShape::dynamic()); diff --git a/ngraph/test/op_is.cpp b/ngraph/test/op_is.cpp index 074c1d9..33c97a5 100644 --- a/ngraph/test/op_is.cpp +++ b/ngraph/test/op_is.cpp @@ -19,6 +19,8 @@ #include "ngraph/ngraph.hpp" #include "ngraph/op/util/op_types.hpp" #include "ngraph/validation_util.hpp" +#include "op/convolution.hpp" +#include "op/group_conv.hpp" #include "util/test_tools.hpp" using namespace ngraph; @@ -162,7 +164,7 @@ namespace void op_is_Convolution() { - op::Convolution node; + op::v0::Convolution node; EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); @@ -171,7 +173,7 @@ namespace void op_is_ConvolutionBackpropData() { - op::ConvolutionBackpropData node; + op::v0::ConvolutionBackpropData node; EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); @@ -405,7 +407,7 @@ namespace void op_is_GroupConvolution() { - op::GroupConvolution 
node; + op::v0::GroupConvolution node; EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); @@ -414,7 +416,7 @@ namespace void op_is_GroupConvolutionBackpropData() { - op::GroupConvolutionBackpropData node; + op::v0::GroupConvolutionBackpropData node; EXPECT_FALSE(op::is_unary_elementwise_arithmetic(&node)); EXPECT_FALSE(op::is_binary_elementwise_arithmetic(&node)); EXPECT_FALSE(op::is_binary_elementwise_comparison(&node)); diff --git a/ngraph/test/opset_pass/convolution_opset_pass.cpp b/ngraph/test/opset_pass/convolution_opset_pass.cpp index 74fb100..0a0b042 100644 --- a/ngraph/test/opset_pass/convolution_opset_pass.cpp +++ b/ngraph/test/opset_pass/convolution_opset_pass.cpp @@ -3,6 +3,8 @@ #include "ngraph/ngraph.hpp" #include "ngraph/pass/manager.hpp" +#include "op/convolution.hpp" +#include "op/group_conv.hpp" #include "opset0_downgrade.hpp" #include "opset1_upgrade.hpp" #include "util/test_control.hpp" diff --git a/ngraph/test/runtime/CMakeLists.txt b/ngraph/test/runtime/CMakeLists.txt index 03cb43d..7f9d22d 100644 --- a/ngraph/test/runtime/CMakeLists.txt +++ b/ngraph/test/runtime/CMakeLists.txt @@ -33,6 +33,10 @@ set (SRC dynamic/dynamic_backend.hpp op/avg_pool.cpp op/avg_pool.hpp + op/convolution.cpp + op/convolution.hpp + op/group_conv.cpp + op/group_conv.hpp pass/dyn_elimination.cpp pass/dyn_elimination.hpp pass/fused_op_decomposition.cpp diff --git a/ngraph/test/runtime/interpreter/int_executable.hpp b/ngraph/test/runtime/interpreter/int_executable.hpp index 9db6fc3..2e0e71c 100644 --- a/ngraph/test/runtime/interpreter/int_executable.hpp +++ b/ngraph/test/runtime/interpreter/int_executable.hpp @@ -90,6 +90,8 @@ #include "ngraph/runtime/reference/topk.hpp" #include "ngraph/runtime/tensor.hpp" #include "op/avg_pool.hpp" +#include "op/convolution.hpp" +#include "op/group_conv.hpp" namespace ngraph { @@ -348,7 +350,7 @@ protected: } case OP_TYPEID::Convolution: { - const op::Convolution* c = static_cast(&node); + const op::v0::Convolution* c = static_cast(&node); reference::convolution(args[0]->get_data_ptr(), args[1]->get_data_ptr(), out[0]->get_data_ptr(), @@ -366,8 +368,8 @@ protected: case OP_TYPEID::ConvolutionBackpropData: { // Note that args[1] and args[0] are switched here from the usual order. - const op::ConvolutionBackpropData* c = - static_cast(&node); + const op::v0::ConvolutionBackpropData* c = + static_cast(&node); reference::convolution_backprop_in(args[1]->get_data_ptr(), args[0]->get_data_ptr(), out[0]->get_data_ptr(), diff --git a/ngraph/test/runtime/op/convolution.cpp b/ngraph/test/runtime/op/convolution.cpp new file mode 100644 index 0000000..4963a59 --- /dev/null +++ b/ngraph/test/runtime/op/convolution.cpp @@ -0,0 +1,356 @@ +//***************************************************************************** +// Copyright 2017-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//***************************************************************************** + +#include "convolution.hpp" +#include "ngraph/axis_vector.hpp" +#include "ngraph/coordinate_diff.hpp" +#include "ngraph/op/reshape.hpp" +#include "ngraph/op/reverse.hpp" +#include "ngraph/util.hpp" +#include "ngraph/validation_util.hpp" + +using namespace std; +using namespace ngraph; + +// *** Convolution OP SET 0 *** +constexpr NodeTypeInfo op::v0::Convolution::type_info; + +op::v0::Convolution::Convolution(const Output& data_batch, + const Output& filters, + const Strides& window_movement_strides, + const Strides& window_dilation_strides, + const CoordinateDiff& padding_below, + const CoordinateDiff& padding_above, + const Strides& data_dilation_strides, + const PadType& pad_type) + : Op({data_batch, filters}) + , m_window_movement_strides(window_movement_strides) + , m_window_dilation_strides(window_dilation_strides) + , m_padding_below(padding_below) + , m_padding_above(padding_above) + , m_data_dilation_strides(data_dilation_strides) + , m_pad_type(pad_type) +{ + constructor_validate_and_infer_types(); +} + +bool op::v0::Convolution::visit_attributes(AttributeVisitor& visitor) +{ + visitor.on_attribute("window_movement_strides", m_window_movement_strides); + visitor.on_attribute("window_dilation_strides", m_window_dilation_strides); + visitor.on_attribute("data_dilation_strides", m_data_dilation_strides); + visitor.on_attribute("padding_below", m_padding_below); + visitor.on_attribute("padding_above", m_padding_above); + visitor.on_attribute("pad_type", m_pad_type); + return true; +} + +void op::v0::Convolution::validate_and_infer_types() +{ + const PartialShape& data_batch_shape = get_input_partial_shape(0); + element::Type data_batch_et = get_input_element_type(0); + const PartialShape& filters_shape = get_input_partial_shape(1); + element::Type filters_et = get_input_element_type(1); + + if (m_data_dilation_strides.size() == 0) + { + m_data_dilation_strides = conv_default_strides(this, data_batch_shape, filters_shape); + } + + if (m_window_movement_strides.size() == 0) + { + m_window_movement_strides = conv_default_strides(this, data_batch_shape, filters_shape); + } + + if (m_window_dilation_strides.size() == 0) + { + m_window_dilation_strides = conv_default_strides(this, data_batch_shape, filters_shape); + } + + if (m_padding_below.size() == 0) + { + m_padding_below = conv_default_padding(this, data_batch_shape, filters_shape); + } + + if (m_padding_above.size() == 0) + { + m_padding_above = conv_default_padding(this, data_batch_shape, filters_shape); + } + + if (m_pad_type == PadType::SAME_UPPER || m_pad_type == PadType::SAME_LOWER) + { + if (data_batch_shape.is_static() && filters_shape.is_static()) + { + // TODO: data dilation + m_padding_below.clear(); + m_padding_above.clear(); + auto filter_shape = filters_shape.to_shape(); + filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2); // Remove {O,I} + infer_auto_padding(data_batch_shape.to_shape(), + filter_shape, + m_window_movement_strides, + m_window_dilation_strides, + m_pad_type, + m_padding_above, + m_padding_below); + } + } + + element::Type result_et; + PartialShape result_shape; + + NODE_VALIDATION_CHECK( + this, + element::Type::merge(result_et, data_batch_et, filters_et), + "Element types for data batch and filters do not match (data batch element type: ", + data_batch_et, + ", filters element type: ", + filters_et, + ")."); + + result_shape = infer_convolution_forward(this, + data_batch_shape, + m_data_dilation_strides, + 
m_padding_below, + m_padding_above, + filters_shape, + m_window_movement_strides, + m_window_dilation_strides); + + set_output_type(0, result_et, result_shape); +} + +op::v0::Convolution::Convolution(const Output& data_batch, + const Output& filters, + const Strides& window_movement_strides, + const Strides& window_dilation_strides, + const CoordinateDiff& padding_below, + const CoordinateDiff& padding_above) + : Convolution(data_batch, + filters, + window_movement_strides, + window_dilation_strides, + padding_below, + padding_above, + Strides()) +{ +} + +op::v0::Convolution::Convolution(const Output& data_batch, + const Output& filters, + const Strides& window_movement_strides, + const Strides& window_dilation_strides) + : Convolution(data_batch, + filters, + window_movement_strides, + window_dilation_strides, + CoordinateDiff(), + CoordinateDiff()) +{ +} + +op::v0::Convolution::Convolution(const Output& data_batch, + const Output& filters, + const Strides& window_movement_strides) + : Convolution(data_batch, + filters, + window_movement_strides, + Strides(), + CoordinateDiff(), + CoordinateDiff()) +{ +} + +op::v0::Convolution::Convolution(const Output& data_batch, const Output& filters) + : Convolution(data_batch, filters, Strides(), Strides(), CoordinateDiff(), CoordinateDiff()) +{ +} + +shared_ptr op::v0::Convolution::clone_with_new_inputs(const OutputVector& new_args) const +{ + check_new_args_count(this, new_args); + return make_shared(new_args.at(0), + new_args.at(1), + m_window_movement_strides, + m_window_dilation_strides, + m_padding_below, + m_padding_above, + m_data_dilation_strides, + m_pad_type); +} + +constexpr NodeTypeInfo op::v0::ConvolutionBackpropData::type_info; +shared_ptr op::v0::Convolution::get_default_value() const +{ + return ngraph::make_constant_from_string("0", get_element_type(), get_shape()); +} + +op::v0::ConvolutionBackpropData::ConvolutionBackpropData( + const Shape& data_batch_shape, + const Output& filters, + const Output& output_delta, + const Strides& window_movement_strides_forward, + const Strides& window_dilation_strides_forward, + const CoordinateDiff& padding_below_forward, + const CoordinateDiff& padding_above_forward, + const Strides& data_dilation_strides_forward) + : Op({filters, output_delta}) + , m_data_batch_shape(data_batch_shape) + , m_window_movement_strides_forward(window_movement_strides_forward) + , m_window_dilation_strides_forward(window_dilation_strides_forward) + , m_padding_below_forward(padding_below_forward) + , m_padding_above_forward(padding_above_forward) + , m_data_dilation_strides_forward(data_dilation_strides_forward) +{ + constructor_validate_and_infer_types(); +} + +bool op::v0::ConvolutionBackpropData::visit_attributes(AttributeVisitor& visitor) +{ + visitor.on_attribute("data_batch_shape", m_data_batch_shape); + visitor.on_attribute("window_movement_strides_forward", m_window_movement_strides_forward); + visitor.on_attribute("window_dilation_strides_forward", m_window_dilation_strides_forward); + visitor.on_attribute("padding_below_forward", m_padding_below_forward); + visitor.on_attribute("padding_above_forward", m_padding_above_forward); + visitor.on_attribute("data_dilation_strides_forward", m_data_dilation_strides_forward); + return true; +} + +void op::v0::ConvolutionBackpropData::validate_and_infer_types() +{ + // Backprop to data is itself convolution, with inputs/outputs/attributes transmogrified as + // follows. 
+ // + // Forward Backward + // "N" axis for data batch 0 0 + // "C" axis for data batch 1 1 + // "Co" axis for filters 0 0 + // "Ci" axis for filters 1 1 + // "N" axis for output 0 0 + // "C" axis for output 1 1 + // Data batch x delta + // Data batch shape S_x S_o + // Filters f reverse(f) [on spatial axes] + // Filters shape S_f S_f + // Window movement strides q_x p_x + // Window dilation strides p_f p_f + // Padding below a_x (S_f - 1)p_f - a_x + // Padding above b_x (S_f - 1)p_f + + // + ((a_x + (S_x - 1)p_x + b_x - (S_f - 1)p_f) + // % q_x) + // - b_x + // Data dilation strides p_x q_x + // Output shape S_o S_x + // + // To _validate_, we simply need to check/infer the output shape of the forward convolution, + // then check to make sure that the incoming delta has the same shape as the forward output. + const PartialShape& filters_shape = get_input_partial_shape(0); + element::Type filters_et = get_input_element_type(0); + const PartialShape& delta_shape = get_input_partial_shape(1); + element::Type delta_et = get_input_element_type(1); + + element::Type forward_result_et; + PartialShape forward_result_shape; + + NODE_VALIDATION_CHECK( + this, + element::Type::merge(forward_result_et, delta_et, filters_et), + "Element types for data batch and filters do not match (data batch element type: ", + delta_et, + ", filters element type: ", + filters_et, + ")."); + + forward_result_shape = infer_convolution_forward(this, + m_data_batch_shape, + m_data_dilation_strides_forward, + m_padding_below_forward, + m_padding_above_forward, + filters_shape, + m_window_movement_strides_forward, + m_window_dilation_strides_forward); + + NODE_VALIDATION_CHECK(this, + forward_result_shape.compatible(delta_shape), + "Inferred forward output shape (", + forward_result_shape, + ") does not match shape of ", + "delta (", + delta_shape, + ")."); + + set_output_type(0, forward_result_et, m_data_batch_shape); +} + +shared_ptr + op::v0::ConvolutionBackpropData::clone_with_new_inputs(const OutputVector& new_args) const +{ + check_new_args_count(this, new_args); + return make_shared(m_data_batch_shape, + new_args.at(0), + new_args.at(1), + m_window_movement_strides_forward, + m_window_dilation_strides_forward, + m_padding_below_forward, + m_padding_above_forward, + m_data_dilation_strides_forward); +} + +CoordinateDiff op::v0::ConvolutionBackpropData::compute_backward_delta_out_pad_below() const +{ + auto& in_shape = get_data_batch_shape(); + auto& filter_dilation = get_window_dilation_strides_forward(); + auto& filter_shape = get_input_shape(0); + auto& in_pad_below = get_padding_below_forward(); + size_t spatial_dim_count = static_cast(in_shape.size()) - 2; + + CoordinateDiff backward_delta_out_pad_below; + backward_delta_out_pad_below.resize(spatial_dim_count); + + for (size_t i = 0; i < spatial_dim_count; i++) + { + backward_delta_out_pad_below[i] = + (static_cast(filter_shape[i + 2]) - 1) * filter_dilation[i] - + in_pad_below[i]; + } + return backward_delta_out_pad_below; +} + +CoordinateDiff op::v0::ConvolutionBackpropData::compute_backward_delta_out_pad_above() const +{ + auto& in_shape = get_data_batch_shape(); + auto& filter_dilation = get_window_dilation_strides_forward(); + auto& filter_shape = get_input_shape(0); + auto& in_pad_below = get_padding_below_forward(); + auto& in_pad_above = get_padding_above_forward(); + auto& in_dilation = get_data_dilation_strides_forward(); + auto& stride = get_window_movement_strides_forward(); + size_t spatial_dim_count = static_cast(in_shape.size()) - 2; + + 
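+    // Per the transmogrification table in validate_and_infer_types() above, the
+    // backward "padding above" for spatial axis i is
+    //
+    //   (S_f - 1)p_f + ((a_x + (S_x - 1)p_x + b_x - (S_f - 1)p_f) % q_x) - b_x
+    //
+    // where S_f/p_f are the filter shape/dilation, S_x/p_x the input shape/data
+    // dilation, a_x/b_x the forward padding below/above, and q_x the forward
+    // window movement stride; the loop below evaluates this term by term.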
CoordinateDiff backward_delta_out_pad_above; + backward_delta_out_pad_above.resize(spatial_dim_count); + + for (size_t i = 0; i < spatial_dim_count; i++) + { + backward_delta_out_pad_above[i] = + (static_cast(filter_shape[i + 2]) - 1) * filter_dilation[i] + + ((in_pad_below[i] + ((in_shape[i + 2]) - 1) * in_dilation[i] + in_pad_above[i] - + (static_cast(filter_shape[i + 2]) - 1) * filter_dilation[i]) % + stride[i]) - + in_pad_above[i]; + } + return backward_delta_out_pad_above; +} diff --git a/ngraph/test/runtime/op/convolution.hpp b/ngraph/test/runtime/op/convolution.hpp new file mode 100644 index 0000000..15161b5 --- /dev/null +++ b/ngraph/test/runtime/op/convolution.hpp @@ -0,0 +1,314 @@ +//***************************************************************************** +// Copyright 2017-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** + +#pragma once + +#include "backend_visibility.hpp" +#include "ngraph/coordinate_diff.hpp" +#include "ngraph/op/op.hpp" +#include "ngraph/op/util/attr_types.hpp" + +namespace ngraph +{ + namespace op + { + namespace v0 + { + /// \brief Batched convolution operation, with optional window dilation and stride. + /// + class BACKEND_API Convolution : public Op + { + public: + static constexpr NodeTypeInfo type_info{"Convolution", 0}; + const NodeTypeInfo& get_type_info() const override { return type_info; } + /// \brief Constructs a batched convolution operation. + Convolution() = default; + /// \brief Constructs a batched convolution operation. + /// + /// \param data_batch The node producing the input data batch tensor.
+                ///        `[N, C_IN, D1, ... Df]`
+                /// \param filters The node producing the filters tensor.
+                ///        `[C_OUT, C_IN, F1, ... Ff]`
+                /// \param window_movement_strides The window movement strides.
+                ///        `[f]`
+                /// \param window_dilation_strides The window dilation strides.
+                ///        `[f]`
+                /// \param padding_below The padding-below sizes.
+                ///        `[f]`
+                /// \param padding_above The padding-above sizes.
+                ///        `[f]`
+                /// \param data_dilation_strides The data dilation strides.
+                ///        `[f]`
+                /// \param pad_type The pad type for automatically computing padding sizes.
+                ///
+                /// Output `[N, C_OUT, R1, ... Rf]`
+                ///
+                Convolution(const Output<Node>& data_batch,
+                            const Output<Node>& filters,
+                            const Strides& window_movement_strides,
+                            const Strides& window_dilation_strides,
+                            const CoordinateDiff& padding_below,
+                            const CoordinateDiff& padding_above,
+                            const Strides& data_dilation_strides,
+                            const PadType& pad_type = PadType::EXPLICIT);
+
+                /// \brief Constructs a batched convolution operation with no data dilation
+                ///        (i.e., all data dilation strides are 1).
+                ///
+                /// \param data_batch The node producing the input data batch tensor.
+                ///        `[N, C_IN, D1, ... Df]`
+                /// \param filters The node producing the filters tensor.
+                ///        `[C_OUT, C_IN, F1, ... Ff]`
+                /// \param window_movement_strides The window movement strides.
+                ///        `[f]`
+                /// \param window_dilation_strides The window dilation strides.
+                ///        `[f]`
+                /// \param padding_below The padding-below sizes.
+                ///        `[f]`
+                /// \param padding_above The padding-above sizes.
+                ///        `[f]`
+                ///
+                /// Output `[N, C_OUT, R1, ... Rf]`
+                ///
+                Convolution(const Output<Node>& data_batch,
+                            const Output<Node>& filters,
+                            const Strides& window_movement_strides,
+                            const Strides& window_dilation_strides,
+                            const CoordinateDiff& padding_below,
+                            const CoordinateDiff& padding_above);
+
+                /// \brief Constructs a batched convolution operation with no padding or data
+                ///        dilation (i.e., padding above and below are 0 everywhere, and all
+                ///        data dilation strides are 1).
+                ///
+                /// \param data_batch The node producing the input data batch tensor.
+                ///        `[N, C_IN, D1, ... Df]`
+                /// \param filters The node producing the filters tensor.
+                ///        `[C_OUT, C_IN, F1, ... Ff]`
+                /// \param window_movement_strides The window movement strides.
+                ///        `[f]`
+                /// \param window_dilation_strides The window dilation strides.
+                ///        `[f]`
+                ///
+                /// Output `[N, C_OUT, R1, ... Rf]`
+                ///
+                Convolution(const Output<Node>& data_batch,
+                            const Output<Node>& filters,
+                            const Strides& window_movement_strides,
+                            const Strides& window_dilation_strides);
+
+                /// \brief Constructs a batched convolution operation with no window dilation,
+                ///        padding, or data dilation (i.e., padding above and below are 0
+                ///        everywhere, and all window/data dilation strides are 1).
+                ///
+                /// \param data_batch The node producing the input data batch tensor.
+                ///        `[N, C_IN, D1, ... Df]`
+                /// \param filters The node producing the filters tensor.
+                ///        `[C_OUT, C_IN, F1, ... Ff]`
+                /// \param window_movement_strides The window movement strides.
+                ///        `[f]`
+                ///
+                /// Output `[N, C_OUT, R1, ... Rf]`
+                ///
+                Convolution(const Output<Node>& data_batch,
+                            const Output<Node>& filters,
+                            const Strides& window_movement_strides);
+
+                /// \brief Constructs a batched convolution operation with no window dilation or
+                ///        movement stride (i.e., padding above and below are 0 everywhere, and
+                ///        all window/data dilation strides and window movement strides are 1).
+                ///
+                /// \param data_batch The node producing the input data batch tensor.
+                ///        `[N, C_IN, D1, ... Df]`
+                /// \param filters The node producing the filters tensor.
+                ///        `[C_OUT, C_IN, F1, ... Ff]`
+                ///
+                /// Output `[N, C_OUT, R1, ... Rf]`
+                ///
+                Convolution(const Output<Node>& data_batch, const Output<Node>& filters);
+
+                void validate_and_infer_types() override;
+                bool visit_attributes(AttributeVisitor& visitor) override;
+
+                virtual std::shared_ptr<Node>
+                    clone_with_new_inputs(const OutputVector& new_args) const override;
+
+                /// \return The window movement strides.
+                const Strides& get_window_movement_strides() const
+                {
+                    return m_window_movement_strides;
+                }
+                void set_window_movement_strides(const Strides& window_movement_strides)
+                {
+                    m_window_movement_strides = window_movement_strides;
+                }
+                /// \return The window dilation strides.
+                const Strides& get_window_dilation_strides() const
+                {
+                    return m_window_dilation_strides;
+                }
+                void set_window_dilation_strides(const Strides& window_dilation_strides)
+                {
+                    m_window_dilation_strides = window_dilation_strides;
+                }
+                /// \return The padding-below sizes (possibly negative).
+                const CoordinateDiff& get_padding_below() const { return m_padding_below; }
+                void set_padding_below(const CoordinateDiff& padding_below)
+                {
+                    m_padding_below = padding_below;
+                }
+                /// \return The padding-above sizes (possibly negative).
+                const CoordinateDiff& get_padding_above() const { return m_padding_above; }
+                void set_padding_above(const CoordinateDiff& padding_above)
+                {
+                    m_padding_above = padding_above;
+                }
+                /// \return The input data dilation strides.
+                const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; }
+                void set_data_dilation_strides(const Strides& data_dilation_strides)
+                {
+                    m_data_dilation_strides = data_dilation_strides;
+                }
+                /// \return The pad type for convolution.
+                const PadType& get_pad_type() const { return m_pad_type; }
+                void set_pad_type(const PadType& pad_type) { m_pad_type = pad_type; }
+                /// \return The default value for Convolution.
+                virtual std::shared_ptr<Node> get_default_value() const override;
+
+            protected:
+                Strides m_window_movement_strides;
+                Strides m_window_dilation_strides;
+                CoordinateDiff m_padding_below;
+                CoordinateDiff m_padding_above;
+                Strides m_data_dilation_strides;
+                PadType m_pad_type;
+            };
+
+            /// \brief Data batch backprop for batched convolution operation.
+            class BACKEND_API ConvolutionBackpropData : public Op
+            {
+            public:
+                static constexpr NodeTypeInfo type_info{"ConvolutionBackpropData", 0};
+                const NodeTypeInfo& get_type_info() const override { return type_info; }
+                /// \brief Constructs a batched-convolution data batch-backprop operation.
+                ConvolutionBackpropData() = default;
+                ///
+                /// \brief Constructs a batched-convolution data batch-backprop operation.
+                ///
+                /// \param data_batch_shape The shape of the data batch from forward-prop.
+                /// \param filters The node producing the filters from forward-prop.
+                /// \param data The node producing output delta.
+                /// \param window_movement_strides_forward The window movement strides from
+                ///        forward-prop.
+                /// \param window_dilation_strides_forward The window dilation strides from
+                ///        forward-prop.
+                /// \param padding_below_forward The padding-below sizes from forward-prop.
+                /// \param padding_above_forward The padding-above sizes from forward-prop.
+                /// \param data_dilation_strides_forward The data dilation strides from
+                ///        forward-prop.
+ /// + ConvolutionBackpropData(const Shape& data_batch_shape, + const Output& filters, + const Output& data, + const Strides& window_movement_strides_forward, + const Strides& window_dilation_strides_forward, + const CoordinateDiff& padding_below_forward, + const CoordinateDiff& padding_above_forward, + const Strides& data_dilation_strides_forward); + + void validate_and_infer_types() override; + bool visit_attributes(AttributeVisitor& visitor) override; + + virtual std::shared_ptr + clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return The data batch shape. + const Shape& get_data_batch_shape() const { return m_data_batch_shape; } + void set_data_batch_shape(const Shape& data_batch_shape) + { + m_data_batch_shape = data_batch_shape; + } + /// \return The window movement strides from the forward prop. + const Strides& get_window_movement_strides_forward() const + { + return m_window_movement_strides_forward; + } + void set_window_movement_strides_forward( + const Strides& window_movement_strides_forward) + { + m_window_movement_strides_forward = window_movement_strides_forward; + } + /// \return The window dilation strides from the forward prop. + const Strides& get_window_dilation_strides_forward() const + { + return m_window_dilation_strides_forward; + } + void set_window_dilation_strides_forward( + const Strides& window_dilation_strides_forward) + { + m_window_dilation_strides_forward = window_dilation_strides_forward; + } + /// \return The padding-below sizes (possibly negative) from the forward prop. + const CoordinateDiff& get_padding_below_forward() const + { + return m_padding_below_forward; + } + void set_padding_below_forward(const CoordinateDiff& padding_below_forward) + { + m_padding_below_forward = padding_below_forward; + } + /// \return The padding-above sizes (possibly negative) from the forward prop. + const CoordinateDiff& get_padding_above_forward() const + { + return m_padding_above_forward; + } + void set_padding_above_forward(const CoordinateDiff& padding_above_forward) + { + m_padding_above_forward = padding_above_forward; + } + /// \return The input data dilation strides from the forward prop. + const Strides& get_data_dilation_strides_forward() const + { + return m_data_dilation_strides_forward; + } + void set_data_dilation_strides_forward(const Strides& data_dilation_strides_forward) + { + m_data_dilation_strides_forward = data_dilation_strides_forward; + } + + // Compute the pad_above values to be used if in a convolution + CoordinateDiff compute_backward_delta_out_pad_above() const; + CoordinateDiff compute_backward_delta_out_pad_below() const; + + protected: + Shape m_data_batch_shape; + Strides m_window_movement_strides_forward; + Strides m_window_dilation_strides_forward; + CoordinateDiff m_padding_below_forward; + CoordinateDiff m_padding_above_forward; + Strides m_data_dilation_strides_forward; + }; + } // namespace v0 + } // namespace op +} // namespace ngraph diff --git a/ngraph/test/runtime/op/group_conv.cpp b/ngraph/test/runtime/op/group_conv.cpp new file mode 100644 index 0000000..cd39ce5 --- /dev/null +++ b/ngraph/test/runtime/op/group_conv.cpp @@ -0,0 +1,333 @@ +//***************************************************************************** +// Copyright 2017-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** + +#include + +#include "convolution.hpp" +#include "group_conv.hpp" +#include "ngraph/attribute_visitor.hpp" +#include "ngraph/builder/reshape.hpp" +#include "ngraph/builder/split.hpp" +#include "ngraph/op/concat.hpp" +#include "ngraph/op/convolution.hpp" +#include "ngraph/op/reshape.hpp" +#include "ngraph/op/slice.hpp" +#include "ngraph/validation_util.hpp" + +using namespace std; +using namespace ngraph; + +//------------------------------------------------------------------------------ +// v0::GroupConvolution +//------------------------------------------------------------------------------ + +constexpr NodeTypeInfo op::v0::GroupConvolution::type_info; + +op::v0::GroupConvolution::GroupConvolution(const Output& data_batch, + const Output& filters, + const Strides& window_movement_strides, + const Strides& window_dilation_strides, + const CoordinateDiff& padding_below, + const CoordinateDiff& padding_above, + const Strides& data_dilation_strides, + const size_t groups, + const PadType& pad_type) + : FusedOp({data_batch, filters}) + , m_window_movement_strides(window_movement_strides) + , m_window_dilation_strides(window_dilation_strides) + , m_padding_below(padding_below) + , m_padding_above(padding_above) + , m_data_dilation_strides(data_dilation_strides) + , m_groups(groups) + , m_pad_type(pad_type) + , m_groups_in_filters(false) +{ + constructor_validate_and_infer_types(); +} + +op::v0::GroupConvolution::GroupConvolution(const Output& data_batch, + const Output& filters, + const Strides& window_movement_strides, + const Strides& window_dilation_strides, + const CoordinateDiff& padding_below, + const CoordinateDiff& padding_above, + const Strides& data_dilation_strides, + const PadType& pad_type) + : FusedOp({data_batch, filters}) + , m_window_movement_strides(window_movement_strides) + , m_window_dilation_strides(window_dilation_strides) + , m_padding_below(padding_below) + , m_padding_above(padding_above) + , m_data_dilation_strides(data_dilation_strides) + , m_groups(0) + , m_pad_type(pad_type) + , m_groups_in_filters(true) +{ + constructor_validate_and_infer_types(); +} + +void op::v0::GroupConvolution::pre_validate_and_infer_types() +{ + auto data_shape = get_input_partial_shape(0); + auto filters_shape = get_input_partial_shape(1); + + if (data_shape.is_static() && filters_shape.is_static()) + { + // Update groups + if (m_groups_in_filters) + { + m_groups = get_input_partial_shape(1)[0].get_length(); + } + + // Data channels + NODE_VALIDATION_CHECK(this, + data_shape.to_shape()[1] % get_groups() == 0, + "Data channels not a multiple of group size"); + // Output channels + NODE_VALIDATION_CHECK(this, + filters_shape.to_shape()[0] % get_groups() == 0, + "# Filters not a multiple of group size"); + + // Input Filters + NODE_VALIDATION_CHECK(this, + (filters_shape.to_shape()[m_groups_in_filters ? 
2 : 1] * + get_groups()) == data_shape.to_shape()[1], + "Incorrect number of channels per filter"); + } + else + { + set_output_type(0, get_input_element_type(0), PartialShape::dynamic()); + } +} + +void op::v0::GroupConvolution::post_validate_and_infer_types() +{ + auto data_shape = get_input_partial_shape(0); + auto filters_shape = get_input_partial_shape(1); + if (data_shape.is_static() && filters_shape.is_static()) + { + if (m_pad_type == PadType::SAME_UPPER || m_pad_type == PadType::SAME_LOWER) + { + m_padding_below.clear(); + m_padding_above.clear(); + auto filter_shape = filters_shape.to_shape(); + filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2); // Remove {O,I} + infer_auto_padding(data_shape.to_shape(), + filter_shape, + m_window_movement_strides, + m_window_dilation_strides, + m_pad_type, + m_padding_above, + m_padding_below); + } + } +} + +Shape op::v0::GroupConvolution::get_weights_dimensions() const +{ + auto data_shape = get_input_shape(0); + auto weights_shape = get_input_shape(1); + // check if weights already includes groups + if (m_groups_in_filters) + { + return weights_shape; + } + // reshape weights into 5d tensors that includes groups + const size_t OC = 0; + const size_t OC_IN_OUTPUT = 1; + const size_t IC = 1; + Shape weights_shape_groups{weights_shape}; + // adjust output and channel given a number of groups + + weights_shape_groups.at(OC) = get_shape().at(OC_IN_OUTPUT) / get_groups(); + weights_shape_groups.at(IC) = data_shape.at(IC) / get_groups(); + // push_front the number of groups + weights_shape_groups.insert(weights_shape_groups.begin(), get_groups()); + return weights_shape_groups; +} + +shared_ptr op::v0::GroupConvolution::clone_with_new_inputs(const OutputVector& new_args) const +{ + check_new_args_count(this, new_args); + + if (m_groups_in_filters) + { + return make_shared(new_args.at(0), + new_args.at(1), + get_window_movement_strides(), + get_window_dilation_strides(), + get_padding_below(), + get_padding_above(), + get_data_dilation_strides(), + get_pad_type()); + } + else + { + return make_shared(new_args.at(0), + new_args.at(1), + get_window_movement_strides(), + get_window_dilation_strides(), + get_padding_below(), + get_padding_above(), + get_data_dilation_strides(), + get_groups(), + get_pad_type()); + } +} + +OutputVector op::v0::GroupConvolution::decompose_op() const +{ + auto data = input_value(0); + auto filters = input_value(1); + auto filters_shape = get_input_shape(1); + // Split one convolution op to N ops where N is the number of groups + // and concat results after computation. 
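+    // For example (shapes as in the group_conv backend test in this patch): with
+    // data [1, 4, 2, 2], filters [2, 2, 1, 1] and groups = 2, the data is split
+    // along the channel axis into two [1, 2, 2, 2] slices and the filters along
+    // axis 0 into two [1, 2, 1, 1] slices; each pair is convolved into
+    // [1, 1, 2, 2] and the results are concatenated along axis 1 into
+    // [1, 2, 2, 2].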
+ NodeVector convolution_nodes; + + // slice data + auto sliced_data = builder::split(data, get_groups(), 1); + // slice filters + auto sliced_filters = builder::split(filters, get_groups(), 0); + for (std::size_t group{0}; group < get_groups(); ++group) + { + auto sliced_filter = sliced_filters[group]; + if (m_groups_in_filters) + { + // Remove group dimmension after slicing + sliced_filter = make_shared( + sliced_filters[group], + get_default_order(sliced_filters[group].get_shape().size()), + Shape(std::next(std::begin(filters_shape), 1), std::end(filters_shape))); + } + convolution_nodes.push_back( + std::make_shared(sliced_data[group], + sliced_filter, + m_window_movement_strides, + m_window_dilation_strides, + m_padding_below, + m_padding_above, + m_data_dilation_strides, + m_pad_type)); + } + std::size_t concatenation_axis = 1; + return {std::make_shared(convolution_nodes, concatenation_axis)}; +} + +//------------------------------------------------------------------------------ +// v0::GroupConvolutionBackpropData +//------------------------------------------------------------------------------ + +constexpr NodeTypeInfo op::v0::GroupConvolutionBackpropData::type_info; + +op::v0::GroupConvolutionBackpropData::GroupConvolutionBackpropData( + const Output& data_batch, + const Output& filters, + const Output& output_delta, + const Strides& window_movement_strides, + const Strides& window_dilation_strides, + const CoordinateDiff& padding_below, + const CoordinateDiff& padding_above, + const size_t groups) + : FusedOp({data_batch, filters, output_delta}) + , m_window_movement_strides(window_movement_strides) + , m_window_dilation_strides(window_dilation_strides) + , m_padding_below(padding_below) + , m_padding_above(padding_above) + , m_groups(groups) +{ + constructor_validate_and_infer_types(); +} + +void op::v0::GroupConvolutionBackpropData::pre_validate_and_infer_types() +{ + element::Type data_element_type = get_input_element_type(2); + element::Type filters_elem_type = get_input_element_type(1); + + NODE_VALIDATION_CHECK(this, + data_element_type.is_dynamic() || data_element_type.is_real(), + "Output delta element type must be f16, bf16, f32, f64 or dynamic (got ", + data_element_type, + ")."); + NODE_VALIDATION_CHECK(this, + filters_elem_type.is_dynamic() || filters_elem_type.is_real(), + "Filters element type must be f16, bf16, f32, f64 or dynamic (got ", + filters_elem_type, + ")."); + + PartialShape data_pshape = get_input_partial_shape(0); + PartialShape filters_pshape = get_input_partial_shape(1); + PartialShape delta_pshape = get_input_partial_shape(2); + + if (data_pshape.is_dynamic() || filters_pshape.is_dynamic() || delta_pshape.is_dynamic()) + { + set_output_type(0, data_element_type, PartialShape::dynamic()); + } +} + +shared_ptr + op::v0::GroupConvolutionBackpropData::clone_with_new_inputs(const OutputVector& new_args) const +{ + if (new_args.size() != 3) + { + throw ngraph_error("Incorrect number of new arguments"); + } + + return make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + get_window_movement_strides(), + get_window_dilation_strides(), + get_padding_below(), + get_padding_above(), + get_groups()); +} + +OutputVector op::v0::GroupConvolutionBackpropData::decompose_op() const +{ + auto filters = input_value(1); + auto output_delta = input_value(2); + auto data_shape = get_input_shape(0); + + NodeVector sliced_inputs; + + auto groups = get_groups(); + // slice data shape + data_shape[1] /= groups; + // slice delta + auto sliced_delta = 
builder::split(output_delta, groups, 1); + // slice filters + auto sliced_filters = builder::split(filters, groups, 0); + + auto num_spatials = get_window_movement_strides().size(); + + for (size_t i = 0; i < groups; ++i) + { + auto sliced_conv = std::make_shared( + data_shape, + sliced_filters[i], + sliced_delta[i], + get_window_movement_strides(), + get_window_dilation_strides(), + get_padding_below(), + get_padding_above(), + Strides(num_spatials, 1)); // default data dilation strides + + sliced_inputs.push_back(sliced_conv); + } + + size_t concatenation_axis = 1; + return {std::make_shared(sliced_inputs, concatenation_axis)}; +} diff --git a/ngraph/test/runtime/op/group_conv.hpp b/ngraph/test/runtime/op/group_conv.hpp new file mode 100644 index 0000000..6226f10 --- /dev/null +++ b/ngraph/test/runtime/op/group_conv.hpp @@ -0,0 +1,138 @@ +//***************************************************************************** +// Copyright 2017-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** + +#pragma once + +#include "backend_visibility.hpp" +#include "ngraph/op/convolution.hpp" +#include "ngraph/op/op.hpp" +#include "ngraph/op/util/attr_types.hpp" +#include "ngraph/op/util/fused_op.hpp" + +namespace ngraph +{ + namespace op + { + namespace v0 + { + /// \brief Group Convolution + class BACKEND_API GroupConvolution : public ngraph::op::util::FusedOp + { + public: + static constexpr NodeTypeInfo type_info{"GroupConvolution", 0}; + const NodeTypeInfo& get_type_info() const override { return type_info; } + GroupConvolution() = default; + GroupConvolution(const Output& data_batch, + const Output& filters, + const Strides& window_movement_strides, + const Strides& window_dilation_strides, + const CoordinateDiff& padding_below, + const CoordinateDiff& padding_above, + const Strides& data_dilation_strides, + const size_t groups, + const PadType& pad_type = PadType::EXPLICIT); + + // constructor which accept groups included in filters shape. 
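+                // With groups folded into the filters, the expected filters layout
+                // is 5D: [groups, C_OUT / groups, C_IN / groups, F1, ... Ff] (see
+                // get_weights_dimensions() and the shape checks in
+                // pre_validate_and_infer_types()); m_groups is then read from
+                // filters_shape[0] rather than passed explicitly. For example,
+                // mirroring the group_conv_groups_included_in_shape test:
+                //
+                //   auto gc = make_shared<op::v0::GroupConvolution>(
+                //       data,       // [1, 4, 2, 2]
+                //       filters,    // [2, 1, 2, 1, 1] -> groups deduced as 2
+                //       Strides{1, 1}, Strides{1, 1},
+                //       CoordinateDiff{0, 0}, CoordinateDiff{0, 0}, Strides{1, 1});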
+ GroupConvolution(const Output& data_batch, + const Output& filters, + const Strides& window_movement_strides, + const Strides& window_dilation_strides, + const CoordinateDiff& padding_below, + const CoordinateDiff& padding_above, + const Strides& data_dilation_strides, + const PadType& pad_type = PadType::EXPLICIT); + Shape get_weights_dimensions() const; + const Strides& get_window_movement_strides() const + { + return m_window_movement_strides; + } + const Strides& get_window_dilation_strides() const + { + return m_window_dilation_strides; + } + const CoordinateDiff& get_padding_below() const { return m_padding_below; } + const CoordinateDiff& get_padding_above() const { return m_padding_above; } + const Strides& get_data_dilation_strides() const { return m_data_dilation_strides; } + Output get_filters() { return input_value(1); } + Output get_data_batch() { return input_value(0); } + size_t get_groups() const { return m_groups; }; + const PadType& get_pad_type() const { return m_pad_type; } + virtual std::shared_ptr + clone_with_new_inputs(const OutputVector& new_args) const override; + + virtual OutputVector decompose_op() const override; + + virtual void pre_validate_and_infer_types() override; + virtual void post_validate_and_infer_types() override; + + bool has_groups_in_filters() const { return m_groups_in_filters; } + protected: + Strides m_window_movement_strides; + Strides m_window_dilation_strides; + CoordinateDiff m_padding_below; + CoordinateDiff m_padding_above; + Strides m_data_dilation_strides; + size_t m_groups; + PadType m_pad_type{PadType::NOTSET}; + + private: + bool m_groups_in_filters; + }; + + /// \brief Group Convolution data batch backprop + class BACKEND_API GroupConvolutionBackpropData : public ngraph::op::util::FusedOp + { + public: + static constexpr NodeTypeInfo type_info{"GroupConvolutionBackpropData", 0}; + const NodeTypeInfo& get_type_info() const override { return type_info; } + GroupConvolutionBackpropData() = default; + GroupConvolutionBackpropData(const Output& data_batch, + const Output& filters, + const Output& output_delta, + const Strides& window_movement_strides, + const Strides& window_dilation_strides, + const CoordinateDiff& padding_below, + const CoordinateDiff& padding_above, + const size_t groups); + + const Strides& get_window_movement_strides() const + { + return m_window_movement_strides; + } + const Strides& get_window_dilation_strides() const + { + return m_window_dilation_strides; + } + const CoordinateDiff& get_padding_below() const { return m_padding_below; } + const CoordinateDiff& get_padding_above() const { return m_padding_above; } + size_t get_groups() const { return m_groups; }; + virtual std::shared_ptr + clone_with_new_inputs(const OutputVector& new_args) const override; + + virtual OutputVector decompose_op() const override; + + virtual void pre_validate_and_infer_types() override; + + protected: + Strides m_window_movement_strides; + Strides m_window_dilation_strides; + CoordinateDiff m_padding_below; + CoordinateDiff m_padding_above; + size_t m_groups; + }; + } + } // namespace op +} // namespace ngraph diff --git a/ngraph/test/runtime/opset0.hpp b/ngraph/test/runtime/opset0.hpp index e46665b..64057b1 100644 --- a/ngraph/test/runtime/opset0.hpp +++ b/ngraph/test/runtime/opset0.hpp @@ -18,6 +18,8 @@ #include "ngraph/ops.hpp" #include "op/avg_pool.hpp" +#include "op/convolution.hpp" +#include "op/group_conv.hpp" namespace ngraph { diff --git a/ngraph/test/runtime/opset0_downgrade.cpp 
b/ngraph/test/runtime/opset0_downgrade.cpp index 18fc778..d7e70ec 100644 --- a/ngraph/test/runtime/opset0_downgrade.cpp +++ b/ngraph/test/runtime/opset0_downgrade.cpp @@ -31,6 +31,8 @@ #include "ngraph/type.hpp" #include "ngraph/validation_util.hpp" #include "op/avg_pool.hpp" +#include "op/convolution.hpp" +#include "op/group_conv.hpp" #include "opset0_downgrade.hpp" #include "pass/implicit_broadcast_elimination.hpp" @@ -309,14 +311,14 @@ namespace const auto filters_arg = node->input_value(1); const auto strides = node->get_strides(); const size_t num_spatial_dims = strides.size(); - auto replacement_node = make_shared(data_arg, - filters_arg, - node->get_strides(), - node->get_dilations(), - node->get_pads_begin(), - node->get_pads_end(), - Strides(num_spatial_dims, 1), - node->get_auto_pad()); + auto replacement_node = make_shared(data_arg, + filters_arg, + node->get_strides(), + node->get_dilations(), + node->get_pads_begin(), + node->get_pads_end(), + Strides(num_spatial_dims, 1), + node->get_auto_pad()); replace_node(node, replacement_node); return replacement_node; } diff --git a/ngraph/test/runtime/opset0_tbl.hpp b/ngraph/test/runtime/opset0_tbl.hpp index 2575b7b..81fbe21 100644 --- a/ngraph/test/runtime/opset0_tbl.hpp +++ b/ngraph/test/runtime/opset0_tbl.hpp @@ -65,8 +65,8 @@ NGRAPH_OP(Clamp, ngraph::op) NGRAPH_OP(Concat, ngraph::op) NGRAPH_OP(Constant, ngraph::op) NGRAPH_OP(Convert, ngraph::op) -NGRAPH_OP(Convolution, ngraph::op) -NGRAPH_OP(ConvolutionBackpropData, ngraph::op) +NGRAPH_OP(Convolution, ngraph::op::v0) +NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v0) NGRAPH_OP(Cos, ngraph::op) NGRAPH_OP(Cosh, ngraph::op) NGRAPH_OP(CumSum, ngraph::op::v0) @@ -88,8 +88,8 @@ NGRAPH_OP(Gelu, ngraph::op) NGRAPH_OP(GetOutputElement, ngraph::op) NGRAPH_OP(Greater, ngraph::op) NGRAPH_OP(GreaterEq, ngraph::op) -NGRAPH_OP(GroupConvolution, ngraph::op) -NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op) +NGRAPH_OP(GroupConvolution, ngraph::op::v0) +NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op::v0) NGRAPH_OP(HardSigmoid, ngraph::op) NGRAPH_OP(Interpolate, ngraph::op) NGRAPH_OP(Less, ngraph::op) diff --git a/ngraph/test/runtime/opset1_upgrade.cpp b/ngraph/test/runtime/opset1_upgrade.cpp index 60d2175..b0d9872 100644 --- a/ngraph/test/runtime/opset1_upgrade.cpp +++ b/ngraph/test/runtime/opset1_upgrade.cpp @@ -27,6 +27,8 @@ #include "ngraph/ops.hpp" #include "ngraph/provenance.hpp" #include "op/avg_pool.hpp" +#include "op/convolution.hpp" +#include "op/group_conv.hpp" using namespace std; using namespace ngraph; @@ -59,7 +61,7 @@ namespace } shared_ptr op_cast(shared_ptr node) { return nullptr; } - shared_ptr op_cast(shared_ptr node) + shared_ptr op_cast(shared_ptr node) { auto strides = node->get_window_movement_strides(); auto dilations = node->get_window_dilation_strides(); @@ -88,7 +90,7 @@ namespace return replacement_node; } - shared_ptr op_cast(shared_ptr node) + shared_ptr op_cast(shared_ptr node) { auto data_batch_shape = node->get_data_batch_shape(); auto strides = node->get_window_movement_strides_forward(); diff --git a/ngraph/test/type_prop/convolution.cpp b/ngraph/test/type_prop/convolution.cpp index 29fd93e..a0d5b56 100644 --- a/ngraph/test/type_prop/convolution.cpp +++ b/ngraph/test/type_prop/convolution.cpp @@ -14,6 +14,7 @@ // limitations under the License. 
 //*****************************************************************************
 
+#include "op/convolution.hpp"
 #include "gtest/gtest.h"
 #include "ngraph/ngraph.hpp"
 #include "util/type_prop.hpp"
@@ -26,7 +27,7 @@ TEST(type_prop, conv_1d_deduce)
     // Deduce type
     auto param0 = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100});
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10});
-    auto conv = make_shared<op::Convolution>(param0, param1);
+    auto conv = make_shared<op::v0::Convolution>(param0, param1);
 
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 91}));
@@ -44,14 +45,14 @@ TEST(type_prop, conv_1d_back_data_batch_deduce)
     Shape data_batch_shape{64, 3, 100};
     auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10});  // filters
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 91}); // output delta
-    auto conv = make_shared<op::ConvolutionBackpropData>(data_batch_shape,
-                                                         param0,
-                                                         param1,
-                                                         Strides{1},
-                                                         Strides{1},
-                                                         CoordinateDiff{0},
-                                                         CoordinateDiff{0},
-                                                         Strides{1});
+    auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
+                                                             param0,
+                                                             param1,
+                                                             Strides{1},
+                                                             Strides{1},
+                                                             CoordinateDiff{0},
+                                                             CoordinateDiff{0},
+                                                             Strides{1});
 
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), data_batch_shape);
@@ -72,7 +73,7 @@ TEST(type_prop, conv_1d_deduce_padded)
     auto dilation_strides = Strides{1};
     auto padding_below = CoordinateDiff{2};
     auto padding_above = CoordinateDiff{3};
-    auto conv = make_shared<op::Convolution>(
+    auto conv = make_shared<op::v0::Convolution>(
         param0, param1, move_strides, dilation_strides, padding_below, padding_above);
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 96}));
@@ -95,14 +96,14 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_padded)
     auto dilation_strides = Strides{1};
     auto padding_below = CoordinateDiff{2};
     auto padding_above = CoordinateDiff{3};
-    auto conv = make_shared<op::ConvolutionBackpropData>(data_batch_shape,
-                                                         param0,
-                                                         param1,
-                                                         move_strides,
-                                                         dilation_strides,
-                                                         padding_below,
-                                                         padding_above,
-                                                         Strides{1});
+    auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
+                                                             param0,
+                                                             param1,
+                                                             move_strides,
+                                                             dilation_strides,
+                                                             padding_below,
+                                                             padding_above,
+                                                             Strides{1});
 
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), data_batch_shape);
@@ -120,7 +121,7 @@ TEST(type_prop, conv_1d_deduce_strided)
     auto param0 = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100});
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10});
     auto move_strides = Strides{2};
-    auto conv = make_shared<op::Convolution>(param0, param1, move_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0, param1, move_strides);
 
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 46}));
@@ -139,14 +140,14 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided)
     auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10}); // filters
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 46}); // output delta
     auto move_strides = Strides{2};
-    auto conv = make_shared<op::ConvolutionBackpropData>(data_batch_shape,
-                                                         param0,
-                                                         param1,
-                                                         move_strides,
-                                                         Strides{1},
-                                                         CoordinateDiff{0},
-                                                         CoordinateDiff{0},
-                                                         Strides{1});
+    auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
+                                                             param0,
+                                                             param1,
+                                                             move_strides,
+                                                             Strides{1},
+                                                             CoordinateDiff{0},
+                                                             CoordinateDiff{0},
+                                                             Strides{1});
 
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), data_batch_shape);
@@ -167,7 +168,7 @@ TEST(type_prop, conv_1d_deduce_strided_padded)
     auto dilation_strides = Strides{1};
     auto padding_below = CoordinateDiff{2};
     auto padding_above = CoordinateDiff{3};
-    auto conv = make_shared<op::Convolution>(
+    auto conv = make_shared<op::v0::Convolution>(
         param0, param1, move_strides, dilation_strides, padding_below, padding_above);
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 48}));
@@ -190,14 +191,14 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_padded)
     auto dilation_strides = Strides{1};
     auto padding_below = CoordinateDiff{2};
     auto padding_above = CoordinateDiff{3};
-    auto conv = make_shared<op::ConvolutionBackpropData>(data_batch_shape,
-                                                         param0,
-                                                         param1,
-                                                         move_strides,
-                                                         dilation_strides,
-                                                         padding_below,
-                                                         padding_above,
-                                                         Strides{1});
+    auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
+                                                             param0,
+                                                             param1,
+                                                             move_strides,
+                                                             dilation_strides,
+                                                             padding_below,
+                                                             padding_above,
+                                                             Strides{1});
 
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), data_batch_shape);
@@ -215,7 +216,7 @@ TEST(type_prop, conv_1d_deduce_strided_small_uneven)
     auto param0 = make_shared<op::Parameter>(element::f32, Shape{64, 3, 5});
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 2});
     auto move_strides = Strides{2};
-    auto conv = make_shared<op::Convolution>(param0, param1, move_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0, param1, move_strides);
 
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 2}));
@@ -234,14 +235,14 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_small_uneven)
     auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 2}); // filters
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 2}); // output delta
     auto move_strides = Strides{2};
-    auto conv = make_shared<op::ConvolutionBackpropData>(data_batch_shape,
-                                                         param0,
-                                                         param1,
-                                                         move_strides,
-                                                         Strides{1},
-                                                         CoordinateDiff{0},
-                                                         CoordinateDiff{0},
-                                                         Strides{1});
+    auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
+                                                             param0,
+                                                             param1,
+                                                             move_strides,
+                                                             Strides{1},
+                                                             CoordinateDiff{0},
+                                                             CoordinateDiff{0},
+                                                             Strides{1});
 
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), data_batch_shape);
@@ -259,7 +260,7 @@ TEST(type_prop, conv_1d_deduce_strided_small_even)
     auto param0 = make_shared<op::Parameter>(element::f32, Shape{64, 3, 6});
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 2});
     auto move_strides = Strides{2};
-    auto conv = make_shared<op::Convolution>(param0, param1, move_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0, param1, move_strides);
 
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 3}));
@@ -278,14 +279,14 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_small_even)
     auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 2}); // filters
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{64, 128, 3}); // output delta
     auto move_strides = Strides{2};
-    auto conv = make_shared<op::ConvolutionBackpropData>(data_batch_shape,
-                                                         param0,
-                                                         param1,
-                                                         move_strides,
-                                                         Strides{1},
-                                                         CoordinateDiff{0},
-                                                         CoordinateDiff{0},
-                                                         Strides{1});
+    auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
+                                                             param0,
+                                                             param1,
+                                                             move_strides,
+                                                             Strides{1},
+                                                             CoordinateDiff{0},
+                                                             CoordinateDiff{0},
+                                                             Strides{1});
 
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), data_batch_shape);
@@ -304,7 +305,7 @@ TEST(type_prop, conv_1d_deduce_window_dilated)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10});
     auto move_strides = Strides{1};
     auto dilate_strides = Strides{2};
-    auto conv = make_shared<op::Convolution>(param0, param1, move_strides, dilate_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0, param1, move_strides, dilate_strides);
 
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 82}));
@@ -324,14 +325,14 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated)
     Shape data_batch_shape{64, 3, 100};
     auto param0 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10}); // filters
     auto param1 = make_shared<op::Parameter>(element::f32,
                                              Shape{64, 128, 82}); // output delta
     auto move_strides = Strides{1};
     auto dilate_strides = Strides{2};
-    auto conv = make_shared<op::ConvolutionBackpropData>(data_batch_shape,
-                                                         param0,
-                                                         param1,
-                                                         move_strides,
-                                                         dilate_strides,
-                                                         CoordinateDiff{0},
-                                                         CoordinateDiff{0},
-                                                         Strides{1});
+    auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
+                                                             param0,
+                                                             param1,
+                                                             move_strides,
+                                                             dilate_strides,
+                                                             CoordinateDiff{0},
+                                                             CoordinateDiff{0},
+                                                             Strides{1});
 
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), data_batch_shape);
@@ -352,7 +353,7 @@ TEST(type_prop, conv_1d_deduce_window_dilated_padded)
     auto dilate_strides = Strides{2};
     auto padding_below = CoordinateDiff{2};
     auto padding_above = CoordinateDiff{3};
-    auto conv = make_shared<op::Convolution>(
+    auto conv = make_shared<op::v0::Convolution>(
         param0, param1, move_strides, dilate_strides, padding_below, padding_above);
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 87}));
@@ -375,14 +376,14 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated_padded)
     auto dilate_strides = Strides{2};
     auto padding_below = CoordinateDiff{2};
     auto padding_above = CoordinateDiff{3};
-    auto conv = make_shared<op::ConvolutionBackpropData>(data_batch_shape,
-                                                         param0,
-                                                         param1,
-                                                         move_strides,
-                                                         dilate_strides,
-                                                         padding_below,
-                                                         padding_above,
-                                                         Strides{1});
+    auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
+                                                             param0,
+                                                             param1,
+                                                             move_strides,
+                                                             dilate_strides,
+                                                             padding_below,
+                                                             padding_above,
+                                                             Strides{1});
 
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), data_batch_shape);
@@ -404,13 +405,13 @@ TEST(type_prop, conv_1d_deduce_window_dilated_data_dilated_padded)
     auto padding_below = CoordinateDiff{2};
     auto padding_above = CoordinateDiff{3};
     auto data_dilate_strides = Strides{3};
-    auto conv = make_shared<op::Convolution>(param0,
-                                             param1,
-                                             move_strides,
-                                             dilate_strides,
-                                             padding_below,
-                                             padding_above,
-                                             data_dilate_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0,
+                                                 param1,
+                                                 move_strides,
+                                                 dilate_strides,
+                                                 padding_below,
+                                                 padding_above,
+                                                 data_dilate_strides);
 
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 285}));
@@ -433,14 +434,14 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated_data_dilated_padde
     auto padding_below = CoordinateDiff{2};
     auto padding_above = CoordinateDiff{3};
     auto data_dilate_strides = Strides{3};
-    auto conv = make_shared<op::ConvolutionBackpropData>(data_batch_shape,
-                                                         param0,
-                                                         param1,
-                                                         move_strides,
-                                                         dilate_strides,
-                                                         padding_below,
-                                                         padding_above,
-                                                         data_dilate_strides);
+    auto conv = make_shared<op::v0::ConvolutionBackpropData>(data_batch_shape,
+                                                             param0,
+                                                             param1,
+                                                             move_strides,
+                                                             dilate_strides,
+                                                             padding_below,
+                                                             padding_above,
+                                                             data_dilate_strides);
 
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), data_batch_shape);
@@ -457,7 +458,7 @@ TEST(type_prop, conv_2d_deduce)
     // Deduce type
     auto param0 = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100, 150});
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10, 20});
-    auto conv = make_shared<op::Convolution>(param0, param1);
+    auto conv = make_shared<op::v0::Convolution>(param0, param1);
 
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 91, 131}));
@@ -478,7 +479,7 @@ TEST(type_prop, conv_2d_deduce_padded)
     auto dilate_strides = Strides{1, 1};
     auto padding_below = CoordinateDiff{2, 3};
     auto padding_above = CoordinateDiff{3, 4};
-    auto conv = make_shared<op::Convolution>(
+    auto conv = make_shared<op::v0::Convolution>(
         param0, param1, move_strides, dilate_strides, padding_below, padding_above);
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 96, 138}));
@@ -500,7 +501,7 @@ TEST(type_prop, conv_2d_deduce_padded_neg)
     auto dilate_strides = Strides{1, 1};
     auto padding_below = CoordinateDiff{2, -3};
     auto padding_above = CoordinateDiff{3, -4};
-    auto conv = make_shared<op::Convolution>(
+    auto conv = make_shared<op::v0::Convolution>(
         param0, param1, move_strides, dilate_strides, padding_below, padding_above);
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 96, 124}));
@@ -528,14 +529,14 @@ TEST_P(DeduceAutoPadTest, same_lower)
     auto param0 = make_shared<op::Parameter>(element::f32, image_shape);
     auto param1 = make_shared<op::Parameter>(element::f32, filter_shape);
 
-    auto conv = make_shared<op::Convolution>(param0,
-                                             param1,
-                                             std::get<2>(GetParam()),
-                                             std::get<3>(GetParam()),
-                                             CoordinateDiff(),
-                                             CoordinateDiff(),
-                                             Strides(),
-                                             op::PadType::SAME_LOWER);
+    auto conv = make_shared<op::v0::Convolution>(param0,
+                                                 param1,
+                                                 std::get<2>(GetParam()),
+                                                 std::get<3>(GetParam()),
+                                                 CoordinateDiff(),
+                                                 CoordinateDiff(),
+                                                 Strides(),
+                                                 op::PadType::SAME_LOWER);
     EXPECT_EQ(conv->get_padding_above(), std::get<4>(GetParam()));
     EXPECT_EQ(conv->get_padding_below(), std::get<5>(GetParam()));
 }
@@ -591,7 +592,7 @@ TEST(type_prop, conv_2d_deduce_strided)
     auto param0 = make_shared<op::Parameter>(element::f32, Shape{64, 3, 100, 150});
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10, 20});
     auto move_strides = Strides{2, 3};
-    auto conv = make_shared<op::Convolution>(param0, param1, move_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0, param1, move_strides);
 
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 46, 44}));
@@ -610,7 +611,7 @@ TEST(type_prop, conv_2d_deduce_strided_window_dilated)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 10, 20});
     auto move_strides = Strides{2, 3};
     auto dilate_strides = Strides{3, 2};
-    auto conv = make_shared<op::Convolution>(param0, param1, move_strides, dilate_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0, param1, move_strides, dilate_strides);
 
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 37, 38}));
@@ -632,13 +633,13 @@ TEST(type_prop, conv_2d_deduce_strided_window_dilated_data_dilated)
     auto padding_below = CoordinateDiff{0, 0};
     auto padding_above = CoordinateDiff{0, 0};
     auto data_dilate_strides = Strides{2, 3};
-    auto conv = make_shared<op::Convolution>(param0,
-                                             param1,
-                                             move_strides,
-                                             dilate_strides,
-                                             padding_below,
-                                             padding_above,
-                                             data_dilate_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0,
+                                                 param1,
+                                                 move_strides,
+                                                 dilate_strides,
+                                                 padding_below,
+                                                 padding_above,
+                                                 data_dilate_strides);
 
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 86, 137}));
@@ -657,7 +658,7 @@ TEST(type_prop, conv_2d_deduce_strided_window_dilated_small)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 2, 3});
     auto move_strides = Strides{2, 3};
     auto dilate_strides = Strides{3, 2};
-    auto conv = make_shared<op::Convolution>(param0, param1, move_strides, dilate_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0, param1, move_strides, dilate_strides);
 
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 2, 2}));
@@ -676,7 +677,7 @@ TEST(type_prop, conv_3d_deduce_strided_window_dilated_small)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{128, 3, 2, 3, 2});
     auto move_strides = Strides{2, 3, 4};
     auto dilate_strides = Strides{3, 2, 2};
-    auto conv = make_shared<op::Convolution>(param0, param1, move_strides, dilate_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0, param1, move_strides, dilate_strides);
 
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 2, 2, 2}));
@@ -698,13 +699,13 @@ TEST(type_prop, conv_3d_deduce_strided_window_dilated_data_dilated_small)
     auto padding_below = CoordinateDiff{0, 0, 0};
     auto padding_above = CoordinateDiff{0, 0, 0};
     auto data_dilate_strides = Strides{2, 3, 2};
-    auto conv = make_shared<op::Convolution>(param0,
-                                             param1,
-                                             move_strides,
-                                             dilate_strides,
-                                             padding_below,
-                                             padding_above,
-                                             data_dilate_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0,
+                                                 param1,
+                                                 move_strides,
+                                                 dilate_strides,
+                                                 padding_below,
+                                                 padding_above,
+                                                 data_dilate_strides);
 
     EXPECT_EQ(conv->get_element_type(), element::f32);
     EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 5, 6, 5}));
@@ -723,7 +724,7 @@ TEST(type_prop, conv_invalid_element_type_mismatch)
     auto param1 = make_shared<op::Parameter>(element::i32, Shape{3, 3, 2, 2});
     try
     {
-        auto conv = make_shared<op::Convolution>(param0, param1);
+        auto conv = make_shared<op::v0::Convolution>(param0, param1);
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid input with element type mismatch not detected";
@@ -746,7 +747,7 @@ TEST(type_prop, conv_invalid_0d_input)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{});
     try
     {
-        auto conv = make_shared<op::Convolution>(param0, param1);
+        auto conv = make_shared<op::v0::Convolution>(param0, param1);
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid 0D input not detected";
@@ -771,7 +772,7 @@ TEST(type_prop, conv_invalid_1d_input)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{2});
     try
     {
-        auto conv = make_shared<op::Convolution>(param0, param1);
+        auto conv = make_shared<op::v0::Convolution>(param0, param1);
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid 1D input not detected";
@@ -796,7 +797,7 @@ TEST(type_prop, conv_invalid_2d_input)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{2, 6});
     try
    {
-        auto conv = make_shared<op::Convolution>(param0, param1);
+        auto conv = make_shared<op::v0::Convolution>(param0, param1);
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid 2D input not detected";
@@ -821,7 +822,7 @@ TEST(type_prop, conv_invalid_0_batch_size)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{0, 6, 1});
     try
     {
-        auto conv = make_shared<op::Convolution>(param0, param1);
+        auto conv = make_shared<op::v0::Convolution>(param0, param1);
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid input with 0 batch size not detected";
@@ -843,7 +844,7 @@ TEST(type_prop, conv_invalid_0_input_channels)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{5, 0, 1});
     try
     {
-        auto conv = make_shared<op::Convolution>(param0, param1);
+        auto conv = make_shared<op::v0::Convolution>(param0, param1);
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid input with 0 input channels not detected";
@@ -867,7 +868,7 @@ TEST(type_prop, conv_invalid_wrong_number_of_filter_dimensions_too_many)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{5, 2, 3, 3, 3});
     try
     {
-        auto conv = make_shared<op::Convolution>(param0, param1);
+        auto conv = make_shared<op::v0::Convolution>(param0, param1);
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid input with too many filter dimensions not detected";
@@ -889,7 +890,7 @@ TEST(type_prop, conv_invalid_wrong_number_of_filter_dimensions_too_few)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{5, 2, 3});
     try
     {
-        auto conv = make_shared<op::Convolution>(param0, param1);
+        auto conv = make_shared<op::v0::Convolution>(param0, param1);
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid input with too few filter dimensions not detected";
@@ -911,7 +912,7 @@ TEST(type_prop, conv_invalid_0_output_channels)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{0, 2, 3, 3});
     try
     {
-        auto conv = make_shared<op::Convolution>(param0, param1);
+        auto conv = make_shared<op::v0::Convolution>(param0, param1);
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid input with 0 output channels not detected";
@@ -933,7 +934,7 @@ TEST(type_prop, conv_invalid_input_channel_mismatch)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 3, 3, 3});
     try
     {
-        auto conv = make_shared<op::Convolution>(param0, param1);
+        auto conv = make_shared<op::v0::Convolution>(param0, param1);
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid input with channel count mismatch not detected";
@@ -958,7 +959,7 @@ TEST(type_prop, conv_invalid_movement_stride_rank)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
     try
     {
-        auto conv = make_shared<op::Convolution>(param0, param1, Strides{2, 3, 8});
+        auto conv = make_shared<op::v0::Convolution>(param0, param1, Strides{2, 3, 8});
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid input with wrong movement stride rank not detected";
@@ -987,7 +988,8 @@ TEST(type_prop, conv_invalid_window_dilation_stride_rank)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
     try
     {
-        auto conv = make_shared<op::Convolution>(param0, param1, Strides{2, 3}, Strides{2, 3, 8});
+        auto conv =
+            make_shared<op::v0::Convolution>(param0, param1, Strides{2, 3}, Strides{2, 3, 8});
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid input with wrong window dilation stride rank not detected";
@@ -1016,13 +1018,13 @@ TEST(type_prop, conv_invalid_data_dilation_stride_rank)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 Strides{2, 3},
-                                                 Strides{2, 3},
-                                                 CoordinateDiff{0, 0},
-                                                 CoordinateDiff{0, 0},
-                                                 Strides{2, 3, 8});
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     Strides{2, 3},
+                                                     Strides{2, 3},
+                                                     CoordinateDiff{0, 0},
+                                                     CoordinateDiff{0, 0},
+                                                     Strides{2, 3, 8});
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid input with wrong data dilation stride rank not detected";
@@ -1051,12 +1053,12 @@ TEST(type_prop, conv_invalid_padding_below_rank)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 Strides{2, 3},
-                                                 Strides{1, 1},
-                                                 CoordinateDiff{0, 0, 0},
-                                                 CoordinateDiff{0, 0});
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     Strides{2, 3},
+                                                     Strides{1, 1},
+                                                     CoordinateDiff{0, 0, 0},
+                                                     CoordinateDiff{0, 0});
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid input with wrong padding-below rank not detected";
@@ -1085,12 +1087,12 @@ TEST(type_prop, conv_invalid_padding_above_rank)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 Strides{2, 3},
-                                                 Strides{2, 3},
-                                                 CoordinateDiff{0, 0},
-                                                 CoordinateDiff{0, 0, 0});
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     Strides{2, 3},
+                                                     Strides{2, 3},
+                                                     CoordinateDiff{0, 0},
+                                                     CoordinateDiff{0, 0, 0});
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid input with wrong padding-above rank not detected";
@@ -1119,12 +1121,12 @@ TEST(type_prop, conv_invalid_input_spatial_size_negative_after_padding)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 Strides{1, 1},
-                                                 Strides{1, 1},
-                                                 CoordinateDiff{-4, 0},
-                                                 CoordinateDiff{-7, 0});
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     Strides{1, 1},
+                                                     Strides{1, 1},
+                                                     CoordinateDiff{-4, 0},
+                                                     CoordinateDiff{-7, 0});
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid input with negative-length post-padding spatial axis not detected";
@@ -1148,12 +1150,12 @@ TEST(type_prop, conv_invalid_input_spatial_size_zero_after_padding)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 Strides{1, 1},
-                                                 Strides{1, 1},
-                                                 CoordinateDiff{-4, 0},
-                                                 CoordinateDiff{-6, 0});
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     Strides{1, 1},
+                                                     Strides{1, 1},
+                                                     CoordinateDiff{-4, 0},
+                                                     CoordinateDiff{-6, 0});
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid input with zero-length post-padding spatial axis not detected";
@@ -1177,7 +1179,7 @@ TEST(type_prop, conv_invalid_input_spatial_size_0)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
     try
     {
-        auto conv = make_shared<op::Convolution>(param0, param1);
+        auto conv = make_shared<op::v0::Convolution>(param0, param1);
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid input with zero-length spatial axis not detected";
@@ -1201,7 +1203,7 @@ TEST(type_prop, conv_invalid_window_size_0)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 0});
     try
     {
-        auto conv = make_shared<op::Convolution>(param0, param1);
+        auto conv = make_shared<op::v0::Convolution>(param0, param1);
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid input with zero-length window axis not detected";
@@ -1225,7 +1227,7 @@ TEST(type_prop, conv_invalid_window_dilation_stride_0)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
     try
     {
-        auto conv = make_shared<op::Convolution>(param0, param1, Strides{2, 3}, Strides{2, 0});
+        auto conv = make_shared<op::v0::Convolution>(param0, param1, Strides{2, 3}, Strides{2, 0});
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid input with wrong 0-length window dilation stride axis not detected";
@@ -1249,13 +1251,13 @@ TEST(type_prop, conv_invalid_data_dilation_stride_0)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 Strides{2, 3},
-                                                 Strides{2, 3},
-                                                 CoordinateDiff{0, 0},
-                                                 CoordinateDiff{0, 0},
-                                                 Strides{2, 0});
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     Strides{2, 3},
+                                                     Strides{2, 3},
+                                                     CoordinateDiff{0, 0},
+                                                     CoordinateDiff{0, 0},
+                                                     Strides{2, 0});
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid input with wrong 0-length data dilation stride axis not detected";
@@ -1279,7 +1281,7 @@ TEST(type_prop, conv_invalid_dilated_window_too_large)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
     try
     {
-        auto conv = make_shared<op::Convolution>(param0, param1, Strides{1, 1}, Strides{4, 4});
+        auto conv = make_shared<op::v0::Convolution>(param0, param1, Strides{1, 1}, Strides{4, 4});
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid input with oversized dilated window not detected";
@@ -1303,7 +1305,7 @@ TEST(type_prop, conv_invalid_movement_stride_0)
     auto param1 = make_shared<op::Parameter>(element::f32, Shape{6, 2, 3, 3});
     try
     {
-        auto conv = make_shared<op::Convolution>(param0, param1, Strides{0, 1});
+        auto conv = make_shared<op::v0::Convolution>(param0, param1, Strides{0, 1});
 
         // Should have thrown, so fail if it didn't
         FAIL() << "Invalid input with wrong 0-length movement stride axis not detected";
@@ -1333,13 +1335,13 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_ok)
     auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
     auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
 
-    auto conv = make_shared<op::Convolution>(param0,
-                                             param1,
-                                             window_movement_strides,
-                                             window_dilation_strides,
-                                             padding_below,
-                                             padding_above,
-                                             data_dilation_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0,
+                                                 param1,
+                                                 window_movement_strides,
+                                                 window_dilation_strides,
+                                                 padding_below,
+                                                 padding_above,
+                                                 data_dilation_strides);
 
     ASSERT_EQ(conv->get_output_element_type(0), element::f32);
     ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4)));
@@ -1360,13 +1362,13 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_window_strides_rank_wrong
 
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     window_movement_strides,
+                                                     window_dilation_strides,
+                                                     padding_below,
+                                                     padding_above,
+                                                     data_dilation_strides);
 
         FAIL() << "Window stride rank mismatch not detected";
     }
@@ -1401,13 +1403,13 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_window_strides_dim_zero)
 
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     window_movement_strides,
+                                                     window_dilation_strides,
+                                                     padding_below,
+                                                     padding_above,
+                                                     data_dilation_strides);
 
         FAIL() << "Window stride with dimension zero not detected";
     }
@@ -1438,13 +1440,13 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_window_dilation_rank_wron
 
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     window_movement_strides,
+                                                     window_dilation_strides,
+                                                     padding_below,
+                                                     padding_above,
+                                                     data_dilation_strides);
 
         FAIL() << "Window dilation rank mismatch not detected";
     }
@@ -1479,13 +1481,13 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_window_dilation_dim_zero)
 
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     window_movement_strides,
+                                                     window_dilation_strides,
+                                                     padding_below,
+                                                     padding_above,
+                                                     data_dilation_strides);
 
         FAIL() << "Window dilation with dimension zero not detected";
     }
@@ -1516,13 +1518,13 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_padding_below_rank_wrong)
 
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     window_movement_strides,
+                                                     window_dilation_strides,
+                                                     padding_below,
+                                                     padding_above,
+                                                     data_dilation_strides);
 
         FAIL() << "Padding below rank mismatch not detected";
     }
@@ -1557,13 +1559,13 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_padding_above_rank_wrong)
 
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     window_movement_strides,
+                                                     window_dilation_strides,
+                                                     padding_below,
+                                                     padding_above,
+                                                     data_dilation_strides);
 
         FAIL() << "Padding above rank mismatch not detected";
     }
@@ -1598,13 +1600,13 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_data_dilation_rank_wrong)
 
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     window_movement_strides,
+                                                     window_dilation_strides,
+                                                     padding_below,
+                                                     padding_above,
+                                                     data_dilation_strides);
 
         FAIL() << "Data dilation rank mismatch not detected";
     }
@@ -1639,13 +1641,13 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_data_dilation_dim_zero)
 
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     window_movement_strides,
+                                                     window_dilation_strides,
+                                                     padding_below,
+                                                     padding_above,
+                                                     data_dilation_strides);
 
         FAIL() << "Data dilation with dimension zero not detected";
     }
@@ -1674,13 +1676,13 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_ok)
     auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
     auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
 
-    auto conv = make_shared<op::Convolution>(param0,
-                                             param1,
-                                             window_movement_strides,
-                                             window_dilation_strides,
-                                             padding_below,
-                                             padding_above,
-                                             data_dilation_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0,
+                                                 param1,
+                                                 window_movement_strides,
+                                                 window_dilation_strides,
+                                                 padding_below,
+                                                 padding_above,
+                                                 data_dilation_strides);
 
     ASSERT_EQ(conv->get_output_element_type(0), element::f32);
     ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4)));
@@ -1701,13 +1703,13 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_data_batch_rank_wr
 
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     window_movement_strides,
+                                                     window_dilation_strides,
+                                                     padding_below,
+                                                     padding_above,
+                                                     data_dilation_strides);
 
         FAIL() << "Data batch rank mismatch not detected";
     }
@@ -1742,13 +1744,13 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_batch_size_known_o
     auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
     auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
 
-    auto conv = make_shared<op::Convolution>(param0,
-                                             param1,
-                                             window_movement_strides,
-                                             window_dilation_strides,
-                                             padding_below,
-                                             padding_above,
-                                             data_dilation_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0,
+                                                 param1,
+                                                 window_movement_strides,
+                                                 window_dilation_strides,
+                                                 padding_below,
+                                                 padding_above,
+                                                 data_dilation_strides);
 
     ASSERT_EQ(conv->get_output_element_type(0), element::f32);
     ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(
@@ -1771,13 +1773,13 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_batch_size_known_z
 
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     window_movement_strides,
+                                                     window_dilation_strides,
+                                                     padding_below,
+                                                     padding_above,
+                                                     data_dilation_strides);
 
         FAIL() << "Zero batch size not detected";
     }
@@ -1805,13 +1807,13 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_input_channel_coun
     auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
     auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
 
-    auto conv = make_shared<op::Convolution>(param0,
-                                             param1,
-                                             window_movement_strides,
-                                             window_dilation_strides,
-                                             padding_below,
-                                             padding_above,
-                                             data_dilation_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0,
+                                                 param1,
+                                                 window_movement_strides,
+                                                 window_dilation_strides,
+                                                 padding_below,
+                                                 padding_above,
+                                                 data_dilation_strides);
 
     ASSERT_EQ(conv->get_output_element_type(0), element::f32);
     ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4)));
@@ -1833,13 +1835,13 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_input_channel_coun
 
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     window_movement_strides,
+                                                     window_dilation_strides,
+                                                     padding_below,
+                                                     padding_above,
+                                                     data_dilation_strides);
 
         FAIL() << "Zero input channel count not detected";
     }
@@ -1869,13 +1871,13 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_output_channel_cou
     auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
     auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
 
-    auto conv = make_shared<op::Convolution>(param0,
-                                             param1,
-                                             window_movement_strides,
-                                             window_dilation_strides,
-                                             padding_below,
-                                             padding_above,
-                                             data_dilation_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0,
+                                                 param1,
+                                                 window_movement_strides,
+                                                 window_dilation_strides,
+                                                 padding_below,
+                                                 padding_above,
+                                                 data_dilation_strides);
 
     ASSERT_EQ(conv->get_output_element_type(0), element::f32);
     ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(
@@ -1897,13 +1899,13 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_output_channel_cou
 
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     window_movement_strides,
+                                                     window_dilation_strides,
+                                                     padding_below,
+                                                     padding_above,
+                                                     data_dilation_strides);
 
         FAIL() << "Zero output channel count not detected";
     }
@@ -1930,13 +1932,13 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_input_channel_coun
     auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
     auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
 
-    auto conv = make_shared<op::Convolution>(param0,
-                                             param1,
-                                             window_movement_strides,
-                                             window_dilation_strides,
-                                             padding_below,
-                                             padding_above,
-                                             data_dilation_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0,
+                                                 param1,
+                                                 window_movement_strides,
+                                                 window_dilation_strides,
+                                                 padding_below,
+                                                 padding_above,
+                                                 data_dilation_strides);
 
     ASSERT_EQ(conv->get_output_element_type(0), element::f32);
     ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4)));
@@ -1957,13 +1959,13 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_input_channel_coun
 
     try
    {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     window_movement_strides,
+                                                     window_dilation_strides,
+                                                     padding_below,
+                                                     padding_above,
+                                                     data_dilation_strides);
 
         FAIL() << "Zero input channel count not detected";
     }
@@ -1992,13 +1994,13 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_ok)
     auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
     auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
 
-    auto conv = make_shared<op::Convolution>(param0,
-                                             param1,
-                                             window_movement_strides,
-                                             window_dilation_strides,
-                                             padding_below,
-                                             padding_above,
-                                             data_dilation_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0,
+                                                 param1,
+                                                 window_movement_strides,
+                                                 window_dilation_strides,
+                                                 padding_below,
+                                                 padding_above,
+                                                 data_dilation_strides);
 
     ASSERT_EQ(conv->get_output_element_type(0), element::f32);
     ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4)));
@@ -2019,13 +2021,13 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_arg_ranks_m
 
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     window_movement_strides,
+                                                     window_dilation_strides,
+                                                     padding_below,
+                                                     padding_above,
+                                                     data_dilation_strides);
 
         FAIL() << "Argument rank mismatch not detected";
     }
@@ -2055,13 +2057,13 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_input_chann
     auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
     auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
 
-    auto conv = make_shared<op::Convolution>(param0,
-                                             param1,
-                                             window_movement_strides,
-                                             window_dilation_strides,
-                                             padding_below,
-                                             padding_above,
-                                             data_dilation_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0,
+                                                 param1,
+                                                 window_movement_strides,
+                                                 window_dilation_strides,
+                                                 padding_below,
+                                                 padding_above,
+                                                 data_dilation_strides);
 
     ASSERT_EQ(conv->get_output_element_type(0), element::f32);
     ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4)));
@@ -2084,13 +2086,13 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_input_chann
 
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     window_movement_strides,
+                                                     window_dilation_strides,
+                                                     padding_below,
+                                                     padding_above,
+                                                     data_dilation_strides);
 
         FAIL() << "Input channel count mismatch not detected";
     }
@@ -2120,13 +2122,13 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_all_nonspat
     auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
     auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
 
-    auto conv = make_shared<op::Convolution>(param0,
-                                             param1,
-                                             window_movement_strides,
-                                             window_dilation_strides,
-                                             padding_below,
-                                             padding_above,
-                                             data_dilation_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0,
+                                                 param1,
+                                                 window_movement_strides,
+                                                 window_dilation_strides,
+                                                 padding_below,
+                                                 padding_above,
+                                                 data_dilation_strides);
 
     ASSERT_EQ(conv->get_output_element_type(0), element::f32);
     ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(
@@ -2147,13 +2149,13 @@ TEST(type_prop,
     auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
     auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
 
-    auto conv = make_shared<op::Convolution>(param0,
-                                             param1,
-                                             window_movement_strides,
-                                             window_dilation_strides,
-                                             padding_below,
-                                             padding_above,
-                                             data_dilation_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0,
+                                                 param1,
+                                                 window_movement_strides,
+                                                 window_dilation_strides,
+                                                 padding_below,
+                                                 padding_above,
+                                                 data_dilation_strides);
 
     ASSERT_EQ(conv->get_output_element_type(0), element::f32);
     ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(
@@ -2177,13 +2179,13 @@ TEST(
 
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     window_movement_strides,
+                                                     window_dilation_strides,
+                                                     padding_below,
+                                                     padding_above,
+                                                     data_dilation_strides);
 
         FAIL() << "Oversize filter not detected";
     }
@@ -2214,13 +2216,13 @@ TEST(
     auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
     auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
 
-    auto conv = make_shared<op::Convolution>(param0,
-                                             param1,
-                                             window_movement_strides,
-                                             window_dilation_strides,
-                                             padding_below,
-                                             padding_above,
-                                             data_dilation_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0,
+                                                 param1,
+                                                 window_movement_strides,
+                                                 window_dilation_strides,
+                                                 padding_below,
+                                                 padding_above,
+                                                 data_dilation_strides);
 
     ASSERT_EQ(conv->get_output_element_type(0), element::f32);
     ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(
@@ -2242,13 +2244,13 @@ TEST(
     auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
     auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
 
-    auto conv = make_shared<op::Convolution>(param0,
-                                             param1,
-                                             window_movement_strides,
-                                             window_dilation_strides,
-                                             padding_below,
-                                             padding_above,
-                                             data_dilation_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0,
+                                                 param1,
+                                                 window_movement_strides,
+                                                 window_dilation_strides,
+                                                 padding_below,
+                                                 padding_above,
+                                                 data_dilation_strides);
 
     ASSERT_EQ(conv->get_output_element_type(0), element::f32);
     ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(
@@ -2270,13 +2272,13 @@ TEST(
     auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
     auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
 
-    auto conv = make_shared<op::Convolution>(param0,
-                                             param1,
-                                             window_movement_strides,
-                                             window_dilation_strides,
-                                             padding_below,
-                                             padding_above,
-                                             data_dilation_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0,
+                                                 param1,
+                                                 window_movement_strides,
+                                                 window_dilation_strides,
+                                                 padding_below,
+                                                 padding_above,
+                                                 data_dilation_strides);
 
     ASSERT_EQ(conv->get_output_element_type(0), element::f32);
     ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(
@@ -2300,13 +2302,13 @@ TEST(
 
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     window_movement_strides,
+                                                     window_dilation_strides,
+                                                     padding_below,
+                                                     padding_above,
+                                                     data_dilation_strides);
 
         FAIL() << "Oversize filter after window dilation not detected";
     }
@@ -2339,13 +2341,13 @@ TEST(
 
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     window_movement_strides,
+                                                     window_dilation_strides,
+                                                     padding_below,
+                                                     padding_above,
+                                                     data_dilation_strides);
 
         FAIL() << "Zero dimension in data batch not detected";
     }
@@ -2376,13 +2378,13 @@ TEST(
     auto param0 = make_shared<op::Parameter>(element::f32, data_batch_shape);
     auto param1 = make_shared<op::Parameter>(element::f32, filters_shape);
 
-    auto conv = make_shared<op::Convolution>(param0,
-                                             param1,
-                                             window_movement_strides,
-                                             window_dilation_strides,
-                                             padding_below,
-                                             padding_above,
-                                             data_dilation_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0,
+                                                 param1,
+                                                 window_movement_strides,
+                                                 window_dilation_strides,
+                                                 padding_below,
+                                                 padding_above,
+                                                 data_dilation_strides);
 
     ASSERT_EQ(conv->get_output_element_type(0), element::f32);
     ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(
@@ -2406,13 +2408,13 @@ TEST(
 
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     window_movement_strides,
+                                                     window_dilation_strides,
+                                                     padding_below,
+                                                     padding_above,
+                                                     data_dilation_strides);
 
         FAIL() << "Zero padded dimension in data batch not detected";
     }
@@ -2445,13 +2447,13 @@ TEST(
 
     try
     {
-        auto conv = make_shared<op::Convolution>(param0,
-                                                 param1,
-                                                 window_movement_strides,
-                                                 window_dilation_strides,
-                                                 padding_below,
-                                                 padding_above,
-                                                 data_dilation_strides);
+        auto conv = make_shared<op::v0::Convolution>(param0,
+                                                     param1,
+                                                     window_movement_strides,
+                                                     window_dilation_strides,
+                                                     padding_below,
+                                                     padding_above,
+                                                     data_dilation_strides);
 
         FAIL() << "Negative padded dimension in data batch not detected";
     }
@@ -2482,13 +2484,13 @@ TEST(type_prop, conv_partial_dynamic_et)
     auto param0 = make_shared<op::Parameter>(element::dynamic, data_batch_shape);
     auto param1 = make_shared<op::Parameter>(element::dynamic, filters_shape);
 
-    auto conv = make_shared<op::Convolution>(param0,
-                                             param1,
-                                             window_movement_strides,
-                                             window_dilation_strides,
-                                             padding_below,
-                                             padding_above,
-                                             data_dilation_strides);
+    auto conv = make_shared<op::v0::Convolution>(param0,
+                                                 param1,
+                                                 window_movement_strides,
+                                                 window_dilation_strides,
+                                                 padding_below,
+                                                 padding_above,
+                                                 data_dilation_strides);
 
     ASSERT_TRUE(conv->get_output_element_type(0).is_dynamic());
     ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(
diff --git a/ngraph/test/type_prop/group_convolution.cpp b/ngraph/test/type_prop/group_convolution.cpp
index 3ec9865..054c186 100644
--- a/ngraph/test/type_prop/group_convolution.cpp
+++ b/ngraph/test/type_prop/group_convolution.cpp
@@ -21,113 +21,6 @@
 using namespace std;
 using namespace ngraph;
 
-TEST(type_prop, group_conv)
-{
-    // Deduce type
-    auto data = make_shared<op::Parameter>(element::f32, Shape{64, 4, 100, 150});
-    auto filters = make_shared<op::Parameter>(element::f32, Shape{128, 2, 10, 20});
-    auto conv = make_shared<op::GroupConvolution>(data,
-                                                  filters,
-                                                  Strides{1, 1},
-                                                  Strides{1, 1},
-                                                  CoordinateDiff{0, 0},
-                                                  CoordinateDiff{0, 0},
-                                                  Strides{1, 1},
-                                                  2);
-    EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 91, 131}));
-}
-
-TEST(type_prop, group_conv_auto)
-{
-    // Deduce type
-    auto data = make_shared<op::Parameter>(element::f32, Shape{64, 4, 100, 150});
-    auto filters = make_shared<op::Parameter>(element::f32, Shape{128, 2, 10, 20});
-    auto conv = make_shared<op::GroupConvolution>(data,
-                                                  filters,
-                                                  Strides{1, 1},
-                                                  Strides{1, 1},
-                                                  CoordinateDiff{0, 0},
-                                                  CoordinateDiff{0, 0},
-                                                  Strides{1, 1},
-                                                  2,
-                                                  op::PadType::AUTO);
-    EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 100, 150}));
-    EXPECT_EQ(conv->get_padding_below(), (CoordinateDiff{4, 9}));
-    EXPECT_EQ(conv->get_padding_above(), (CoordinateDiff{5, 10}));
-}
-
-TEST(type_prop, group_conv_invalid_groups)
-{
-    // Deduce type
-    try
-    {
-        auto conv = make_shared<op::GroupConvolution>(
-            make_shared<op::Parameter>(element::f32, Shape{64, 20, 100, 150}),
-            make_shared<op::Parameter>(element::f32, Shape{30, 10, 10, 20}),
-            Strides{1, 1},
-            Strides{1, 1},
-            CoordinateDiff{0, 0},
-            CoordinateDiff{0, 0},
-            Strides{1, 1},
-            3);
-        // Should have thrown, so fail if it didn't
-        FAIL() << "Invalid group conv";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(),
-                             std::string("Data channels not a multiple of group size"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-    try
-    {
-        auto conv = make_shared<op::GroupConvolution>(
-            make_shared<op::Parameter>(element::f32, Shape{64, 30, 100, 150}),
-            make_shared<op::Parameter>(element::f32, Shape{20, 10, 10, 20}),
-            Strides{1, 1},
-            Strides{1, 1},
-            CoordinateDiff{0, 0},
-            CoordinateDiff{0, 0},
-            Strides{1, 1},
-            3);
-        // Should have thrown, so fail if it didn't
-        FAIL() << "Invalid group conv";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(), std::string("# Filters not a multiple of group size"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-    try
-    {
-        auto conv = make_shared<op::GroupConvolution>(
-            make_shared<op::Parameter>(element::f32, Shape{64, 30, 100, 150}),
-            make_shared<op::Parameter>(element::f32, Shape{30, 20, 10, 20}),
-            Strides{1, 1},
-            Strides{1, 1},
-            CoordinateDiff{0, 0},
-            CoordinateDiff{0, 0},
-            Strides{1, 1},
-            3);
-        // Should have thrown, so fail if it didn't
-        FAIL() << "Invalid group conv";
-    }
-    catch (const NodeValidationFailure& error)
-    {
-        EXPECT_HAS_SUBSTRING(error.what(), std::string("Incorrect number of channels per filter"));
-    }
-    catch (...)
-    {
-        FAIL() << "Deduced type check failed for unexpected reason";
-    }
-}
-
 TEST(type_prop, group_conv_v1_partial_auto_padding_same_lower)
 {
     const PartialShape data_batch_shape{1, 4, 5, 5};
diff --git a/ngraph/test/type_prop_benchmark.cpp b/ngraph/test/type_prop_benchmark.cpp
deleted file mode 100644
index dde62be..0000000
--- a/ngraph/test/type_prop_benchmark.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-//*****************************************************************************
-// Copyright 2017-2020 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//*****************************************************************************
-
-#include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
-#include "util/type_prop.hpp"
-
-using namespace std;
-using namespace ngraph;
-
-TEST(type_prop, DISABLED_benchmark_type_prop_add)
-{
-    auto p1 = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
-    auto p2 = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
-
-    constexpr size_t num_iterations = 1000000;
-    size_t total_nanosec = 0;
-
-    stopwatch sw;
-
-    for (size_t i = 0; i < num_iterations; i++)
-    {
-        sw.start();
-        auto n = make_shared<op::Add>(p1, p2);
-        sw.stop();
-
-        total_nanosec += sw.get_nanoseconds();
-    }
-
-    std::cout.imbue(std::locale(""));
-    std::cout << "Constructed " << std::fixed << num_iterations << " Add ops in " << std::fixed
-              << total_nanosec << " ns" << std::endl;
-}
-
-TEST(type_prop, DISABLED_benchmark_type_prop_convolution)
-{
-    auto d = make_shared<op::Parameter>(element::f32, Shape{64, 3, 224, 224});
-    auto f = make_shared<op::Parameter>(element::f32, Shape{64, 3, 7, 7});
-    auto strides = Strides{1, 1};
-    auto dilation = Strides{1, 1};
-    auto padding_below = CoordinateDiff{1, 1};
-    auto padding_above = CoordinateDiff{1, 1};
-
-    constexpr size_t num_iterations = 1000000;
-    size_t total_nanosec = 0;
-
-    stopwatch sw;
-
-    for (size_t i = 0; i < num_iterations; i++)
-    {
-        sw.start();
-        auto n =
-            make_shared<op::Convolution>(d, f, strides, dilation, padding_below, padding_above);
-        sw.stop();
-
-        total_nanosec += sw.get_nanoseconds();
-    }
-
-    std::cout.imbue(std::locale(""));
-    std::cout << "Constructed " << std::fixed << num_iterations << " Convolution ops in "
-              << std::fixed << total_nanosec << " ns" << std::endl;
-}
-- 
2.7.4