* Aligned SpaceToBatch/BatchToSpace with the spec, converted from fused_op to op
* Implemented transformation to decompose STB/BTS
* Added unit tests
* Added new mode (INTERPRETER_TRANSFORMATIONS) for functional tests
} // namespace pass
} // namespace ngraph
-class ngraph::pass::ConvertBatchToSpace: public ngraph::pass::GraphRewrite {
+/**
+ * @ingroup ie_transformation_common_api
+ * @brief ConvertBatchToSpace transformation decomposes BatchToSpace layer to Reshape->Transpose->Reshape->Crop.
+ *
+ * @param convert_by_elements - reduces the maximum number of dimensions that arise during the transformation
+ * if enabled. Default value: true.
+ * false - BatchToSpace decomposes to Reshape->Transpose->Reshape->Crop. During transformation, the number of
+ * tensor dimensions can be increased by length of block_shape input of BatchToSpace layer.
+ * true - BatchToSpace decomposes to N x (Reshape->Transpose->Reshape)->Crop, where N = length of block_shape input
+ * of BatchToSpace layer. During transformation, the number of tensor dimensions can be increased by 1.
+ *
+ */
+
+class ngraph::pass::ConvertBatchToSpace: public ngraph::pass::MatcherPass {
public:
- ConvertBatchToSpace() : GraphRewrite() {
- // convert_batch_to_space();
- convert_batch_to_space_ie_side();
+ explicit ConvertBatchToSpace(bool convert_by_elements = true) : MatcherPass() {
+ if (convert_by_elements)
+ convert_batch_to_space_by_elements();
+ else
+ convert_batch_to_space();
}
private:
void convert_batch_to_space();
- void convert_batch_to_space_ie_side();
+ void convert_batch_to_space_by_elements();
};
} // namespace pass
} // namespace ngraph
-class ngraph::pass::ConvertSpaceToBatch: public ngraph::pass::GraphRewrite {
+/**
+ * @ingroup ie_transformation_common_api
+ * @brief ConvertSpaceToBatch transformation decomposes SpaceToBatch layer to Pad->Reshape->Transpose->Reshape.
+ *
+ * @param convert_by_elements - reduces the maximum number of dimensions that arise during the transformation
+ * if enabled. Default value: true.
+ * false - SpaceToBatch decomposes to Pad->Reshape->Transpose->Reshape. During transformation, the number of
+ * tensor dimensions can be increased by length of block_shape input of SpaceToBatch layer.
+ * true - SpaceToBatch decomposes to Pad-> N x (Reshape->Transpose->Reshape), where N = length of block_shape input
+ * of SpaceToBatch layer. During transformation, the number of tensor dimensions can be increased by 1.
+ *
+ */
+
+class ngraph::pass::ConvertSpaceToBatch: public ngraph::pass::MatcherPass {
public:
- ConvertSpaceToBatch() : GraphRewrite() {
- // convert_space_to_batch();
- convert_space_to_batch_by_elements();
+ explicit ConvertSpaceToBatch(bool convert_by_elements = true) : MatcherPass() {
+ if (convert_by_elements)
+ convert_space_to_batch_by_elements();
+ else
+ convert_space_to_batch();
}
private:
#include <memory>
#include <vector>
-#include <ngraph/opsets/opset2.hpp>
+#include <ngraph/opsets/opset3.hpp>
+#include <ngraph/pattern/op/wrap_type.hpp>
#include <ngraph/rt_info.hpp>
void ngraph::pass::ConvertBatchToSpace::convert_batch_to_space() {
- auto input0 = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
- auto input1 = ngraph::op::Constant::create(element::i64, Shape{4}, Shape{1, 1, 1, 1});
- auto input2 = ngraph::op::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0});
- auto input3 = ngraph::op::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0});
- auto batch_to_space = std::make_shared<ngraph::opset2::BatchToSpace>(input0, input1, input2, input3);
-
+ auto batch_to_space = ngraph::pattern::wrap_type<ngraph::opset3::BatchToSpace>();
ngraph::graph_rewrite_callback callback = [](pattern::Matcher& m) {
- auto batch_to_space = std::dynamic_pointer_cast<ngraph::opset2::BatchToSpace> (m.get_match_root());
+ auto batch_to_space = std::dynamic_pointer_cast<ngraph::opset3::BatchToSpace> (m.get_match_root());
if (!batch_to_space) {
return false;
}
- auto last_node = batch_to_space->decompose_op()[0];
- last_node.get_node()->set_friendly_name(batch_to_space->get_friendly_name());
- ngraph::replace_node(batch_to_space, last_node.get_node_shared_ptr());
+
+ NodeVector new_ops;
+ auto data = batch_to_space->input_value(0);
+ auto block = batch_to_space->input_value(1);
+ auto crops_begin = batch_to_space->input_value(2);
+ auto crops_end = batch_to_space->input_value(3);
+
+ if (data.get_partial_shape().is_dynamic()) {
+ return false;
+ }
+ const auto& data_shape = data.get_shape();
+
+ const auto block_const = std::dynamic_pointer_cast<opset3::Constant>(block.get_node_shared_ptr());
+ const auto crops_begin_const = std::dynamic_pointer_cast<opset3::Constant>(crops_begin.get_node_shared_ptr());
+ const auto crops_end_const = std::dynamic_pointer_cast<opset3::Constant>(crops_end.get_node_shared_ptr());
+
+ if (!block_const || !crops_begin_const || !crops_end_const) {
+ return false;
+ }
+
+ const std::vector<int64_t> &block_values = block_const->cast_vector<int64_t>();
+ const std::vector<int64_t> &crops_end_values = crops_end_const->cast_vector<int64_t>();
+
+        // First we have to disperse the data from batch, then rearrange them
+        // so that appropriate chunks of data are close to their destination place.
+        // Finally squeeze data from respective dimensions.
+ std::vector<int64_t> dispersed_shape;
+ int64_t b_dim_divider = 1;
+ for (const auto& el : block_values) {
+ b_dim_divider *= el;
+ }
+
+ // note: B_0 is expected to be 1.
+ // x' = reshape(`data`, [B_1, ..., B_{N - 1}, batch / (B_1 * ... B_{N - 1}), D_1, D_2, ...,
+ // D_{N - 1}]),
+ // where B_i = block_shape[i]
+ dispersed_shape.insert(dispersed_shape.begin(), block_values.begin() + 1, block_values.end());
+ dispersed_shape.push_back(data_shape.at(0) / b_dim_divider);
+ for (size_t i = 1; i < data_shape.size(); ++i) {
+ dispersed_shape.push_back(data_shape.at(i));
+ }
+
+ const auto out_pattern_1 =
+ opset3::Constant::create(element::i64, Shape{dispersed_shape.size()}, dispersed_shape);
+ const bool special_zero = false;
+ std::shared_ptr<Node> flat_node = std::make_shared<ngraph::opset3::Reshape>(data, out_pattern_1, special_zero);
+ new_ops.push_back(flat_node);
+ // calculate axes to transpose
+ // x'' = transpose(x', [N, N + 1, 0, N + 2, 1, ..., N + N - 1, N - 1])
+ std::vector<size_t> axes_order{block_values.size() - 1};
+ for (size_t i = 0; i < block_values.size() - 1; ++i) {
+ axes_order.push_back(i + block_values.size());
+ axes_order.push_back(i);
+ }
+
+ const auto axes_order_const =
+ opset3::Constant::create(element::i64,
+ Shape{axes_order.size()},
+ std::vector<int64_t>(axes_order.begin(), axes_order.end()));
+ flat_node = std::make_shared<ngraph::opset3::Transpose>(flat_node, axes_order_const);
+ new_ops.push_back(flat_node);
+        // x''' = reshape(x'', [batch / (B_1 * ... * B_{N - 1}), D_1 * B_1,
+        //     D_2 * B_2, ..., D_{N - 1} * B_{N - 1}])
+ std::vector<int64_t> squeezed_shape;
+ squeezed_shape.push_back(data_shape.at(0) / b_dim_divider);
+ for (size_t i = 1; i < block_values.size(); ++i) {
+ squeezed_shape.push_back(data_shape.at(i) * block_values.at(i));
+ }
+
+ const auto out_pattern_2 =
+ opset3::Constant::create(element::i64, Shape{squeezed_shape.size()}, squeezed_shape);
+ flat_node = std::make_shared<opset3::Reshape>(flat_node, out_pattern_2, special_zero);
+ new_ops.push_back(flat_node);
+
+ // Crop the start and end of dimensions according to `crops_begin`, `crops_end` to produce
+ // the output of shape:
+ // note: `crops_begin[0], crops_end[0]` are expected to be 0.
+ // `y = [batch / (B_1 * ... * B_{N - 1}), crop(D_1 * B_1, crops_begin[1], crops_end[1]),
+ // crop(D_2 * B_2, crops_begin[2], crops_end[2]), ... ,
+ // crop(D_{N - 1} * B_{N - 1}, crops_begin[N - 1], crops_end[N - 1])]`
+ std::vector<int64_t> upperbounds_values;
+ auto flat_node_shape = flat_node->get_shape();
+ for (size_t i = 0; i < flat_node_shape.size(); ++i) {
+ upperbounds_values.push_back(flat_node_shape.at(i) - crops_end_values.at(i));
+ }
+
+ const auto upperbounds = opset3::Constant::create(
+ crops_end.get_element_type(), Shape{upperbounds_values.size()}, upperbounds_values);
+
+ std::vector<int64_t> begin_mask(data_shape.size(), 0);
+ std::vector<int64_t> end_mask(data_shape.size(), 0);
+ flat_node = std::make_shared<opset3::StridedSlice>(
+ flat_node, crops_begin_const, upperbounds, begin_mask, end_mask);
+ new_ops.push_back(flat_node);
+
+ flat_node->set_friendly_name(batch_to_space->get_friendly_name());
+ ngraph::copy_runtime_info(batch_to_space, new_ops);
+ ngraph::replace_node(batch_to_space, flat_node);
return true;
};
auto m = std::make_shared<ngraph::pattern::Matcher>(batch_to_space, "ConvertBatchToSpace");
- this->add_matcher(m, callback, PassProperty::CHANGE_DYNAMIC_STATE);
+ this->register_matcher(m, callback);
}
-void ngraph::pass::ConvertBatchToSpace::convert_batch_to_space_ie_side() {
- auto input0 = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
- auto input1 = ngraph::op::Constant::create(element::i64, Shape{4}, Shape{1, 1, 1, 1});
- auto input2 = ngraph::op::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0});
- auto input3 = ngraph::op::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0});
- auto batch_to_space = std::make_shared<ngraph::opset2::BatchToSpace>(input0, input1, input2, input3);
-
+void ngraph::pass::ConvertBatchToSpace::convert_batch_to_space_by_elements() {
+ auto batch_to_space = ngraph::pattern::wrap_type<ngraph::opset3::BatchToSpace>();
ngraph::graph_rewrite_callback callback = [this](pattern::Matcher& m) {
- auto batch_to_space = std::dynamic_pointer_cast<ngraph::opset2::BatchToSpace> (m.get_match_root());
+ auto batch_to_space = std::dynamic_pointer_cast<ngraph::opset3::BatchToSpace> (m.get_match_root());
if (!batch_to_space) {
return false;
}
auto data = batch_to_space->input_value(0);
+
+ if (data.get_partial_shape().is_dynamic()) {
+ return false;
+ }
auto data_shape = data.get_shape();
if (m_transformation_callback(batch_to_space) && (data_shape.size() == 4 || data_shape.size() == 5)) {
auto crops_begin = batch_to_space->input_value(2);
auto crops_end = batch_to_space->input_value(3);
- const auto block_const = as_type_ptr<op::Constant>(block.get_node_shared_ptr());
- const auto crops_begin_const = as_type_ptr<op::Constant>(crops_begin.get_node_shared_ptr());
- const auto crops_end_const = as_type_ptr<op::Constant>(crops_end.get_node_shared_ptr());
+ const auto block_const = as_type_ptr<opset3::Constant>(block.get_node_shared_ptr());
+ const auto crops_begin_const = as_type_ptr<opset3::Constant>(crops_begin.get_node_shared_ptr());
+ const auto crops_end_const = as_type_ptr<opset3::Constant>(crops_end.get_node_shared_ptr());
- std::vector<int64_t> block_values, crops_end_values;
- block_values = block_const->cast_vector<int64_t>();
- crops_end_values = crops_end_const->cast_vector<int64_t>();
+ const std::vector<int64_t> &block_values = block_const->cast_vector<int64_t>();
+ const std::vector<int64_t> &crops_end_values = crops_end_const->cast_vector<int64_t>();
std::vector<int64_t> dispersed_shape(1);
dispersed_shape.insert(dispersed_shape.end(), data_shape.begin(), data_shape.end());
dispersed_shape[0] = block_values[block_idx];
dispersed_shape[1] /= block_values[block_idx];
const auto out_pattern_1 =
- op::Constant::create(element::i64, Shape{dispersed_shape.size()}, dispersed_shape);
+ opset3::Constant::create(element::i64, Shape{dispersed_shape.size()}, dispersed_shape);
const bool special_zero = false;
- flat_node = std::make_shared<ngraph::op::v1::Reshape>(flat_node, out_pattern_1, special_zero)
- ->add_provenance_group_members_above({data});
+ flat_node = std::make_shared<ngraph::opset3::Reshape>(flat_node, out_pattern_1, special_zero);
new_ops.push_back(flat_node);
size_t val = 1;
}
const auto axes_order_const =
- ngraph::op::Constant::create(element::i64,
+ ngraph::opset3::Constant::create(element::i64,
Shape{axes_order.size()},
std::vector<int64_t>(axes_order.begin(), axes_order.end()));
- flat_node = std::make_shared<ngraph::opset1::Transpose>(flat_node, axes_order_const)
- ->add_provenance_group_members_above({flat_node});
+ flat_node = std::make_shared<ngraph::opset3::Transpose>(flat_node, axes_order_const);
new_ops.push_back(flat_node);
squeezed_shape[0] = dispersed_shape[1];
squeezed_shape[block_idx] *= block_values[block_idx];
dispersed_shape[block_idx + 1] = squeezed_shape[block_idx];
const auto out_pattern_2 =
- op::Constant::create(element::i64, Shape{squeezed_shape.size()}, squeezed_shape);
- flat_node = std::make_shared<ngraph::op::v1::Reshape>(flat_node, out_pattern_2, special_zero)
- ->add_provenance_group_members_above({data});
+ opset3::Constant::create(element::i64, Shape{squeezed_shape.size()}, squeezed_shape);
+ flat_node = std::make_shared<ngraph::opset3::Reshape>(flat_node, out_pattern_2, special_zero);
new_ops.push_back(flat_node);
}
for (size_t i = 0; i < flat_node_shape.size(); ++i) {
upperbounds_values.push_back(flat_node_shape.at(i) - crops_end_values.at(i));
}
- const auto upperbounds = op::Constant::create(
+ const auto upperbounds = opset3::Constant::create(
crops_end.get_element_type(), Shape{upperbounds_values.size()}, upperbounds_values);
std::vector<int64_t> begin_mask(data_shape.size(), 0);
std::vector<int64_t> end_mask(data_shape.size(), 0);
- flat_node = std::make_shared<op::v1::StridedSlice>(
+ flat_node = std::make_shared<opset3::StridedSlice>(
flat_node, crops_begin_const, upperbounds, begin_mask, end_mask);
new_ops.push_back(flat_node);
flat_node->set_friendly_name(batch_to_space->get_friendly_name());
- ngraph::copy_runtime_info(batch_to_space, flat_node);
+ ngraph::copy_runtime_info(batch_to_space, new_ops);
ngraph::replace_node(batch_to_space, flat_node);
return true;
};
auto m = std::make_shared<ngraph::pattern::Matcher>(batch_to_space, "ConvertBatchToSpace");
- this->add_matcher(m, callback, PassProperty::CHANGE_DYNAMIC_STATE);
+ this->register_matcher(m, callback);
}
\ No newline at end of file
#include <memory>
#include <vector>
-#include <ngraph/opsets/opset2.hpp>
+#include <ngraph/opsets/opset3.hpp>
+#include <ngraph/pattern/op/wrap_type.hpp>
#include <ngraph/rt_info.hpp>
void ngraph::pass::ConvertSpaceToBatch::convert_space_to_batch() {
- auto input0 = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
- auto input1 = ngraph::op::Constant::create(element::i64, Shape{4}, Shape{1, 1, 1, 1});
- auto input2 = ngraph::op::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0});
- auto input3 = ngraph::op::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0});
- auto space_to_batch = std::make_shared<ngraph::opset2::SpaceToBatch>(input0, input1, input2, input3);
-
- ngraph::graph_rewrite_callback callback = [](pattern::Matcher& m) {
- auto space_to_batch = std::dynamic_pointer_cast<ngraph::opset2::SpaceToBatch> (m.get_match_root());
+ auto space_to_batch = ngraph::pattern::wrap_type<ngraph::opset3::SpaceToBatch>();
+ ngraph::matcher_pass_callback callback = [](pattern::Matcher& m) {
+ auto space_to_batch = std::dynamic_pointer_cast<ngraph::opset3::SpaceToBatch> (m.get_match_root());
if (!space_to_batch) {
return false;
}
- auto last_node = space_to_batch->decompose_op()[0];
- last_node.get_node()->set_friendly_name(space_to_batch->get_friendly_name());
- ngraph::replace_node(space_to_batch, last_node.get_node_shared_ptr());
+
+ NodeVector new_ops;
+ auto data = space_to_batch->input_value(0);
+ auto block = space_to_batch->input_value(1);
+ auto pads_begin = space_to_batch->input_value(2);
+ auto pads_end = space_to_batch->input_value(3);
+
+ if (data.get_partial_shape().is_dynamic()) {
+ return false;
+ }
+ const auto& data_shape = data.get_shape();
+
+ const auto block_const = std::dynamic_pointer_cast<opset3::Constant>(block.get_node_shared_ptr());
+ const auto pads_begin_const = std::dynamic_pointer_cast<opset3::Constant>(pads_begin.get_node_shared_ptr());
+ const auto pads_end_const = std::dynamic_pointer_cast<opset3::Constant>(pads_end.get_node_shared_ptr());
+
+ if (!block_const || !pads_begin_const || !pads_end_const) {
+ return false;
+ }
+
+ const std::vector<int64_t> &block_values = block_const->cast_vector<int64_t>();
+
+ // Zero-pad the start and end of dimensions [D_0, ..., D_{N - 1}] of the input according to
+ // `pads_begin`
+ // and `pads_end`:
+ // note: P_0 for batch dimension is expected to be 0 (no-padding).
+ // x = [batch + P_0, D_1 + P_1, D_2 + P_2, ..., D_{N - 1} + P_{N - 1}], where P_i =
+ // pads_begin[i] + pads_end[i]
+ std::shared_ptr<Node> flat_node = std::make_shared<opset3::Pad>(data, pads_begin_const, pads_end_const,
+ ngraph::op::PadMode::CONSTANT);
+ auto out_shape = flat_node->get_shape();
+ new_ops.push_back(flat_node);
+
+        // First we have to disperse the data from spatial dimensions, then
+        // rearrange them so that appropriate chunks of data are close to their
+        // destination place. Finally squeeze data from respective dimensions.
+ Shape dispersed_shape{out_shape.at(0)};
+
+ // note: B_0 for batch is ignored.
+ // x' = reshape(x, [batch, (D_1 + P_1) / B_1, B_1, (D_2 + P_2) / B_2, B_2, ...,
+ // (D_{N - 1} + P_{N - 1}) / B_{N - 1}, B_{N - 1}]), where B_i = block_shape[i]
+ for (size_t i = 1; i < block_values.size(); ++i) {
+ dispersed_shape.push_back(out_shape.at(i) / block_values.at(i));
+ dispersed_shape.push_back(block_values.at(i));
+ }
+
+ const auto out_pattern =
+ opset3::Constant::create(element::i64, Shape{dispersed_shape.size()}, dispersed_shape);
+ flat_node = std::make_shared<ngraph::opset3::Reshape>(flat_node, out_pattern, false);
+ new_ops.push_back(flat_node);
+
+ // x'' = transpose(x', [2, 4, ..., (N - 1) + (N - 1), 0, 1, 3, ..., N + (N - 1)])
+ std::vector<size_t> axes_order;
+ for (size_t i = 0, j = 2; i < block_values.size() - 1; ++i, j += 2) {
+ axes_order.push_back(j);
+ }
+ axes_order.push_back(0);
+ for (size_t i = 0, j = 1; i < block_values.size() - 1; ++i, j += 2) {
+ axes_order.push_back(j);
+ }
+
+ const auto axes_order_const =
+ opset3::Constant::create(element::i64,
+ Shape{axes_order.size()},
+ std::vector<int64_t>(axes_order.begin(), axes_order.end()));
+ flat_node = std::make_shared<ngraph::opset3::Transpose>(flat_node, axes_order_const);
+ new_ops.push_back(flat_node);
+
+ Shape squeezed_shape;
+ int64_t prod = 1;
+ for (const auto& el : block_values) {
+ prod *= el;
+ }
+
+        // y = reshape(x'', [batch * B_1 * ... * B_{N - 1}, (D_1 + P_1) / B_1,
+        //     (D_2 + P_2) / B_2, ..., (D_{N - 1} + P_{N - 1}) / B_{N - 1}])
+ squeezed_shape.push_back(out_shape.at(0) * prod);
+ for (size_t i = 1; i < block_values.size(); ++i) {
+ squeezed_shape.push_back(out_shape.at(i) / block_values.at(i));
+ }
+
+ const auto out_pattern_2 =
+ opset3::Constant::create(element::i64, Shape{squeezed_shape.size()}, squeezed_shape);
+ flat_node = std::make_shared<ngraph::opset3::Reshape>(flat_node, out_pattern_2, false);
+ new_ops.push_back(flat_node);
+
+ flat_node->set_friendly_name(space_to_batch->get_friendly_name());
+ ngraph::copy_runtime_info(space_to_batch, new_ops);
+ ngraph::replace_node(space_to_batch, flat_node);
return true;
};
auto m = std::make_shared<ngraph::pattern::Matcher>(space_to_batch, "ConvertSpaceToBatch");
- this->add_matcher(m, callback, PassProperty::CHANGE_DYNAMIC_STATE);
+ this->register_matcher(m, callback);
}
void ngraph::pass::ConvertSpaceToBatch::convert_space_to_batch_by_elements() {
- auto input0 = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
- auto input1 = ngraph::op::Constant::create(element::i64, Shape{4}, Shape{1, 1, 1, 1});
- auto input2 = ngraph::op::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0});
- auto input3 = ngraph::op::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0});
- auto space_to_batch = std::make_shared<ngraph::opset2::SpaceToBatch>(input0, input1, input2, input3);
-
- ngraph::graph_rewrite_callback callback = [this](pattern::Matcher& m) {
- auto space_to_batch = std::dynamic_pointer_cast<ngraph::opset2::SpaceToBatch> (m.get_match_root());
+ auto space_to_batch = ngraph::pattern::wrap_type<ngraph::opset3::SpaceToBatch>();
+ ngraph::matcher_pass_callback callback = [this](pattern::Matcher& m) {
+ auto space_to_batch = std::dynamic_pointer_cast<ngraph::opset3::SpaceToBatch> (m.get_match_root());
if (!space_to_batch) {
return false;
}
auto data = space_to_batch->input_value(0);
+
+ if (data.get_partial_shape().is_dynamic()) {
+ return false;
+ }
const auto& data_shape = data.get_shape();
if (m_transformation_callback(space_to_batch) && (data_shape.size() == 4 || data_shape.size() == 5)) {
auto pads_begin = space_to_batch->input_value(2);
auto pads_end = space_to_batch->input_value(3);
- const auto block_const = as_type_ptr<op::Constant>(block.get_node_shared_ptr());
- const auto pads_begin_const = as_type_ptr<op::Constant>(pads_begin.get_node_shared_ptr());
- const auto pads_end_const = as_type_ptr<op::Constant>(pads_end.get_node_shared_ptr());
+ const auto block_const = as_type_ptr<opset3::Constant>(block.get_node_shared_ptr());
+ const auto pads_begin_const = as_type_ptr<opset3::Constant>(pads_begin.get_node_shared_ptr());
+ const auto pads_end_const = as_type_ptr<opset3::Constant>(pads_end.get_node_shared_ptr());
- std::vector<int64_t> block_values;
- block_values = block_const->cast_vector<int64_t>();
+ const std::vector<int64_t> &block_values = block_const->cast_vector<int64_t>();
NodeVector new_ops;
std::shared_ptr<Node> flat_node = data.get_node_shared_ptr();
- flat_node = std::make_shared<opset2::Pad>(flat_node, pads_begin_const, pads_end_const, ngraph::op::PadMode::CONSTANT);
+ flat_node = std::make_shared<opset3::Pad>(flat_node, pads_begin_const, pads_end_const, ngraph::op::PadMode::CONSTANT);
new_ops.push_back(flat_node);
auto out_shape = flat_node->get_shape();
}
const auto out_pattern_1 =
- op::Constant::create(element::i64, Shape{dispersed_shape.size()}, dispersed_shape);
+ opset3::Constant::create(element::i64, Shape{dispersed_shape.size()}, dispersed_shape);
const bool special_zero = false;
- flat_node = std::make_shared<ngraph::op::v1::Reshape>(flat_node, out_pattern_1, special_zero);
+ flat_node = std::make_shared<ngraph::opset3::Reshape>(flat_node, out_pattern_1, special_zero);
new_ops.push_back(flat_node);
const auto axes_order_const =
- op::Constant::create(element::i64,
+ opset3::Constant::create(element::i64,
Shape{axes_order.size()},
std::vector<int64_t>(axes_order.begin(), axes_order.end()));
- flat_node = std::make_shared<ngraph::opset1::Transpose>(flat_node, axes_order_const)
- ->add_provenance_group_members_above({flat_node});
+ flat_node = std::make_shared<ngraph::opset3::Transpose>(flat_node, axes_order_const);
new_ops.push_back(flat_node);
squeezed_shape[0] *= block_values[block_idx];
squeezed_shape[block_idx] /= block_values[block_idx];
const auto out_pattern_2 =
- op::Constant::create(element::i64, Shape{squeezed_shape.size()}, squeezed_shape);
- flat_node = std::make_shared<ngraph::op::v1::Reshape>(flat_node, out_pattern_2, special_zero)
- ->add_provenance_group_members_above({data});
+ opset3::Constant::create(element::i64, Shape{squeezed_shape.size()}, squeezed_shape);
+ flat_node = std::make_shared<ngraph::opset3::Reshape>(flat_node, out_pattern_2, special_zero);
new_ops.push_back(flat_node);
}
};
auto m = std::make_shared<ngraph::pattern::Matcher>(space_to_batch, "ConvertSpaceToBatch");
- this->add_matcher(m, callback, PassProperty::CHANGE_DYNAMIC_STATE);
+ this->register_matcher(m, callback);
}
\ No newline at end of file
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include "common_test_utils/test_common.hpp"
+#include <string>
+#include <sstream>
+#include <memory>
+#include <queue>
+
+#include <ngraph/function.hpp>
+#include <ngraph/opsets/opset3.hpp>
+#include <ngraph/pass/manager.hpp>
+#include <ngraph_ops/fully_connected.hpp>
+#include <transformations/convert_batch_to_space.hpp>
+#include <transformations/convert_space_to_batch.hpp>
+#include <transformations/utils/utils.hpp>
+#include <transformations/init_node_info.hpp>
+
+#include "common_test_utils/ngraph_test_utils.hpp"
+
+using namespace testing;
+using namespace ngraph;
+
+TEST(TransformationTests, BatchToSpaceDecompositionByElements) {
+ std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
+
+ {
+ auto data = std::make_shared<opset3::Parameter>(element::f32, Shape{100, 7, 13, 3});
+ auto block_shape = std::make_shared<opset3::Constant>(element::i64, Shape{4}, std::vector<int64_t>{1, 10, 5, 1});
+ auto crops_begin = std::make_shared<opset3::Constant>(element::i64, Shape{4}, std::vector<int64_t>{0, 3, 1, 0});
+ auto crops_end = std::make_shared<opset3::Constant>(element::i64, Shape{4}, std::vector<int64_t>{0, 3, 0, 0});
+ auto batch_to_space = std::make_shared<ngraph::opset3::BatchToSpace>(data, block_shape, crops_begin, crops_end);
+
+ f = std::make_shared<ngraph::Function>(ngraph::NodeVector{batch_to_space}, ngraph::ParameterVector{data});
+
+ ngraph::pass::Manager m;
+ m.register_pass<ngraph::pass::InitNodeInfo>();
+ m.register_pass<ngraph::pass::ConvertBatchToSpace>();
+ m.run_passes(f);
+
+ ASSERT_NO_THROW(check_rt_info(f));
+ }
+
+ {
+ auto data = std::make_shared<opset3::Parameter>(element::f32, Shape{100, 7, 13, 3});
+
+ auto dispresed_shape_1 = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{5}, {10, 10, 7, 13, 3});
+ auto axis_order_1 = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{5}, {1, 2, 0, 3, 4});
+ auto squeezed_order_1 = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {10, 70, 13, 3});
+
+ auto reshape_before_1 = std::make_shared<ngraph::opset3::Reshape> (data, dispresed_shape_1, false);
+ auto permute_1 = std::make_shared<ngraph::opset3::Transpose> (reshape_before_1, axis_order_1);
+ auto reshape_after_1 = std::make_shared<ngraph::opset3::Reshape> (permute_1, squeezed_order_1, false);
+
+ auto dispresed_shape_2 = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{5}, {5, 2, 70, 13, 3});
+ auto axis_order_2 = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{5}, {1, 2, 3, 0, 4});
+ auto squeezed_order_2 = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {2, 70, 65, 3});
+
+ auto reshape_before_2 = std::make_shared<ngraph::opset3::Reshape> (reshape_after_1, dispresed_shape_2, false);
+ auto permute_2 = std::make_shared<ngraph::opset3::Transpose> (reshape_before_2, axis_order_2);
+ auto reshape_after_2 = std::make_shared<ngraph::opset3::Reshape> (permute_2, squeezed_order_2, false);
+
+ auto dispresed_shape_3 = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{5}, {1, 2, 70, 65, 3});
+ auto axis_order_3 = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{5}, {1, 2, 3, 4, 0});
+ auto squeezed_order_3 = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {2, 70, 65, 3});
+
+ auto reshape_before_3 = std::make_shared<ngraph::opset3::Reshape> (reshape_after_2, dispresed_shape_3, false);
+ auto permute_3 = std::make_shared<ngraph::opset3::Transpose> (reshape_before_3, axis_order_3);
+ auto reshape_after_3 = std::make_shared<ngraph::opset3::Reshape> (permute_3, squeezed_order_3, false);
+
+ auto begin = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 0});
+ auto end = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {2, 67, 65, 3});
+ std::vector<int64_t> begin_mask(4, 0);
+ std::vector<int64_t> end_mask(4, 0);
+ auto ss = std::make_shared<opset3::StridedSlice>(reshape_after_3, begin, end, begin_mask, end_mask);
+
+ f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ss}, ngraph::ParameterVector{data});
+ }
+ auto res = compare_functions(f, f_ref);
+ ASSERT_TRUE(res.first) << res.second;
+}
+
+TEST(TransformationTests, SpaceToBatchDecompositionByElements) {
+ std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
+
+ {
+ auto data = std::make_shared<opset3::Parameter>(element::f32, Shape{2, 64, 64, 3});
+ auto block_shape = std::make_shared<opset3::Constant>(element::i64, Shape{4}, std::vector<int64_t>{1, 10, 5, 1});
+ auto pads_begin = std::make_shared<opset3::Constant>(element::i64, Shape{4}, std::vector<int64_t>{0, 3, 1, 0});
+ auto pads_end = std::make_shared<opset3::Constant>(element::i64, Shape{4}, std::vector<int64_t>{0, 3, 0, 0});
+ auto batch_to_space = std::make_shared<ngraph::opset3::SpaceToBatch>(data, block_shape, pads_begin, pads_end);
+
+ f = std::make_shared<ngraph::Function>(ngraph::NodeVector{batch_to_space}, ngraph::ParameterVector{data});
+
+ ngraph::pass::Manager m;
+ m.register_pass<ngraph::pass::InitNodeInfo>();
+ m.register_pass<ngraph::pass::ConvertSpaceToBatch>();
+ m.run_passes(f);
+
+ ASSERT_NO_THROW(check_rt_info(f));
+ }
+
+ {
+ auto data = std::make_shared<opset3::Parameter>(element::f32, Shape{2, 64, 64, 3});
+ auto pads_begin = std::make_shared<opset3::Constant>(element::i64, Shape{4}, std::vector<int64_t>{0, 3, 1, 0});
+ auto pads_end = std::make_shared<opset3::Constant>(element::i64, Shape{4}, std::vector<int64_t>{0, 3, 0, 0});
+ auto pads = std::make_shared<opset3::Pad>(data, pads_begin, pads_end, ngraph::op::PadMode::CONSTANT);
+
+ auto dispresed_shape_1 = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{5}, {2, 70, 65, 3, 1});
+ auto axis_order_1 = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{5}, {4, 0, 1, 2, 3});
+ auto squeezed_order_1 = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {2, 70, 65, 3});
+
+ auto reshape_before_1 = std::make_shared<ngraph::opset3::Reshape> (pads, dispresed_shape_1, false);
+ auto permute_1 = std::make_shared<ngraph::opset3::Transpose> (reshape_before_1, axis_order_1);
+ auto reshape_after_1 = std::make_shared<ngraph::opset3::Reshape> (permute_1, squeezed_order_1, false);
+
+ auto dispresed_shape_2 = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{5}, {2, 70, 13, 5, 3});
+ auto axis_order_2 = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{5}, {3, 0, 1, 2, 4});
+ auto squeezed_order_2 = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {10, 70, 13, 3});
+
+ auto reshape_before_2 = std::make_shared<ngraph::opset3::Reshape> (reshape_after_1, dispresed_shape_2, false);
+ auto permute_2 = std::make_shared<ngraph::opset3::Transpose> (reshape_before_2, axis_order_2);
+ auto reshape_after_2 = std::make_shared<ngraph::opset3::Reshape> (permute_2, squeezed_order_2, false);
+
+ auto dispresed_shape_3 = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{5}, {10, 7, 10, 13, 3});
+ auto axis_order_3 = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{5}, {2, 0, 1, 3, 4});
+ auto squeezed_order_3 = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {100, 7, 13, 3});
+
+ auto reshape_before_3 = std::make_shared<ngraph::opset3::Reshape> (reshape_after_2, dispresed_shape_3, false);
+ auto permute_3 = std::make_shared<ngraph::opset3::Transpose> (reshape_before_3, axis_order_3);
+ auto reshape_after_3 = std::make_shared<ngraph::opset3::Reshape> (permute_3, squeezed_order_3, false);
+
+ auto dispresed_shape_4 = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{5}, {100, 1, 7, 13, 3});
+ auto axis_order_4 = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{5}, {1, 0, 2, 3, 4});
+ auto squeezed_order_4 = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {100, 7, 13, 3});
+
+ auto reshape_before_4 = std::make_shared<ngraph::opset3::Reshape> (reshape_after_3, dispresed_shape_4, false);
+ auto permute_4 = std::make_shared<ngraph::opset3::Transpose> (reshape_before_4, axis_order_4);
+ auto reshape_after_4 = std::make_shared<ngraph::opset3::Reshape> (permute_4, squeezed_order_4, false);
+
+ f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{reshape_after_4}, ngraph::ParameterVector{data});
+ }
+ auto res = compare_functions(f, f_ref);
+ ASSERT_TRUE(res.first) << res.second;
+}
+
+// Checks that ConvertSpaceToBatch(false) decomposes SpaceToBatch into the
+// single-step Pad->Reshape->Transpose->Reshape subgraph expected by the spec.
+TEST(TransformationTests, SpaceToBatchDecomposition) {
+    std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
+
+    {
+        auto data = std::make_shared<opset3::Parameter>(element::f32, Shape{2, 64, 64, 3});
+        auto block_shape = std::make_shared<opset3::Constant>(element::i64, Shape{4}, std::vector<int64_t>{1, 10, 5, 1});
+        auto pads_begin = std::make_shared<opset3::Constant>(element::i64, Shape{4}, std::vector<int64_t>{0, 3, 1, 0});
+        auto pads_end = std::make_shared<opset3::Constant>(element::i64, Shape{4}, std::vector<int64_t>{0, 3, 0, 0});
+        // Renamed from the misleading `batch_to_space`: the node under test is a SpaceToBatch.
+        auto space_to_batch = std::make_shared<ngraph::opset3::SpaceToBatch>(data, block_shape, pads_begin, pads_end);
+
+        f = std::make_shared<ngraph::Function>(ngraph::NodeVector{space_to_batch}, ngraph::ParameterVector{data});
+
+        ngraph::pass::Manager m;
+        m.register_pass<ngraph::pass::InitNodeInfo>();
+        m.register_pass<ngraph::pass::ConvertSpaceToBatch>(false);
+        m.run_passes(f);
+
+        ASSERT_NO_THROW(check_rt_info(f));
+    }
+
+    {
+        // Reference subgraph built by hand: Pad -> Reshape (to dispersed shape)
+        // -> Transpose -> Reshape (to squeezed shape).
+        auto data = std::make_shared<opset3::Parameter>(element::f32, Shape{2, 64, 64, 3});
+        auto pads_begin = std::make_shared<opset3::Constant>(element::i64, Shape{4}, std::vector<int64_t>{0, 3, 1, 0});
+        auto pads_end = std::make_shared<opset3::Constant>(element::i64, Shape{4}, std::vector<int64_t>{0, 3, 0, 0});
+        auto pads = std::make_shared<opset3::Pad>(data, pads_begin, pads_end, ngraph::op::PadMode::CONSTANT);
+
+        // Typo fixed: "dispresed" -> "dispersed".
+        auto dispersed_shape = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{7},
+                                                                {2, 7, 10, 13, 5, 3, 1});
+        auto axis_order = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{7}, {2, 4, 6, 0, 1, 3, 5});
+        auto squeezed_order = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{4},
+                                                               {100, 7, 13, 3});
+
+        auto reshape_before = std::make_shared<ngraph::opset3::Reshape>(pads, dispersed_shape, false);
+        auto permute = std::make_shared<ngraph::opset3::Transpose>(reshape_before, axis_order);
+        auto reshape_after = std::make_shared<ngraph::opset3::Reshape>(permute, squeezed_order, false);
+
+        f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{reshape_after}, ngraph::ParameterVector{data});
+    }
+    auto res = compare_functions(f, f_ref);
+    ASSERT_TRUE(res.first) << res.second;
+}
+
+// Checks that ConvertBatchToSpace(false) decomposes BatchToSpace into the
+// single-step Reshape->Transpose->Reshape->StridedSlice (crop) subgraph.
+TEST(TransformationTests, BatchToSpaceDecomposition) {
+    std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
+
+    {
+        auto data = std::make_shared<opset3::Parameter>(element::f32, Shape{100, 7, 13, 3});
+        auto block_shape = std::make_shared<opset3::Constant>(element::i64, Shape{4},
+                                                              std::vector<int64_t>{1, 10, 5, 1});
+        auto crops_begin = std::make_shared<opset3::Constant>(element::i64, Shape{4}, std::vector<int64_t>{0, 3, 1, 0});
+        auto crops_end = std::make_shared<opset3::Constant>(element::i64, Shape{4}, std::vector<int64_t>{0, 3, 0, 0});
+        auto batch_to_space = std::make_shared<ngraph::opset3::BatchToSpace>(data, block_shape, crops_begin, crops_end);
+
+        f = std::make_shared<ngraph::Function>(ngraph::NodeVector{batch_to_space}, ngraph::ParameterVector{data});
+
+        ngraph::pass::Manager m;
+        m.register_pass<ngraph::pass::InitNodeInfo>();
+        m.register_pass<ngraph::pass::ConvertBatchToSpace>(false);
+        m.run_passes(f);
+
+        ASSERT_NO_THROW(check_rt_info(f));
+    }
+
+    {
+        // Reference subgraph built by hand: Reshape (to dispersed shape)
+        // -> Transpose -> Reshape (to squeezed shape) -> StridedSlice crop.
+        auto data = std::make_shared<opset3::Parameter>(element::f32, Shape{100, 7, 13, 3});
+
+        // Typo fixed: "dispresed" -> "dispersed".
+        auto dispersed_shape = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{7},
+                                                                {10, 5, 1, 2, 7, 13, 3});
+        auto axis_order = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{7}, {3, 4, 0, 5, 1, 6, 2});
+        auto squeezed_order = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{4},
+                                                               {2, 70, 65, 3});
+
+        auto reshape_before = std::make_shared<ngraph::opset3::Reshape>(data, dispersed_shape, false);
+        auto permute = std::make_shared<ngraph::opset3::Transpose>(reshape_before, axis_order);
+        auto reshape_after = std::make_shared<ngraph::opset3::Reshape>(permute, squeezed_order, false);
+
+        // The crop is realized as a StridedSlice with explicit begin/end and no masks.
+        auto begin = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 0});
+        auto end = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {2, 67, 65, 3});
+        std::vector<int64_t> begin_mask(4, 0);
+        std::vector<int64_t> end_mask(4, 0);
+        auto ss = std::make_shared<opset3::StridedSlice>(reshape_after, begin, end, begin_mask, end_mask);
+        f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{ss}, ngraph::ParameterVector{data});
+    }
+    auto res = compare_functions(f, f_ref);
+    ASSERT_TRUE(res.first) << res.second;
+}
}
void BatchToSpaceLayerTest::SetUp() {
+ SetRefMode(LayerTestsUtils::RefMode::INTERPRETER_TRANSFORMATIONS);
std::vector<size_t> inputShape, blockShape, cropsBegin, cropsEnd;
InferenceEngine::Precision netPrecision;
std::tie(blockShape, cropsBegin, cropsEnd, inputShape, netPrecision, targetDevice) = this->GetParam();
}
void SpaceToBatchLayerTest::SetUp() {
+ SetRefMode(LayerTestsUtils::RefMode::INTERPRETER_TRANSFORMATIONS);
std::vector<size_t> inputShape, blockShape, padsBegin, padsEnd;
InferenceEngine::Precision inputPrecision, netPrecision;
std::tie(blockShape, padsBegin, padsEnd, inputShape, netPrecision, targetDevice) = this->GetParam();
// SPDX-License-Identifier: Apache-2.0
//
+#include <transformations/convert_batch_to_space.hpp>
+#include <transformations/convert_space_to_batch.hpp>
+
#include "layer_test_utils.hpp"
namespace LayerTestsUtils {
// reference inference on device with other options and nGraph function has to be implemented here
break;
}
+ case INTERPRETER_TRANSFORMATIONS: {
+ auto cloned_function = ngraph::clone_function(*function);
+
+ // todo: add functionality to configure the necessary transformations for each test separately
+ ngraph::pass::Manager m;
+ m.register_pass<ngraph::pass::ConvertSpaceToBatch>();
+ m.register_pass<ngraph::pass::ConvertBatchToSpace>();
+ m.run_passes(cloned_function);
+ expectedOutputs = ngraph::helpers::interpreterFunction(cloned_function, referenceInputs, convertType);
+ break;
+ }
}
return expectedOutputs;
#include <ngraph/function.hpp>
#include <ie_plugin_config.hpp>
#include <ngraph/function.hpp>
+#include <ngraph/pass/manager.hpp>
#include "common_test_utils/common_utils.hpp"
#include "common_test_utils/test_common.hpp"
enum RefMode {
INTERPRETER,
+ INTERPRETER_TRANSFORMATIONS,
CONSTANT_FOLDING,
IE
};
op/avg_pool.hpp
op/batch_norm.cpp
op/batch_norm.hpp
+ op/batch_to_space.cpp
+ op/batch_to_space.hpp
op/binary_convolution.cpp
op/binary_convolution.hpp
op/broadcast.cpp
op/slice.hpp
op/softmax.cpp
op/softmax.hpp
+ op/space_to_batch.cpp
+ op/space_to_batch.hpp
op/split.cpp
op/split.hpp
op/strided_slice.cpp
op/transpose.hpp
op/xor.cpp
op/xor.hpp
- op/fused/batch_to_space.cpp
- op/fused/batch_to_space.hpp
op/fused/clamp.cpp
op/fused/clamp.hpp
op/fused/hard_sigmoid.cpp
op/fused/selu.hpp
op/fused/shuffle_channels.cpp
op/fused/shuffle_channels.hpp
- op/fused/space_to_batch.cpp
- op/fused/space_to_batch.hpp
op/fused/space_to_depth.cpp
op/fused/space_to_depth.hpp
op/fused/squared_difference.cpp
--- /dev/null
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+#include <cmath>
+#include <cstddef>
+#include <memory>
+#include <ops.hpp>
+
+#include "ngraph/builder/make_constant.hpp"
+#include "ngraph/node.hpp"
+#include "ngraph/op/batch_to_space.hpp"
+#include "ngraph/shape.hpp"
+
+using namespace std;
+using namespace ngraph;
+
+constexpr NodeTypeInfo op::v1::BatchToSpace::type_info;
+
+// Constructs a BatchToSpace-1 op from its four inputs (data, block_shape,
+// crops_begin, crops_end) and runs shape/type inference immediately.
+ngraph::op::v1::BatchToSpace::BatchToSpace(const ngraph::Output<ngraph::Node>& data,
+                                           const ngraph::Output<ngraph::Node>& block_shape,
+                                           const ngraph::Output<ngraph::Node>& crops_begin,
+                                           const ngraph::Output<ngraph::Node>& crops_end)
+    : Op({data, block_shape, crops_begin, crops_end})
+{
+    constructor_validate_and_infer_types();
+}
+
+// Validates that block_shape/crops inputs are integral and, when all three are
+// constant and the data shape is static, infers the static output shape:
+// output[0] = batch / prod(block_shape), output[i] = D_i * block[i] - crops.
+// Otherwise the output shape is left dynamic.
+void op::v1::BatchToSpace::validate_and_infer_types()
+{
+    PartialShape data_pshape = get_input_partial_shape(0);
+
+    const auto& data_type = get_input_element_type(0);
+    const auto& block_shape_type = get_input_element_type(1);
+    const auto& crops_begin_type = get_input_element_type(2);
+    const auto& crops_end_type = get_input_element_type(3);
+
+    NODE_VALIDATION_CHECK(this,
+                          block_shape_type.is_integral_number(),
+                          "block_shape must be an integral number but got (",
+                          block_shape_type,
+                          ").");
+
+    NODE_VALIDATION_CHECK(this,
+                          crops_begin_type.is_integral_number(),
+                          "crops_begin must be an integral number but got (",
+                          crops_begin_type,
+                          ").");
+
+    NODE_VALIDATION_CHECK(this,
+                          crops_end_type.is_integral_number(),
+                          "crops_end must be an integral number but got (",
+                          crops_end_type,
+                          ").");
+
+    auto data = input_value(0);
+    auto block = input_value(1);
+    auto crops_begin = input_value(2);
+    auto crops_end = input_value(3);
+
+    // Static shape inference is only possible when every auxiliary input is a
+    // constant and the data shape is fully known.
+    if (ngraph::op::is_constant(block.get_node_shared_ptr()) &&
+        ngraph::op::is_constant(crops_begin.get_node_shared_ptr()) &&
+        ngraph::op::is_constant(crops_end.get_node_shared_ptr()) && data_pshape.is_static())
+    {
+        const auto& data_shape = data.get_shape();
+
+        NODE_VALIDATION_CHECK(
+            this,
+            (data_shape.size() >= 2),
+            "The data tensor with rank lower than 2 is not supported (data rank: ",
+            data_shape.size(),
+            ")");
+
+        auto block_val = std::dynamic_pointer_cast<op::Constant>(block.get_node_shared_ptr())
+                             ->cast_vector<int64_t>();
+        auto crops_begin_val =
+            std::dynamic_pointer_cast<op::Constant>(crops_begin.get_node_shared_ptr())
+                ->cast_vector<int64_t>();
+        auto crops_end_val =
+            std::dynamic_pointer_cast<op::Constant>(crops_end.get_node_shared_ptr())
+                ->cast_vector<int64_t>();
+
+        // Fixed: iterate with int64_t, not `long` — `long` is 32 bits on LLP64
+        // targets (e.g. Windows) and would silently truncate large values.
+        int64_t block_prod = 1;
+        for (int64_t val : block_val)
+        {
+            NODE_VALIDATION_CHECK(this, val > 0, "block_shape values must be greater than 0");
+            block_prod *= val;
+        }
+
+        NODE_VALIDATION_CHECK(this,
+                              data_shape.at(0) % block_prod == 0,
+                              "BatchToSpace: The input data's 'batch' axis size: ",
+                              data_shape.at(0),
+                              " must be a multiple of ",
+                              " product of block_shape values: ",
+                              block_prod);
+
+        Shape output_shape = {static_cast<size_t>(data_shape[0] / block_prod)};
+        for (size_t idx = 1; idx < data_shape.size(); ++idx)
+        {
+            output_shape.push_back(static_cast<size_t>(data_shape[idx] * block_val[idx] -
+                                                       crops_begin_val[idx] - crops_end_val[idx]));
+        }
+
+        set_output_size(1);
+        set_output_type(0, data_type, output_shape);
+    }
+    else
+    {
+        // Cannot compute the shape statically; defer to runtime.
+        set_output_type(0, data_type, PartialShape::dynamic());
+    }
+}
+
+// Creates a copy of this op wired to the four replacement inputs
+// (data, block_shape, crops_begin, crops_end), in that order.
+std::shared_ptr<ngraph::Node>
+    ngraph::op::v1::BatchToSpace::clone_with_new_inputs(const OutputVector& new_args) const
+{
+    check_new_args_count(this, new_args);
+    return make_shared<BatchToSpace>(
+        new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3));
+}
+
+// BatchToSpace-1 has no attributes (all configuration comes through inputs),
+// so there is nothing to visit; report success.
+bool ngraph::op::v1::BatchToSpace::visit_attributes(ngraph::AttributeVisitor& visitor)
+{
+    return true;
+}
/// D_2 * block_shape[2] - crops_begin[2] - crops_end[2], ...,
/// D_{N - 1} * block_shape[N - 1] - crops_begin[N - 1] - crops_end[N - 1]`
/// of the same type as `data` input.
- class NGRAPH_API BatchToSpace : public ngraph::op::util::FusedOp
+ class NGRAPH_API BatchToSpace : public Op
{
public:
static constexpr NodeTypeInfo type_info{"BatchToSpace", 1};
const Output<Node>& crops_begin,
const Output<Node>& crops_end);
- OutputVector decompose_op() const override;
- void pre_validate_and_infer_types() override;
+ void validate_and_infer_types() override;
std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override;
--- /dev/null
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+#include <cmath>
+#include <cstddef>
+#include <memory>
+
+#include "ngraph/builder/make_constant.hpp"
+#include "ngraph/node.hpp"
+#include "ngraph/op/space_to_batch.hpp"
+#include "ngraph/ops.hpp"
+#include "ngraph/shape.hpp"
+
+using namespace std;
+using namespace ngraph;
+
+constexpr NodeTypeInfo op::v1::SpaceToBatch::type_info;
+
+// Constructs a SpaceToBatch-1 op from its four inputs (data, block_shape,
+// pads_begin, pads_end) and runs shape/type inference immediately.
+ngraph::op::v1::SpaceToBatch::SpaceToBatch(const ngraph::Output<ngraph::Node>& data,
+                                           const ngraph::Output<ngraph::Node>& block_shape,
+                                           const ngraph::Output<ngraph::Node>& pads_begin,
+                                           const ngraph::Output<ngraph::Node>& pads_end)
+    : Op({data, block_shape, pads_begin, pads_end})
+{
+    constructor_validate_and_infer_types();
+}
+
+// Validates that block_shape/pads inputs are integral and, when all three are
+// constant and the data shape is static, infers the static output shape:
+// output[0] = batch * prod(block_shape),
+// output[i] = (pads_begin[i] + D_i + pads_end[i]) / block[i].
+// Otherwise the output shape is left dynamic.
+void op::v1::SpaceToBatch::validate_and_infer_types()
+{
+    PartialShape data_pshape = get_input_partial_shape(0);
+    const auto& data_type = get_input_element_type(0);
+    const auto& block_shape_type = get_input_element_type(1);
+    const auto& pads_begin_type = get_input_element_type(2);
+    const auto& pads_end_type = get_input_element_type(3);
+
+    NODE_VALIDATION_CHECK(this,
+                          block_shape_type.is_integral_number(),
+                          "block_shape must be an integral number but got (",
+                          block_shape_type,
+                          ").");
+
+    // Fixed: the messages below referred to "crops_begin"/"crops_end"
+    // (copy-pasted from BatchToSpace); these inputs are pads_begin/pads_end.
+    NODE_VALIDATION_CHECK(this,
+                          pads_begin_type.is_integral_number(),
+                          "pads_begin must be an integral number but got (",
+                          pads_begin_type,
+                          ").");
+
+    NODE_VALIDATION_CHECK(this,
+                          pads_end_type.is_integral_number(),
+                          "pads_end must be an integral number but got (",
+                          pads_end_type,
+                          ").");
+
+    auto data = input_value(0);
+    auto block = input_value(1);
+    auto pads_begin = input_value(2);
+    auto pads_end = input_value(3);
+
+    // Static shape inference is only possible when every auxiliary input is a
+    // constant and the data shape is fully known.
+    if (ngraph::op::is_constant(block.get_node_shared_ptr()) &&
+        ngraph::op::is_constant(pads_begin.get_node_shared_ptr()) &&
+        ngraph::op::is_constant(pads_end.get_node_shared_ptr()) && data_pshape.is_static())
+    {
+        const auto& data_shape = data.get_shape();
+
+        NODE_VALIDATION_CHECK(
+            this,
+            (data_shape.size() >= 2),
+            "The data tensor with rank lower than 2 is not supported (data rank: ",
+            data_shape.size(),
+            ")");
+
+        auto block_val = std::dynamic_pointer_cast<op::Constant>(block.get_node_shared_ptr())
+                             ->cast_vector<int64_t>();
+        auto pads_begin_val =
+            std::dynamic_pointer_cast<op::Constant>(pads_begin.get_node_shared_ptr())
+                ->cast_vector<int64_t>();
+        auto pads_end_val = std::dynamic_pointer_cast<op::Constant>(pads_end.get_node_shared_ptr())
+                                ->cast_vector<int64_t>();
+
+        // Fixed: iterate with int64_t, not `long` (32-bit on LLP64 targets),
+        // and validate positivity for EVERY block value — previously
+        // block_shape[0] was multiplied into the product without any check,
+        // unlike the matching BatchToSpace validation.
+        int64_t block_prod = 1;
+        for (int64_t val : block_val)
+        {
+            NODE_VALIDATION_CHECK(this, val > 0, "block_shape values must be greater than 0");
+            block_prod *= val;
+        }
+
+        Shape output_shape = {static_cast<size_t>(data_shape[0] * block_prod)};
+        for (size_t idx = 1; idx < data_shape.size(); ++idx)
+        {
+            NODE_VALIDATION_CHECK(
+                this,
+                (pads_begin_val.at(idx) + data_shape.at(idx) + pads_end_val.at(idx)) %
+                        block_val.at(idx) ==
+                    0,
+                "The dimension on position: ",
+                idx,
+                " equal to: ",
+                pads_begin_val.at(idx) + data_shape.at(idx) + pads_end_val.at(idx),
+                " must be a multiple of block_values[i]: ",
+                block_val.at(idx));
+            output_shape.push_back(
+                static_cast<size_t>(pads_begin_val[idx] + data_shape[idx] + pads_end_val[idx]) /
+                block_val[idx]);
+        }
+
+        set_output_size(1);
+        set_output_type(0, data_type, output_shape);
+    }
+    else
+    {
+        // Cannot compute the shape statically; defer to runtime.
+        set_output_type(0, data_type, PartialShape::dynamic());
+    }
+}
+
+// Creates a copy of this op wired to the four replacement inputs
+// (data, block_shape, pads_begin, pads_end), in that order.
+std::shared_ptr<Node>
+    ngraph::op::v1::SpaceToBatch::clone_with_new_inputs(const OutputVector& new_args) const
+{
+    check_new_args_count(this, new_args);
+    return make_shared<SpaceToBatch>(
+        new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3));
+}
+
+// SpaceToBatch-1 has no attributes (all configuration comes through inputs),
+// so there is nothing to visit; report success.
+bool ngraph::op::v1::SpaceToBatch::visit_attributes(ngraph::AttributeVisitor& visitor)
+{
+    return true;
+}
/// (pads_begin[2] + D_2 + pads_end[2]) / block_shape[2], ...,
/// (pads_begin[N - 1] + D_{N - 1} + pads_end[N - 1]) / block_shape[N - 1]`
/// of the same type as `data` input.
- class NGRAPH_API SpaceToBatch : public ngraph::op::util::FusedOp
+ class NGRAPH_API SpaceToBatch : public Op
{
public:
static constexpr NodeTypeInfo type_info{"SpaceToBatch", 1};
const Output<ngraph::Node>& pads_begin,
const Output<ngraph::Node>& pads_end);
- OutputVector decompose_op() const override;
- void pre_validate_and_infer_types() override;
+ void validate_and_infer_types() override;
std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
bool visit_attributes(AttributeVisitor& visitor) override;
#include "ngraph/op/atanh.hpp"
#include "ngraph/op/avg_pool.hpp"
#include "ngraph/op/batch_norm.hpp"
+#include "ngraph/op/batch_to_space.hpp"
#include "ngraph/op/binary_convolution.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/bucketize.hpp"
#include "ngraph/op/extractimagepatches.hpp"
#include "ngraph/op/floor.hpp"
#include "ngraph/op/floor_mod.hpp"
-#include "ngraph/op/fused/batch_to_space.hpp"
#include "ngraph/op/fused/clamp.hpp"
#include "ngraph/op/fused/depth_to_space.hpp"
#include "ngraph/op/fused/fake_quantize.hpp"
#include "ngraph/op/fused/rnn_cell.hpp"
#include "ngraph/op/fused/selu.hpp"
#include "ngraph/op/fused/shuffle_channels.hpp"
-#include "ngraph/op/fused/space_to_batch.hpp"
#include "ngraph/op/fused/space_to_depth.hpp"
#include "ngraph/op/fused/squared_difference.hpp"
#include "ngraph/op/fused/squeeze.hpp"
#include "ngraph/op/sinh.hpp"
#include "ngraph/op/slice.hpp"
#include "ngraph/op/softmax.hpp"
+#include "ngraph/op/space_to_batch.hpp"
#include "ngraph/op/split.hpp"
#include "ngraph/op/sqrt.hpp"
#include "ngraph/op/stop_gradient.hpp"
EXPECT_EQ(expected, read_vector<float>(result0));
}
-NGRAPH_TEST(${BACKEND_NAME}, space_to_batch)
-{
- auto data = make_shared<op::Parameter>(element::f32, Shape{1, 2, 2, 3});
- auto block_shape =
- make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 2, 3, 2});
- auto pads_begin =
- make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 0, 1, 0});
- auto pads_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 0, 0, 1});
- auto space_to_batch =
- make_shared<op::v1::SpaceToBatch>(data, block_shape, pads_begin, pads_end);
- auto function = make_shared<Function>(NodeVector{space_to_batch}, ParameterVector{data});
- auto test_case = test::TestCase<TestEngine>(function);
- test_case.add_input<float>({0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f});
- test_case.add_expected_output<float>(Shape{12, 1, 1, 2},
- {
- 0.f, 0.f, 0.f, 0.f, 0.f, 2.f, 1.f, 0.f,
- 3.f, 5.f, 4.f, 0.f, 0.f, 0.f, 0.f, 0.f,
- 6.f, 8.f, 7.f, 0.f, 9.f, 11.f, 10.f, 0.f,
- });
- test_case.run();
-}
-
-NGRAPH_TEST(${BACKEND_NAME}, batch_to_space)
-{
- auto data = make_shared<op::Parameter>(element::f32, Shape{12, 1, 1, 2});
- auto block_shape =
- make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 2, 3, 2});
- auto pads_begin =
- make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 0, 1, 0});
- auto pads_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 0, 0, 1});
- auto batch_to_space =
- make_shared<op::v1::BatchToSpace>(data, block_shape, pads_begin, pads_end);
- auto function = make_shared<Function>(NodeVector{batch_to_space}, ParameterVector{data});
-
- auto test_case = test::TestCase<TestEngine>(function);
- test_case.add_input<float>({
- 0.f, 0.f, 0.f, 0.f, 0.f, 2.f, 1.f, 0.f, 3.f, 5.f, 4.f, 0.f,
- 0.f, 0.f, 0.f, 0.f, 6.f, 8.f, 7.f, 0.f, 9.f, 11.f, 10.f, 0.f,
- });
- test_case.add_expected_output<float>(
- Shape{1, 2, 2, 3}, {0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f});
- test_case.run();
-}
-
NGRAPH_TEST(${BACKEND_NAME}, space_to_depth_block_first)
{
auto A = make_shared<op::Parameter>(element::f32, Shape{1, 2, 4, 4});