#include <string>
#include <transformations/utils/utils.hpp>
+#include <transformations/smart_reshape/set_batch_size.hpp>
#include <transformations/smart_reshape/smart_reshape.hpp>
#include <legacy/transformations/convert_opset1_to_legacy/convert_one_hot_to_one_hot_ie.hpp>
// Add shape infer method for old operations which are not included to opset1, opset2 and opset3
::ngraph::op::GenericIE::addExtension(_ngraph_function, std::make_shared<ShapeInfer::BuiltInShapeInferHolder>());
- ngraph::pass::Manager ssr_manager;
- ssr_manager.register_pass<ngraph::pass::SmartReshape>();
- ssr_manager.run_passes(_ngraph_function);
-
reshape();
for (const auto& layer : _ngraph_function->get_parameters()) {
std::string outName = layer->get_friendly_name();
}
auto params = _ngraph_function->get_parameters();
for (const auto& param : params) {
- if (param->get_partial_shape().is_dynamic())
+ if (param->get_partial_shape().rank().is_dynamic())
continue;
- auto shape = param->get_shape();
+ auto pshape = param->get_partial_shape();
+ auto rank = pshape.rank().get_length();
// WA: for speech recognition and scalar layouts (copy-past from CNNNetwork)
- if (!shape.empty() && shape.size() != 3 && shape.size() != 1)
- return shape[0];
+ if ((rank == 2 || rank > 3) && pshape[0].is_static()) {
+ return pshape[0].get_length();
+ }
}
return 1;
}
if (param->get_partial_shape().is_dynamic() || param->get_shape() != it->second)
needReshape = true;
}
- if (needReshape)
+ if (needReshape) {
+ ngraph::pass::Manager ssr_manager;
+ ssr_manager.register_pass<ngraph::pass::SmartReshape>();
+ ssr_manager.run_passes(_ngraph_function);
+
reshape(inputShapes);
+ }
} catch (std::exception& ex) {
return DescriptionBuffer(GENERAL_ERROR, responseDesc) << ex.what();
}
}
// Sets the batch dimension of every network input to `size`.
// If a legacy CNNNetwork conversion already exists, the call is delegated to it;
// otherwise each parameter's 0th dimension is scaled by size/current_batch and the
// function is reshaped after the SetBatchSize pass relaxes hard-coded shapes.
StatusCode CNNNetworkNGraphImpl::setBatchSize(size_t size, ResponseDesc* responseDesc) noexcept {
-    try {
-        if (size == getBatchSize())
-            return OK;
-        if (!cnnNetwork)
-            convertToCNNNetworkImpl();
-        return cnnNetwork->setBatchSize(size, responseDesc);
-    } catch (std::exception& ex) {
-        return DescriptionBuffer(GENERAL_ERROR, responseDesc) << ex.what();
-    }
-}
-
-StatusCode CNNNetworkNGraphImpl::setBatchSizeReshape(size_t size, ResponseDesc* responseDesc) noexcept {
    if (cnnNetwork)
-        return cnnNetwork->setBatchSizeReshape(size, responseDesc);
+        return cnnNetwork->setBatchSize(size, responseDesc);
    try {
+        if (getBatchSize() == size) return OK;
        auto original_parameters = _ngraph_function->get_parameters();
+        if (original_parameters.empty()) return DescriptionBuffer(GENERAL_ERROR, responseDesc) << "Cannot set batch! Function doesn't contain parameters!";
+
// NOTE(review): unqualified `stringstream` relies on a `using namespace std;`
// elsewhere in this translation unit — confirm, or qualify as std::stringstream.
+        stringstream ss;
+        ss << " Please use reshape method instead. Original parameter shapes are: ";
+        for (size_t i = 0; i < original_parameters.size(); ++i) {
+            if (i) ss << ", ";
+            ss << "\"" << original_parameters[i]->get_friendly_name() << "\": " << original_parameters[i]->get_partial_shape();
+        }
+
// The first parameter (alphabetically by friendly name) gates the whole operation,
// mirroring the historical setBatchSize behavior.
+        // ill-formed logic from the past setBatchSize (we keep it for backward-compatibility)
+        const auto first_parameter = *std::min_element(original_parameters.begin(), original_parameters.end(),
+            [](std::shared_ptr<ngraph::Node> lhs, std::shared_ptr<ngraph::Node> rhs){return lhs->get_friendly_name() < rhs->get_friendly_name();});
+        const auto first_parameter_pshape = first_parameter->get_partial_shape();
+        if (first_parameter_pshape.is_dynamic()) return DescriptionBuffer(PARAMETER_MISMATCH, responseDesc) <<
+            "Cannot set batch! Function contains parameter with partially defined shape!" << ss.str();
+        const auto first_parameter_rank = first_parameter_pshape.rank().get_length();
+        if (first_parameter_rank == 0 || first_parameter_rank == 1 || first_parameter_rank == 3) return DescriptionBuffer(PARAMETER_MISMATCH, responseDesc) <<
+            "Cannot set batch! Function contains 0D/1D/3D parameter with unknown batch dimension placement." << ss.str();
-        std::map<std::string, std::vector<size_t>> origShapes;
        std::map<std::string, std::vector<size_t>> inShapes;
        for (const auto &parameter : original_parameters) {
-            if (parameter->get_partial_shape().is_dynamic())
-                THROW_IE_EXCEPTION << "Cannot setBatch! Network contains inputs with dynamic shapes!";
-            std::vector<size_t> shape = parameter->get_shape();
-            origShapes[parameter->get_friendly_name()] = shape;
-            shape[0] = size;
+            const auto & pshape = parameter->get_partial_shape();
+            if (pshape.is_dynamic()) return DescriptionBuffer(PARAMETER_MISMATCH, responseDesc) <<
+                "Cannot set batch! Function contains parameter with partially defined shape!" << ss.str();
+            const auto & rank = pshape.rank().get_length();
+            if (rank == 0) return DescriptionBuffer(PARAMETER_MISMATCH, responseDesc) <<
+                "Cannot set batch! Function contains 0D/1D/3D parameter with unknown batch dimension placement." << ss.str();
+            auto shape = parameter->get_shape();
// Scale dim 0 proportionally (ceil), matching the legacy setBatchSize arithmetic.
// NOTE(review): std::ceil requires <cmath> — confirm it is included in this TU.
+            shape[0] = {static_cast<size_t>(std::ceil(size * static_cast<float>(shape[0]) / static_cast<float>(getBatchSize())))};
            inShapes[parameter->get_friendly_name()] = shape;
        }
-        auto sts = reshape(inShapes, responseDesc);
-        if (sts == OK) return OK;
-        for (size_t i = 0; i < original_parameters.size(); i++) {
-            const auto& param = original_parameters[i];
-            if (origShapes.find(param->get_friendly_name()) == origShapes.end())
-                continue;
-            ::ngraph::PartialShape shape(origShapes.at(param->get_friendly_name()));
-            auto newParam = std::make_shared<::ngraph::op::Parameter>(param->get_element_type(), shape);
-            newParam->set_friendly_name(param->get_friendly_name());
-            _ngraph_function->replace_parameter(i, newParam);
-        }
-        convertToCNNNetworkImpl();
-        return cnnNetwork->setBatchSize(size, responseDesc);
// Make Reshape/StridedSlice patterns batch-flexible before the actual reshape.
+        ngraph::pass::Manager ssr_manager;
+        ssr_manager.register_pass<ngraph::pass::SetBatchSize>();
+        ssr_manager.run_passes(_ngraph_function);
+
+        return reshape(inShapes, responseDesc);
    } catch (std::exception& ex) {
        return DescriptionBuffer(GENERAL_ERROR, responseDesc) << ex.what();
    }
// public version
StatusCode setBatchSize(size_t size, ResponseDesc* responseDesc) noexcept override;
- // for internal usage (e.g. setBatch via reshape in tests)
- StatusCode setBatchSizeReshape(size_t size, ResponseDesc* responseDesc) noexcept;
-
size_t getBatchSize() const noexcept override;
StatusCode addOutput(const std::string& layerName, size_t outputIndex, ResponseDesc* resp) noexcept override;
#include <transformations_visibility.hpp>
+#include <ngraph/opsets/opset1.hpp>
#include <ngraph/pass/graph_rewrite.hpp>
#include <ngraph/slice_plan.hpp>
#include <ngraph/util.hpp>
return rewritten;
}
};
+
+ngraph::SlicePlan get_slice_plan(std::shared_ptr<ngraph::opset1::StridedSlice> slice);
\ No newline at end of file
--- /dev/null
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <functional>
#include <memory>
#include <numeric>

#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset1.hpp>
#include <ngraph/opsets/opset3.hpp>
#include <ngraph/opsets/opset5.hpp>
#include <ngraph/pattern/matcher.hpp>
#include <ngraph/pattern/op/wrap_type.hpp>
#include <ngraph/rt_info.hpp>

#include <transformations_visibility.hpp>
#include <ngraph/pass/graph_rewrite.hpp>

namespace ngraph {
namespace pass {

class TRANSFORMATIONS_API MimicSetBatchSize;
class TRANSFORMATIONS_API DisableCFForPriorBoxes;
class TRANSFORMATIONS_API EnableCFForPriorBoxes;

}  // namespace pass
}  // namespace ngraph

/**
 * @ingroup ie_transformation_common_api
 * @brief MimicSetBatchSize transformation relaxes hard-coded output batch dimension of Reshape operation.
 * For Reshape with input shape [in_batch, ...] and pattern value [out_batch, ...] it generates a sub-graph
 * which basically keeps ratio of input and output batch size and performs the following calculation:
 *
 * scale = float(out_batch) / float(in_batch)
 * modified_batch_dim = int(ceil(float(shape(input)[0]) * scale))
 *
 * This transformation should be executed only while setBatchSize method call
 */

// Matcher pass: rewrites a constant Reshape pattern into a batch-tracking sub-graph (see @brief above).
class ngraph::pass::MimicSetBatchSize: public ngraph::pass::MatcherPass {
public:
    NGRAPH_RTTI_DECLARATION;
    MimicSetBatchSize();
};

/**
 * @ingroup ie_transformation_common_api
 * @brief DisableCFForPriorBoxes and EnableCFForPriorBoxes transformations are needed to avoid unnecessary PriorBox folding
 */
// Marks ShapeOf sub-graphs feeding PriorBox ops so ConstantFolding skips them.
class ngraph::pass::DisableCFForPriorBoxes: public ngraph::pass::FunctionPass {
public:
    NGRAPH_RTTI_DECLARATION;
    bool run_on_function(std::shared_ptr<ngraph::Function> f) override;
};

// Removes the markers set by DisableCFForPriorBoxes, re-enabling constant folding.
class ngraph::pass::EnableCFForPriorBoxes: public ngraph::pass::FunctionPass {
public:
    NGRAPH_RTTI_DECLARATION;
    bool run_on_function(std::shared_ptr<ngraph::Function> f) override;
};
\ No newline at end of file
--- /dev/null
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <vector>
#include <memory>

#include <transformations_visibility.hpp>

#include <ngraph/pass/graph_rewrite.hpp>

namespace ngraph {
namespace pass {

class TRANSFORMATIONS_API Proposal1Scales;
class TRANSFORMATIONS_API Proposal4Scales;

}  // namespace pass
}  // namespace ngraph

/**
 * @ingroup ie_transformation_common_api
 * @brief ProposalScales transformation helps to silently avoid reshape issues on the scale-input of Proposal layer.
 *
 * Expected sub-graph looks like:
 * Parameter [batch, 3 or 4] -> Reshape [-1] -(in: 3)-> Proposal
 *
 * Proposal operation accepts 3 or 4 values as scales from specification standpoint
 * Proposal uses first set (batch) of scale values to proceed in the plugins
 * According to this we explicitly take first batch of scales with StridedSlice operation
 *
 * Resulting sub-graph:
 * Parameter [batch, 3 or 4] -> Reshape [-1] -> StridedSlice[0: 3 or 4] -(in: 3)-> Proposal
 */

// Handles opset1::Proposal consumers of the reshaped scales input.
class ngraph::pass::Proposal1Scales : public ngraph::pass::MatcherPass {
public:
    NGRAPH_RTTI_DECLARATION;
    Proposal1Scales();
};

// Same as Proposal1Scales, but matches opset4::Proposal.
class ngraph::pass::Proposal4Scales : public ngraph::pass::MatcherPass {
public:
    NGRAPH_RTTI_DECLARATION;
    Proposal4Scales();
};
--- /dev/null
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <vector>
#include <memory>

#include <transformations_visibility.hpp>

#include <ngraph/pass/graph_rewrite.hpp>

namespace ngraph {
namespace pass {

class TRANSFORMATIONS_API ReshapeTo1D;

}  // namespace pass
}  // namespace ngraph

/**
 * @ingroup ie_transformation_common_api
 * @brief ReshapeTo1D transformation looks for Reshape from nD to 1D tensor and replaces its pattern to [-1]
 */

// Matcher pass: replaces a hard-coded 1D Reshape pattern constant with [-1].
class ngraph::pass::ReshapeTo1D : public ngraph::pass::MatcherPass {
public:
    NGRAPH_RTTI_DECLARATION;
    ReshapeTo1D();
};
} // namespace pass
} // namespace ngraph
+/**
+ * @ingroup ie_transformation_common_api
+ * @brief ReshapeAMatMul and ReshapeBMatMul transformations relax hard-coded Reshape followed by MatMul operation
+ * For 2D Reshape search patterns are:
+ * - MatMul(Reshape(any_input, any_input), any_input)
+ * - MatMul(any_input, Reshape(any_input, any_input))
+ */
+
// Relaxes the Reshape feeding MatMul's first (A) input.
class ngraph::pass::ReshapeAMatMul: public ngraph::pass::MatcherPass {
public:
    NGRAPH_RTTI_DECLARATION;
    ReshapeAMatMul();
};
// Relaxes the Reshape feeding MatMul's second (B) input.
class ngraph::pass::ReshapeBMatMul: public ngraph::pass::MatcherPass {
public:
    NGRAPH_RTTI_DECLARATION;
    ReshapeBMatMul();
};
\ No newline at end of file
--- /dev/null
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <vector>
#include <memory>

#include <transformations_visibility.hpp>

#include <ngraph/pass/graph_rewrite.hpp>


namespace ngraph {
namespace pass {

class TRANSFORMATIONS_API SetBatchSize;

}  // namespace pass
}  // namespace ngraph

/**
 * @ingroup ie_transformation_common_api
 * @brief Generic caller for all the transformations responsible to make model reshape-able by batch dimension
 */

// Function pass: runs the full set-batch-size preparation pipeline (see .cpp for the pass list).
class ngraph::pass::SetBatchSize: public ngraph::pass::FunctionPass {
public:
    NGRAPH_RTTI_DECLARATION;
    bool run_on_function(std::shared_ptr<ngraph::Function> f) override;
};
// Function pass: runs shape-relaxing transformations so the model becomes reshape-able.
class ngraph::pass::SmartReshape: public ngraph::pass::FunctionPass {
public:
    NGRAPH_RTTI_DECLARATION;
    bool run_on_function(std::shared_ptr<ngraph::Function> f) override;
};
--- /dev/null
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <vector>
#include <memory>

#include <transformations_visibility.hpp>

#include <ngraph/pass/graph_rewrite.hpp>

namespace ngraph {
namespace pass {

class TRANSFORMATIONS_API StridedSliceSqueeze;
class TRANSFORMATIONS_API SqueezeStridedSlice;
class TRANSFORMATIONS_API SharedSqueeze;

}  // namespace pass
}  // namespace ngraph

/**
 * @ingroup ie_transformation_common_api
 * @brief StridedSliceSqueeze transformation looks for SS -> Squeeze and corrects SS inputs and attributes for SS output to be squeeze-able
 */

class ngraph::pass::StridedSliceSqueeze : public ngraph::pass::MatcherPass {
public:
    NGRAPH_RTTI_DECLARATION;
    StridedSliceSqueeze();
};

/**
 * @ingroup ie_transformation_common_api
 * @brief SqueezeStridedSlice transformation looks for Squeeze -> SS and corrects SS inputs and attributes for SS output to be squeeze-able
 */

class ngraph::pass::SqueezeStridedSlice : public ngraph::pass::MatcherPass {
public:
    NGRAPH_RTTI_DECLARATION;
    SqueezeStridedSlice();
};

/**
 * @ingroup ie_transformation_common_api
 * @brief SharedSqueeze transformation looks for shared Squeezes and leaves only one Squeeze reconnecting all the outputs to it
 */

class ngraph::pass::SharedSqueeze : public ngraph::pass::FunctionPass {
public:
    NGRAPH_RTTI_DECLARATION;
    bool run_on_function(std::shared_ptr<ngraph::Function> f) override;
};
--- /dev/null
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <transformations/smart_reshape/mimic_set_batch_size.hpp>
+
+NGRAPH_RTTI_DEFINITION(ngraph::pass::MimicSetBatchSize, "MimicSetBatchSize", 0);
+
ngraph::pass::MimicSetBatchSize::MimicSetBatchSize() {
    // Match Reshape(data, Constant) where the data input has a static 0th (batch) dimension
    // and the Reshape output has static rank greater than 1.
    auto reshape_label = ngraph::pattern::wrap_type<opset5::Reshape>({pattern::any_input(pattern::has_static_dim(0)),
                                                                     ngraph::pattern::wrap_type<opset5::Constant>()},
        [](const Output<Node> &output) { return output.get_partial_shape().rank().is_static() && output.get_partial_shape().rank().get_length() > 1; });

    matcher_pass_callback callback = [=](pattern::Matcher &m) -> bool {
        const auto & reshape = m.get_match_root();
        auto pattern = std::dynamic_pointer_cast<opset5::Constant>(reshape->get_input_node_shared_ptr(1));
        if (!pattern)
            return false;

        // Only rewrite patterns with a hard-coded positive batch value;
        // -1 (infer) and 0 (copy) batch specifiers are already reshape-friendly.
        const auto & pattern_vector = pattern->cast_vector<int64_t>();
        if (pattern_vector.empty() || pattern_vector[0] < 1)
            return false;

        // mimicking old setBatchSize style (copied):
        // float diff = static_cast<float>(dims.at(0)) / static_cast<float>(originalBatchSize);
        // dims.at(0) = static_cast<size_t>(std::ceil(size * diff));

        const auto & old_input_batch = static_cast<float>(reshape->get_input_partial_shape(0)[0].get_length());
        const auto & old_output_batch = static_cast<float>(pattern_vector[0]);

        // Ratio between the hard-coded output batch and the current input batch;
        // kept constant while the actual batch is recomputed from the data shape at runtime.
        const auto & scale = old_output_batch / old_input_batch;

        // new_output_batch = Convert(Ceiling(float(shape(data)[0]) * scale), pattern element type)
        const auto & shape_of = std::make_shared<opset5::ShapeOf>(reshape->get_input_source_output(0), pattern->get_element_type());
        const auto & new_input_batch = std::make_shared<ngraph::opset5::Gather>(
            shape_of, ngraph::opset5::Constant::create(ngraph::element::i64, {1}, std::vector<int64_t>{0}),
            ngraph::opset5::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{0}));

        const std::shared_ptr<Node> & new_output_batch = std::make_shared<opset5::Convert>(
            std::make_shared<opset5::Ceiling>(
                std::make_shared<opset5::Multiply>(
                    std::make_shared<opset5::Convert>(new_input_batch, element::f32),
                    opset5::Constant::create(element::f32, {1}, {scale}))),
            pattern->get_element_type());

        // Keep the non-batch tail of the original pattern and prepend the computed batch.
        auto new_reshape_pattern = new_output_batch;
        const auto rank = pattern_vector.size();
        if (rank > 1) {
            std::vector<int64_t> non_batch_dims(rank - 1);
            std::iota(non_batch_dims.begin(), non_batch_dims.end(), 1);
            const auto & non_batch_dims_node = std::make_shared<ngraph::opset5::Gather>(
                pattern,
                ngraph::opset5::Constant::create(ngraph::element::i64, {non_batch_dims.size()}, non_batch_dims),
                ngraph::opset5::Constant::create(ngraph::element::i64, {}, std::vector<int64_t>{0}));
            new_reshape_pattern = std::make_shared<opset5::Concat>(OutputVector{new_reshape_pattern, non_batch_dims_node}, 0);
        }
        reshape->input(1).replace_source_output(new_reshape_pattern->output(0));
        return true;
    };
    auto m = std::make_shared<ngraph::pattern::Matcher>(reshape_label, "MimicSetBatchSize");
    register_matcher(m, callback);
}
+
+
+void set_folding_for_PriorBox(std::shared_ptr<ngraph::Node> prior_box, bool flag) {
+ std::string rt_info_disable_cf = "DISABLED_CONSTANT_FOLDING";
+ static std::unordered_set<ngraph::NodeTypeInfo> allowed_to_skip = {
+ ngraph::opset1::Convert::type_info,
+ ngraph::opset1::StridedSlice::type_info,
+ };
+ static std::unordered_set<ngraph::NodeTypeInfo> types_to_find = {
+ ngraph::opset1::ShapeOf::type_info,
+ ngraph::opset3::ShapeOf::type_info,
+ };
+
+ std::deque<std::shared_ptr<ngraph::Node>> nodes;
+ nodes.push_back(prior_box->get_input_node_shared_ptr(0));
+ nodes.push_back(prior_box->get_input_node_shared_ptr(1));
+
+ while (!nodes.empty()) {
+ auto curr_node = nodes.front();
+ nodes.pop_front();
+ if (allowed_to_skip.count(curr_node->get_type_info())) {
+ nodes.push_back(curr_node->get_input_node_shared_ptr(0));
+ } else if (types_to_find.count(curr_node->get_type_info())) {
+ auto& rt_info = curr_node->get_rt_info();
+ if (flag && rt_info.count(rt_info_disable_cf))
+ rt_info.erase(rt_info_disable_cf);
+ if (!flag)
+ rt_info[rt_info_disable_cf];
+ }
+ }
+}
+
+NGRAPH_RTTI_DEFINITION(ngraph::pass::DisableCFForPriorBoxes, "DisableCFForPriorBoxes", 0);
+
+bool ngraph::pass::DisableCFForPriorBoxes::run_on_function(std::shared_ptr<ngraph::Function> f) {
+ for (const auto & node : f->get_ops())
+ if (ngraph::is_type<opset1::PriorBox>(node) || ngraph::is_type<opset1::PriorBoxClustered>(node)) {
+ set_folding_for_PriorBox(node, false);
+ }
+ return false;
+}
+
+NGRAPH_RTTI_DEFINITION(ngraph::pass::EnableCFForPriorBoxes, "EnableCFForPriorBoxes", 0);
+
+bool ngraph::pass::EnableCFForPriorBoxes::run_on_function(std::shared_ptr<ngraph::Function> f) {
+ for (const auto & node : f->get_ops())
+ if (ngraph::is_type<opset1::PriorBox>(node) || ngraph::is_type<opset1::PriorBoxClustered>(node)) {
+ set_folding_for_PriorBox(node, true);
+ }
+ return false;
+}
+
--- /dev/null
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <transformations/smart_reshape/proposal_scales_stridedslice.hpp>
+
+#include <ngraph/ngraph.hpp>
+#include <ngraph/opsets/opset1.hpp>
+#include <ngraph/opsets/opset4.hpp>
+#include <ngraph/opsets/opset5.hpp>
+#include <ngraph/pattern/matcher.hpp>
+#include <ngraph/pattern/op/wrap_type.hpp>
+#include <ngraph/rt_info.hpp>
+
+bool crop_scales_for_proposal(const ngraph::pattern::PatternValueMap & pattern_to_output,
+ std::shared_ptr<ngraph::Node> parameter_label, std::shared_ptr<ngraph::Node> proposal_label) {
+ const auto & parameter = pattern_to_output.at(parameter_label);
+ const auto & proposal = pattern_to_output.at(proposal_label).get_node_shared_ptr();
+
+ auto cropped_scales = std::make_shared<ngraph::opset5::StridedSlice>(
+ proposal->input_value(2),
+ ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}),
+ ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {parameter.get_partial_shape()[1].get_length()}),
+ ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}),
+ std::vector<int64_t>{0}, std::vector<int64_t>{0});
+
+ proposal->input(2).replace_source_output(cropped_scales->output(0));
+ return true;
+}
+
+NGRAPH_RTTI_DEFINITION(ngraph::pass::Proposal1Scales, "Proposal1Scales", 0);
+
+ngraph::pass::Proposal1Scales::Proposal1Scales() {
+ auto parameter_label = ngraph::pattern::wrap_type<opset5::Parameter>([](const Output<Node> &output) {
+ const auto & shape = output.get_partial_shape();
+ return shape.rank().is_static() && shape.rank().get_length() == 2 && shape[1].is_static() && (shape[1].get_length() == 3 || shape[1].get_length() == 4);
+ });
+ auto reshape_label = ngraph::pattern::wrap_type<opset5::Reshape>({parameter_label, ngraph::pattern::wrap_type<opset5::Constant>()},
+ [](const Output<Node> &output) { return output.get_partial_shape().rank().is_static() && output.get_partial_shape().rank().get_length() == 1; });
+ auto proposal_label = ngraph::pattern::wrap_type<opset1::Proposal>({pattern::any_input(), pattern::any_input(), reshape_label});
+
+ matcher_pass_callback callback = [parameter_label, proposal_label](pattern::Matcher &m) -> bool {
+ return crop_scales_for_proposal(m.get_pattern_value_map(), parameter_label, proposal_label);
+ };
+ auto m = std::make_shared<ngraph::pattern::Matcher>(proposal_label, "Proposal1Scales");
+ register_matcher(m, callback);
+}
+
+NGRAPH_RTTI_DEFINITION(ngraph::pass::Proposal4Scales, "Proposal4Scales", 0);
+
+ngraph::pass::Proposal4Scales::Proposal4Scales() {
+ auto parameter_label = ngraph::pattern::wrap_type<opset5::Parameter>([](const Output<Node> &output) {
+ const auto & shape = output.get_partial_shape();
+ return shape.rank().is_static() && shape.rank().get_length() == 2 && shape[1].is_static() && (shape[1].get_length() == 3 || shape[1].get_length() == 4);
+ });
+ auto reshape_label = ngraph::pattern::wrap_type<opset5::Reshape>({parameter_label, ngraph::pattern::wrap_type<opset5::Constant>()},
+ [](const Output<Node> &output) { return output.get_partial_shape().rank().is_static() && output.get_partial_shape().rank().get_length() == 1; });
+ auto proposal_label = ngraph::pattern::wrap_type<opset4::Proposal>({pattern::any_input(), pattern::any_input(), reshape_label});
+
+ matcher_pass_callback callback = [parameter_label, proposal_label](pattern::Matcher &m) -> bool {
+ return crop_scales_for_proposal(m.get_pattern_value_map(), parameter_label, proposal_label);
+ };
+ auto m = std::make_shared<ngraph::pattern::Matcher>(proposal_label, "Proposal4Scales");
+ register_matcher(m, callback);
+}
--- /dev/null
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <transformations/smart_reshape/reshape_to_1D.hpp>
+
+#include <ngraph/ngraph.hpp>
+#include <ngraph/opsets/opset5.hpp>
+#include <ngraph/pattern/matcher.hpp>
+#include <ngraph/pattern/op/wrap_type.hpp>
+#include <ngraph/rt_info.hpp>
+
+NGRAPH_RTTI_DEFINITION(ngraph::pass::ReshapeTo1D, "ReshapeTo1D", 0);
+
+ngraph::pass::ReshapeTo1D::ReshapeTo1D() {
+ auto reshape_label = ngraph::pattern::wrap_type<opset5::Reshape>({pattern::any_input(), ngraph::pattern::wrap_type<opset5::Constant>()},
+ [](const Output<Node> & output) { return output.get_partial_shape().rank().is_static() && output.get_partial_shape().rank().get_length() == 1; });
+
+ matcher_pass_callback callback = [](pattern::Matcher &m) -> bool {
+ m.get_match_root()->input(1).replace_source_output(ngraph::opset5::Constant::create(ngraph::element::i64, {1}, {-1}));
+ return true;
+ };
+ auto m = std::make_shared<ngraph::pattern::Matcher>(reshape_label, "ReshapeTo1D");
+ register_matcher(m, callback);
+}
return true;
}
+NGRAPH_RTTI_DEFINITION(ngraph::pass::ReshapeAMatMul, "ReshapeAMatMul", 0);
+
ngraph::pass::ReshapeAMatMul::ReshapeAMatMul() {
auto other_input_label = pattern::any_input();
auto reshape_input_label = pattern::any_input();
register_matcher(m, callback);
}
+NGRAPH_RTTI_DEFINITION(ngraph::pass::ReshapeBMatMul, "ReshapeBMatMul", 0);
+
ngraph::pass::ReshapeBMatMul::ReshapeBMatMul() {
auto other_input_label = pattern::any_input();
auto reshape_input_label = pattern::any_input();
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <memory>
+
+#include <ngraph/pass/manager.hpp>
+#include <ngraph/pass/constant_folding.hpp>
+
+#include <transformations/init_node_info.hpp>
+#include <transformations/itt.hpp>
+#include <transformations/smart_reshape/mimic_set_batch_size.hpp>
+#include <transformations/smart_reshape/reshape_to_1D.hpp>
+#include <transformations/smart_reshape/set_batch_size.hpp>
+#include <transformations/smart_reshape/strided_slice_squeeze.hpp>
+
+NGRAPH_RTTI_DEFINITION(ngraph::pass::SetBatchSize, "SetBatchSize", 0);
+
+bool ngraph::pass::SetBatchSize::run_on_function(std::shared_ptr<ngraph::Function> f) {
+ OV_ITT_SCOPED_TASK(itt::domains::IETransform, "ngraph::pass::SetBatchSize");
+
+ ngraph::pass::Manager manager;
+ // This pass must be called first in pipeline
+ manager.register_pass<ngraph::pass::InitNodeInfo>();
+
+ manager.register_pass<ngraph::pass::DisableCFForPriorBoxes>();
+ manager.register_pass<ngraph::pass::ConstantFolding>();
+ manager.register_pass<ngraph::pass::EnableCFForPriorBoxes>();
+ manager.register_pass<ngraph::pass::SharedSqueeze>();
+ manager.register_pass<ngraph::pass::SqueezeStridedSlice>();
+ manager.register_pass<ngraph::pass::StridedSliceSqueeze>();
+ manager.register_pass<ngraph::pass::ReshapeTo1D>();
+
+ manager.register_pass<ngraph::pass::MimicSetBatchSize>();
+ manager.run_passes(f);
+ return true;
+}
+
#include <memory>
-#include "transformations/smart_reshape/smart_reshape.hpp"
-#include "transformations/smart_reshape/reshape_with_hc_output.hpp"
-#include "transformations/itt.hpp"
-
#include <ngraph/pass/manager.hpp>
-#include <ngraph/pass/constant_folding.hpp>
+
#include <transformations/init_node_info.hpp>
+#include <transformations/itt.hpp>
+#include <transformations/smart_reshape/proposal_scales_stridedslice.hpp>
+#include <transformations/smart_reshape/reshape_to_1D.hpp>
+#include <transformations/smart_reshape/reshape_with_hc_output.hpp>
+#include <transformations/smart_reshape/smart_reshape.hpp>
+#include <transformations/smart_reshape/strided_slice_squeeze.hpp>
+#include <transformations/smart_reshape/mimic_set_batch_size.hpp>
+
+NGRAPH_RTTI_DEFINITION(ngraph::pass::SmartReshape, "SmartReshape", 0);
// Runs shape-relaxing transformations so the model becomes reshape-able.
// Static-shape-dependent passes run first with per-pass validation on; the
// MatMul-related passes run afterwards with validation disabled (see below).
// Fix: ReshapeTo1D was registered twice in static_manager — the pass is
// idempotent, so the redundant second registration is removed.
bool ngraph::pass::SmartReshape::run_on_function(std::shared_ptr<ngraph::Function> f) {
    OV_ITT_SCOPED_TASK(itt::domains::IETransform, "ngraph::pass::SmartReshape");

    ngraph::pass::Manager static_manager;
    // This pass must be called first in pipeline
    static_manager.register_pass<ngraph::pass::InitNodeInfo>();
    static_manager.register_pass<ngraph::pass::ReshapeTo1D>();
    static_manager.register_pass<ngraph::pass::Proposal1Scales>();
    static_manager.register_pass<ngraph::pass::Proposal4Scales>();
    static_manager.register_pass<ngraph::pass::SharedSqueeze>();
    static_manager.register_pass<ngraph::pass::SqueezeStridedSlice>();
    static_manager.register_pass<ngraph::pass::StridedSliceSqueeze>();
    static_manager.run_passes(f);

    ngraph::pass::Manager dynamic_manager;
    // function revalidation will cause "fake" dynamism due to ShapeOf ops insertions
    // we turn it off to have access to originally static shapes
    dynamic_manager.set_per_pass_validation(false);
    dynamic_manager.register_pass<ngraph::pass::ReshapeAMatMul>();
    dynamic_manager.register_pass<ngraph::pass::ReshapeBMatMul>();
    dynamic_manager.run_passes(f);
    return true;
}
--- /dev/null
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <transformations/common_optimizations/optimize_strided_slice.hpp>
+#include <transformations/smart_reshape/strided_slice_squeeze.hpp>
+
+#include <ngraph/ngraph.hpp>
+#include <ngraph/opsets/opset5.hpp>
+#include <ngraph/pattern/matcher.hpp>
+#include <ngraph/pattern/op/wrap_type.hpp>
+#include <ngraph/rt_info.hpp>
+#include <transformations/itt.hpp>
+
+NGRAPH_RTTI_DEFINITION(ngraph::pass::StridedSliceSqueeze, "StridedSliceSqueeze", 0);
+
+// Matches StridedSlice (single consumer) -> Squeeze(const axes) and folds the
+// Squeeze into the StridedSlice by setting shrink_axis_mask on the squeezed
+// axes, producing one StridedSlice that directly yields the squeezed shape.
+ngraph::pass::StridedSliceSqueeze::StridedSliceSqueeze() {
+ auto ss_label = ngraph::pattern::wrap_type<opset5::StridedSlice>(pattern::consumers_count(1));
+ auto squeeze_label = ngraph::pattern::wrap_type<opset5::Squeeze>({ss_label, ngraph::pattern::wrap_type<opset5::Constant>()});
+
+ matcher_pass_callback callback = [](pattern::Matcher &m) -> bool {
+ const auto & squeeze = m.get_match_root();
+ const auto & const_axes = std::dynamic_pointer_cast<ngraph::opset5::Constant>(squeeze->get_input_node_shared_ptr(1));
+
+ auto slice = std::dynamic_pointer_cast<ngraph::opset5::StridedSlice>(squeeze->get_input_node_shared_ptr(0));
+ if (!const_axes || !slice)
+ return false;
+
+ // Only plain slices are handled: no implicit reshape, no reversed axes.
+ const auto & slice_plan = get_slice_plan(slice);
+ if (slice_plan.begins.empty() || slice_plan.reshape_in_shape != slice_plan.reshape_out_shape || !slice_plan.reverse_axes.empty())
+ return false;
+
+ // Negative squeeze axes are normalized against the input rank.
+ const auto & axes = normalize_axes(squeeze->description(), const_axes->cast_vector<int64_t>(), squeeze->get_input_partial_shape(0).rank());
+
+ auto begin = std::dynamic_pointer_cast<ngraph::opset5::Constant>(slice->input_value(1).get_node_shared_ptr());
+ auto end = std::dynamic_pointer_cast<ngraph::opset5::Constant>(slice->input_value(2).get_node_shared_ptr());
+ auto strides = std::dynamic_pointer_cast<ngraph::opset5::Constant>(slice->input_value(3).get_node_shared_ptr());
+ if (!begin || !end || !strides)
+ return false;
+
+ auto begin_vec = begin->cast_vector<int64_t>();
+ auto end_vec = end->cast_vector<int64_t>();
+ auto strides_vec = strides->cast_vector<int64_t>();
+ auto begin_mask = slice->get_begin_mask();
+ auto end_mask = slice->get_end_mask();
+ // Missing optional masks are treated as all-zero vectors of matching length.
+ auto new_axis_mask = slice->get_new_axis_mask().empty() ? std::vector<int64_t>(begin_mask.size(), 0) : slice->get_new_axis_mask();
+ auto shrink_axis_mask = slice->get_shrink_axis_mask().empty() ? std::vector<int64_t>(begin_mask.size(), 0) : slice->get_shrink_axis_mask();
+ auto ellipsis_mask = slice->get_ellipsis_mask().empty() ? std::vector<int64_t>(begin_mask.size(), 0) : slice->get_ellipsis_mask();
+
+ // Turn each squeezed axis into an explicit single-element shrink axis.
+ // NOTE(review): the guard only rejects stride==1 slices that select more
+ // than one element; for strides != 1 the stride is silently reset to 1
+ // below — confirm slice_plan guarantees a single-element slice then.
+ for (const auto & axis : axes) {
+ if ((slice_plan.ends[axis] - slice_plan.begins[axis]) != 1 && slice_plan.strides[axis] == 1)
+ return false;
+ begin_vec[axis] = slice_plan.begins[axis];
+ end_vec[axis] = slice_plan.ends[axis];
+ strides_vec[axis] = 1;
+ begin_mask[axis] = 0;
+ end_mask[axis] = 0;
+ new_axis_mask[axis] = 0;
+ shrink_axis_mask[axis] = 1;
+ ellipsis_mask[axis] = 0;
+ }
+
+ auto new_slice = std::make_shared<opset5::StridedSlice>(
+ slice->input_value(0),
+ opset5::Constant::create(element::i64, {begin_vec.size()}, begin_vec),
+ opset5::Constant::create(element::i64, {end_vec.size()}, end_vec),
+ opset5::Constant::create(element::i64, {strides_vec.size()}, strides_vec),
+ begin_mask, end_mask, new_axis_mask, shrink_axis_mask, ellipsis_mask);
+
+ // NOTE(review): the fused node takes the StridedSlice's friendly name;
+ // the Squeeze's name is dropped — verify output tensor naming upstream.
+ replace_node(squeeze, new_slice);
+ new_slice->set_friendly_name(slice->get_friendly_name());
+ copy_runtime_info(slice, new_slice);
+ return true;
+ };
+ auto m = std::make_shared<ngraph::pattern::Matcher>(squeeze_label, "StridedSliceSqueeze");
+ register_matcher(m, callback);
+}
+NGRAPH_RTTI_DEFINITION(ngraph::pass::SqueezeStridedSlice, "SqueezeStridedSlice", 0);
+
+// Matches Squeeze(const axes, single consumer) -> StridedSlice and moves the
+// Squeeze below the slice: the squeezed axes are re-inserted into the slice
+// parameters as [0:1] shrink axes, so the new StridedSlice consumes the
+// original (unsqueezed) tensor directly and the Squeeze node disappears.
+ngraph::pass::SqueezeStridedSlice::SqueezeStridedSlice() {
+ auto squeeze_label = ngraph::pattern::wrap_type<opset5::Squeeze>(
+ {pattern::any_input(), ngraph::pattern::wrap_type<opset5::Constant>()}, pattern::consumers_count(1));
+ auto ss_label = ngraph::pattern::wrap_type<opset5::StridedSlice>({squeeze_label, pattern::any_input(), pattern::any_input(), pattern::any_input()});
+
+ matcher_pass_callback callback = [](pattern::Matcher &m) -> bool {
+ auto slice = std::dynamic_pointer_cast<ngraph::opset5::StridedSlice>(m.get_match_root());
+ // Fix: validate the cast before dereferencing — the original called
+ // slice->get_input_node_shared_ptr(0) prior to its own null check.
+ if (!slice)
+ return false;
+ auto squeeze = slice->get_input_node_shared_ptr(0);
+ const auto & const_axes = std::dynamic_pointer_cast<ngraph::opset5::Constant>(squeeze->get_input_node_shared_ptr(1));
+ if (!const_axes)
+ return false;
+
+ // Only plain slices are handled: no implicit reshape, no reversed axes.
+ const auto & slice_plan = get_slice_plan(slice);
+ if (slice_plan.begins.empty() || slice_plan.reshape_in_shape != slice_plan.reshape_out_shape || !slice_plan.reverse_axes.empty())
+ return false;
+
+ auto axes = normalize_axes(squeeze->description(), const_axes->cast_vector<int64_t>(), squeeze->get_input_partial_shape(0).rank());
+ // Ascending order keeps later insert positions valid as the vectors grow.
+ std::sort(axes.begin(), axes.end());
+ auto begin = std::dynamic_pointer_cast<ngraph::opset5::Constant>(slice->input_value(1).get_node_shared_ptr());
+ auto end = std::dynamic_pointer_cast<ngraph::opset5::Constant>(slice->input_value(2).get_node_shared_ptr());
+ auto strides = std::dynamic_pointer_cast<ngraph::opset5::Constant>(slice->input_value(3).get_node_shared_ptr());
+ if (!begin || !end || !strides)
+ return false;
+
+ auto begin_vec = begin->cast_vector<int64_t>();
+ auto end_vec = end->cast_vector<int64_t>();
+ auto strides_vec = strides->cast_vector<int64_t>();
+ auto begin_mask = slice->get_begin_mask();
+ auto end_mask = slice->get_end_mask();
+ // Missing optional masks are treated as all-zero vectors of matching length.
+ auto new_axis_mask = slice->get_new_axis_mask().empty() ? std::vector<int64_t>(begin_mask.size(), 0) : slice->get_new_axis_mask();
+ auto shrink_axis_mask = slice->get_shrink_axis_mask().empty() ? std::vector<int64_t>(begin_mask.size(), 0) : slice->get_shrink_axis_mask();
+ auto ellipsis_mask = slice->get_ellipsis_mask().empty() ? std::vector<int64_t>(begin_mask.size(), 0) : slice->get_ellipsis_mask();
+
+ // Re-insert each squeezed axis as a single-element shrink dimension.
+ for (const auto & axis : axes) {
+ begin_vec.insert(begin_vec.begin() + axis, 0);
+ end_vec.insert(end_vec.begin() + axis, 1);
+ strides_vec.insert(strides_vec.begin() + axis, 1);
+ begin_mask.insert(begin_mask.begin() + axis, 0);
+ end_mask.insert(end_mask.begin() + axis, 0);
+ new_axis_mask.insert(new_axis_mask.begin() + axis, 0);
+ shrink_axis_mask.insert(shrink_axis_mask.begin() + axis, 1);
+ ellipsis_mask.insert(ellipsis_mask.begin() + axis, 0);
+ }
+
+ // New slice reads the Squeeze's input, bypassing the Squeeze entirely.
+ auto new_slice = std::make_shared<opset5::StridedSlice>(
+ slice->get_input_node_shared_ptr(0)->input_value(0),
+ opset5::Constant::create(element::i64, {begin_vec.size()}, begin_vec),
+ opset5::Constant::create(element::i64, {end_vec.size()}, end_vec),
+ opset5::Constant::create(element::i64, {strides_vec.size()}, strides_vec),
+ begin_mask, end_mask, new_axis_mask, shrink_axis_mask, ellipsis_mask);
+
+ replace_node(slice, new_slice);
+ new_slice->set_friendly_name(slice->get_friendly_name());
+ copy_runtime_info(slice, new_slice);
+ return true;
+ };
+ auto m = std::make_shared<ngraph::pattern::Matcher>(ss_label, "SqueezeStridedSlice");
+ register_matcher(m, callback);
+}
+
+NGRAPH_RTTI_DEFINITION(ngraph::pass::SharedSqueeze, "SharedSqueeze", 0);
+
+// Returns true when two Squeeze nodes are guaranteed to produce identical
+// results for the same input: either both use the single-input form (squeeze
+// every 1-sized dim) or both carry constant axes normalizing to the same set.
+// Fixes: helper is file-local, so give it internal linkage (was a non-static
+// free function — ODR/namespace-pollution hazard); take shared_ptr by const&
+// to avoid atomic refcount churn; drop the redundant second size check.
+static bool squeezes_perform_the_same(const std::shared_ptr<ngraph::opset1::Squeeze>& lhs,
+ const std::shared_ptr<ngraph::opset1::Squeeze>& rhs) {
+ if (lhs->inputs().size() != rhs->inputs().size())
+ return false;
+ // Single-input Squeeze removes every 1-sized dim — same input, same result.
+ if (lhs->inputs().size() == 1)
+ return true;
+ const auto rank = lhs->get_input_partial_shape(0).rank();
+ if (rank.is_dynamic())
+ return false;
+ const auto l_axes = std::dynamic_pointer_cast<ngraph::opset1::Constant>(lhs->get_input_node_shared_ptr(1));
+ const auto r_axes = std::dynamic_pointer_cast<ngraph::opset1::Constant>(rhs->get_input_node_shared_ptr(1));
+ // Non-constant axes cannot be compared statically.
+ if (l_axes && r_axes)
+ return normalize_axes(lhs->description(), l_axes->cast_vector<int64_t>(), rank) ==
+ normalize_axes(rhs->description(), r_axes->cast_vector<int64_t>(), rank);
+ return false;
+}
+
+// Deduplicates Squeeze ops: when several provably-identical Squeeze nodes
+// consume the same output, all their consumers are re-pointed to a single
+// canonical one. Recurses into sub-graph based ops (e.g. TensorIterator).
+// Returns true if the graph (or any sub-graph) was modified.
+bool ngraph::pass::SharedSqueeze::run_on_function(std::shared_ptr<ngraph::Function> f) {
+ OV_ITT_SCOPED_TASK(itt::domains::IETransform, "ngraph::pass::SharedSqueeze");
+
+ bool graph_rewritten = false;
+
+ // Group all Squeeze nodes by the producer output they consume.
+ std::map<ngraph::Output<Node>, std::vector<std::shared_ptr<ngraph::opset1::Squeeze>>> source_to_squeeze;
+ for (const auto & node : f->get_ordered_ops()) {
+ // Recursively apply transformation for sub-graph based operations
+ if (auto sub_graph_node = std::dynamic_pointer_cast<op::util::SubGraphOp>(node)) {
+ if (auto sub_graph = sub_graph_node->get_function()) {
+ graph_rewritten |= run_on_function(sub_graph);
+ }
+ }
+ if (auto squeeze = std::dynamic_pointer_cast<ngraph::opset1::Squeeze>(node)) {
+ source_to_squeeze[squeeze->input_value(0)].push_back(squeeze);
+ }
+ }
+
+ for (auto& item : source_to_squeeze) {
+ if (item.second.size() < 2)
+ continue;
+ // The first Squeeze becomes canonical; equivalent siblings fold into it.
+ auto root_squeeze = item.second[0];
+ for (auto& child_squeeze : item.second) {
+ // instance id comparison skips the root itself
+ if (root_squeeze->get_instance_id() != child_squeeze->get_instance_id() && squeezes_perform_the_same(root_squeeze, child_squeeze)) {
+ graph_rewritten |= replace_output_update_name(child_squeeze->output(0), root_squeeze->output(0));
+ }
+ }
+ }
+ return graph_rewritten;
+}
\ No newline at end of file
InferenceEngine::details::CNNNetworkNGraphImpl cnnNet(ngraph);
ASSERT_EQ(1, cnnNet.getBatchSize());
- ASSERT_EQ(OK, cnnNet.setBatchSize(2, nullptr)); // triggers conversion
+ ASSERT_EQ(OK, cnnNet.setBatchSize(2, nullptr)); // must not trigger conversion
ASSERT_EQ(2, cnnNet.getBatchSize());
- ASSERT_EQ(nullptr, cnnNet.getFunction());
+ ASSERT_NE(nullptr, cnnNet.getFunction());
}
TEST(CNNNGraphImplTests, TestGetBatchScalar) {
InferenceEngine::details::CNNNetworkNGraphImpl cnnNet(ngraph);
ASSERT_EQ(1, cnnNet.getBatchSize());
- ASSERT_EQ(PARAMETER_MISMATCH, cnnNet.setBatchSize(2, nullptr)); // triggers conversion
+ ASSERT_EQ(PARAMETER_MISMATCH, cnnNet.setBatchSize(2, nullptr)); // must not trigger conversion
+}
+
+TEST(CNNNGraphImplTests, TestGetBatchDynamic) {
+ // Batch dim is static (5) while the second dim is dynamic: getBatchSize()
+ // must still report 5 even though the full shape is not static.
+ auto param = std::make_shared<ngraph::op::Parameter>(ngraph::element::Type_t::f32, ngraph::PartialShape{5, ngraph::Dimension::dynamic()});
+ auto relu = std::make_shared<ngraph::op::Relu>(param);
+ auto result = std::make_shared<ngraph::op::Result>(relu);
+ auto fn = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param});
+
+ InferenceEngine::details::CNNNetworkNGraphImpl cnnNet(fn);
+ ASSERT_TRUE(cnnNet.getFunction()->get_parameters()[0]->get_partial_shape().is_dynamic());
+ ASSERT_EQ(5, cnnNet.getBatchSize());
+}
+
+TEST(CNNNGraphImplTests, TestSetBatchDynamic) {
+ // Fully dynamic input: default batch is reported as 1, and setBatchSize()
+ // must fail with PARAMETER_MISMATCH rather than convert the network.
+ auto param = std::make_shared<ngraph::op::Parameter>(ngraph::element::Type_t::f32, ngraph::PartialShape::dynamic());
+ auto relu = std::make_shared<ngraph::op::Relu>(param);
+ auto result = std::make_shared<ngraph::op::Result>(relu);
+ auto fn = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param});
+
+ InferenceEngine::details::CNNNetworkNGraphImpl cnnNet(fn);
+ ASSERT_EQ(1, cnnNet.getBatchSize());
+ ASSERT_EQ(PARAMETER_MISMATCH, cnnNet.setBatchSize(2, nullptr)); // must not trigger conversion
+}
TEST(CNNNGraphImplTests, TestSaveAffinity) {
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include <ngraph/function.hpp>
+#include <ngraph/opsets/opset5.hpp>
+#include <cpp/ie_cnn_network.h>
+
+
+TEST(SmartReshapeTests, MimickingSBS) {
+ // Reshape pattern {6,-1} mixes batch into the first output dim; after
+ // setBatchSize(2) the output must scale to {12, 4} and input to {2,2,3,4}.
+ auto input = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 2, 3, 4});
+ auto pattern = ngraph::opset5::Constant::create(ngraph::element::i64, {2}, {6, -1});
+ auto reshape = std::make_shared<ngraph::opset5::Reshape>(input, pattern, true);
+ auto f = std::make_shared<ngraph::Function>(ngraph::NodeVector{reshape}, ngraph::ParameterVector{input});
+
+ InferenceEngine::CNNNetwork network(f);
+ ASSERT_NO_THROW(network.setBatchSize(2));
+
+ ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({12, 4}));
+ ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 2, 3, 4}));
+}
+
+TEST(SmartReshapeTests, MimickingSBS_1) {
+ // Reshape pattern {1,-1} keeps batch in the first output dim; after
+ // setBatchSize(2) the output must become {2, 24} and input {2,2,3,4}.
+ auto input = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 2, 3, 4});
+ auto pattern = ngraph::opset5::Constant::create(ngraph::element::i64, {2}, {1, -1});
+ auto reshape = std::make_shared<ngraph::opset5::Reshape>(input, pattern, true);
+ auto f = std::make_shared<ngraph::Function>(ngraph::NodeVector{reshape}, ngraph::ParameterVector{input});
+
+ InferenceEngine::CNNNetwork network(f);
+ ASSERT_NO_THROW(network.setBatchSize(2));
+
+ ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({2, 24}));
+ ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 2, 3, 4}));
+}
+
+TEST(SmartReshapeTests, MimickingSBS_2) {
+ // Batch shrinks from 2 to 1: pattern {12,-1} scales to {6, 4} and the
+ // input shape to {1,2,3,4}.
+ auto input = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{2, 2, 3, 4});
+ auto pattern = ngraph::opset5::Constant::create(ngraph::element::i64, {2}, {12, -1});
+ auto reshape = std::make_shared<ngraph::opset5::Reshape>(input, pattern, true);
+ auto f = std::make_shared<ngraph::Function>(ngraph::NodeVector{reshape}, ngraph::ParameterVector{input});
+
+ InferenceEngine::CNNNetwork network(f);
+ ASSERT_NO_THROW(network.setBatchSize(1));
+
+ ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({6, 4}));
+ ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 2, 3, 4}));
+}
\ No newline at end of file
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include <ngraph/function.hpp>
+#include <ngraph/opsets/opset1.hpp>
+#include <ngraph/opsets/opset5.hpp>
+#include <cpp/ie_cnn_network.h>
+
+
+// Proposal (opset1) whose scales input comes through a Reshape: setBatchSize(2)
+// must keep the scales sub-graph intact and scale the ROI count to
+// batch(2) * post_nms_topn(300) = 600 rows of 5.
+TEST(SmartReshapeTests, Proposal1Scales) {
+ std::shared_ptr<ngraph::Function> f(nullptr);
+ {
+ auto input_0 = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 24, 75, 128});
+ auto input_1 = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 48, 75, 128});
+ auto input_2 = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3});
+ // Scales flattened to rank-1 {3} — the pattern Proposal1Scales targets.
+ auto reshape = std::make_shared<ngraph::opset5::Reshape>(input_2, ngraph::opset5::Constant::create(ngraph::element::i64, {1}, {3}), true);
+ ngraph::op::ProposalAttrs attrs;
+ attrs.base_size = 256;
+ attrs.box_coordinate_scale = 10.0;
+ attrs.box_size_scale = 5.0;
+ attrs.clip_after_nms = false;
+ attrs.clip_before_nms = true;
+ attrs.feat_stride = 8;
+ attrs.framework = "tensorflow";
+ attrs.min_size = 1;
+ attrs.nms_thresh = 0.699999988079;
+ attrs.normalize = true;
+ attrs.post_nms_topn = 300;
+ attrs.pre_nms_topn = 2147483647;
+ attrs.ratio = {0.5, 1.0, 2.0};
+ attrs.scale = {0.25, 0.5, 1.0, 2.0};
+ auto proposal = std::make_shared<ngraph::opset1::Proposal>(input_0, input_1, reshape, attrs);
+ f = std::make_shared<ngraph::Function>(ngraph::NodeVector{proposal}, ngraph::ParameterVector{input_0, input_1, input_2});
+ }
+
+ InferenceEngine::CNNNetwork network(f);
+ ASSERT_NO_THROW(network.setBatchSize(2));
+ ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({600, 5}));
+}
+
+// Same scenario with opset5::Proposal and a 4-element scales input reshaped
+// via {-1}: setBatchSize(2) must keep the scales sub-graph alive and yield
+// batch(2) * post_nms_topn(300) = 600 output rows.
+TEST(SmartReshapeTests, Proposal4Scales) {
+ std::shared_ptr<ngraph::Function> f(nullptr);
+ {
+ auto input_0 = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 24, 75, 128});
+ auto input_1 = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 48, 75, 128});
+ auto input_2 = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 4});
+ // Scales flattened with a special-value {-1} pattern — Proposal4Scales case.
+ auto reshape = std::make_shared<ngraph::opset5::Reshape>(input_2, ngraph::opset5::Constant::create(ngraph::element::i64, {1}, {-1}), true);
+ ngraph::op::ProposalAttrs attrs;
+ attrs.base_size = 256;
+ attrs.box_coordinate_scale = 10.0;
+ attrs.box_size_scale = 5.0;
+ attrs.clip_after_nms = false;
+ attrs.clip_before_nms = true;
+ attrs.feat_stride = 8;
+ attrs.framework = "tensorflow";
+ attrs.min_size = 1;
+ attrs.nms_thresh = 0.699999988079;
+ attrs.normalize = true;
+ attrs.post_nms_topn = 300;
+ attrs.pre_nms_topn = 2147483647;
+ attrs.ratio = {0.5, 1.0, 2.0};
+ attrs.scale = {0.25, 0.5, 1.0, 2.0};
+ auto proposal = std::make_shared<ngraph::opset5::Proposal>(input_0, input_1, reshape, attrs);
+ f = std::make_shared<ngraph::Function>(ngraph::NodeVector{proposal}, ngraph::ParameterVector{input_0, input_1, input_2});
+ }
+
+ InferenceEngine::CNNNetwork network(f);
+ ASSERT_NO_THROW(network.setBatchSize(2));
+ ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({600, 5}));
+}
\ No newline at end of file
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include <ngraph/function.hpp>
+#include <ngraph/opsets/opset5.hpp>
+#include <cpp/ie_cnn_network.h>
+
+
+TEST(SmartReshapeTests, Reshape1d) {
+ // Reshape to 1D {5} on a fully dynamic input: ReshapeTo1D rewrites the
+ // constant pattern so a later network.reshape() propagates real shapes.
+ auto input = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::PartialShape::dynamic());
+ input->set_friendly_name("input");
+ auto target_shape = ngraph::opset5::Constant::create(ngraph::element::i64, {1}, {5});
+ auto reshape = std::make_shared<ngraph::opset5::Reshape>(input, target_shape, true);
+ auto f = std::make_shared<ngraph::Function>(ngraph::NodeVector{reshape}, ngraph::ParameterVector{input});
+
+ InferenceEngine::CNNNetwork network(f);
+
+ ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible(ngraph::PartialShape::dynamic()));
+ ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({5}));
+
+ ASSERT_NO_THROW(network.reshape({{"input", {1, 3, 300, 300}}}));
+
+ ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({270000}));
+ ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3, 300, 300}));
+}
+
+// Negative case: the reshape pattern is a Parameter (non-constant), so
+// ReshapeTo1D must not fire; reshape() still works and the pattern parameter
+// must keep its consumer.
+TEST(SmartReshapeTests, Reshape1d_negative) {
+ std::shared_ptr<ngraph::Function> f(nullptr);
+ {
+ auto input = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::PartialShape::dynamic());
+ auto pattern = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::i64, ngraph::Shape{1});
+ input->set_friendly_name("input");
+ auto reshape = std::make_shared<ngraph::opset5::Reshape>(input, pattern, false);
+ f = std::make_shared<ngraph::Function>(ngraph::NodeVector{reshape}, ngraph::ParameterVector{input, pattern});
+ }
+
+ InferenceEngine::CNNNetwork network(f);
+
+ ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible(ngraph::PartialShape::dynamic()));
+ ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().is_dynamic());
+
+ ASSERT_NO_THROW(network.reshape({{"input", {1, 3, 300, 300}}}));
+
+ ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({270000}));
+ ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3, 300, 300}));
+ // The pattern Parameter must still feed the Reshape (not optimized out).
+ ASSERT_FALSE(network.getFunction()->get_parameters()[1]->get_output_target_inputs(0).empty());
+}
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include <ngraph/function.hpp>
+#include <ngraph/opsets/opset5.hpp>
+#include <cpp/ie_cnn_network.h>
+
+
+// StridedSlice -> Squeeze(axis 0): StridedSliceSqueeze fuses them so that
+// setBatchSize(2) can rewrite the batch dim; output stays rank-1 {3}.
+TEST(SmartReshapeTests, SS_Squeeze) {
+ std::shared_ptr<ngraph::Function> f(nullptr);
+ {
+ auto input = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3});
+ auto ss = std::make_shared<ngraph::opset5::StridedSlice>(
+ input,
+ ngraph::opset5::Constant::create(ngraph::element::i64, {2}, {0, 0}),
+ ngraph::opset5::Constant::create(ngraph::element::i64, {2}, {0, 0}),
+ ngraph::opset5::Constant::create(ngraph::element::i64, {2}, {1, 1}),
+ std::vector<int64_t>{1, 1}, std::vector<int64_t>{1, 1});
+ auto squeeze = std::make_shared<ngraph::opset5::Squeeze>(ss, ngraph::opset5::Constant::create(ngraph::element::i64, {1}, {0}));
+
+ f = std::make_shared<ngraph::Function>(ngraph::NodeVector{squeeze}, ngraph::ParameterVector{input});
+ }
+
+ InferenceEngine::CNNNetwork network(f);
+
+ ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3})) <<
+ network.getFunction()->get_results()[0]->get_output_partial_shape(0);
+ ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3}));
+
+ ASSERT_NO_THROW(network.setBatchSize(2));
+
+ // Batch dim changed on the input; the squeezed output keeps its shape.
+ ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3})) <<
+ network.getFunction()->get_results()[0]->get_output_partial_shape(0);
+ ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 3}));
+}
+
+
+// Negative case: the StridedSlice already uses a non-empty new_axis_mask
+// ({0, 1}), so the fusion does not apply and setBatchSize must fail.
+TEST(SmartReshapeTests, SS_Squeeze_mask_use_negative) {
+ std::shared_ptr<ngraph::Function> f(nullptr);
+ {
+ auto input = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3});
+ auto ss = std::make_shared<ngraph::opset5::StridedSlice>(
+ input,
+ ngraph::opset5::Constant::create(ngraph::element::i64, {2}, {0, 0}),
+ ngraph::opset5::Constant::create(ngraph::element::i64, {2}, {0, 0}),
+ ngraph::opset5::Constant::create(ngraph::element::i64, {2}, {1, 1}),
+ std::vector<int64_t>{1, 1}, std::vector<int64_t>{1, 1}, std::vector<int64_t>{0, 1});
+ auto squeeze = std::make_shared<ngraph::opset5::Squeeze>(ss, ngraph::opset5::Constant::create(ngraph::element::i64, {1}, {0}));
+
+ f = std::make_shared<ngraph::Function>(ngraph::NodeVector{squeeze}, ngraph::ParameterVector{input});
+ }
+
+
+ InferenceEngine::CNNNetwork network(f);
+
+ ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({1, 3})) <<
+ network.getFunction()->get_results()[0]->get_output_partial_shape(0);
+ ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3}));
+
+ ASSERT_ANY_THROW(network.setBatchSize(2));
+}
+
+
+// Negative case: the StridedSlice reverses axes (strides {-1, -1}); the fusion
+// rejects plans with reverse_axes, so setBatchSize must fail.
+TEST(SmartReshapeTests, SS_Squeeze_negative_stride_negative) {
+ std::shared_ptr<ngraph::Function> f(nullptr);
+ {
+ auto input = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3});
+ auto ss = std::make_shared<ngraph::opset5::StridedSlice>(
+ input,
+ ngraph::opset5::Constant::create(ngraph::element::i64, {2}, {0, 0}),
+ ngraph::opset5::Constant::create(ngraph::element::i64, {2}, {0, 0}),
+ ngraph::opset5::Constant::create(ngraph::element::i64, {2}, {-1, -1}),
+ std::vector<int64_t>{1, 1}, std::vector<int64_t>{1, 1});
+ auto squeeze = std::make_shared<ngraph::opset5::Squeeze>(ss, ngraph::opset5::Constant::create(ngraph::element::i64, {1}, {0}));
+
+ f = std::make_shared<ngraph::Function>(ngraph::NodeVector{squeeze}, ngraph::ParameterVector{input});
+ }
+
+
+ InferenceEngine::CNNNetwork network(f);
+
+ ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3})) <<
+ network.getFunction()->get_results()[0]->get_output_partial_shape(0);
+ ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3}));
+
+ ASSERT_ANY_THROW(network.setBatchSize(2));
+}
+
+// Two identical Squeeze consumers of one StridedSlice: SharedSqueeze must
+// deduplicate them first (the fusion requires a single consumer), after which
+// setBatchSize(2) succeeds.
+TEST(SmartReshapeTests, SS_SharedSqueezes) {
+ std::shared_ptr<ngraph::Function> f(nullptr);
+ {
+ auto input = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3});
+ auto ss = std::make_shared<ngraph::opset5::StridedSlice>(
+ input,
+ ngraph::opset5::Constant::create(ngraph::element::i64, {2}, {0, 0}),
+ ngraph::opset5::Constant::create(ngraph::element::i64, {2}, {0, 0}),
+ ngraph::opset5::Constant::create(ngraph::element::i64, {2}, {1, 1}),
+ std::vector<int64_t>{1, 1}, std::vector<int64_t>{1, 1});
+ auto squeeze_1 = std::make_shared<ngraph::opset5::Squeeze>(ss, ngraph::opset5::Constant::create(ngraph::element::i64, {1}, {0}));
+ auto squeeze_2 = std::make_shared<ngraph::opset5::Squeeze>(ss, ngraph::opset5::Constant::create(ngraph::element::i64, {1}, {0}));
+
+ f = std::make_shared<ngraph::Function>(ngraph::NodeVector{squeeze_1, squeeze_2}, ngraph::ParameterVector{input});
+ }
+
+ InferenceEngine::CNNNetwork network(f);
+
+ ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3})) <<
+ network.getFunction()->get_results()[0]->get_output_partial_shape(0);
+ ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3}));
+
+ ASSERT_NO_THROW(network.setBatchSize(2));
+
+ ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3})) <<
+ network.getFunction()->get_results()[0]->get_output_partial_shape(0);
+ ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 3}));
+}
+
+
+// StridedSlice -> Squeeze with mixed negative/positive axes {-2, 0, -4}
+// (normalized: {4, 0, 2}): fusion must normalize them and setBatchSize(2)
+// must update only the batch dim.
+TEST(SmartReshapeTests, SS_SqueezeNegativeAxes) {
+ std::shared_ptr<ngraph::Function> f(nullptr);
+ {
+ auto input = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 1, 8, 1, 2});
+ auto ss = std::make_shared<ngraph::opset5::StridedSlice>(
+ input,
+ ngraph::opset5::Constant::create(ngraph::element::i64, {6}, {0, 0, 0, 0, 0, 0}),
+ ngraph::opset5::Constant::create(ngraph::element::i64, {6}, {0, 0, 0, 0, 0, 0}),
+ ngraph::opset5::Constant::create(ngraph::element::i64, {6}, {1, 1, 1, 1, 1, 1}),
+ std::vector<int64_t>{1, 1, 1, 1, 1, 1}, std::vector<int64_t>{1, 1, 1, 1, 1, 1});
+ auto squeeze = std::make_shared<ngraph::opset5::Squeeze>(ss, ngraph::opset5::Constant::create(ngraph::element::i64, {3}, {-2, 0, -4}));
+
+ f = std::make_shared<ngraph::Function>(ngraph::NodeVector{squeeze}, ngraph::ParameterVector{input});
+ }
+
+ InferenceEngine::CNNNetwork network(f);
+
+ ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3, 8, 2})) <<
+ network.getFunction()->get_results()[0]->get_output_partial_shape(0);
+ ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3, 1, 8, 1, 2}));
+
+ ASSERT_NO_THROW(network.setBatchSize(2));
+
+ ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3, 8, 2})) <<
+ network.getFunction()->get_results()[0]->get_output_partial_shape(0);
+ ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 3, 1, 8, 1, 2}));
+}
+
+// Reverse order: Squeeze (negative axes) -> StridedSlice. SqueezeStridedSlice
+// must move the Squeeze below the slice so setBatchSize(2) can proceed.
+TEST(SmartReshapeTests, Squeeze_SSNegativeAxes) {
+ std::shared_ptr<ngraph::Function> f(nullptr);
+ {
+ auto input = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 1, 8, 1, 2});
+ auto squeeze = std::make_shared<ngraph::opset5::Squeeze>(input, ngraph::opset5::Constant::create(ngraph::element::i64, {3}, {-2, 0, -4}));
+ auto ss = std::make_shared<ngraph::opset5::StridedSlice>(
+ squeeze,
+ ngraph::opset5::Constant::create(ngraph::element::i64, {3}, {0, 0, 0}),
+ ngraph::opset5::Constant::create(ngraph::element::i64, {3}, {0, 0, 0}),
+ ngraph::opset5::Constant::create(ngraph::element::i64, {3}, {1, 1, 1}),
+ std::vector<int64_t>{1, 1, 1}, std::vector<int64_t>{1, 1, 1});
+
+ f = std::make_shared<ngraph::Function>(ngraph::NodeVector{ss}, ngraph::ParameterVector{input});
+ }
+
+ InferenceEngine::CNNNetwork network(f);
+
+ ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3, 8, 2})) <<
+ network.getFunction()->get_results()[0]->get_output_partial_shape(0);
+ ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({1, 3, 1, 8, 1, 2}));
+
+ ASSERT_NO_THROW(network.setBatchSize(2));
+
+ ASSERT_TRUE(network.getFunction()->get_results()[0]->get_output_partial_shape(0).compatible({3, 8, 2})) <<
+ network.getFunction()->get_results()[0]->get_output_partial_shape(0);
+ ASSERT_TRUE(network.getFunction()->get_parameters()[0]->get_partial_shape().compatible({2, 3, 1, 8, 1, 2}));
+}
}
if (data_batch_shape.rank().is_static() && data_batch_shape.rank().get_length() > 2 &&
- data_batch_shape[1].is_static())
+ data_batch_shape[1].is_static() && groups.is_static())
{
data_batch_shape[1] = Dimension(data_batch_shape[1].get_length() / groups.get_length());
}
bool op::v3::ShapeOf::constant_fold(OutputVector& output_values, const OutputVector& input_values)
{
OV_ITT_SCOPED_TASK(itt::domains::nGraph, "op::v3::ShapeOf::constant_fold");
+ // Skip folding when the node is tagged with DISABLED_CONSTANT_FOLDING in
+ // its rt_info, so shape sub-graphs marked by transformation pipelines
+ // (presumably DisableCFForPriorBoxes — confirm) survive constant folding.
+ if (get_rt_info().count("DISABLED_CONSTANT_FOLDING"))
+ return false;
return shape_of::constant_fold_shape_of(this, output_values[0], input_values[0], m_is_foldable);
}
bool op::v0::ShapeOf::constant_fold(OutputVector& output_values, const OutputVector& input_values)
{
OV_ITT_SCOPED_TASK(itt::domains::nGraph, "op::v0::ShapeOf::constant_fold");
+ // Same rt_info gate as op::v3::ShapeOf: a DISABLED_CONSTANT_FOLDING marker
+ // opts this node out of constant folding.
+ if (get_rt_info().count("DISABLED_CONSTANT_FOLDING"))
+ return false;
return shape_of::constant_fold_shape_of(this, output_values[0], input_values[0], m_is_foldable);
}