termination_cond =
ngraph::op::Constant::create(ngraph::element::boolean, {1}, {true});
}
- else if (ngraph::op::is_constant(ng_inputs.at(1).get_node_shared_ptr()))
+ else if (ngraph::op::is_constant(ng_inputs.at(1).get_node_shared_ptr()) &&
+ as_type_ptr<default_opset::Constant>(
+ ng_inputs.at(1).get_node_shared_ptr())
+ ->cast_vector<bool>()[0] == false)
{
- const auto term_cond_const = as_type_ptr<default_opset::Constant>(
- ng_inputs.at(1).get_node_shared_ptr());
- if (term_cond_const->cast_vector<bool>()[0])
+ // no iteration is performed so initial values are returned
+ OutputVector node_outputs;
+ // final values
+ for (const auto& dep : loop_carried_dependencies)
{
- termination_cond =
- ngraph::op::Constant::create(ngraph::element::boolean, {1}, {true});
+ node_outputs.push_back(dep);
}
- else
+ // scan outputs
+ for (const auto& dep : loop_carried_dependencies)
{
- // no iteration is performed so initial values are returned
- OutputVector node_outputs;
- // final values
- for (const auto& dep : loop_carried_dependencies)
- {
- node_outputs.push_back(dep);
- }
- // scan outputs
- for (const auto& dep : loop_carried_dependencies)
- {
- node_outputs.push_back(dep);
- }
- return node_outputs;
+ node_outputs.push_back(dep);
}
+ return node_outputs;
}
else
{
- // It is temporary solution caused by not supported termination_cond==false
- // (for not consant case) by nG Loop
- termination_cond =
- ngraph::op::Constant::create(ngraph::element::boolean, {1}, {true});
+ termination_cond = ng_inputs.at(1);
}
const int64_t concat_axis = 0;
body_outputs[0] =
ngraph::op::Constant::create(ngraph::element::boolean, {1}, {true});
}
- else
- {
- NGRAPH_WARN
- << "ONNX Loop: No identity or constant termination condition output "
- << "body is not supported in current version\n";
- // TODO: It should be removed after introduction fix to nG Loop
- }
CHECK_VALID_NODE(node,
body_inputs.size() >= loop_carried_dependencies.size() + 2,
ParameterVector body_params(body_inputs.begin() + 2, body_inputs.end());
body_params.emplace(body_params.begin(),
- body_inputs[0]); // termination condition body input
+ body_inputs[0]); // current iteration body input
const auto body = std::make_shared<ngraph::Function>(body_outputs, body_params);
auto loop = std::make_shared<default_opset::Loop>(trip_count, termination_cond);
ngraph::opset5::Loop::SpecialBodyPorts spec_ports{0, 0};
--- /dev/null
+ir_version: 6
+producer_name: "nGraph ONNX Importer"
+graph {
+ name: "basic loop"
+ node {
+ input: "trip_count"
+ input: "cond_in"
+ input: "a_init"
+ output: "a_final"
+ op_type: "Loop"
+ attribute {
+ name: "body"
+ g {
+ node {
+ input: "a_in"
+ input: "b"
+ output: "current_a"
+ name: "loop_body_add"
+ op_type: "Add"
+ }
+ node {
+ input: "i"
+ input: "threshold"
+ output: "cond_out"
+ name: "condition_calc"
+ op_type: "Less"
+ }
+ node {
+ input: "current_a"
+ output: "a_out"
+ name: "output_accumulator"
+ op_type: "Identity"
+ }
+ name: "simple add"
+ initializer {
+ dims: 1
+ dims: 2
+ data_type: 1
+ float_data: 1
+ float_data: 1
+ name: "b"
+ }
+ input {
+ name: "i"
+ type {
+ tensor_type {
+ elem_type: 7
+ shape {
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "cond"
+ type {
+ tensor_type {
+ elem_type: 9
+ shape {
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "a_in"
+ type {
+ tensor_type {
+ elem_type: 1
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ output {
+ name: "cond_out"
+ type {
+ tensor_type {
+ elem_type: 9
+ shape {
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ output {
+ name: "current_a"
+ type {
+ tensor_type {
+ elem_type: 1
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ }
+ type: GRAPH
+ }
+ }
+ initializer {
+ dims: 1
+ data_type: 7
+ int64_data: 5
+ name: "threshold"
+ }
+ input {
+ name: "trip_count"
+ type {
+ tensor_type {
+ elem_type: 7
+ shape {
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "cond_in"
+ type {
+ tensor_type {
+ elem_type: 9
+ shape {
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "a_init"
+ type {
+ tensor_type {
+ elem_type: 1
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ output {
+ name: "a_final"
+ type {
+ tensor_type {
+ elem_type: 1
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+}
+opset_import {
+ version: 11
+}
--- /dev/null
+ir_version: 6
+producer_name: "nGraph ONNX Importer"
+graph {
+ name: "basic loop"
+ node {
+ input: "trip_count"
+ input: "cond_in"
+ input: "a_init"
+ output: "a_final"
+ op_type: "Loop"
+ attribute {
+ name: "body"
+ g {
+ node {
+ input: "a_in"
+ input: "b"
+ output: "current_a"
+ name: "loop_body_add"
+ op_type: "Add"
+ }
+ node {
+ input: "i"
+ input: "threshold"
+ output: "cond_out"
+ name: "condition_calc"
+ op_type: "Less"
+ }
+ node {
+ input: "current_a"
+ output: "a_out"
+ name: "output_accumulator"
+ op_type: "Identity"
+ }
+ name: "simple add"
+ initializer {
+ dims: 1
+ dims: 2
+ data_type: 1
+ float_data: 1
+ float_data: 1
+ name: "b"
+ }
+ input {
+ name: "i"
+ type {
+ tensor_type {
+ elem_type: 7
+ shape {
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "cond"
+ type {
+ tensor_type {
+ elem_type: 9
+ shape {
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "a_in"
+ type {
+ tensor_type {
+ elem_type: 1
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ output {
+ name: "cond_out"
+ type {
+ tensor_type {
+ elem_type: 9
+ shape {
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ output {
+ name: "current_a"
+ type {
+ tensor_type {
+ elem_type: 1
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ }
+ type: GRAPH
+ }
+ }
+ initializer {
+ dims: 1
+ data_type: 7
+ int64_data: 10
+ name: "trip_count"
+ }
+ initializer {
+ dims: 1
+ data_type: 7
+ int64_data: 3
+ name: "threshold"
+ }
+ initializer {
+ dims: 1
+ data_type: 9
+    int32_data: 1
+ name: "cond_in"
+ }
+ input {
+ name: "a_init"
+ type {
+ tensor_type {
+ elem_type: 1
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ output {
+ name: "a_final"
+ type {
+ tensor_type {
+ elem_type: 1
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+}
+opset_import {
+ version: 11
+}
--- /dev/null
+ir_version: 6
+producer_name: "nGraph ONNX Importer"
+graph {
+ name: "basic loop"
+ node {
+ input: ""
+ input: "cond_in"
+ input: "a_init"
+ output: "a_final"
+ op_type: "Loop"
+ attribute {
+ name: "body"
+ g {
+ node {
+ input: "a_in"
+ input: "b"
+ output: "current_a"
+ name: "loop_body_add"
+ op_type: "Add"
+ }
+ node {
+ input: "i"
+ input: "threshold"
+ output: "cond_out"
+ name: "condition_calc"
+ op_type: "Less"
+ }
+ node {
+ input: "current_a"
+ output: "a_out"
+ name: "output_accumulator"
+ op_type: "Identity"
+ }
+ name: "simple add"
+ initializer {
+ dims: 1
+ dims: 2
+ data_type: 1
+ float_data: 1
+ float_data: 1
+ name: "b"
+ }
+ input {
+ name: "i"
+ type {
+ tensor_type {
+ elem_type: 7
+ shape {
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "cond"
+ type {
+ tensor_type {
+ elem_type: 9
+ shape {
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "a_in"
+ type {
+ tensor_type {
+ elem_type: 1
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ output {
+ name: "cond_out"
+ type {
+ tensor_type {
+ elem_type: 9
+ shape {
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ output {
+ name: "current_a"
+ type {
+ tensor_type {
+ elem_type: 1
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ }
+ type: GRAPH
+ }
+ }
+ initializer {
+ dims: 1
+ data_type: 7
+ int64_data: 5
+ name: "threshold"
+ }
+ input {
+ name: "cond_in"
+ type {
+ tensor_type {
+ elem_type: 9
+ shape {
+ dim {
+ dim_value: 1
+ }
+ }
+ }
+ }
+ }
+ input {
+ name: "a_init"
+ type {
+ tensor_type {
+ elem_type: 1
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+ output {
+ name: "a_final"
+ type {
+ tensor_type {
+ elem_type: 1
+ shape {
+ dim {
+ dim_value: 1
+ }
+ dim {
+ dim_value: 2
+ }
+ }
+ }
+ }
+ }
+}
+opset_import {
+ version: 11
+}
const auto function = onnx_import::import_onnx_model(file_util::path_join(
SERIALIZED_ZOO, "onnx/loop/loop_2d_add_no_identity_termination_cond.prototxt"));
+ auto test_case = test::TestCase<TestEngine, TestCaseType::DYNAMIC>(function);
+ // termination condition
+ test_case.add_input<bool>({true});
+ // a_init
+ test_case.add_input<float>({0.f, 0.f});
+
+ test_case.add_expected_output<float>(Shape{1, 2}, {6.f, 6.f});
+ test_case.add_expected_output<float>(
+ Shape{6, 2}, {1.f, 1.f, 2.f, 2.f, 3.f, 3.f, 4.f, 4.f, 5.f, 5.f, 6.f, 6.f});
+ test_case.run();
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, onnx_controlflow_loop_2d_no_identity_termination_cond_static_shapes)
+{
+ const auto function = onnx_import::import_onnx_model(file_util::path_join(
+ SERIALIZED_ZOO,
+ "onnx/loop/loop_2d_add_no_identity_termination_cond_static_shapes.prototxt"));
+
auto test_case = test::TestCase<TestEngine>(function);
// termination condition
test_case.add_input<bool>({true});
test_case.add_input<float>({0.f, 0.f});
test_case.add_expected_output<float>(Shape{1, 2}, {6.f, 6.f});
- test_case.add_expected_output<float>(Shape{5, 2},
- {1.f, 1.f, 2.f, 2.f, 3.f, 3.f, 4.f, 4.f, 5.f, 5.f});
test_case.run();
}
const auto function = onnx_import::import_onnx_model(file_util::path_join(
SERIALIZED_ZOO, "onnx/loop/loop_2d_add_const_no_identity_termination_cond.prototxt"));
+ auto test_case = test::TestCase<TestEngine, TestCaseType::DYNAMIC>(function);
+ // a_init
+ test_case.add_input<float>({0.f, 0.f});
+
+ test_case.add_expected_output<float>(Shape{1, 2}, {4.f, 4.f});
+ test_case.add_expected_output<float>(Shape{4, 2}, {1, 1, 2, 2, 3, 3, 4, 4});
+ test_case.run();
+}
+
+NGRAPH_TEST(${BACKEND_NAME},
+ onnx_controlflow_loop_2d_const_no_identity_termination_cond_static_shapes)
+{
+ const auto function = onnx_import::import_onnx_model(file_util::path_join(
+ SERIALIZED_ZOO,
+ "onnx/loop/loop_2d_add_const_no_identity_termination_cond_static_shapes.prototxt"));
+
auto test_case = test::TestCase<TestEngine>(function);
// a_init
test_case.add_input<float>({0.f, 0.f});
- test_case.add_expected_output<float>(Shape{1, 2}, {3.f, 3.f});
- test_case.add_expected_output<float>(Shape{3, 2}, {1.f, 1.f, 2.f, 2.f, 3.f, 3.f});
+ test_case.add_expected_output<float>(Shape{1, 2}, {4.f, 4.f});
test_case.run();
}
const auto function = onnx_import::import_onnx_model(file_util::path_join(
SERIALIZED_ZOO, "onnx/loop/loop_2d_add_cond_and_trip_count_as_inputs.prototxt"));
+ auto test_case = test::TestCase<TestEngine, TestCaseType::DYNAMIC>(function);
+ // trip count
+ test_case.add_input<int64_t>({10});
+
+ // termination condition
+ test_case.add_input<bool>({true});
+
+ // a_init
+ test_case.add_input<float>({0.f, 0.f});
+
+ test_case.add_expected_output<float>(Shape{1, 2}, {6.f, 6.f});
+ test_case.add_expected_output<float>(
+ Shape{6, 2}, {1.f, 1.f, 2.f, 2.f, 3.f, 3.f, 4.f, 4.f, 5.f, 5.f, 6.f, 6.f});
+ test_case.run();
+}
+
+NGRAPH_TEST(${BACKEND_NAME},
+ onnx_controlflow_loop_2d_both_cond_and_trip_count_as_inputs_static_shapes)
+{
+ const auto function = onnx_import::import_onnx_model(file_util::path_join(
+ SERIALIZED_ZOO,
+ "onnx/loop/loop_2d_add_cond_and_trip_count_as_inputs_static_shapes.prototxt"));
+
auto test_case = test::TestCase<TestEngine>(function);
// trip count
test_case.add_input<int64_t>({10});
// a_init
test_case.add_input<float>({0.f, 0.f});
- test_case.add_expected_output<float>(Shape{1, 2}, {5.f, 5.f});
- test_case.add_expected_output<float>(Shape{5, 2},
- {1.f, 1.f, 2.f, 2.f, 3.f, 3.f, 4.f, 4.f, 5.f, 5.f});
+ test_case.add_expected_output<float>(Shape{1, 2}, {6.f, 6.f});
test_case.run();
}
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/loop/loop_2d_add_trip_count_dynamic.prototxt"));
- auto test_case = test::TestCase<TestEngine>(function);
+ auto test_case = test::TestCase<TestEngine, TestCaseType::DYNAMIC>(function);
// trip count
test_case.add_input<int64_t>({3});
// a_init
const auto function = onnx_import::import_onnx_model(file_util::path_join(
SERIALIZED_ZOO, "onnx/loop/loop_no_variadic_inputs_and_outputs.prototxt"));
- auto test_case = test::TestCase<TestEngine>(function);
+ auto test_case = test::TestCase<TestEngine, TestCaseType::DYNAMIC>(function);
// trip_count
test_case.add_input<int64_t>({1});
// init condition
const auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/loop/loop_pow.prototxt"));
- auto test_case = test::TestCase<TestEngine>(function);
+ auto test_case = test::TestCase<TestEngine, TestCaseType::DYNAMIC>(function);
// trip_count
test_case.add_input<int64_t>({5});
// pow init
std::vector<std::shared_ptr<runtime::Tensor>> wrapped_outputs;
const ResultVector& results = clone->get_results();
- for (auto& result : results)
- {
- NGRAPH_CHECK(result->get_output_partial_shape(0).is_static(),
- "Shape staticization failed for result node ",
- *result);
- }
NGRAPH_CHECK(results.size() == outputs.size());
for (size_t i = 0; i < outputs.size(); i++)
std::dynamic_pointer_cast<runtime::dynamic::DynamicTensor>(outputs[i]))
{
dynamic_tensor->make_storage(results[i]->get_output_element_type(0),
- results[i]->get_output_shape(0));
+ results[i]->get_output_partial_shape(0));
wrapped_outputs.push_back(dynamic_tensor->get_wrapped_tensor());
}
else
}
void runtime::dynamic::DynamicTensor::make_storage(const element::Type& element_type,
- const Shape& shape)
+ const PartialShape& shape)
{
NGRAPH_CHECK(element_type.is_static(), "make_storage requires a static element type");
NGRAPH_CHECK(get_element_type().is_dynamic() || get_element_type() == element_type,
shape,
" which is incompatible with dynamic tensor shape ",
get_partial_shape());
- m_wrapped_tensor = m_wrapped_backend->create_tensor(element_type, shape);
+ if (shape.is_static())
+ {
+ m_wrapped_tensor = m_wrapped_backend->create_tensor(element_type, shape.get_shape());
+ }
+ else
+ {
+ m_wrapped_tensor = m_wrapped_backend->create_dynamic_tensor(element_type, shape);
+ }
}
const std::shared_ptr<ngraph::runtime::Tensor>&
virtual void read(void* p, size_t n) const override;
bool has_storage() const;
void release_storage();
- void make_storage(const element::Type& element_type, const Shape& shape);
+ void make_storage(const element::Type& element_type, const PartialShape& shape);
const std::shared_ptr<ngraph::runtime::Tensor>& get_wrapped_tensor() const;
private:
IE_GPU.onnx_model_fake_quantize_const_inputs_infer
IE_GPU.onnx_model_fake_quantize_nonconst_inputs_infer
-# No Constant/Identity termination condition output body is not supported by current nG Loop
-# is_termination_condition_always_true returns false
+# Not supported dynamic shapes cases for Loop
onnx_controlflow_loop_2d_no_identity_termination_cond
+onnx_controlflow_loop_2d_no_identity_termination_cond_false
onnx_controlflow_loop_2d_const_no_identity_termination_cond
onnx_controlflow_loop_2d_both_cond_and_trip_count_as_inputs
-
-#dynamic trip count
-onnx_controlflow_loop_2d_trip_count_dynamic
+onnx_controlflow_loop_no_variadic_inputs_and_outputs
+onnx_controlflow_loop_power
# Input body shape is changed during Loop iterations
# Exception is throw during Loop shape inference
-# Is it expected?
onnx_controlflow_loop_concat_values
+# dynamic trip count
+onnx_controlflow_loop_2d_trip_count_dynamic
+
# Infinitive Loop is not supported
onnx_controlflow_loop_infinite
-
-# Loop is not supported yet by IE backend
-onnx_controlflow_loop_2d_add
-onnx_controlflow_loop_2d_no_identity_termination_cond_false
-onnx_controlflow_loop_add_initializer_from_parent_scope
-onnx_controlflow_loop_add_node_from_parent_scope
-onnx_controlflow_loop_add_value_the_same_node_from_parent_and_subgraph
-onnx_controlflow_loop_scalars
-onnx_controlflow_loop_2d_add_const_cond
-onnx_controlflow_loop_no_variadic_inputs_and_outputs
-onnx_controlflow_loop_power
# LogSoftmax's reference implementation doesn't handle scalar input properly
onnx_model_logsoftmax_0D
-# No Constant/Identity termination condition output body is not supported by current nG Loop
-# is_termination_condition_always_true returns false
-onnx_controlflow_loop_2d_no_identity_termination_cond
-onnx_controlflow_loop_2d_const_no_identity_termination_cond
-onnx_controlflow_loop_2d_both_cond_and_trip_count_as_inputs
-
# Input body shape is changed during Loop iterations
# Exception is throw during Loop shape inference
-# Is it expected?
onnx_controlflow_loop_concat_values
# Infinitive Loop is not supported
onnx_controlflow_loop_infinite
-# Dynamic shape support?
+# Dynamic shape support
onnx_controlflow_loop_2d_trip_count_dynamic
onnx_controlflow_loop_no_variadic_inputs_and_outputs
onnx_controlflow_loop_power
const auto& function_output =
m_function->get_results()[m_allocated_expected_outputs];
- network_out_name = function_output->get_friendly_name();
+ // determine output name in IE convention
+ // (based on name of node which produces the result)
+ const auto& prev_layer = function_output->input_value(0);
+ network_out_name = prev_layer.get_node_shared_ptr()->get_friendly_name();
+ if (prev_layer.get_node_shared_ptr()->get_output_size() != 1)
+ {
+ network_out_name += "." + std::to_string(prev_layer.get_index());
+ }
NGRAPH_CHECK(
m_network_outputs.count(network_out_name) == 1,
"nGraph function's output number ",
m_allocated_expected_outputs,
" was not found in the CNNNetwork built from it. Function's output name: ",
- function_output->get_friendly_name());
+ network_out_name);
- network_output = m_network_outputs[function_output->get_friendly_name()];
+ network_output = m_network_outputs[network_out_name];
}
auto blob =