[IE VPU] Evaluate DSR (#770)
author Maksim Doronin <maksim.doronin@intel.com>
Fri, 19 Jun 2020 10:22:31 +0000 (13:22 +0300)
committer GitHub <noreply@github.com>
Fri, 19 Jun 2020 10:22:31 +0000 (13:22 +0300)
* [IE VPU] Add evaluate method to DSR

* [IE VPU] Enable DSR_Reshape tests

* [IE VPU] Improvements in DSR op

* [IE VPU] Fix typo in copyBlobAccordingUpperBound

* [IE VPU] Support dynamic inputs

* [IE VPU] Use dynamic inputs in tests

* [IE VPU] Improve conditions in propagateDynamism pass

* [IE VPU] Fix Myriad2 tests by disabling reorder

* [IE VPU] Make error message more explicit

* [IE VPU] Fix Win compilation: std::stoi in <string>

* [IE VPU] Improve data transfer to work with ND tensors

* [IE VPU] Avoid ODR in myriad common test utils

* [IE VPU] Split code in propagate dynamism into separate methods

* [IE VPU] Simplify conditions in DSR parsing

* [IE VPU] Emplace stage in initialStages when removing stage order

17 files changed:
inference-engine/src/vpu/common/include/vpu/ngraph/operations/dynamic_shape_resolver.hpp
inference-engine/src/vpu/common/src/ngraph/operations/dynamic_shape_resolver.cpp
inference-engine/src/vpu/graph_transformer/include/vpu/middleend/pass_manager.hpp
inference-engine/src/vpu/graph_transformer/src/backend/serialize.cpp
inference-engine/src/vpu/graph_transformer/src/middleend/allocator/allocator.cpp
inference-engine/src/vpu/graph_transformer/src/middleend/pass_manager.cpp
inference-engine/src/vpu/graph_transformer/src/middleend/passes/allocate_resources.cpp
inference-engine/src/vpu/graph_transformer/src/middleend/passes/convert_shape_notation.cpp
inference-engine/src/vpu/graph_transformer/src/middleend/passes/propagate_dynamism.cpp [new file with mode: 0644]
inference-engine/src/vpu/graph_transformer/src/middleend/passes/propagate_dynamism_to_outputs.cpp [deleted file]
inference-engine/src/vpu/graph_transformer/src/model/model.cpp
inference-engine/src/vpu/graph_transformer/src/stages/dynamic_shape_resolver.cpp
inference-engine/src/vpu/myriad_plugin/myriad_infer_request.cpp
inference-engine/tests/functional/plugin/myriad/common/myriad_common_test_utils.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/myriad/common/myriad_common_test_utils.hpp
inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_reshape.cpp
inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_tests_common.hpp [new file with mode: 0644]

index e34a0e6..57ace6a 100644 (file)
@@ -5,24 +5,40 @@
 #pragma once
 
 #include "ngraph/op/op.hpp"
+#include "ngraph/runtime/host_tensor.hpp"
 
 #include <memory>
 
 namespace ngraph { namespace vpu { namespace op {
 
+enum class DynamicShapeResolverMode {
+    INFER_UPPER_BOUND_SHAPE,
+    INFER_DYNAMIC_SHAPE
+};
+
 class DynamicShapeResolver : public ngraph::op::Op {
 public:
     static constexpr NodeTypeInfo type_info{"DynamicShapeResolver", 0};
 
     const NodeTypeInfo& get_type_info() const override { return type_info; }
 
-    DynamicShapeResolver(const Output<Node>& tensorWithData, const Output<Node>& tensorWithDims);
+    DynamicShapeResolver(const Output<Node>& tensorWithData,
+                         const Output<Node>& tensorWithDims,
+                         const DynamicShapeResolverMode& mode = DynamicShapeResolverMode::INFER_UPPER_BOUND_SHAPE);
 
     void validate_and_infer_types() override;
 
     std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override;
 
     bool visit_attributes(ngraph::AttributeVisitor& visitor) override;
+
+    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) override;
+
+    void setMode(DynamicShapeResolverMode mode) { m_mode = mode; }
+    DynamicShapeResolverMode getMode() const { return m_mode; }
+
+private:
+    DynamicShapeResolverMode m_mode;
 };
 
 }  // namespace op
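
As a quick illustration of the new mode parameter (a minimal sketch, not part of the commit; the shapes and the helper name are made up), the resolver can be built in its default upper-bound mode and later switched to dynamic inference, which is what the test utilities below do around Validate():

    #include "vpu/ngraph/operations/dynamic_shape_resolver.hpp"
    #include "ngraph/opsets/opset3.hpp"
    #include <memory>

    std::shared_ptr<ngraph::vpu::op::DynamicShapeResolver> makeDsrSketch() {
        // Data tensor with a static upper-bound shape and a 1D dims tensor of matching rank.
        const auto data = std::make_shared<ngraph::opset3::Parameter>(
                ngraph::element::f32, ngraph::Shape{1, 1000});
        const auto dims = std::make_shared<ngraph::opset3::Parameter>(
                ngraph::element::i32, ngraph::Shape{2});

        // Default mode INFER_UPPER_BOUND_SHAPE: the output keeps the static shape {1, 1000}.
        auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(data, dims);

        // INFER_DYNAMIC_SHAPE relaxes the output to a dynamic shape of rank 2,
        // so evaluate() may shrink it to the actual shape at run time.
        dsr->setMode(ngraph::vpu::op::DynamicShapeResolverMode::INFER_DYNAMIC_SHAPE);
        dsr->validate_and_infer_types();
        return dsr;
    }
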
index 1a29f8e..061a6b0 100644 (file)
@@ -4,23 +4,27 @@
 
 #include "vpu/ngraph/operations/dynamic_shape_resolver.hpp"
 
+#include "ngraph/opsets/opset3.hpp"
+
 namespace ngraph { namespace vpu { namespace op {
 
 constexpr NodeTypeInfo DynamicShapeResolver::type_info;
 
-DynamicShapeResolver::DynamicShapeResolver(const Output<Node>& tensorWithData, const Output<Node>& tensorWithDims)
-    : Op(OutputVector{tensorWithData, tensorWithDims}) {
+DynamicShapeResolver::DynamicShapeResolver(
+        const Output<Node>& tensorWithData,
+        const Output<Node>& tensorWithDims,
+        const DynamicShapeResolverMode& mode)
+    : Op(OutputVector{tensorWithData, tensorWithDims}), m_mode(mode) {
     constructor_validate_and_infer_types();
 }
 
 std::shared_ptr<Node> DynamicShapeResolver::copy_with_new_args(const NodeVector& new_args) const {
     check_new_args_count(this, new_args);
-    return std::make_shared<DynamicShapeResolver>(new_args.at(0), new_args.at(1));
+    return std::make_shared<DynamicShapeResolver>(new_args.at(0), new_args.at(1), m_mode);
 }
 
 void DynamicShapeResolver::validate_and_infer_types() {
     NODE_VALIDATION_CHECK(this, get_input_size() == 2, "(", get_friendly_name(), ") supports only ", 2, " inputs, but ", get_input_size(), " provided");
-    NODE_VALIDATION_CHECK(this, get_input_partial_shape(0).is_static(), "(", get_friendly_name(), ") does not support dynamic shape for data tensor");
     NODE_VALIDATION_CHECK(this, get_input_partial_shape(1).is_static(), "(", get_friendly_name(), ") does not support dynamic shape for dims tensor");
 
     const auto& dataElementType = get_input_element_type(0);
@@ -30,18 +34,166 @@ void DynamicShapeResolver::validate_and_infer_types() {
                                                                 dimsElementType.compatible(ngraph::element::i32)),
         "(", get_friendly_name(), ") supports only i64 and i32 number type for dims tensor, but ", dimsElementType, " provided");
 
-    const auto& dataShape = get_input_shape(0);
     const auto& dimsShape = get_input_shape(1);
-    NODE_VALIDATION_CHECK(this, dimsShape.size() == 1 && dimsShape.front() == dataShape.size(), "(", get_friendly_name(), ") inputs shapes mismatch: first "
-        "input shape = ", dataShape, " second input shape = ", dimsShape, " but ", dataShape, " and ", Shape{dataShape.size()}, " are expected");
 
-    set_output_type(0, dataElementType, dataShape);
+    if (m_mode == DynamicShapeResolverMode::INFER_UPPER_BOUND_SHAPE) {
+        NODE_VALIDATION_CHECK(this, get_input_partial_shape(0).is_static(), "(", get_friendly_name(), ") does not support dynamic shape for data tensor");
+
+        const auto& dataShape = get_input_shape(0);
+        NODE_VALIDATION_CHECK(this, dimsShape.size() == 1 && dimsShape.front() == dataShape.size(), "(", get_friendly_name(), ") inputs shapes mismatch: first "
+            "input shape = ", dataShape, " second input shape = ", dimsShape, " but ", dataShape, " and ", Shape{dataShape.size()}, " are expected");
+
+        set_output_type(0, dataElementType, dataShape);
+    } else if (m_mode == DynamicShapeResolverMode::INFER_DYNAMIC_SHAPE) {
+        NODE_VALIDATION_CHECK(this, get_input_partial_shape(0).rank() == dimsShape.front(),
+                "(", get_friendly_name(), ") data and shape ranks must be equal, provided: ",
+                get_input_partial_shape(0).rank(), " vs ", dimsShape.front());
+
+        set_output_type(0, dataElementType,
+                        ngraph::PartialShape::dynamic(get_input_partial_shape(0).rank()));
+    } else {
+        NGRAPH_UNREACHABLE(this, "Unknown DynamicShapeResolverMode value, expected one of: INFER_UPPER_BOUND_SHAPE, INFER_DYNAMIC_SHAPE");
+    }
 }
 
 bool DynamicShapeResolver::visit_attributes(ngraph::AttributeVisitor&) {
     return true;
 }
 
+namespace {
+
+template<element::Type_t ET>
+bool getShapeFromHostTensorData(const HostTensorPtr& data, Shape& result) {
+    using T = typename element_type_traits<ET>::value_type;
+    T *dataPtr = data->get_data_ptr<ET>();
+    if (!dataPtr) {
+        return false;
+    }
+    if (data->get_shape().size() != 1) {
+        return false;
+    }
+    size_t outputRank = data->get_shape()[0];
+
+    for (size_t i = 0; i < outputRank; i++) {
+        result.push_back(dataPtr[i]);
+    }
+
+    return true;
+}
+
+bool getShapeFromHostTensorData(const HostTensorPtr& data, Shape& shape) {
+    switch (data->get_element_type()) {
+        case element::Type_t::i8:
+            return getShapeFromHostTensorData<element::Type_t::i8>(data, shape);
+        case element::Type_t::i16:
+            return getShapeFromHostTensorData<element::Type_t::i16>(data, shape);
+        case element::Type_t::i32:
+            return getShapeFromHostTensorData<element::Type_t::i32>(data, shape);
+        case element::Type_t::i64:
+            return getShapeFromHostTensorData<element::Type_t::i64>(data, shape);
+        case element::Type_t::u8:
+            return getShapeFromHostTensorData<element::Type_t::u8>(data, shape);
+        case element::Type_t::u16:
+            return getShapeFromHostTensorData<element::Type_t::u16>(data, shape);
+        case element::Type_t::u32:
+            return getShapeFromHostTensorData<element::Type_t::u32>(data, shape);
+        case element::Type_t::u64:
+            return getShapeFromHostTensorData<element::Type_t::u64>(data, shape);
+        default:
+            return false;
+    }
+    return true;
+}
+
+template<element::Type_t DataType>
+bool evaluate(const HostTensorPtr& inputTensor,
+              const HostTensorPtr& inputShapeTensor,
+              const HostTensorPtr& outputTensor) {
+    Shape inputShape = inputTensor->get_shape();
+    Shape outputShape;
+    if (!getShapeFromHostTensorData(inputShapeTensor, outputShape)) {
+        return false;
+    }
+
+    if (!ngraph::PartialShape(outputShape).refines(outputTensor->get_partial_shape())) {
+        return false;
+    }
+
+    outputTensor->set_shape(outputShape);
+
+    using T = typename element_type_traits<DataType>::value_type;
+    T *inputPtr = inputTensor->get_data_ptr<DataType>();
+    T *outputPtr = outputTensor->get_data_ptr<DataType>();
+
+    const auto inTotalDimSize = shape_size(inputShape);
+    const auto stridesByElements = row_major_strides(inputShape);
+
+    const auto inLineSize = inputShape[inputShape.size() - 1];
+    const auto outLineSize = outputShape[outputShape.size() - 1];
+
+    for (size_t inElementOffset = 0, outElementOffset = 0; inElementOffset < inTotalDimSize; inElementOffset += inLineSize) {
+        auto offset = inElementOffset;
+        bool isGarbageLine = false;
+        for (size_t dim = 0; dim < stridesByElements.size() - 1; ++dim) {
+            const auto coordAlongDim = offset / stridesByElements[dim];
+            if (coordAlongDim > outputShape[dim] - 1) {
+                isGarbageLine = true;
+                break;
+            }
+
+            offset %= stridesByElements[dim];
+        }
+        if (!isGarbageLine) {
+            std::copy_n(inputPtr + inElementOffset, outLineSize, outputPtr + outElementOffset);
+            outElementOffset += outLineSize;
+        }
+    }
+    return true;
+}
+
+bool evaluateDynamicShapeResolver(const HostTensorPtr& inputTensor,
+                                  const HostTensorPtr& inputShapeTensor,
+                                  const HostTensorPtr& outputTensor) {
+    bool rc = true;
+
+    switch (inputTensor->get_element_type()) {
+        TYPE_CASE(i8)(inputTensor, inputShapeTensor, outputTensor);
+            break;
+        TYPE_CASE(i16)(inputTensor, inputShapeTensor, outputTensor);
+            break;
+        TYPE_CASE(i32)(inputTensor, inputShapeTensor, outputTensor);
+            break;
+        TYPE_CASE(i64)(inputTensor, inputShapeTensor, outputTensor);
+            break;
+        TYPE_CASE(u8)(inputTensor, inputShapeTensor, outputTensor);
+            break;
+        TYPE_CASE(u16)(inputTensor, inputShapeTensor, outputTensor);
+            break;
+        TYPE_CASE(u32)(inputTensor, inputShapeTensor, outputTensor);
+            break;
+        TYPE_CASE(u64)(inputTensor, inputShapeTensor, outputTensor);
+            break;
+        TYPE_CASE(bf16)(inputTensor, inputShapeTensor, outputTensor);
+            break;
+        TYPE_CASE(f32)(inputTensor, inputShapeTensor, outputTensor);
+            break;
+        TYPE_CASE(f64)(inputTensor, inputShapeTensor, outputTensor);
+            break;
+        default:
+            rc = false;
+            break;
+    }
+
+    return rc;
+}
+
+}  // namespace
+
+bool DynamicShapeResolver::evaluate(const HostTensorVector& outputs,
+                                    const HostTensorVector& inputs) {
+    return evaluateDynamicShapeResolver(inputs[0], inputs[1], outputs[0]);
+}
+
 }  // namespace op
 }  // namespace vpu
 }  // namespace ngraph
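
The copy loop inside evaluate() walks the upper-bound input line by line (a line being the innermost dimension) and drops "garbage" lines whose coordinates fall outside the actual shape. A self-contained sketch of the same arithmetic on illustrative shapes (not part of the commit):

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <numeric>
    #include <vector>

    int main() {
        const std::vector<size_t> inShape{2, 3, 4};   // upper-bound shape
        const std::vector<size_t> outShape{2, 2, 4};  // actual dynamic shape

        std::vector<int> input(2 * 3 * 4);
        std::iota(input.begin(), input.end(), 0);
        std::vector<int> output(2 * 2 * 4);

        // Row-major strides in elements: {12, 4, 1} for shape {2, 3, 4}.
        std::vector<size_t> strides(inShape.size(), 1);
        for (size_t d = inShape.size() - 1; d > 0; --d)
            strides[d - 1] = strides[d] * inShape[d];

        const size_t inLine = inShape.back(), outLine = outShape.back();
        for (size_t inOff = 0, outOff = 0; inOff < input.size(); inOff += inLine) {
            size_t offset = inOff;
            bool isGarbageLine = false;
            for (size_t dim = 0; dim + 1 < strides.size(); ++dim) {
                if (offset / strides[dim] > outShape[dim] - 1) { isGarbageLine = true; break; }
                offset %= strides[dim];
            }
            if (!isGarbageLine) {
                std::copy_n(input.begin() + inOff, outLine, output.begin() + outOff);
                outOff += outLine;
            }
        }
        // Prints 0..7 and 12..19: the third row of each outer slice is skipped.
        for (int v : output) std::cout << v << ' ';
    }
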
index b8819a4..7f7f39e 100644 (file)
@@ -243,7 +243,7 @@ public:
 
     Pass::Ptr replaceGemmByConv();
 
-    Pass::Ptr propagateDynamismToOutputs();
+    Pass::Ptr propagateDynamism();
 
 protected:
     StageBuilder::Ptr _stageBuilder;
index c533a14..9441a38 100644 (file)
@@ -149,14 +149,14 @@ void BackEnd::serializeConstShapes(const Model& model, const mv_blob_header& blo
 
         if (shapeLocation.dimsLocation == Location::Blob) {
             serializeToBlob(data->desc().dims(), shapeLocation.dimsOffset);
-        } else if (data->usage() == DataUsage::Output) {
+        } else if (data->usage() == DataUsage::Output || data->usage() == DataUsage::Input) {
             auto ioDimsUpperBoundOffset = data->attrs().get<int>("ioDimsUpperBoundOffset");
             serializeToBlob(data->desc().dims(), ioDimsUpperBoundOffset);
         }
 
         if (shapeLocation.stridesLocation == Location::Blob) {
             serializeToBlob(data->strides(), shapeLocation.stridesOffset);
-        } else if (data->usage() == DataUsage::Output) {
+        } else if (data->usage() == DataUsage::Output || data->usage() == DataUsage::Input) {
             auto ioStridesUpperBoundOffset = data->attrs().get<int>("ioStridesUpperBoundOffset");
             serializeToBlob(data->strides(), ioStridesUpperBoundOffset);
         }
index 92663b9..6084400 100644 (file)
@@ -308,7 +308,7 @@ ShapeLocation Allocator::allocateShape(Data& data) {
         shapeLocation.dimsLocation = dataLocation.location;
         shapeLocation.dimsOffset = dataLocation.offset;
 
-        if (data->usage() == DataUsage::Output) {
+        if (data->usage() == DataUsage::Output || data->usage() == DataUsage::Input) {
             // We need to allocate memory for maximum dims values also
             data->attrs().set<int>("ioDimsUpperBoundOffset", _blobMemOffset);
             _blobMemOffset += dimsByteSize;
index b18038a..3d05bbc 100644 (file)
@@ -111,13 +111,6 @@ PassSet::Ptr PassManager::buildMiddleEnd() {
     ADD_PASS(addCopyForOutputsInsideNetwork);
     ADD_DUMP_PASS("addCopyForOutputsInsideNetwork");
 
-    // MyriadInferRequest::GetResult expects output shape data object
-    // to be in IE notation in case of dynamic data object
-    // propagateDynamismToOutputs must be applied after convertShapeNotation
-    // and addCopyForOutputsInsideNetwork to mark shape in IE notation, not MDK notation as output
-    ADD_PASS(propagateDynamismToOutputs);
-    ADD_DUMP_PASS("propagateDynamismToOutputs");
-
     ADD_PASS(initialCheck);
 
     //
@@ -295,6 +288,23 @@ PassSet::Ptr PassManager::buildMiddleEnd() {
     ADD_DUMP_PASS("processSpecialStages");
 
     //
+    // Propagate dynamism from inputs to outputs and from outputs to inputs
+    // for stages inserted by the frontend and the middleend.
+    //
+
+    // propagateDynamism must be applied after convertShapeNotation
+    // and addCopyForOutputsInsideNetwork so that the shape marked as output is in
+    // IE notation, not MDK notation. It runs after all the passes above, including
+    // processSpecialStages, to propagate dynamism for the copy stages those passes add.
+    // allocateResources must run after propagation to connect data objects with shapes.
+
+    // In case of a dynamic network output, MyriadInferRequest::GetResult expects the
+    // output shape data object to be in IE notation.
+
+    ADD_PASS(propagateDynamism);
+    ADD_DUMP_PASS("propagateDynamism");
+
+    //
     // Data location adjustment
     //
 
index 81941de..239b702 100644 (file)
@@ -189,7 +189,7 @@ AllocationResult runAllocator(const Model& model, bool onlyCheckCMX) {
     //
 
     for (auto data : model->datas()) {
-        if (data->usage() != DataUsage::Output) {
+        if (data->usage() != DataUsage::Output && data->usage() != DataUsage::Input) {
             continue;
         }
 
index 4f1c4c5..212ffca 100644 (file)
@@ -66,11 +66,18 @@ void PassImpl::run(const Model& model) {
         }
 
         // In case if data and shape had the same producer
-        // Topological order (nextStages/previousStages) needs to be updated
+        // Topological order (nextStages/previousStages) needs to be updated.
+        // It is also needed if the data is a network Input data object.
         for (const auto& dataToShapeEdge : convertedShape->childDataToShapeEdges()) {
             const auto& child = dataToShapeEdge->child();
 
-            if (!child->producer() || child->producer() != shape->producer()) {
+            const auto& childProducer = child->producer();
+            if (!childProducer) {
+                VPU_THROW_UNLESS(child->usage() == DataUsage::Input,
+                        "ConvertShapeNotation pass for shape of name {} failed: if child data of name {} "
+                        "has no producer than it must have Input data usage, actual: {}",
+                        shape->name(), child->name(), child->usage());
+            } else if (child->producer() != shape->producer()) {
                 continue;
             }
 
diff --git a/inference-engine/src/vpu/graph_transformer/src/middleend/passes/propagate_dynamism.cpp b/inference-engine/src/vpu/graph_transformer/src/middleend/passes/propagate_dynamism.cpp
new file mode 100644 (file)
index 0000000..5038553
--- /dev/null
@@ -0,0 +1,174 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "vpu/middleend/pass_manager.hpp"
+
+#include <set>
+#include <memory>
+
+namespace vpu {
+
+namespace {
+
+static const std::set<StageType> stagesSupportedInToOutPropagation = {
+        StageType::Convert,
+        StageType::Copy,
+        StageType::Power,
+        StageType::Prod,
+};
+
+static const std::set<StageType> stagesSupportedOutToInPropagation = {
+        StageType::Convert,
+};
+
+class PassImpl final : public Pass {
+public:
+    explicit PassImpl(StageBuilder::Ptr stageBuilder) : _stageBuilder(std::move(stageBuilder)) {}
+
+    static void validateShapeConversion(const Stage& stage, const Data& shape) {
+        const auto& shapeAttrs = shape->attrs();
+        VPU_THROW_UNLESS(shapeAttrs.getOrDefault("converted-notation", false),
+                "Validation shape conversion while propagation dynamism for stage {} of name {} failed: All shape parent "
+                "data object with name {} must be already converted to MDK notation", stage->type(), stage->name(), shape->name());
+
+        const auto& shapeProducer = shape->producer();
+        const auto& shapeInIENotation = shapeProducer->input(0);
+        VPU_THROW_UNLESS(shapeInIENotation->usage() == DataUsage::Intermediate || shapeInIENotation->usage() == DataUsage::Input,
+                "Validation shape conversion while propagation dynamism for stage {} of name {} failed: Shape parent data object (which is "
+                "the input with index 0 for shape producer of type {} and name {}) with name {} is expected to be an intermediate or input "
+                "data object since shape child is not an output, actual {}",
+                stage->type(), stage->name(), shapeProducer->type(), shapeProducer->name(),
+                shapeInIENotation->name(), shapeInIENotation->usage());
+
+        const auto& shapeInIENotationAttrs = shapeInIENotation->attrs();
+        VPU_THROW_UNLESS(shapeInIENotationAttrs.getOrDefault("IE-notation", false),
+                "Validation shape conversion while propagation dynamism for stage {} of name {} failed: Unexpected data object (which is "
+                "the input with index 0 for shape producer of type {} and name {}) as shape with name {} in IE notation",
+                stage->type(), stage->name(),  shapeProducer->type(), shapeProducer->name(), shapeInIENotation->name());
+    }
+
+    static void validateShapes(const Stage& stage, const Data& input, const Data& output) {
+        // Propagation is only supported for input and output with equal upper-bound shapes.
+        // For example, Prod stage with dynamic input and broadcast are not supported.
+        for (const auto& dim : input->desc().dims()) {
+            VPU_THROW_UNLESS(dim.second == output->desc().dim(dim.first),
+                    "PropagateDynamism: {} stage of name {} must have input of name {} whose upper-bound dimension {} "
+                    "equals the corresponding output dimension; expected: {}, actual: {}",
+                    stage->type(), stage->name(), input->name(), dim.first, output->desc().dim(dim.first), dim.second);
+        }
+    }
+
+    void propagateFromInputToOutput(
+            const Model& model, const Stage& stage,
+            const Data& input, const DataToShapeAllocation& parentInputShapeEdge, const Data& output) {
+        validateShapes(stage, input, output);
+
+        const auto shape = parentInputShapeEdge->parent();
+        validateShapeConversion(stage, shape);
+
+        model->connectDataWithShape(shape, output);
+
+        if (output->usage() == DataUsage::Output) {
+            // MyriadInferRequest::GetResult assumes that dynamic data object has shape data object
+            // with the same name + suffix "@shape"
+            const auto shapeName = output->name() + "@shape";
+            const auto& shapeOutput = model->addOutputData(shapeName, shape->desc());
+
+            const auto& shapeProducer = shape->producer();
+            const auto& shapeInIENotation = shapeProducer->input(0);
+
+            _stageBuilder->addCopyStage(
+                    model,
+                    "copy-for-dynamic-output",
+                    nullptr,
+                    shapeInIENotation,
+                    shapeOutput,
+                    "PropagateDynamismToOutput");
+        }
+    }
+
+    static void propagateFromOutputToInput(
+            const Model& model, const Stage& stage,
+            const Data& input, const DataToShapeAllocation& parentOutputShapeEdge, const Data& output) {
+        validateShapes(stage, input, output);
+
+        const auto shape = parentOutputShapeEdge->parent();
+        validateShapeConversion(stage, shape);
+
+        VPU_THROW_UNLESS(input->usage() == DataUsage::Input,
+                "PropagateDynamism for stage {} of type {} failed: propagating output dynamism to "
+                "input with name {} is supported only for data with Input usage, actual: {}",
+                stage->name(), stage->type(), input->name(), input->usage());
+
+        model->connectDataWithShape(shape, input);
+    }
+
+    void run(const Model& model) override {
+        VPU_PROFILE(propagateDynamism);
+
+        for (const auto& stage : model->getStages()) {
+            if (stagesSupportedInToOutPropagation.count(stage->type())) {
+                VPU_THROW_UNLESS(stage->numOutputs() == 1,
+                        "PropagateDynamism from input data to output: only single output stages are supported, but {} stage "
+                        "of name {} has {} outputs", stage->type(), stage->name(), stage->numOutputs());
+
+                const auto& inputs = stage->inputs();
+                std::vector<DataToShapeAllocation> parentInputShapeEdges;
+
+                for (const auto& input : inputs) {
+                    if (const auto parentDataToShapeEdge = input->parentDataToShapeEdge()) {
+                        parentInputShapeEdges.push_back(parentDataToShapeEdge);
+                    }
+                }
+
+                const auto& output = stage->output(0);
+                const auto& parentOutputShapeEdge = output->parentDataToShapeEdge();
+
+                const auto needPropagateFromInputToOutput = !parentInputShapeEdges.empty() && !parentOutputShapeEdge;
+
+                if (needPropagateFromInputToOutput) {
+                    VPU_THROW_UNLESS(parentInputShapeEdges.size() == 1,
+                            "PropagateDynamism from input data to output for stage {} of name {} failed: propagation dynamism "
+                            "from multiple inputs is not supported, actual number of dynamic inputs: {}",
+                            stage->type(), stage->name(), parentInputShapeEdges.size());
+                    const auto& parentInputShapeEdge = parentInputShapeEdges[0];
+                    const auto& input = parentInputShapeEdge->child();
+
+                    propagateFromInputToOutput(model, stage, input, parentInputShapeEdge, output);
+                }
+            }
+
+            if (stagesSupportedOutToInPropagation.count(stage->type())) {
+                VPU_THROW_UNLESS(stage->numInputs() == 1,
+                        "PropagateDynamism from output data to input: only single input stages are supported, but {} stage "
+                        "of name {} has {} inputs", stage->type(), stage->name(), stage->numInputs());
+                VPU_THROW_UNLESS(stage->numOutputs() == 1,
+                        "PropagateDynamism from output data to input: only single output stages are supported, but {} stage "
+                        "of name {} has {} outputs", stage->type(), stage->name(), stage->numOutputs());
+                const auto& input = stage->input(0);
+                const auto& output = stage->output(0);
+
+                const auto& parentInputShapeEdge = input->parentDataToShapeEdge();
+                const auto& parentOutputShapeEdge = output->parentDataToShapeEdge();
+
+                const auto needPropagateFromOutputToInput = !parentInputShapeEdge && parentOutputShapeEdge;
+
+                if (needPropagateFromOutputToInput) {
+                    propagateFromOutputToInput(model, stage, input, parentOutputShapeEdge, output);
+                }
+            }
+        }
+    }
+
+private:
+    StageBuilder::Ptr _stageBuilder;
+};
+
+}  // namespace
+
+Pass::Ptr PassManager::propagateDynamism() {
+    return std::make_shared<PassImpl>(_stageBuilder);
+}
+
+}  // namespace vpu
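
To make the conditions in run() easier to follow, here is a minimal decision-table sketch of when the new pass fires (illustrative types only, not the real VPU model API):

    #include <iostream>
    #include <string>

    struct StageInfo {
        std::string type;
        bool inputHasShapeEdge;   // some input has a parentDataToShapeEdge
        bool outputHasShapeEdge;  // the output has a parentDataToShapeEdge
    };

    const char* decide(const StageInfo& s) {
        const bool inToOut = s.type == "Convert" || s.type == "Copy" ||
                             s.type == "Power" || s.type == "Prod";
        if (inToOut && s.inputHasShapeEdge && !s.outputHasShapeEdge)
            return "propagate input -> output";
        if (s.type == "Convert" && !s.inputHasShapeEdge && s.outputHasShapeEdge)
            return "propagate output -> input";
        return "nothing to do";
    }

    int main() {
        std::cout << decide({"Copy", true, false}) << '\n';     // input -> output
        std::cout << decide({"Convert", false, true}) << '\n';  // output -> input
        std::cout << decide({"Prod", false, false}) << '\n';    // nothing to do
    }
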
diff --git a/inference-engine/src/vpu/graph_transformer/src/middleend/passes/propagate_dynamism_to_outputs.cpp b/inference-engine/src/vpu/graph_transformer/src/middleend/passes/propagate_dynamism_to_outputs.cpp
deleted file mode 100644 (file)
index 1110fab..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright (C) 2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "vpu/middleend/pass_manager.hpp"
-
-#include <set>
-#include <memory>
-
-namespace vpu {
-
-namespace {
-
-class PassImpl final : public Pass {
-public:
-    explicit PassImpl(StageBuilder::Ptr stageBuilder) : _stageBuilder(std::move(stageBuilder)) {}
-
-    void run(const Model& model) override {
-        for (const auto& data : model->datas()) {
-            if (data->usage() != DataUsage::Output || data->parentDataToShapeEdge() != nullptr) {
-                continue;
-            }
-
-            const auto& producer = data->producer();
-            VPU_THROW_UNLESS(producer, "Output data must have a producer, but {} doesn't have", data->name());
-
-            if (producer->type() != StageType::Convert) {
-                continue;
-            }
-
-            VPU_THROW_UNLESS(producer->numInputs() == 1,
-                "Only single input producers are supported, but {} has {} inputs",
-                producer->name(), producer->numInputs());
-
-            const auto& input = producer->input(0);
-            const auto& parentDataToShapeEdge = input->parentDataToShapeEdge();
-            if (parentDataToShapeEdge == nullptr) {
-                continue;
-            }
-            const auto parent = parentDataToShapeEdge->parent();
-
-            const auto& parentAttrs = parent->attrs();
-            VPU_THROW_UNLESS(parentAttrs.getOrDefault("converted-notation", false),
-                "All shape parent data object must be already converted to MDK notation, but {} is in IE notation",
-                parent->name());
-
-            const auto& parentInIENotation = parent->producer()->input(0);
-            const auto& parentInIENotationAttrs = parentInIENotation->attrs();
-            VPU_THROW_UNLESS(parentInIENotationAttrs.getOrDefault("IE-notation", false),
-                 "Data object {} is expected to be shape in IE notation, but is not marked as it",
-                 parentInIENotation->name());
-
-            VPU_THROW_UNLESS(parentInIENotation->usage() == DataUsage::Intermediate,
-                "Shape data object in IE notation {} is expected to be an {} data object, but it has usage {}",
-                parentInIENotation->name(), DataUsage::Intermediate, parentInIENotation->usage());
-
-            model->connectDataWithShape(parent, data);
-
-            // MyriadInferRequest::GetResult assumes that dynamic data object has shape data object
-            // with the same name + suffix "@shape"
-            const auto shapeName = data->name() + "@shape";
-            const auto& shapeOutput = model->addOutputData(shapeName, parentInIENotation->desc());
-
-            _stageBuilder->addCopyStage(
-                model,
-                "copy-for-dynamic-output",
-                nullptr,
-                parentInIENotation,
-                shapeOutput,
-                "PropagateDynamismToOutput");
-        }
-    }
-
-private:
-    StageBuilder::Ptr _stageBuilder;
-};
-
-}  // namespace
-
-Pass::Ptr PassManager::propagateDynamismToOutputs() {
-    return std::make_shared<PassImpl>(_stageBuilder);
-}
-
-}  // namespace vpu
index db6198a..b3da759 100644 (file)
@@ -381,8 +381,6 @@ StageOutput ModelObj::addStageOutput(
         IE_ASSERT(stage->_parentStageEdge == nullptr);
         IE_ASSERT(consumerEdge->_consumer->_parentStageEdge == nullptr);
         setStagesOrder(stage, consumerEdge->consumer());
-
-        _initialStages.erase(consumerEdge->_consumer);
     }
 
     return edge;
@@ -528,8 +526,6 @@ void ModelObj::replaceStageInput(
         IE_ASSERT(edge->_consumer->_parentStageEdge == nullptr);
         IE_ASSERT(newInput->_producerEdge->_producer->_parentStageEdge == nullptr);
         setStagesOrder(newInput->producerEdge()->producer(), edge->consumer());
-
-        _initialStages.erase(edge->_consumer);
     }
 
     if (edge->_consumer->_prevStages.empty()) {
@@ -628,8 +624,6 @@ void ModelObj::replaceStageOutput(
         IE_ASSERT(edge->_producer->_parentStageEdge == nullptr);
         IE_ASSERT(consumerEdge->_consumer->_parentStageEdge == nullptr);
         setStagesOrder(edge->producer(), consumerEdge->consumer());
-
-        _initialStages.erase(consumerEdge->_consumer);
     }
 }
 
@@ -1918,8 +1912,10 @@ void ModelObj::cleanUp() {
 
     for (const auto& data : datas()) {
         if (data->_usage == DataUsage::Input) {
-            VPU_THROW_UNLESS(!data->_consumerEdges.empty(),
-                "Input data {} must have at least one consumers, but got zero.", data->name());
+            if (data->childDataToShapeEdges().empty()) {
+                VPU_THROW_UNLESS(!data->_consumerEdges.empty(),
+                    "Input data {} must have at least one consumers, but got zero.", data->name());
+            }
             IE_ASSERT(data->_parentDataToDataEdge == nullptr);
         } else if (data->_usage == DataUsage::Output) {
             IE_ASSERT(data->_producerEdge != nullptr);
@@ -1998,6 +1994,7 @@ void ModelObj::reorderStages(
 void ModelObj::setStagesOrder(const Stage& parent, const Stage& child) {
     ++parent->_nextStages[child];
     ++child->_prevStages[parent];
+    _initialStages.erase(child);
 }
 
 void ModelObj::removeStagesOrder(const Stage& parent, const Stage& child) {
@@ -2018,6 +2015,9 @@ void ModelObj::removeStagesOrder(const Stage& parent, const Stage& child) {
     if (childPrevStage->second <= 0) {
         child->_prevStages.erase(childPrevStage);
     }
+    if (child->_prevStages.empty()) {
+        _initialStages.emplace(child);
+    }
 }
 
 void ModelObj::runDFS(
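
The model.cpp change above moves the _initialStages bookkeeping into setStagesOrder/removeStagesOrder, so the set always holds exactly the stages with no predecessors. A toy sketch of that invariant (illustrative structures, not the real ModelObj):

    #include <iostream>
    #include <map>
    #include <set>
    #include <string>

    struct Graph {
        std::map<std::string, std::map<std::string, int>> prevStages;  // child -> parent -> edge count
        std::set<std::string> initialStages;

        void addStage(const std::string& s) { initialStages.insert(s); }

        void setStagesOrder(const std::string& parent, const std::string& child) {
            ++prevStages[child][parent];
            initialStages.erase(child);  // child gained a predecessor
        }

        void removeStagesOrder(const std::string& parent, const std::string& child) {
            auto& parents = prevStages[child];
            if (--parents[parent] <= 0) parents.erase(parent);
            if (parents.empty()) initialStages.insert(child);  // child is initial again
        }
    };

    int main() {
        Graph g;
        g.addStage("A"); g.addStage("B");
        g.setStagesOrder("A", "B");     // B leaves the initial set
        g.removeStagesOrder("A", "B");  // B re-enters the initial set
        for (const auto& s : g.initialStages) std::cout << s << ' ';  // prints: A B
    }
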
index a30c457..e48f80c 100644 (file)
@@ -15,11 +15,7 @@ void FrontEnd::parseDSR(const Model& model, const ie::CNNLayerPtr& layer, const
 
     VPU_THROW_UNLESS(outputs.size() == 1, "Parsing layer {} of type {} failed: got {} outputs, while {} were expected",
          layer->name, layer->type, outputs.size(), 1);
-    const auto& dataOutput = outputs[0];
-
-    const auto dataProducerEdge = data->producerEdge();
-    VPU_THROW_UNLESS(dataProducerEdge != nullptr, "Parsing layer {} of type {} failed: input with index {} (of name {}) must have a producer",
-        layer->name, layer->type, 0, data->name());
+    auto dataOutput = outputs[0];
 
     const auto ngraphNode = layer->getNode();
     VPU_THROW_UNLESS(!ngraphNode || ngraphNode->get_input_source_output(0).get_target_inputs().size() == 1,
@@ -46,25 +42,55 @@ void FrontEnd::parseDSR(const Model& model, const ie::CNNLayerPtr& layer, const
         "input with index {} (of name {}), actual {} and {} respectively",
         layer->name, layer->type, 0, shape->name(), 1, data->name(), shape->desc().totalDimSize(), data->desc().numDims());
 
+    const auto dataProducerEdge = data->producerEdge();
     const auto shapeProducerEdge = shape->producerEdge();
-    VPU_THROW_UNLESS(shapeProducerEdge != nullptr, "Parsing layer {} of type {} failed: input with index {} (of name {}) must have a producer",
-        layer->name, layer->type, 1, shape->name());
-
-    if (auto dataToShapeEdge = data->parentDataToShapeEdge()) {
-        const auto& parent = dataToShapeEdge->parent();
-        VPU_THROW_UNLESS(parent == shape, "Myriad plugin encountered layer of type \"{}\" and name \"{}\" with input #{} (data input with name \"{}\") that "
-            "already has parent in terms of data to shape connection. The parent is expected to be input #{} (shape input with name \"{}\") of the layer, so "
-            "it's a \"{}\" with already connected inputs, but actual parent is other data object with name \"{}\". The case of connected inputs is considered "
-            "as \"{}\" that goes directly to \"{}\" as a result of some optimization (operation between them has been optimized out). Other cases, when some "
-            "input already has a connection, but with other data object are prohibited.",
-            layer->type, layer->name, 0, data->name(), 1, shape->name(), layer->type, parent->name(), layer->type, layer->type);
-        model->disconnectDatas(dataToShapeEdge);
+
+    if (dataProducerEdge == nullptr) {
+        VPU_THROW_UNLESS(data->usage() == DataUsage::Input,
+            "Parsing layer {} of type {} failed: if input with index {} (of name {}) has not a producer, it must have Input "
+            "data usage, actual: {}", layer->name, layer->type, 0, data->name(), data->usage());
+        const auto& origData = dataOutput->origData();
+        VPU_THROW_UNLESS(origData != nullptr,
+            "Parsing layer {} of type {} failed: output data {} must have original IE data",
+            layer->name, layer->type, 0, dataOutput->name());
+
+        bindData(data, origData);
+        model->removeUnusedData(dataOutput);
+        dataOutput = data;
+    } else {
+        VPU_THROW_UNLESS(data->usage() == DataUsage::Intermediate,
+            "Parsing layer {} of type {} failed: if input with index {} (of name {}) has a producer, it must have Intermediate "
+            "data usage, actual: ", layer->name, layer->type, 0, data->name(), data->usage());
+
+        if (auto dataToShapeEdge = data->parentDataToShapeEdge()) {
+            const auto& parent = dataToShapeEdge->parent();
+            VPU_THROW_UNLESS(parent == shape,
+                "Myriad plugin encountered layer of type \"{}\" and name \"{}\" with input #{} (data input with name \"{}\") that "
+                "already has parent in terms of data to shape connection. The parent is expected to be input #{} (shape input with "
+                "name \"{}\") of the layer, so it's a \"{}\" with already connected inputs, but actual parent is other data object "
+                "with name \"{}\". The case of connected inputs is considered as \"{}\" that goes directly to \"{}\" as a result of "
+                "some optimization (operation between them has been optimized out). Other cases, when some input already has a "
+                "connection, but with other data object are prohibited.",
+                layer->type, layer->name, 0, data->name(), 1, shape->name(),
+                layer->type, parent->name(), layer->type, layer->type);
+            model->disconnectDatas(dataToShapeEdge);
+        }
+        model->replaceStageOutput(dataProducerEdge, dataOutput);
+        model->removeUnusedData(data);
+    }
+
+    if (shapeProducerEdge == nullptr) {
+        VPU_THROW_UNLESS(shape->usage() == DataUsage::Input,
+            "Parsing layer {} of type {} failed: if input with index {} (of name {}) has not a producer, it must have Input "
+            "data usage, actual: {}", layer->name, layer->type, 1, shape->name(), shape->usage());
+    } else {
+        VPU_THROW_UNLESS(shape->usage() == DataUsage::Intermediate,
+            "Parsing layer {} of type {} failed: if input with index {} (of name {}) has a producer, it must have Intermediate "
+            "data usage, actual: {}", layer->name, layer->type, 1, shape->name(), shape->usage());
     }
-    model->replaceStageOutput(dataProducerEdge, dataOutput);
-    model->removeUnusedData(data);
 
+    auto shapeDataObject = shape;
     if (dataOutput->usage() == DataUsage::Output) {
-        // Create the second output with shape in case of dynamic output
         const auto& shapeOutput = model->addOutputData(dataOutput->name() + "@shape", shape->desc());
 
         bindData(shapeOutput, shape->origData());
@@ -75,13 +101,23 @@ void FrontEnd::parseDSR(const Model& model, const ie::CNNLayerPtr& layer, const
         for (const auto& dataToShapeEdge : shape->childDataToShapeEdges()) {
             model->replaceDataToShapeParent(dataToShapeEdge, shapeOutput);
         }
-        model->replaceStageOutput(shapeProducerEdge, shapeOutput);
-        model->removeUnusedData(shape);
 
-        model->connectDataWithShape(shapeOutput, dataOutput);
-    } else {
-        model->connectDataWithShape(shape, dataOutput);
+        if (!shapeProducerEdge) {
+            _stageBuilder->addCopyStage(
+                    model,
+                    layer->name + "@copy-for-dynamic-output",
+                    layer,
+                    shape,
+                    shapeOutput,
+                    "DynamicShapeResolver");
+        } else {
+            model->replaceStageOutput(shapeProducerEdge, shapeOutput);
+            model->removeUnusedData(shape);
+        }
+
+        shapeDataObject = shapeOutput;
     }
+    model->connectDataWithShape(shapeDataObject, dataOutput);
 }
 
 }  // namespace vpu
index 57ca33a..9f246af 100644 (file)
@@ -147,8 +147,18 @@ static void copyBlobAccordingUpperBound(
     const auto inLayout = in->getTensorDesc().getLayout();
     const auto outLayout = out->getTensorDesc().getLayout();
 
-    const auto& inDims = in->getTensorDesc().getDims();
-    const auto& outDims = out->getTensorDesc().getDims();
+    const auto& inBlockingDesc = in->getTensorDesc().getBlockingDesc();
+    const auto& outBlockingDesc = out->getTensorDesc().getBlockingDesc();
+
+    const auto& inDims = inBlockingDesc.getBlockDims();
+    const auto& outDims = outBlockingDesc.getBlockDims();
+    const auto inTotalDimSize = in->byteSize();
+
+    // Strides in the blocking description are expressed in elements,
+    // so we need to multiply them by the element size.
+    auto inStrides = inBlockingDesc.getStrides();
+    std::transform(inStrides.begin(), inStrides.end(), inStrides.begin(),
+                   std::bind(std::multiplies<size_t>(), std::placeholders::_1, in->element_size()));
 
     IE_ASSERT(inLayout == outLayout);
 
@@ -158,22 +168,26 @@ static void copyBlobAccordingUpperBound(
     auto outPtr = out->cbuffer().as<uint8_t *>();
     IE_ASSERT(outPtr != nullptr);
 
-    if (inDims.size() == 1) {
-        std::copy_n(
-            in->cbuffer().as<uint8_t*>(),
-            in->byteSize(),
-            out->buffer().as<uint8_t*>());
-    } else if (inDims.size() == 2) {
-        size_t inLineSize = inDims[1] * in->element_size();
-        size_t outLineSize = outDims[1] * out->element_size();
-        for (size_t n = 0; n < outDims[0]; n++) {
-            std::copy_n(
-                in->cbuffer().as<uint8_t*>() + n * inLineSize,
-                outLineSize,
-                out->buffer().as<uint8_t*>() + n * outLineSize);
+    const auto inLineByteSize = inDims[inDims.size() - 1] * in->element_size();
+    const auto outLineByteSize = outDims[outDims.size() - 1] * out->element_size();
+
+    for (size_t inByteOffset = 0, outByteOffset = 0; inByteOffset < inTotalDimSize; inByteOffset += inLineByteSize) {
+        auto offset = inByteOffset;
+        bool isGarbageLine = false;
+        for (size_t dim = 0; dim < inStrides.size() - 1; ++dim) {
+            const auto coordAlongDim = offset / inStrides[dim];
+            if (coordAlongDim > outDims[dim] - 1) {
+                isGarbageLine = true;
+                break;
+            }
+
+            offset %= inStrides[dim];
+        }
+        if (!isGarbageLine) {
+            // We transfer outLineByteSize bytes, so garbage data at the end of the line is not copied.
+            std::copy_n(inPtr + inByteOffset, outLineByteSize, outPtr + outByteOffset);
+            outByteOffset += outLineByteSize;
         }
-    } else {
-        VPU_THROW_EXCEPTION << "Copying of blobs with dynamic shape and num dims greater than 2 unsupported yet";
     }
 }
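
Since BlockingDesc strides are expressed in elements while the copy above works in bytes, each stride is scaled by the element size. A tiny sketch of that conversion in isolation (the values are illustrative, e.g. FP16 data):

    #include <algorithm>
    #include <functional>
    #include <iostream>
    #include <vector>

    int main() {
        std::vector<size_t> strides{12, 4, 1};  // element strides for shape {2, 3, 4}
        const size_t elementSize = 2;           // bytes per element, e.g. FP16
        std::transform(strides.begin(), strides.end(), strides.begin(),
                       std::bind(std::multiplies<size_t>(), std::placeholders::_1, elementSize));
        for (auto s : strides) std::cout << s << ' ';  // prints: 24 8 2
    }
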
 
diff --git a/inference-engine/tests/functional/plugin/myriad/common/myriad_common_test_utils.cpp b/inference-engine/tests/functional/plugin/myriad/common/myriad_common_test_utils.cpp
new file mode 100644 (file)
index 0000000..ff8fd80
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "myriad_common_test_utils.hpp"
+
+#include <cstdlib>
+#include <string>
+
+namespace CommonTestUtils {
+namespace vpu {
+
+bool CheckMyriad2() {
+    if (const auto& envVar = std::getenv("IE_VPU_MYRIADX")) {
+        return std::stoi(envVar) == 0;
+    }
+    return true;
+}
+
+}  // namespace vpu
+}  // namespace CommonTestUtils
+
index c27cea0..acf939b 100644 (file)
@@ -4,18 +4,10 @@
 
 #pragma once
 
-#include <cstdlib>
-#include <iostream>
-
 namespace CommonTestUtils {
 namespace vpu {
 
-bool CheckMyriad2() {
-    if (const auto& envVar = std::getenv("IE_VPU_MYRIADX")) {
-        return std::stoi(envVar) == 0;
-    }
-    return true;
-}
+extern bool CheckMyriad2();
 
 }  // namespace vpu
 }  // namespace CommonTestUtils
index 58413e1..16d7484 100644 (file)
@@ -2,53 +2,41 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
-
-#include <functional_test_utils/layer_test_utils.hpp>
-#include <ngraph_functions/builders.hpp>
+#include "dsr_tests_common.hpp"
 
 namespace {
 
-using DataType = ngraph::element::Type_t;
-using DataShape = ngraph::Shape;
+using namespace LayerTestsUtils::vpu;
+
 using ShapeDescriptor = std::vector<int32_t>;
-using ReshapeTestParams = std::tuple<DataShape, bool, ShapeDescriptor>;
+using ReshapeTestParams = std::tuple<DataShapeWithUpperBound, bool, ShapeDescriptor>;
 
 using Parameters = std::tuple<
         DataType,
         ReshapeTestParams,
-        LayerTestsUtils::TargetDevice
->;
+        LayerTestsUtils::TargetDevice>;
+
 
-class DSR_Reshape : public testing::WithParamInterface<Parameters>, public LayerTestsUtils::LayerTestsCommon {
+class DSR_Reshape : public testing::WithParamInterface<Parameters>, public DSR_TestsCommon {
 protected:
-    void SetUp() override {
+    std::shared_ptr<ngraph::Node> createTestedOp() override {
         const auto& parameters = GetParam();
         const auto& inDataType = std::get<0>(GetParam());
         const auto& reshapeTestParams = std::get<1>(GetParam());
         targetDevice = std::get<2>(GetParam());
 
-        const auto& inDataShape = std::get<0>(reshapeTestParams);
+        const auto& inDataShapes = std::get<0>(reshapeTestParams);
         const auto& specialZero = std::get<1>(reshapeTestParams);
         const auto& outShapeDescriptor = std::get<2>(reshapeTestParams);
 
-        const auto inDataParam = std::make_shared<ngraph::op::Parameter>(
-                inDataType, inDataShape);
-        const auto inDataShapeParam = std::make_shared<ngraph::op::Parameter>(
-                ngraph::element::i32, ngraph::Shape{inDataShape.size()});
-        const auto dsr  = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(
-                inDataParam, inDataShapeParam);
+        const auto inputSubgraph = createInputSubgraphWithDSR(inDataType, inDataShapes);
 
-        const auto outShapeDescriptorConstNode = std::make_shared<ngraph::op::Constant>(
+        const auto outShapeDescriptorConstNode = std::make_shared<ngraph::opset3::Constant>(
                 ngraph::element::i64, ngraph::Shape{outShapeDescriptor.size()}, outShapeDescriptor);
-        const auto reshape = std::make_shared<ngraph::op::v1::Reshape>(
-                dsr, outShapeDescriptorConstNode, specialZero);
+        const auto reshape = std::make_shared<ngraph::opset3::Reshape>(
+                inputSubgraph, outShapeDescriptorConstNode, specialZero);
 
-        const auto result = std::make_shared<ngraph::op::Result>(reshape);
-        function = std::make_shared<ngraph::Function>(
-                ngraph::ResultVector{result},
-                ngraph::ParameterVector{inDataParam, inDataShapeParam},
-                "DSR-Reshape");
+        return reshape;
     }
 };
 
@@ -57,15 +45,19 @@ TEST_P(DSR_Reshape, CompareWithReference) {
 }
 
 std::vector<ReshapeTestParams> reshapeTestParams = {
-        std::make_tuple(DataShape{1, 5, 5, 24}, true, ShapeDescriptor{0, -1, 4}),
-        std::make_tuple(DataShape{1, 5, 5, 0}, false, ShapeDescriptor{0, 4}),
-        std::make_tuple(DataShape{1, 3, 128, 256}, true, ShapeDescriptor{0, 0, 64, 512}),
+        std::make_tuple(DataShapeWithUpperBound{{1, 750}, {1, 1000}}, true, ShapeDescriptor{-1, 1}),
+        std::make_tuple(DataShapeWithUpperBound{{750, 1}, {1000, 1}}, true, ShapeDescriptor{-1}),
+        std::make_tuple(DataShapeWithUpperBound{{750, 1}, {750, 1}}, true, ShapeDescriptor{-1, 1, 1, 1}),
+        std::make_tuple(DataShapeWithUpperBound{{750, 4}, {1000, 4}}, true, ShapeDescriptor{1, -1, 4}),
+        std::make_tuple(DataShapeWithUpperBound{{750}, {1000}}, true, ShapeDescriptor{1, 1, -1}),
 };
 
-INSTANTIATE_TEST_CASE_P(DISABLED_DynamicReshape, DSR_Reshape,
-                        ::testing::Combine(
-                                ::testing::Values(ngraph::element::f16, ngraph::element::f32, ngraph::element::i32),
-                                ::testing::ValuesIn(reshapeTestParams),
-                                ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)));
+INSTANTIATE_TEST_CASE_P(DynamicReshape, DSR_Reshape,
+    ::testing::Combine(
+        ::testing::Values(ngraph::element::f16,
+                          ngraph::element::f32,
+                          ngraph::element::i32),
+        ::testing::ValuesIn(reshapeTestParams),
+        ::testing::Values(CommonTestUtils::DEVICE_MYRIAD)));
 
 }  // namespace
diff --git a/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_tests_common.hpp b/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_tests_common.hpp
new file mode 100644 (file)
index 0000000..2905eff
--- /dev/null
@@ -0,0 +1,110 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "../common/myriad_common_test_utils.hpp"
+
+#include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
+#include <vpu/ngraph/transformations/dynamic_to_static_shape.hpp>
+
+#include "vpu/private_plugin_config.hpp"
+
+#include <functional_test_utils/layer_test_utils.hpp>
+#include <ngraph_functions/builders.hpp>
+
+namespace LayerTestsUtils {
+namespace vpu {
+
+using DataType = ngraph::element::Type;
+using DataShape = ngraph::Shape;
+
+struct DataShapeWithUpperBound {
+    DataShape shape;
+    DataShape upperBoundShape;
+};
+
+class DSR_TestsCommon : public LayerTestsUtils::LayerTestsCommon {
+protected:
+    std::unordered_map<std::string, DataShape> m_shapes;
+    ngraph::ParameterVector m_parameterVector;
+
+    virtual std::shared_ptr<ngraph::Node> createInputSubgraphWithDSR(
+            const DataType& inDataType, const DataShapeWithUpperBound& shapes,
+            const std::string& suffix = "") {
+        const auto inDataParam = std::make_shared<ngraph::opset3::Parameter>(
+                inDataType, shapes.upperBoundShape);
+        inDataParam->set_friendly_name(inDataParam->get_friendly_name() + suffix);
+        const auto inDataShapeParam = std::make_shared<ngraph::opset3::Parameter>(
+                ngraph::element::i32, ngraph::Shape{shapes.shape.size()});
+        inDataShapeParam->set_friendly_name(inDataParam->get_friendly_name() + "/shape");
+
+        m_shapes[inDataShapeParam->get_friendly_name()] = shapes.shape;
+        m_parameterVector.push_back(inDataParam);
+        m_parameterVector.push_back(inDataShapeParam);
+
+        const auto dsr = std::make_shared<ngraph::vpu::op::DynamicShapeResolver>(
+                inDataParam, inDataShapeParam);
+
+        return dsr;
+    }
+
+    virtual std::shared_ptr<ngraph::Node> createTestedOp() = 0;
+
+    void SetUp() override {
+        SetRefMode(LayerTestsUtils::RefMode::CONSTANT_FOLDING);
+        configuration[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+        if (CommonTestUtils::vpu::CheckMyriad2()) {
+            configuration[VPU_CONFIG_KEY(DISABLE_REORDER)] = CONFIG_VALUE(YES);
+        }
+
+        const auto testedOp = createTestedOp();
+        const auto result = std::make_shared<ngraph::opset3::Result>(testedOp);
+
+        function = std::make_shared<ngraph::Function>(
+                ngraph::NodeVector{result},
+                m_parameterVector,
+                "DSR-" + std::string(testedOp->get_type_name()));
+        testedOp->set_output_type(0, testedOp->get_input_element_type(0), ngraph::PartialShape::dynamic(
+                testedOp->get_output_partial_shape(0).rank()));
+        ::vpu::DynamicToStaticShape().transform(function);
+    }
+
+    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override {
+        const auto& shapeIt = m_shapes.find(info.name());
+        if (shapeIt == m_shapes.end()) {
+            return LayerTestsCommon::GenerateInput(info);
+        }
+
+        auto blob = make_blob_with_precision(info.getTensorDesc());
+        blob->allocate();
+
+        auto dataPtr = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob)->rwmap().as<int32_t*>();
+        for (size_t i = 0; i < blob->size(); ++i) {
+            dataPtr[i] = shapeIt->second[i];
+        }
+
+        return blob;
+    }
+
+    void Validate() override {
+        for (const auto& op : function->get_ordered_ops()) {
+            if (const auto dsr = ngraph::as_type_ptr<ngraph::vpu::op::DynamicShapeResolver>(op)) {
+                dsr->setMode(ngraph::vpu::op::DynamicShapeResolverMode::INFER_DYNAMIC_SHAPE);
+            }
+        }
+        function->validate_nodes_and_infer_types();
+
+        LayerTestsCommon::Validate();
+
+        for (const auto& op : function->get_ordered_ops()) {
+            if (const auto dsr = ngraph::as_type_ptr<ngraph::vpu::op::DynamicShapeResolver>(op)) {
+                dsr->setMode(ngraph::vpu::op::DynamicShapeResolverMode::INFER_UPPER_BOUND_SHAPE);
+            }
+        }
+    }
+};
+
+}  // namespace vpu
+}  // namespace LayerTestsUtils
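
For reference, a hypothetical test built on the new base class only needs to override createTestedOp(), mirroring the DSR_Reshape rewrite above (DSR_Relu and its parameter list are illustrative, not part of the commit):

    #include "dsr_tests_common.hpp"

    namespace {

    using namespace LayerTestsUtils::vpu;

    using Parameters = std::tuple<DataType, DataShapeWithUpperBound, LayerTestsUtils::TargetDevice>;

    class DSR_Relu : public testing::WithParamInterface<Parameters>, public DSR_TestsCommon {
    protected:
        std::shared_ptr<ngraph::Node> createTestedOp() override {
            const auto& inDataType = std::get<0>(GetParam());
            const auto& inDataShapes = std::get<1>(GetParam());
            targetDevice = std::get<2>(GetParam());

            // The base class builds Parameter -> DSR and remembers the actual shape
            // so GenerateInput() can feed it to the shape parameter at run time.
            const auto inputSubgraph = createInputSubgraphWithDSR(inDataType, inDataShapes);
            return std::make_shared<ngraph::opset3::Relu>(inputSubgraph);
        }
    };

    TEST_P(DSR_Relu, CompareWithReference) {
        Run();
    }

    }  // namespace
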