From 7bd3738afdc026cbdc327eb9841bd7c3c900459f Mon Sep 17 00:00:00 2001
From: Vitaliy Urusovskij
Date: Wed, 25 Nov 2020 14:37:21 +0300
Subject: [PATCH] Add `fillBlobs` to fix issue with negative values in
 `image_info` input layer (#3335)

---
 .../stress_tests/common/ie_pipelines/pipelines.cpp |  10 +-
 tests/stress_tests/common/ie_pipelines/pipelines.h |   3 +-
 tests/stress_tests/common/ie_utils.cpp             |  58 +++++++++++
 tests/stress_tests/common/ie_utils.h               | 114 +++++++++++++++++++++
 tests/stress_tests/memcheck_tests/tests.cpp        |   7 ++
 tests/stress_tests/memleaks_tests/tests.cpp        |  10 +-
 .../tests_pipelines/tests_pipelines.cpp            |   4 +-
 .../tests_pipelines/tests_pipelines.h              |   2 +-
 .../tests_pipelines_full_pipeline.cpp              |  42 ++++++++
 9 files changed, 242 insertions(+), 8 deletions(-)
 create mode 100644 tests/stress_tests/common/ie_utils.cpp
 create mode 100644 tests/stress_tests/common/ie_utils.h

diff --git a/tests/stress_tests/common/ie_pipelines/pipelines.cpp b/tests/stress_tests/common/ie_pipelines/pipelines.cpp
index 4a73a04..c31c636 100644
--- a/tests/stress_tests/common/ie_pipelines/pipelines.cpp
+++ b/tests/stress_tests/common/ie_pipelines/pipelines.cpp
@@ -4,6 +4,7 @@
 
 #include "pipelines.h"
 #include "../utils.h"
+#include "../ie_utils.h"
 
 #include
 #include
@@ -113,6 +114,12 @@ std::function infer_request_inference(const std::string &model, const st
     CNNNetwork cnnNetwork = ie.ReadNetwork(model);
     ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
     InferRequest infer_request = exeNetwork.CreateInferRequest();
+
+    auto batchSize = cnnNetwork.getBatchSize();
+    batchSize = batchSize != 0 ? batchSize : 1;
+    const InferenceEngine::ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
+    fillBlobs(infer_request, inputsInfo, batchSize);
+
     infer_request.Infer();
     OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
     for (auto &output : output_info)
@@ -120,10 +127,9 @@ std::function infer_request_inference(const std::string &model, const st
     };
 }
 
-std::function<void()> reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::CNNNetwork& cnnNetwork) {
+std::function<void()> reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::OutputsDataMap& output_info) {
     return [&] {
         infer_request.Infer();
-        OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
         for (auto &output : output_info)
             Blob::Ptr outputBlob = infer_request.GetBlob(output.first);
     };
diff --git a/tests/stress_tests/common/ie_pipelines/pipelines.h b/tests/stress_tests/common/ie_pipelines/pipelines.h
index f34a11c..c474d83 100644
--- a/tests/stress_tests/common/ie_pipelines/pipelines.h
+++ b/tests/stress_tests/common/ie_pipelines/pipelines.h
@@ -15,5 +15,4 @@ std::function recreate_exenetwork(InferenceEngine::Core &ie, const std::
 std::function<void()> create_infer_request(const std::string &model, const std::string &target_device);
 std::function<void()> recreate_infer_request(InferenceEngine::ExecutableNetwork& exeNetwork);
 std::function<void()> infer_request_inference(const std::string &model, const std::string &target_device);
-std::function<void()> infer_request_inference(const std::string &model, const std::string &target_device);
-std::function<void()> reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::CNNNetwork& cnnNetwork);
+std::function<void()> reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::OutputsDataMap& output_info);
diff --git a/tests/stress_tests/common/ie_utils.cpp b/tests/stress_tests/common/ie_utils.cpp
new file mode 100644
index 0000000..9909e76
--- /dev/null
+++ b/tests/stress_tests/common/ie_utils.cpp
@@ -0,0 +1,58 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "ie_utils.h"
+
+#include
+
+using namespace InferenceEngine;
+
+/**
+ * @brief Fill InferRequest blobs with random values or image information
+ */
+void fillBlobs(InferenceEngine::InferRequest inferRequest,
+               const InferenceEngine::ConstInputsDataMap& inputsInfo,
+               const size_t& batchSize) {
+    std::vector<std::pair<size_t, size_t>> input_image_sizes;
+    for (const ConstInputsDataMap::value_type& item : inputsInfo) {
+        if (isImage(item.second))
+            input_image_sizes.push_back(getTensorHeightWidth(item.second->getTensorDesc()));
+    }
+
+    for (const ConstInputsDataMap::value_type& item : inputsInfo) {
+        Blob::Ptr inputBlob = inferRequest.GetBlob(item.first);
+        if (isImageInfo(inputBlob) && (input_image_sizes.size() == 1)) {
+            // Fill image information
+            auto image_size = input_image_sizes.at(0);
+            if (item.second->getPrecision() == InferenceEngine::Precision::FP32) {
+                fillBlobImInfo<float>(inputBlob, batchSize, image_size);
+            } else if (item.second->getPrecision() == InferenceEngine::Precision::FP16) {
+                fillBlobImInfo<short>(inputBlob, batchSize, image_size);
+            } else if (item.second->getPrecision() == InferenceEngine::Precision::I32) {
+                fillBlobImInfo<int32_t>(inputBlob, batchSize, image_size);
+            } else {
+                THROW_IE_EXCEPTION << "Input precision is not supported for image info!";
+            }
+            continue;
+        }
+        // Fill random
+        if (item.second->getPrecision() == InferenceEngine::Precision::FP32) {
+            fillBlobRandom<float>(inputBlob);
+        } else if (item.second->getPrecision() == InferenceEngine::Precision::FP16) {
+            fillBlobRandom<short>(inputBlob);
+        } else if (item.second->getPrecision() == InferenceEngine::Precision::I32) {
+            fillBlobRandom<int32_t>(inputBlob);
+        } else if (item.second->getPrecision() == InferenceEngine::Precision::U8) {
+            fillBlobRandom<uint8_t>(inputBlob);
+        } else if (item.second->getPrecision() == InferenceEngine::Precision::I8) {
+            fillBlobRandom<int8_t>(inputBlob);
+        } else if (item.second->getPrecision() == InferenceEngine::Precision::U16) {
+            fillBlobRandom<uint16_t>(inputBlob);
+        } else if (item.second->getPrecision() == InferenceEngine::Precision::I16) {
+            fillBlobRandom<int16_t>(inputBlob);
+        } else {
+            THROW_IE_EXCEPTION << "Input precision is not supported for " << item.first;
+        }
+    }
+}
\ No newline at end of file
diff --git a/tests/stress_tests/common/ie_utils.h b/tests/stress_tests/common/ie_utils.h
new file mode 100644
index 0000000..c9df6d5
--- /dev/null
+++ b/tests/stress_tests/common/ie_utils.h
@@ -0,0 +1,114 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include
+
+using namespace InferenceEngine;
+
+/**
+ * @brief Determine if InferenceEngine blob means image or not
+ */
+template <typename T>
+static bool isImage(const T &blob) {
+    auto descriptor = blob->getTensorDesc();
+    if (descriptor.getLayout() != InferenceEngine::NCHW) {
+        return false;
+    }
+    auto channels = descriptor.getDims()[1];
+    return channels == 3;
+}
+
+
+/**
+ * @brief Determine if InferenceEngine blob means image information or not
+ */
+template <typename T>
+static bool isImageInfo(const T &blob) {
+    auto descriptor = blob->getTensorDesc();
+    if (descriptor.getLayout() != InferenceEngine::NC) {
+        return false;
+    }
+    auto channels = descriptor.getDims()[1];
+    return (channels >= 2);
+}
+
+
+/**
+ * @brief Return height and width from provided InferenceEngine tensor description
+ */
+inline std::pair<size_t, size_t> getTensorHeightWidth(const InferenceEngine::TensorDesc& desc) {
+    const auto& layout = desc.getLayout();
+    const auto& dims = desc.getDims();
+    const auto& size = dims.size();
+    if ((size >= 2) &&
+        (layout == InferenceEngine::Layout::NCHW   ||
+         layout == InferenceEngine::Layout::NHWC   ||
+         layout == InferenceEngine::Layout::NCDHW  ||
+         layout == InferenceEngine::Layout::NDHWC  ||
+         layout == InferenceEngine::Layout::OIHW   ||
+         layout == InferenceEngine::Layout::GOIHW  ||
+         layout == InferenceEngine::Layout::OIDHW  ||
+         layout == InferenceEngine::Layout::GOIDHW ||
+         layout == InferenceEngine::Layout::CHW    ||
+         layout == InferenceEngine::Layout::HW)) {
+        // Regardless of layout, dimensions are stored in fixed order
+        return std::make_pair(dims.back(), dims.at(size - 2));
+    } else {
+        THROW_IE_EXCEPTION << "Tensor does not have height and width dimensions";
+    }
+}
+
+
+/**
+ * @brief Fill InferenceEngine blob with random values
+ */
+template <typename T>
+void fillBlobRandom(Blob::Ptr& inputBlob) {
+    MemoryBlob::Ptr minput = as<MemoryBlob>(inputBlob);
+    // locked memory holder should be alive all time while access to its buffer happens
+    auto minputHolder = minput->wmap();
+
+    auto inputBlobData = minputHolder.as<T *>();
+    for (size_t i = 0; i < inputBlob->size(); i++) {
+        auto rand_max = RAND_MAX;
+        inputBlobData[i] = (T) rand() / static_cast<T>(rand_max) * 10;
+    }
+}
+
+
+/**
+ * @brief Fill InferenceEngine blob with image information
+ */
+template <typename T>
+void fillBlobImInfo(Blob::Ptr& inputBlob,
+                    const size_t& batchSize,
+                    std::pair<size_t, size_t> image_size) {
+    MemoryBlob::Ptr minput = as<MemoryBlob>(inputBlob);
+    // locked memory holder should be alive all time while access to its buffer happens
+    auto minputHolder = minput->wmap();
+
+    auto inputBlobData = minputHolder.as<T *>();
+    for (size_t b = 0; b < batchSize; b++) {
+        size_t iminfoSize = inputBlob->size()/batchSize;
+        for (size_t i = 0; i < iminfoSize; i++) {
+            size_t index = b*iminfoSize + i;
+            if (0 == i)
+                inputBlobData[index] = static_cast<T>(image_size.first);
+            else if (1 == i)
+                inputBlobData[index] = static_cast<T>(image_size.second);
+            else
+                inputBlobData[index] = 1;
+        }
+    }
+}
+
+
+/**
+ * @brief Fill InferRequest blobs with random values or image information
+ */
+void fillBlobs(InferenceEngine::InferRequest inferRequest,
+               const InferenceEngine::ConstInputsDataMap& inputsInfo,
+               const size_t& batchSize);
\ No newline at end of file
diff --git a/tests/stress_tests/memcheck_tests/tests.cpp b/tests/stress_tests/memcheck_tests/tests.cpp
index b21c584..1cdfa9f 100644
--- a/tests/stress_tests/memcheck_tests/tests.cpp
+++ b/tests/stress_tests/memcheck_tests/tests.cpp
@@ -4,6 +4,7 @@
 
 #include "tests_utils.h"
 #include "../common/tests_utils.h"
+#include "../common/ie_utils.h"
 #include "../common/managers/thread_manager.h"
 #include "tests_pipelines/tests_pipelines.h"
 
@@ -75,6 +76,12 @@ TEST_P(MemCheckTestSuite, infer_request_inference) {
     CNNNetwork cnnNetwork = ie.ReadNetwork(model);
     ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, device);
    InferRequest inferRequest = exeNetwork.CreateInferRequest();
+
+    auto batchSize = cnnNetwork.getBatchSize();
+    batchSize = batchSize != 0 ? batchSize : 1;
+    const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
+    fillBlobs(inferRequest, inputsInfo, batchSize);
+
     inferRequest.Infer();
     OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
     for (auto &output : output_info)
diff --git a/tests/stress_tests/memleaks_tests/tests.cpp b/tests/stress_tests/memleaks_tests/tests.cpp
index 52e0683..599c957 100644
--- a/tests/stress_tests/memleaks_tests/tests.cpp
+++ b/tests/stress_tests/memleaks_tests/tests.cpp
@@ -3,6 +3,7 @@
 //
 
 #include "../common/tests_utils.h"
+#include "../common/ie_utils.h"
 #include "../common/managers/thread_manager.h"
 #include "tests_pipelines/tests_pipelines.h"
 
@@ -98,7 +99,14 @@ TEST_P(MemLeaksTestSuite, reinfer_request_inference) {
         CNNNetwork cnnNetwork = ie.ReadNetwork(test_params.model);
         ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, test_params.device);
         InferRequest infer_request = exeNetwork.CreateInferRequest();
-        return test_reinfer_request_inference(infer_request, cnnNetwork, test_params.model, test_params.device, test_params.numiters);
+
+        OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
+        auto batchSize = cnnNetwork.getBatchSize();
+        batchSize = batchSize != 0 ? batchSize : 1;
+        const InferenceEngine::ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
+        fillBlobs(infer_request, inputsInfo, batchSize);
+
+        return test_reinfer_request_inference(infer_request, output_info, test_params.model, test_params.device, test_params.numiters);
     };
     test_runner(test_params.numthreads, test);
 }
diff --git a/tests/stress_tests/memleaks_tests/tests_pipelines/tests_pipelines.cpp b/tests/stress_tests/memleaks_tests/tests_pipelines/tests_pipelines.cpp
index 8d39d7d..a7cd6d7 100644
--- a/tests/stress_tests/memleaks_tests/tests_pipelines/tests_pipelines.cpp
+++ b/tests/stress_tests/memleaks_tests/tests_pipelines/tests_pipelines.cpp
@@ -201,9 +201,9 @@ TestResult test_infer_request_inference(const std::string& model, const std::str
 }
 
 TestResult test_reinfer_request_inference(InferenceEngine::InferRequest& infer_request,
-                                          InferenceEngine::CNNNetwork& cnnNetwork, const std::string& model,
+                                          InferenceEngine::OutputsDataMap& output_info, const std::string& model,
                                           const std::string& target_device, const int& n) {
     log_info("Inference of InferRequest from network: \"" << model << "\" for device: \"" << target_device << "\" for " << n << " times");
-    return common_test_pipeline(reinfer_request_inference(infer_request, cnnNetwork), n);
+    return common_test_pipeline(reinfer_request_inference(infer_request, output_info), n);
 }
diff --git a/tests/stress_tests/memleaks_tests/tests_pipelines/tests_pipelines.h b/tests/stress_tests/memleaks_tests/tests_pipelines/tests_pipelines.h
index 79ac564..37a891b 100644
--- a/tests/stress_tests/memleaks_tests/tests_pipelines/tests_pipelines.h
+++ b/tests/stress_tests/memleaks_tests/tests_pipelines/tests_pipelines.h
@@ -21,5 +21,5 @@ TestResult test_recreate_exenetwork(InferenceEngine::Core &ie, const std::string
 TestResult test_create_infer_request(const std::string &model, const std::string &target_device, const int &n);
 TestResult test_recreate_infer_request(InferenceEngine::ExecutableNetwork& network, const std::string &model, const std::string &target_device, const int &n);
 TestResult test_infer_request_inference(const std::string &model, const std::string &target_device, const int &n);
-TestResult test_reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::CNNNetwork& cnnNetwork, const std::string &model, const std::string &target_device, const int &n);
+TestResult test_reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::OutputsDataMap& output_info, const std::string &model, const std::string &target_device, const int &n);
 // tests_pipelines/tests_pipelines.cpp
diff --git a/tests/stress_tests/unittests/tests_pipelines/tests_pipelines_full_pipeline.cpp b/tests/stress_tests/unittests/tests_pipelines/tests_pipelines_full_pipeline.cpp
index 51a9e3c..d3764d2 100644
--- a/tests/stress_tests/unittests/tests_pipelines/tests_pipelines_full_pipeline.cpp
+++ b/tests/stress_tests/unittests/tests_pipelines/tests_pipelines_full_pipeline.cpp
@@ -3,6 +3,7 @@
 //
 
 #include "tests_pipelines.h"
+#include "../common/ie_utils.h"
 
 #include
 
@@ -56,6 +57,12 @@ void test_load_unload_plugin_full_pipeline(const std::string &model, const std::
     reshapeCNNNetwork();
     ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
     InferRequest infer_request = exeNetwork.CreateInferRequest();
+
+    auto batchSize = cnnNetwork.getBatchSize();
+    batchSize = batchSize != 0 ? batchSize : 1;
+    const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
+    fillBlobs(infer_request, inputsInfo, batchSize);
+
     infer_request.Infer();
     OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
     for (auto &output : output_info)
@@ -82,6 +89,12 @@ void test_read_network_full_pipeline(const std::string &model, const std::string
     reshapeCNNNetwork();
     ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
     InferRequest infer_request = exeNetwork.CreateInferRequest();
+
+    auto batchSize = cnnNetwork.getBatchSize();
+    batchSize = batchSize != 0 ? batchSize : 1;
+    const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
+    fillBlobs(infer_request, inputsInfo, batchSize);
+
     infer_request.Infer();
     OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
     for (auto &output : output_info)
@@ -109,6 +122,12 @@ void test_set_input_params_full_pipeline(const std::string &model, const std::st
     reshapeCNNNetwork();
     ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
     InferRequest infer_request = exeNetwork.CreateInferRequest();
+
+    auto batchSize = cnnNetwork.getBatchSize();
+    batchSize = batchSize != 0 ? batchSize : 1;
+    const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
+    fillBlobs(infer_request, inputsInfo, batchSize);
+
     infer_request.Infer();
     OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
     for (auto &output : output_info)
@@ -151,6 +170,12 @@ void test_cnnnetwork_reshape_batch_x2_full_pipeline(const std::string &model, co
     }
     ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
     InferRequest infer_request = exeNetwork.CreateInferRequest();
+
+    auto batchSize = cnnNetwork.getBatchSize();
+    batchSize = batchSize != 0 ? batchSize : 1;
+    const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
+    fillBlobs(infer_request, inputsInfo, batchSize);
+
     infer_request.Infer();
     OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
     for (auto &output : output_info)
@@ -178,6 +203,12 @@ void test_create_exenetwork_full_pipeline(const std::string &model, const std::s
         exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
     }
     InferRequest infer_request = exeNetwork.CreateInferRequest();
+
+    auto batchSize = cnnNetwork.getBatchSize();
+    batchSize = batchSize != 0 ? batchSize : 1;
+    const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
+    fillBlobs(infer_request, inputsInfo, batchSize);
+
     infer_request.Infer();
     OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
     for (auto &output : output_info)
@@ -199,11 +230,16 @@ void test_create_infer_request_full_pipeline(const std::string &model, const std
     reshapeCNNNetwork();
     ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
     InferRequest infer_request;
+
+    auto batchSize = cnnNetwork.getBatchSize();
+    batchSize = batchSize != 0 ? batchSize : 1;
+    const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
     for (int i = 0; i < n; i++) {
         if (i == n / 2) {
             log_info("Half of the test have already passed");
         }
         infer_request = exeNetwork.CreateInferRequest();
+        fillBlobs(infer_request, inputsInfo, batchSize);
     }
     infer_request.Infer();
     OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
@@ -226,6 +262,12 @@ void test_infer_request_inference_full_pipeline(const std::string &model, const
     reshapeCNNNetwork();
     ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
     InferRequest infer_request = exeNetwork.CreateInferRequest();
+
+    auto batchSize = cnnNetwork.getBatchSize();
+    batchSize = batchSize != 0 ? batchSize : 1;
+    const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
+    fillBlobs(infer_request, inputsInfo, batchSize);
+
     for (int i = 0; i < n; i++) {
         if (i == n / 2) {
             log_info("Half of the test have already passed");
-- 
2.7.4