#include "pipelines.h"
#include "../utils.h"
+#include "../ie_utils.h"
#include <iostream>
#include <string>
CNNNetwork cnnNetwork = ie.ReadNetwork(model);
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request = exeNetwork.CreateInferRequest();
+
+ auto batchSize = cnnNetwork.getBatchSize();
+ batchSize = batchSize != 0 ? batchSize : 1;
+ const InferenceEngine::ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
+ fillBlobs(infer_request, inputsInfo, batchSize);
+
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
};
}
-std::function<void()> reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::CNNNetwork& cnnNetwork) {
+std::function<void()> reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::OutputsDataMap& output_info) {
return [&] {
infer_request.Infer();
- OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
Blob::Ptr outputBlob = infer_request.GetBlob(output.first);
};
std::function<void()> create_infer_request(const std::string &model, const std::string &target_device);
std::function<void()> recreate_infer_request(InferenceEngine::ExecutableNetwork& exeNetwork);
std::function<void()> infer_request_inference(const std::string &model, const std::string &target_device);
-std::function<void()> infer_request_inference(const std::string &model, const std::string &target_device);
-std::function<void()> reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::CNNNetwork& cnnNetwork);
+std::function<void()> reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::OutputsDataMap& output_info);
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "ie_utils.h"
+
+#include <inference_engine.hpp>
+
+using namespace InferenceEngine;
+
+/**
+ * @brief Fill InferRequest blobs with random values or image information
+ */
+void fillBlobs(InferenceEngine::InferRequest inferRequest,
+ const InferenceEngine::ConstInputsDataMap& inputsInfo,
+ const size_t& batchSize) {
+ std::vector<std::pair<size_t, size_t>> input_image_sizes;
+ for (const ConstInputsDataMap::value_type& item : inputsInfo) {
+ if (isImage(item.second))
+ input_image_sizes.push_back(getTensorHeightWidth(item.second->getTensorDesc()));
+ }
+
+ for (const ConstInputsDataMap::value_type& item : inputsInfo) {
+ Blob::Ptr inputBlob = inferRequest.GetBlob(item.first);
+ if (isImageInfo(inputBlob) && (input_image_sizes.size() == 1)) {
+ // Fill image information
+ auto image_size = input_image_sizes.at(0);
+ if (item.second->getPrecision() == InferenceEngine::Precision::FP32) {
+ fillBlobImInfo<float>(inputBlob, batchSize, image_size);
+ } else if (item.second->getPrecision() == InferenceEngine::Precision::FP16) {
+ fillBlobImInfo<short>(inputBlob, batchSize, image_size);
+ } else if (item.second->getPrecision() == InferenceEngine::Precision::I32) {
+ fillBlobImInfo<int32_t>(inputBlob, batchSize, image_size);
+ } else {
+ THROW_IE_EXCEPTION << "Input precision is not supported for image info!";
+ }
+ continue;
+ }
+ // Fill random
+ if (item.second->getPrecision() == InferenceEngine::Precision::FP32) {
+ fillBlobRandom<float>(inputBlob);
+ } else if (item.second->getPrecision() == InferenceEngine::Precision::FP16) {
+ fillBlobRandom<short>(inputBlob);
+ } else if (item.second->getPrecision() == InferenceEngine::Precision::I32) {
+ fillBlobRandom<int32_t>(inputBlob);
+ } else if (item.second->getPrecision() == InferenceEngine::Precision::U8) {
+ fillBlobRandom<uint8_t>(inputBlob);
+ } else if (item.second->getPrecision() == InferenceEngine::Precision::I8) {
+ fillBlobRandom<int8_t>(inputBlob);
+ } else if (item.second->getPrecision() == InferenceEngine::Precision::U16) {
+ fillBlobRandom<uint16_t>(inputBlob);
+ } else if (item.second->getPrecision() == InferenceEngine::Precision::I16) {
+ fillBlobRandom<int16_t>(inputBlob);
+ } else {
+ THROW_IE_EXCEPTION << "Input precision is not supported for " << item.first;
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <inference_engine.hpp>
+
+using namespace InferenceEngine;
+
+/**
+ * @brief Determine if InferenceEngine blob means image or not
+ */
+template<typename T>
+static bool isImage(const T &blob) {
+ auto descriptor = blob->getTensorDesc();
+ if (descriptor.getLayout() != InferenceEngine::NCHW) {
+ return false;
+ }
+ auto channels = descriptor.getDims()[1];
+ return channels == 3;
+}
+
+
+/**
+ * @brief Determine if InferenceEngine blob means image information or not
+ */
+template<typename T>
+static bool isImageInfo(const T &blob) {
+ auto descriptor = blob->getTensorDesc();
+ if (descriptor.getLayout() != InferenceEngine::NC) {
+ return false;
+ }
+ auto channels = descriptor.getDims()[1];
+ return (channels >= 2);
+}
+
+
+/**
+ * @brief Return height and width from provided InferenceEngine tensor description
+ */
+inline std::pair<size_t, size_t> getTensorHeightWidth(const InferenceEngine::TensorDesc& desc) {
+ const auto& layout = desc.getLayout();
+ const auto& dims = desc.getDims();
+ const auto& size = dims.size();
+ if ((size >= 2) &&
+ (layout == InferenceEngine::Layout::NCHW ||
+ layout == InferenceEngine::Layout::NHWC ||
+ layout == InferenceEngine::Layout::NCDHW ||
+ layout == InferenceEngine::Layout::NDHWC ||
+ layout == InferenceEngine::Layout::OIHW ||
+ layout == InferenceEngine::Layout::GOIHW ||
+ layout == InferenceEngine::Layout::OIDHW ||
+ layout == InferenceEngine::Layout::GOIDHW ||
+ layout == InferenceEngine::Layout::CHW ||
+ layout == InferenceEngine::Layout::HW)) {
+ // Regardless of layout, dimensions are stored in fixed order
+ return std::make_pair(dims.back(), dims.at(size - 2));
+ } else {
+ THROW_IE_EXCEPTION << "Tensor does not have height and width dimensions";
+ }
+}
+
+
+/**
+ * @brief Fill InferenceEngine blob with random values
+ */
+template<typename T>
+void fillBlobRandom(Blob::Ptr& inputBlob) {
+ MemoryBlob::Ptr minput = as<MemoryBlob>(inputBlob);
+ // locked memory holder should be alive all time while access to its buffer happens
+ auto minputHolder = minput->wmap();
+
+ auto inputBlobData = minputHolder.as<T *>();
+ for (size_t i = 0; i < inputBlob->size(); i++) {
+ auto rand_max = RAND_MAX;
+ inputBlobData[i] = (T) rand() / static_cast<T>(rand_max) * 10;
+ }
+}
+
+
+/**
+ * @brief Fill InferenceEngine blob with image information
+ */
+template<typename T>
+void fillBlobImInfo(Blob::Ptr& inputBlob,
+ const size_t& batchSize,
+ std::pair<size_t, size_t> image_size) {
+ MemoryBlob::Ptr minput = as<MemoryBlob>(inputBlob);
+ // locked memory holder should be alive all time while access to its buffer happens
+ auto minputHolder = minput->wmap();
+
+ auto inputBlobData = minputHolder.as<T *>();
+ for (size_t b = 0; b < batchSize; b++) {
+ size_t iminfoSize = inputBlob->size()/batchSize;
+ for (size_t i = 0; i < iminfoSize; i++) {
+ size_t index = b*iminfoSize + i;
+ if (0 == i)
+ inputBlobData[index] = static_cast<T>(image_size.first);
+ else if (1 == i)
+ inputBlobData[index] = static_cast<T>(image_size.second);
+ else
+ inputBlobData[index] = 1;
+ }
+ }
+}
+
+
+/**
+ * @brief Fill InferRequest blobs with random values or image information
+ */
+void fillBlobs(InferenceEngine::InferRequest inferRequest,
+ const InferenceEngine::ConstInputsDataMap& inputsInfo,
+ const size_t& batchSize);
\ No newline at end of file
#include "tests_utils.h"
#include "../common/tests_utils.h"
+#include "../common/ie_utils.h"
#include "../common/managers/thread_manager.h"
#include "tests_pipelines/tests_pipelines.h"
CNNNetwork cnnNetwork = ie.ReadNetwork(model);
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, device);
InferRequest inferRequest = exeNetwork.CreateInferRequest();
+
+ auto batchSize = cnnNetwork.getBatchSize();
+ batchSize = batchSize != 0 ? batchSize : 1;
+ const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
+ fillBlobs(inferRequest, inputsInfo, batchSize);
+
inferRequest.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
//
#include "../common/tests_utils.h"
+#include "../common/ie_utils.h"
#include "../common/managers/thread_manager.h"
#include "tests_pipelines/tests_pipelines.h"
CNNNetwork cnnNetwork = ie.ReadNetwork(test_params.model);
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, test_params.device);
InferRequest infer_request = exeNetwork.CreateInferRequest();
- return test_reinfer_request_inference(infer_request, cnnNetwork, test_params.model, test_params.device, test_params.numiters);
+
+ OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
+ auto batchSize = cnnNetwork.getBatchSize();
+ batchSize = batchSize != 0 ? batchSize : 1;
+ const InferenceEngine::ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
+ fillBlobs(infer_request, inputsInfo, batchSize);
+
+ return test_reinfer_request_inference(infer_request, output_info, test_params.model, test_params.device, test_params.numiters);
};
test_runner(test_params.numthreads, test);
}
}
// Runs repeated inference on an already-created InferRequest, reading back the
// provided output map each iteration. The OutputsDataMap is taken by reference
// instead of a CNNNetwork so the network object does not have to outlive the
// returned pipeline.
TestResult test_reinfer_request_inference(InferenceEngine::InferRequest& infer_request,
                                          InferenceEngine::OutputsDataMap& output_info, const std::string& model,
                                          const std::string& target_device, const int& n) {
    log_info("Inference of InferRequest from network: \"" << model << "\" for device: \"" << target_device << "\" for "
             << n << " times");
    return common_test_pipeline(reinfer_request_inference(infer_request, output_info), n);
}
TestResult test_create_infer_request(const std::string &model, const std::string &target_device, const int &n);
TestResult test_recreate_infer_request(InferenceEngine::ExecutableNetwork& network, const std::string &model, const std::string &target_device, const int &n);
TestResult test_infer_request_inference(const std::string &model, const std::string &target_device, const int &n);
-TestResult test_reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::CNNNetwork& cnnNetwork, const std::string &model, const std::string &target_device, const int &n);
+TestResult test_reinfer_request_inference(InferenceEngine::InferRequest& infer_request, InferenceEngine::OutputsDataMap& output_info, const std::string &model, const std::string &target_device, const int &n);
// tests_pipelines/tests_pipelines.cpp
//
#include "tests_pipelines.h"
+#include "../common/ie_utils.h"
#include <string>
reshapeCNNNetwork();
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request = exeNetwork.CreateInferRequest();
+
+ auto batchSize = cnnNetwork.getBatchSize();
+ batchSize = batchSize != 0 ? batchSize : 1;
+ const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
+ fillBlobs(infer_request, inputsInfo, batchSize);
+
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
reshapeCNNNetwork();
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request = exeNetwork.CreateInferRequest();
+
+ auto batchSize = cnnNetwork.getBatchSize();
+ batchSize = batchSize != 0 ? batchSize : 1;
+ const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
+ fillBlobs(infer_request, inputsInfo, batchSize);
+
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
reshapeCNNNetwork();
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request = exeNetwork.CreateInferRequest();
+
+ auto batchSize = cnnNetwork.getBatchSize();
+ batchSize = batchSize != 0 ? batchSize : 1;
+ const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
+ fillBlobs(infer_request, inputsInfo, batchSize);
+
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
}
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request = exeNetwork.CreateInferRequest();
+
+ auto batchSize = cnnNetwork.getBatchSize();
+ batchSize = batchSize != 0 ? batchSize : 1;
+ const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
+ fillBlobs(infer_request, inputsInfo, batchSize);
+
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
}
InferRequest infer_request = exeNetwork.CreateInferRequest();
+
+ auto batchSize = cnnNetwork.getBatchSize();
+ batchSize = batchSize != 0 ? batchSize : 1;
+ const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
+ fillBlobs(infer_request, inputsInfo, batchSize);
+
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
for (auto &output : output_info)
reshapeCNNNetwork();
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request;
+
+ auto batchSize = cnnNetwork.getBatchSize();
+ batchSize = batchSize != 0 ? batchSize : 1;
+ const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
for (int i = 0; i < n; i++) {
if (i == n / 2) {
log_info("Half of the test have already passed");
}
infer_request = exeNetwork.CreateInferRequest();
+ fillBlobs(infer_request, inputsInfo, batchSize);
}
infer_request.Infer();
OutputsDataMap output_info(cnnNetwork.getOutputsInfo());
reshapeCNNNetwork();
ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, target_device);
InferRequest infer_request = exeNetwork.CreateInferRequest();
+
+ auto batchSize = cnnNetwork.getBatchSize();
+ batchSize = batchSize != 0 ? batchSize : 1;
+ const ConstInputsDataMap inputsInfo(exeNetwork.GetInputsInfo());
+ fillBlobs(infer_request, inputsInfo, batchSize);
+
for (int i = 0; i < n; i++) {
if (i == n / 2) {
log_info("Half of the test have already passed");