using CPtr = std::shared_ptr<const CompoundBlob>;
/**
- * @brief A virtual destructor
- */
- virtual ~CompoundBlob() = default;
-
- /**
- * @brief A copy constructor
- */
- CompoundBlob(const CompoundBlob& blob);
-
- /**
- * @brief A copy assignment operator
- */
- CompoundBlob& operator=(const CompoundBlob& blob) = default;
-
- /**
- * @brief A move constructor
- */
- CompoundBlob(CompoundBlob&& blob);
-
- /**
- * @brief A move assignment operator
- */
- CompoundBlob& operator=(CompoundBlob&& blob) = default;
-
- /**
* @brief Constructs a compound blob from a vector of blobs
*
* @param blobs A vector of blobs that is copied to this object
protected:
/**
- * @brief A default constructor
+ * @brief Constructs a compound blob with specified descriptor
+ *
+ * @param tensorDesc A tensor descriptor for the compound blob
*/
- CompoundBlob();
+ explicit CompoundBlob(const TensorDesc& tensorDesc);
/**
* @brief Compound blob container for underlying blobs
using CPtr = std::shared_ptr<const NV12Blob>;
/**
- * @brief A deleted default constructor
- */
- NV12Blob() = delete;
-
- /**
* @brief Constructs NV12 blob from two planes Y and UV
*
* @param y Blob object that represents Y plane in NV12 color format
NV12Blob(Blob::Ptr&& y, Blob::Ptr&& uv);
/**
- * @brief A virtual destructor
- */
- virtual ~NV12Blob() = default;
-
- /**
- * @brief A copy constructor
- */
- NV12Blob(const NV12Blob& blob) = default;
-
- /**
- * @brief A copy assignment operator
- */
- NV12Blob& operator=(const NV12Blob& blob) = default;
-
- /**
- * @brief A move constructor
- */
- NV12Blob(NV12Blob&& blob) = default;
-
- /**
- * @brief A move assignment operator
- */
- NV12Blob& operator=(NV12Blob&& blob) = default;
-
- /**
* @brief Returns a shared pointer to Y plane
*/
virtual Blob::Ptr& y() noexcept;
using CPtr = std::shared_ptr<const I420Blob>;
/**
- * @brief A deleted default constructor
- */
- I420Blob() = delete;
-
- /**
* @brief Constructs I420 blob from three planes Y, U and V
* @param y Blob object that represents Y plane in I420 color format
* @param u Blob object that represents U plane in I420 color format
I420Blob(Blob::Ptr&& y, Blob::Ptr&& u, Blob::Ptr&& v);
/**
- * @brief A virtual destructor. It is made out of line for RTTI to
- * work correctly on some platforms.
- */
- virtual ~I420Blob();
-
- /**
- * @brief A copy constructor
- */
- I420Blob(const I420Blob& blob) = default;
-
- /**
- * @brief A copy assignment operator
- */
- I420Blob& operator=(const I420Blob& blob) = default;
-
- /**
- * @brief A move constructor
- */
- I420Blob(I420Blob&& blob) = default;
-
- /**
- * @brief A move assignment operator
- */
- I420Blob& operator=(I420Blob&& blob) = default;
-
- /**
* @brief Returns a reference to shared pointer to Y plane
*
* Please note that reference to Blob::Ptr is returned. I.e. the reference will be valid until
Blob::Ptr createROI(const ROI& roi) const override;
};
+
+
+/**
+ * @brief This class represents a blob that contains other blobs - one per batch
+ * @details Plugin which supports BatchedBlob input should report BATCHED_BLOB
+ * in the OPTIMIZATION_CAPABILITIES metric.
+ * @note The batch size of the resulting blob equals blobs.size() passed to the constructor.
+ */
+class INFERENCE_ENGINE_API_CLASS(BatchedBlob) : public CompoundBlob {
+ public:
+    /**
+     * @brief A smart pointer to the BatchedBlob object
+     */
+    using Ptr = std::shared_ptr<BatchedBlob>;
+
+    /**
+     * @brief A smart pointer to the const BatchedBlob object
+     */
+    using CPtr = std::shared_ptr<const BatchedBlob>;
+
+    /**
+     * @brief Constructs a batched blob from a vector of blobs
+     * @details All passed blobs should meet following requirements:
+     * - all blobs have equal tensor descriptors,
+     * - blobs layouts should be one of: NCHW, NHWC, NCDHW, NDHWC, NC, CN, C, CHW
+     * - batch dimensions should be equal to 1 or not defined (C, CHW).
+     * Resulting blob's tensor descriptor is constructed using tensor descriptors
+     * of passed blobs by setting batch dimension to blobs.size()
+     *
+     * @param blobs A vector of blobs that is copied to this object
+     */
+    explicit BatchedBlob(const std::vector<Blob::Ptr>& blobs);
+
+    /**
+     * @brief Constructs a batched blob from a vector of blobs
+     * @details All passed blobs should meet following requirements:
+     * - all blobs have equal tensor descriptors,
+     * - blobs layouts should be one of: NCHW, NHWC, NCDHW, NDHWC, NC, CN, C, CHW
+     * - batch dimensions should be equal to 1 or not defined (C, CHW).
+     * Resulting blob's tensor descriptor is constructed using tensor descriptors
+     * of passed blobs by setting batch dimension to blobs.size()
+     *
+     * @param blobs A vector of blobs that is moved to this object
+     */
+    explicit BatchedBlob(std::vector<Blob::Ptr>&& blobs);
+};
} // namespace InferenceEngine
* - "INT8" - device can support models with INT8 layers
* - "BIN" - device can support models with BIN layers
* - "WINOGRAD" - device can support models where convolution implemented via Winograd transformations
+ * - "BATCHED_BLOB" - device can support BatchedBlob
*/
DECLARE_METRIC_KEY(OPTIMIZATION_CAPABILITIES, std::vector<std::string>);
DECLARE_METRIC_VALUE(INT8);
DECLARE_METRIC_VALUE(BIN);
DECLARE_METRIC_VALUE(WINOGRAD);
+DECLARE_METRIC_VALUE(BATCHED_BLOB);
/**
* @brief Metric to provide information about a range for streams on platforms where streams are supported.
#include <samples/common.hpp>
#include <samples/classification_results.h>
+#include <samples/slog.hpp>
+
+#include <sys/stat.h>
+#ifdef _WIN32
+#include <os/windows/w_dirent.h>
+#else
+#include <dirent.h>
+#endif
+
using namespace InferenceEngine;
return {width, height};
}
+// Comparing to samples/args_helper.hpp, this version filters files by ".yuv" extension
/**
- * \brief Read image data from file
- * @return buffer containing the image data
- */
-std::unique_ptr<unsigned char[]> readImageDataFromFile(const std::string& image_path, size_t size) {
-    std::ifstream file(image_path, std::ios_base::ate | std::ios_base::binary);
-    if (!file.good() || !file.is_open()) {
-        std::stringstream err;
-        err << "Cannot access input image file. File path: " << image_path;
-        throw std::runtime_error(err.str());
+* @brief Collects input file paths: the path itself, or all "*.yuv" files when path is a directory
+* @param path path to a file or a directory to be checked for existence
+* @return vector of verified input file paths; empty if the path cannot be opened
+*/
+std::vector<std::string> readInputFileNames(const std::string& path) {
+    struct stat sb;
+    if (stat(path.c_str(), &sb) != 0) {
+        slog::warn << "File " << path << " cannot be opened!" << slog::endl;
+        return {};
    }
-    const size_t file_size = file.tellg();
-    if (file_size < size) {
-        std::stringstream err;
-        err << "Invalid read size provided. File size: " << file_size << ", to read: " << size;
-        throw std::runtime_error(err.str());
+    std::vector<std::string> files;
+
+    if (S_ISDIR(sb.st_mode)) {
+        DIR *dp = opendir(path.c_str());
+        if (dp == nullptr) {
+            slog::warn << "Directory " << path << " cannot be opened!" << slog::endl;
+            return {};
+        }
+
+        for (struct dirent* ep = readdir(dp); ep != nullptr; ep = readdir(dp)) {
+            std::string fileName = ep->d_name;
+            // Skip special entries, then check the ".yuv" extension. The length
+            // guard is required: substr(size() - 4) on a name shorter than 4
+            // characters would underflow the unsigned index and throw
+            // std::out_of_range.
+            if (fileName == "." || fileName == "..") continue;
+            if (fileName.size() < 4 || fileName.compare(fileName.size() - 4, 4, ".yuv") != 0) continue;
+            files.push_back(path + "/" + ep->d_name);
+        }
+        closedir(dp);
+    } else {
+        files.push_back(path);
+    }
+
+    if (files.size() < 20) {
+        slog::info << "Files were added: " << files.size() << slog::endl;
+        // Iterate by const reference — avoids copying each path string.
+        for (const std::string& filePath : files) {
+            slog::info << "    " << filePath << slog::endl;
+        }
+    } else {
+        slog::info << "Files were added: " << files.size() << ". Too many to display each of them." << slog::endl;
    }
-    file.seekg(0);
-    std::unique_ptr<unsigned char[]> data(new unsigned char[size]);
-    file.read(reinterpret_cast<char*>(data.get()), size);
-    return data;
+    return files;
+}
+
+// Owning byte buffer for one raw image.
+using UString = std::basic_string<uint8_t>;
+
+/**
+ * \brief Read image data from file, exactly `size` bytes per file
+ * @return buffers containing the images data
+ * @throws std::runtime_error if a file cannot be opened or is smaller than `size`
+ */
+std::vector<UString> readImagesDataFromFiles(const std::vector<std::string>& files, size_t size) {
+    std::vector<UString> result;
+
+    for (const auto& image_path : files) {
+        std::ifstream file(image_path, std::ios_base::ate | std::ios_base::binary);
+        if (!file.good() || !file.is_open()) {
+            std::stringstream err;
+            err << "Cannot access input image file. File path: " << image_path;
+            throw std::runtime_error(err.str());
+        }
+
+        // Opened with std::ios_base::ate, so tellg() reports the file size.
+        const size_t file_size = file.tellg();
+        if (file_size < size) {
+            std::stringstream err;
+            err << "Invalid read size provided. File size: " << file_size << ", to read: " << size;
+            throw std::runtime_error(err.str());
+        }
+        file.seekg(0);
+
+        // Read exactly `size` bytes; any trailing bytes in the file are ignored.
+        UString data(size, 0);
+        file.read(reinterpret_cast<char*>(&data[0]), size);
+        result.push_back(std::move(data));
+    }
+    return result;
}
/**
network.reshape(inputShapes);
}
+/**
+ * \brief Wraps every raw NV12 buffer into an NV12Blob (Y and UV plane views over the same memory)
+ * @note The produced blobs reference memory owned by `data`; the buffers must outlive them.
+ */
+std::vector<Blob::Ptr> readInputBlobs(std::vector<UString>& data, size_t width, size_t height) {
+    // Each buffer holds one NV12 frame: width * height bytes of Y plane followed
+    // by (width * height / 2) bytes of interleaved UV (NV12 height = 3/2 * logical height)
+
+    // Create tensor descriptors for Y and UV blobs
+    const InferenceEngine::TensorDesc y_plane_desc(InferenceEngine::Precision::U8, {1, 1, height, width},
+        InferenceEngine::Layout::NHWC);
+    const InferenceEngine::TensorDesc uv_plane_desc(InferenceEngine::Precision::U8, {1, 2, height / 2, width / 2},
+        InferenceEngine::Layout::NHWC);
+    const size_t offset = width * height;
+
+    std::vector<Blob::Ptr> blobs;
+    for (auto& buf : data) {
+        // --------------------------- Create a blob to hold the NV12 input data -------------------------------
+        auto ptr = &buf[0];
+
+        // Create blob for Y plane from raw data
+        Blob::Ptr y_blob = make_shared_blob<uint8_t>(y_plane_desc, ptr);
+        // Create blob for UV plane from raw data
+        Blob::Ptr uv_blob = make_shared_blob<uint8_t>(uv_plane_desc, ptr + offset);
+        // Create NV12Blob from Y and UV blobs
+        blobs.emplace_back(make_shared_blob<NV12Blob>(y_blob, uv_blob));
+    }
+
+    return blobs;
+}
+
+// Returns true if the device reports the BATCHED_BLOB value in its
+// OPTIMIZATION_CAPABILITIES metric, i.e. it accepts BatchedBlob inputs.
+bool isBatchedBlobSupported(const Core& ie, const std::string& device_name) {
+    const std::vector<std::string> supported_metrics =
+        ie.GetMetric(device_name, METRIC_KEY(SUPPORTED_METRICS));
+
+    // The capabilities metric itself may be unsupported by the device.
+    if (std::find(supported_metrics.begin(), supported_metrics.end(),
+                  METRIC_KEY(OPTIMIZATION_CAPABILITIES)) ==
+        supported_metrics.end()) {
+        return false;
+    }
+
+    const std::vector<std::string> optimization_caps =
+        ie.GetMetric(device_name, METRIC_KEY(OPTIMIZATION_CAPABILITIES));
+
+    return std::find(optimization_caps.begin(), optimization_caps.end(),
+                     METRIC_VALUE(BATCHED_BLOB)) != optimization_caps.end();
+}
+
/**
* @brief The entry point of the Inference Engine sample application
*/
try {
// ------------------------------ Parsing and validatiing input arguments------------------------------
if (argc != 5) {
- std::cout << "Usage : ./hello_nv12_input_classification <path_to_model> <path_to_image> <image_size> <device_name>"
+ std::cout << "Usage : " << argv[0] << " <path_to_model> <path_to_image(s)> <image_size> <device_name>"
<< std::endl;
return EXIT_FAILURE;
}
const std::string device_name{argv[4]};
// -----------------------------------------------------------------------------------------------------
+ // --------------------------- 0. Read image names -----------------------------------------------------
+ auto image_names = readInputFileNames(input_image_path);
+
+ if (image_names.empty()) {
+ throw std::invalid_argument("images not found");
+ }
+ // -----------------------------------------------------------------------------------------------------
+
// --------------------------- 1. Load inference engine ------------------------------------------------
Core ie;
// -----------------------------------------------------------------------------------------------------
// 2. Read a model in OpenVINO Intermediate Representation (.xml and .bin files) or ONNX (.onnx file) format
CNNNetwork network = ie.ReadNetwork(input_model);
- setBatchSize(network, 1);
+ // -----------------------------------------------------------------------------------------------------
+
+ // --------------------------- 2. Set model batch size -------------------------------------------------
+ size_t batch_size = isBatchedBlobSupported(ie, device_name) ? image_names.size() : 1;
+ std::cout << "Setting network batch size to " << batch_size << std::endl;
+ setBatchSize(network, batch_size);
// -----------------------------------------------------------------------------------------------------
// --------------------------- 3. Configure input and output -------------------------------------------
// -----------------------------------------------------------------------------------------------------
// --------------------------- 6. Prepare input --------------------------------------------------------
- // read image with size converted to NV12 data size: height(NV12) = 3 / 2 * logical height
- auto image_buf = readImageDataFromFile(input_image_path, input_width * (input_height * 3 / 2));
+ auto image_bufs = readImagesDataFromFiles(image_names, input_width * (input_height * 3 / 2));
- // --------------------------- Create a blob to hold the NV12 input data -------------------------------
- // Create tensor descriptors for Y and UV blobs
- InferenceEngine::TensorDesc y_plane_desc(InferenceEngine::Precision::U8,
- {1, 1, input_height, input_width}, InferenceEngine::Layout::NHWC);
- InferenceEngine::TensorDesc uv_plane_desc(InferenceEngine::Precision::U8,
- {1, 2, input_height / 2, input_width / 2}, InferenceEngine::Layout::NHWC);
- const size_t offset = input_width * input_height;
+ auto inputs = readInputBlobs(image_bufs, input_width, input_height);
- // Create blob for Y plane from raw data
- Blob::Ptr y_blob = make_shared_blob<uint8_t>(y_plane_desc, image_buf.get());
- // Create blob for UV plane from raw data
- Blob::Ptr uv_blob = make_shared_blob<uint8_t>(uv_plane_desc, image_buf.get() + offset);
- // Create NV12Blob from Y and UV blobs
- Blob::Ptr input = make_shared_blob<NV12Blob>(y_blob, uv_blob);
+ // If batch_size > 1 => batched blob supported => replace all inputs by a BatchedBlob
+ if (batch_size > 1) {
+ assert(batch_size == inputs.size());
+ std::cout << "Infer using BatchedBlob of NV12 images." << std::endl;
+ Blob::Ptr batched_input = make_shared_blob<BatchedBlob>(inputs);
+ inputs = {batched_input};
+ }
- // --------------------------- Set the input blob to the InferRequest ----------------------------------
- infer_request.SetBlob(input_name, input);
- // -----------------------------------------------------------------------------------------------------
+ /** Read labels from file (e.x. AlexNet.labels) **/
+ std::string labelFileName = fileNameNoExt(input_model) + ".labels";
+ std::vector<std::string> labels;
- // --------------------------- 7. Do inference ---------------------------------------------------------
- /* Running the request synchronously */
- infer_request.Infer();
- // -----------------------------------------------------------------------------------------------------
+ std::ifstream inputFile;
+ inputFile.open(labelFileName, std::ios::in);
+ if (inputFile.is_open()) {
+ std::string strLine;
+ while (std::getline(inputFile, strLine)) {
+ trim(strLine);
+ labels.push_back(strLine);
+ }
+ }
- // --------------------------- 8. Process output -------------------------------------------------------
- Blob::Ptr output = infer_request.GetBlob(output_name);
+ for (size_t i = 0; i < inputs.size(); i++) {
+ const auto& input = inputs[i];
+ // --------------------------- Set the input blob to the InferRequest ------------------------------
+ infer_request.SetBlob(input_name, input);
+ // -------------------------------------------------------------------------------------------------
- // Print classification results
- ClassificationResult classificationResult(output, {input_image_path});
- classificationResult.print();
- // -----------------------------------------------------------------------------------------------------
+ // --------------------------- 7. Do inference -----------------------------------------------------
+ /* Running the request synchronously */
+ infer_request.Infer();
+ // -------------------------------------------------------------------------------------------------
+
+ // --------------------------- 8. Process output ---------------------------------------------------
+ Blob::Ptr output = infer_request.GetBlob(output_name);
+
+ // Print classification results
+ const auto names_offset = image_names.begin() + batch_size * i;
+ std::vector<std::string> names(names_offset, names_offset + batch_size);
+
+ ClassificationResult classificationResult(output, names, batch_size, 10, labels);
+ classificationResult.print();
+ // -------------------------------------------------------------------------------------------------
+ }
} catch (const std::exception & ex) {
std::cerr << ex.what() << std::endl;
return EXIT_FAILURE;
namespace {
-void verifyNV12BlobInput(const Blob::Ptr& y, const Blob::Ptr& uv) {
+TensorDesc verifyNV12BlobInput(const Blob::Ptr& y, const Blob::Ptr& uv) {
// Y and UV must be valid pointers
if (y == nullptr || uv == nullptr) {
THROW_IE_EXCEPTION << "Y and UV planes must be valid Blob objects";
THROW_IE_EXCEPTION << "The width of the Y plane must be equal to (2 * the width of the UV plane), actual: "
<< yDims[3] << "(Y plane) and " << uvDims[3] << "(UV plane)";
}
+
+ return {Precision::U8, {}, Layout::NCHW};
}
-void verifyI420BlobInput(const Blob::Ptr& y, const Blob::Ptr& u, const Blob::Ptr& v) {
+TensorDesc verifyI420BlobInput(const Blob::Ptr& y, const Blob::Ptr& u, const Blob::Ptr& v) {
// Y and UV must be valid pointers
if (y == nullptr || u == nullptr || v == nullptr) {
THROW_IE_EXCEPTION << "Y, U and V planes must be valid Blob objects";
THROW_IE_EXCEPTION << "The width of the Y plane must be equal to (2 * the width of the UV plane), actual: "
<< yDims[3] << "(Y plane) and " << vDims[3] << "(V plane)";
}
+
+ return {Precision::U8, {}, Layout::NCHW};
}
-} // anonymous namespace
+// Derives a TensorDesc used to compare sub-blobs for equality.
+// For NV12/I420 blobs the Y plane descriptor is taken and the channel
+// dimension is widened by 2 to account for the chroma data
+// (presumably dims[1] is the channel axis of the Y plane's desc —
+// TODO(review): confirm).
+TensorDesc getBlobTensorDesc(const Blob::Ptr& blob) {
+    if (auto nv12 = dynamic_cast<NV12Blob*>(blob.get())) {
+        auto yDesc = nv12->y()->getTensorDesc();
+        yDesc.getDims()[1] += 2;
+        return yDesc;
+    }
-CompoundBlob::CompoundBlob(): Blob(TensorDesc(Precision::UNSPECIFIED, {}, Layout::ANY)) {}
+    if (auto i420 = dynamic_cast<I420Blob*>(blob.get())) {
+        auto yDesc = i420->y()->getTensorDesc();
+        yDesc.getDims()[1] += 2;
+        return yDesc;
+    }
-CompoundBlob::CompoundBlob(const CompoundBlob& blob): CompoundBlob() {
-    this->_blobs = blob._blobs;
+    return blob->getTensorDesc();
}
-CompoundBlob::CompoundBlob(CompoundBlob&& blob): CompoundBlob() {
- this->_blobs = std::move(blob._blobs);
+// Validates sub-blobs for BatchedBlob construction and returns the batched
+// blob's TensorDesc (precision/dims of a sub-blob with batch set to blobs.size()).
+TensorDesc verifyBatchedBlobInput(const std::vector<Blob::Ptr>& blobs) {
+    // verify invariants
+    if (blobs.empty()) {
+        THROW_IE_EXCEPTION << "BatchedBlob cannot be created from empty vector of Blob, Please, make sure vector contains at least one Blob";
+    }
+
+    // Cannot create a compound blob from nullptr Blob objects
+    if (std::any_of(blobs.begin(), blobs.end(), [](const Blob::Ptr& blob) {
+            return blob == nullptr;
+        })) {
+        THROW_IE_EXCEPTION << "Cannot create a compound blob from nullptr Blob objects";
+    }
+
+    const auto subBlobDesc = getBlobTensorDesc(blobs[0]);
+
+    // All sub-blobs must share a single descriptor (precision, dims, layout).
+    if (std::any_of(blobs.begin(), blobs.end(),
+                    [&subBlobDesc](const Blob::Ptr& blob) {
+                        return getBlobTensorDesc(blob) != subBlobDesc;
+                    })) {
+        THROW_IE_EXCEPTION << "All blobs tensors should be equal";
+    }
+
+    auto subBlobLayout = subBlobDesc.getLayout();
+
+    // Derive the batched layout/dims from the common sub-blob descriptor.
+    auto blobLayout = Layout::ANY;
+    SizeVector blobDims = subBlobDesc.getDims();
+    switch (subBlobLayout) {
+    case NCHW:
+    case NHWC:
+    case NCDHW:
+    case NDHWC:
+    case NC:
+    case CN:
+        blobLayout = subBlobLayout;
+        // NOTE(review): blobDims[0] is assumed to be the batch dimension for all
+        // of these layouts — confirm this holds for CN, where N is the second axis.
+        if (blobDims[0] != 1) {
+            THROW_IE_EXCEPTION << "All blobs should be batch 1";
+        }
+        blobDims[0] = blobs.size();
+        break;
+    case C:
+        // Batch-less layouts gain a leading batch dimension.
+        blobLayout = NC;
+        blobDims.insert(blobDims.begin(), blobs.size());
+        break;
+    case CHW:
+        blobLayout = NCHW;
+        blobDims.insert(blobDims.begin(), blobs.size());
+        break;
+    default:
+        THROW_IE_EXCEPTION << "Unsupported sub-blobs layout - to be one of: [NCHW, NHWC, NCDHW, NDHWC, NC, CN, C, CHW]";
+    }
+
+    return TensorDesc{subBlobDesc.getPrecision(), blobDims, blobLayout};
}
-CompoundBlob::CompoundBlob(const std::vector<Blob::Ptr>& blobs): CompoundBlob() {
+} // anonymous namespace
+
+CompoundBlob::CompoundBlob(const TensorDesc& tensorDesc): Blob(tensorDesc) {}
+
+CompoundBlob::CompoundBlob(const std::vector<Blob::Ptr>& blobs): CompoundBlob(TensorDesc{}) {
// Cannot create a compound blob from nullptr Blob objects
if (std::any_of(blobs.begin(), blobs.end(), [](const Blob::Ptr& blob) {
return blob == nullptr;
this->_blobs = blobs;
}
-CompoundBlob::CompoundBlob(std::vector<Blob::Ptr>&& blobs): CompoundBlob() {
+CompoundBlob::CompoundBlob(std::vector<Blob::Ptr>&& blobs): CompoundBlob(TensorDesc{}) {
// Cannot create a compound blob from nullptr Blob objects
if (std::any_of(blobs.begin(), blobs.end(), [](const Blob::Ptr& blob) {
return blob == nullptr;
return nullptr;
}
-NV12Blob::NV12Blob(const Blob::Ptr& y, const Blob::Ptr& uv) {
-    // verify data is correct
-    verifyNV12BlobInput(y, uv);
-    // set blobs
-    _blobs.emplace_back(y);
-    _blobs.emplace_back(uv);
-    tensorDesc = TensorDesc(Precision::U8, {}, Layout::NCHW);
+// Validation runs first; verifyNV12BlobInput() now also produces the
+// TensorDesc that is forwarded to the CompoundBlob base constructor.
+NV12Blob::NV12Blob(const Blob::Ptr& y, const Blob::Ptr& uv)
+    : CompoundBlob(verifyNV12BlobInput(y, uv)) {
+    this->_blobs = {y, uv};
}
-NV12Blob::NV12Blob(Blob::Ptr&& y, Blob::Ptr&& uv) {
-    // verify data is correct
-    verifyNV12BlobInput(y, uv);
-    // set blobs
-    _blobs.emplace_back(std::move(y));
-    _blobs.emplace_back(std::move(uv));
-    tensorDesc = TensorDesc(Precision::U8, {}, Layout::NCHW);
+// Move overload: verification takes the planes by const reference, so it is
+// safe to move them into _blobs afterwards.
+NV12Blob::NV12Blob(Blob::Ptr&& y, Blob::Ptr&& uv)
+    : CompoundBlob(verifyNV12BlobInput(y, uv)) {
+    this->_blobs = {std::move(y), std::move(uv)};
}
Blob::Ptr& NV12Blob::y() noexcept {
return std::make_shared<NV12Blob>(yRoiBlob, uvRoiBlob);
}
-I420Blob::I420Blob(const Blob::Ptr& y, const Blob::Ptr& u, const Blob::Ptr& v) {
-    // verify data is correct
-    verifyI420BlobInput(y, u, v);
-    // set blobs
-    _blobs.emplace_back(y);
-    _blobs.emplace_back(u);
-    _blobs.emplace_back(v);
-    tensorDesc = TensorDesc(Precision::U8, {}, Layout::NCHW);
+// Validation runs first; verifyI420BlobInput() now also produces the
+// TensorDesc that is forwarded to the CompoundBlob base constructor.
+I420Blob::I420Blob(const Blob::Ptr& y, const Blob::Ptr& u, const Blob::Ptr& v)
+    : CompoundBlob(verifyI420BlobInput(y, u, v)) {
+    this->_blobs = {y, u, v};
}
-I420Blob::I420Blob(Blob::Ptr&& y, Blob::Ptr&& u, Blob::Ptr&& v) {
-    // verify data is correct
-    verifyI420BlobInput(y, u, v);
-    // set blobs
-    _blobs.emplace_back(std::move(y));
-    _blobs.emplace_back(std::move(u));
-    _blobs.emplace_back(std::move(v));
-    tensorDesc = TensorDesc(Precision::U8, {}, Layout::NCHW);
+// Move overload: verification takes the planes by const reference, so it is
+// safe to move them into _blobs afterwards.
+I420Blob::I420Blob(Blob::Ptr&& y, Blob::Ptr&& u, Blob::Ptr&& v)
+    : CompoundBlob(verifyI420BlobInput(y, u, v)) {
+    this->_blobs = {std::move(y), std::move(u), std::move(v)};
}
+// NOTE(review): the out-of-line ~I420Blob() removed below was originally kept
+// "for RTTI to work correctly on some platforms"; verify dynamic_cast on
+// I420Blob still works across shared-library boundaries after this removal.
-I420Blob::~I420Blob() {}
-
Blob::Ptr& I420Blob::y() noexcept {
// NOTE: Y plane is a memory blob, which is checked in the constructor
return _blobs[0];
return std::make_shared<I420Blob>(yRoiBlob, uRoiBlob, vRoiBlob);
}
+BatchedBlob::BatchedBlob(const std::vector<Blob::Ptr>& blobs)
+    : CompoundBlob(verifyBatchedBlobInput(blobs)) {
+    this->_blobs = blobs;
+}
+
+// Move overload: verifyBatchedBlobInput() takes the vector by const reference,
+// so validating before the move below is safe.
+BatchedBlob::BatchedBlob(std::vector<Blob::Ptr>&& blobs)
+    : CompoundBlob(verifyBatchedBlobInput(blobs)) {
+    this->_blobs = std::move(blobs);
+}
+
} // namespace InferenceEngine
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior/set_blob_of_kind.hpp"
+#include "common_test_utils/test_constants.hpp"
+#include "multi-device/multi_device_config.hpp"
+
+using namespace BehaviorTestsDefinitions;
+using namespace InferenceEngine;
+
+// Blob kinds exercised by SetBlobOfKindTest. BatchOfSimple is expected to be
+// accepted only by plugins reporting the BATCHED_BLOB capability.
+const std::vector<FuncTestUtils::BlobKind> blobKinds = {
+    FuncTestUtils::BlobKind::Simple,
+    FuncTestUtils::BlobKind::Compound,
+    FuncTestUtils::BlobKind::BatchOfSimple
+};
+
+const SetBlobOfKindConfig cpuConfig{}; //nothing special
+// MULTI and HETERO are configured to fall back to CPU.
+const SetBlobOfKindConfig multiConfig{{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}};
+const SetBlobOfKindConfig heteroConfig{{ "TARGET_FALLBACK", CommonTestUtils::DEVICE_CPU }};
+
+INSTANTIATE_TEST_CASE_P(smoke_SetBlobOfKindCPU, SetBlobOfKindTest,
+                        ::testing::Combine(::testing::ValuesIn(blobKinds),
+                                           ::testing::Values(CommonTestUtils::DEVICE_CPU),
+                                           ::testing::Values(cpuConfig)),
+                        SetBlobOfKindTest::getTestCaseName);
+
+
+INSTANTIATE_TEST_CASE_P(smoke_SetBlobOfKindMULTI, SetBlobOfKindTest,
+                        ::testing::Combine(::testing::ValuesIn(blobKinds),
+                                           ::testing::Values(CommonTestUtils::DEVICE_MULTI),
+                                           ::testing::Values(multiConfig)),
+                        SetBlobOfKindTest::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(smoke_SetBlobOfKindHETERO, SetBlobOfKindTest,
+                        ::testing::Combine(::testing::ValuesIn(blobKinds),
+                                           ::testing::Values(CommonTestUtils::DEVICE_HETERO),
+                                           ::testing::Values(heteroConfig)),
+                        SetBlobOfKindTest::getTestCaseName);
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior/set_blob_of_kind.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace BehaviorTestsDefinitions;
+using namespace InferenceEngine;
+
+// Blob kinds exercised by SetBlobOfKindTest. BatchOfSimple is expected to be
+// accepted only by plugins reporting the BATCHED_BLOB capability.
+const std::vector<FuncTestUtils::BlobKind> blobKinds = {
+    FuncTestUtils::BlobKind::Simple,
+    FuncTestUtils::BlobKind::Compound,
+    FuncTestUtils::BlobKind::BatchOfSimple
+};
+
+const SetBlobOfKindConfig gpuConfig{}; //nothing special
+
+INSTANTIATE_TEST_CASE_P(smoke_SetBlobOfKindGPU, SetBlobOfKindTest,
+                        ::testing::Combine(::testing::ValuesIn(blobKinds),
+                                           ::testing::Values(CommonTestUtils::DEVICE_GPU),
+                                           ::testing::Values(gpuConfig)),
+                        SetBlobOfKindTest::getTestCaseName);
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "common_test_utils/common_utils.hpp"
+
+namespace BehaviorTestsDefinitions {
+
+// Configuration map type deduced from LayerTestsCommon::GetConfiguration().
+// The null-pointer cast is never dereferenced: decltype is an unevaluated context.
+using SetBlobOfKindConfig = std::remove_reference<decltype(((LayerTestsUtils::LayerTestsCommon*)0)->GetConfiguration())>::type;
+
+using SetBlobOfKindParams = std::tuple<FuncTestUtils::BlobKind, // The kind of blob
+                                       std::string,             // Device name
+                                       SetBlobOfKindConfig>;    // configuration
+
+// Parameterized behavior test: feeds a blob of the given kind to SetBlob and
+// checks it is either inferred successfully or rejected, per plugin capability.
+class SetBlobOfKindTest : public testing::WithParamInterface<SetBlobOfKindParams>, virtual public LayerTestsUtils::LayerTestsCommon {
+public:
+    // Creates an input blob of the parameterized kind for the given input info.
+    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override;
+    void Run() override;
+    static std::string getTestCaseName(testing::TestParamInfo<SetBlobOfKindParams> obj);
+    // Asserts that SetBlob rejects the generated blob for every network input.
+    void ExpectSetBlobThrow();
+
+protected:
+    void SetUp() override;
+
+private:
+    FuncTestUtils::BlobKind blobKind; // blob kind under test, set in SetUp()
+};
+
+} // namespace BehaviorTestsDefinitions
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "behavior/set_blob_of_kind.hpp"
+
+#include <single_layer_tests/cum_sum.hpp>
+#include <functional_test_utils/plugin_config.hpp>
+
+#include <ie_compound_blob.h>
+
+using namespace InferenceEngine;
+
+namespace BehaviorTestsDefinitions {
+
+// Builds the parameterized test name suffix from the test parameters.
+std::string SetBlobOfKindTest::getTestCaseName(testing::TestParamInfo<SetBlobOfKindParams> obj) {
+    FuncTestUtils::BlobKind blobKind;
+    std::string targetDevice;
+    std::map<std::string, std::string> configuration;
+    std::tie(blobKind, targetDevice, configuration) = obj.param;
+
+    std::ostringstream result;
+    // gtest test names may contain only alphanumeric characters and '_';
+    // join the parts with '_' instead of a space so the generated name
+    // stays well-formed and filterable via --gtest_filter.
+    result << "Kind=" << blobKind;
+    result << "_Device=" << targetDevice;
+    return result.str();
+}
+
+namespace {
+
+// NOTE(review): duplicates isBatchedBlobSupported() from the hello_nv12 sample;
+// consider moving the helper to a shared test utility.
+bool isBatchedBlobSupported(const std::shared_ptr<Core>& core, const LayerTestsUtils::TargetDevice& targetDevice) {
+    const std::vector<std::string> supported_metrics = core->GetMetric(targetDevice, METRIC_KEY(SUPPORTED_METRICS));
+
+    // The capabilities metric itself may be unsupported by the device.
+    if (std::find(supported_metrics.begin(), supported_metrics.end(),
+                  METRIC_KEY(OPTIMIZATION_CAPABILITIES)) == supported_metrics.end()) {
+        return false;
+    }
+
+    const std::vector<std::string> optimization_caps =
+        core->GetMetric(targetDevice, METRIC_KEY(OPTIMIZATION_CAPABILITIES));
+
+    return std::find(optimization_caps.begin(), optimization_caps.end(),
+                     METRIC_VALUE(BATCHED_BLOB)) != optimization_caps.end();
+}
+
+// Whether the plugin is expected to accept a blob of the given kind:
+// plain blobs always, CompoundBlob never, BatchedBlob only when the device
+// reports the BATCHED_BLOB capability.
+bool isBlobKindSupported(const std::shared_ptr<Core>& core,
+                         const LayerTestsUtils::TargetDevice& targetDevice,
+                         FuncTestUtils::BlobKind blobKind) {
+    switch (blobKind) {
+    case FuncTestUtils::BlobKind::Simple:
+        return true;
+    case FuncTestUtils::BlobKind::Compound:
+        return false;
+    case FuncTestUtils::BlobKind::BatchOfSimple:
+        return isBatchedBlobSupported(core, targetDevice);
+    default:
+        THROW_IE_EXCEPTION << "Test does not support the blob kind";
+    }
+}
+
+} // namespace
+
+Blob::Ptr SetBlobOfKindTest::GenerateInput(const InferenceEngine::InputInfo& info) const {
+    return makeBlobOfKind(info.getTensorDesc(), blobKind);
+}
+
+void SetBlobOfKindTest::Run() {
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+
+    LoadNetwork();
+
+    // Supported kinds must infer successfully; unsupported ones must be
+    // rejected by SetBlob with an exception.
+    if (isBlobKindSupported(core, targetDevice, blobKind)) {
+        Infer();
+    } else {
+        ExpectSetBlobThrow();
+    }
+}
+
+void SetBlobOfKindTest::ExpectSetBlobThrow() {
+    inferRequest = executableNetwork.CreateInferRequest();
+
+    for (const auto &input : executableNetwork.GetInputsInfo()) {
+        const auto &info = input.second;
+        auto blob = GenerateInput(*info);
+        EXPECT_THROW(inferRequest.SetBlob(info->name(), blob),
+                     InferenceEngine::details::InferenceEngineException);
+    }
+}
+
+void SetBlobOfKindTest::SetUp() {
+    // Batch of 4 so that BatchOfSimple produces a multi-sub-blob BatchedBlob.
+    SizeVector IS{4, 3, 6, 8};
+    std::tie(blobKind, targetDevice, configuration) = this->GetParam();
+
+    // Build a minimal single-op (CumSum) network over the last axis.
+    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(Precision::FP32);
+    auto params = ngraph::builder::makeParams(ngPrc, {IS});
+    auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
+    auto axisNode = std::make_shared<ngraph::op::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{}, std::vector<int64_t>{-1})->output(0);
+    auto cumSum = std::dynamic_pointer_cast<ngraph::opset4::CumSum>(ngraph::builder::makeCumSum(paramOuts[0], axisNode, false, false));
+    ngraph::ResultVector results{std::make_shared<ngraph::opset4::Result>(cumSum)};
+    function = std::make_shared<ngraph::Function>(results, params, "InferSetBlob");
+}
+
+TEST_P(SetBlobOfKindTest, CompareWithRefs) {
+    Run();
+}
+
+} // namespace BehaviorTestsDefinitions
#include <gtest/gtest.h>
#include "blob_factory.hpp"
#include "blob_transform.hpp"
+#include "ie_compound_blob.h"
#include "precision_utils.h"
#include "common_test_utils/data_utils.hpp"
#include "common_test_utils/test_constants.hpp"
return s;
}
} // namespace Bf16TestUtils
+
+// Kinds of input blobs the behavior tests can feed to SetBlob.
+enum class BlobKind {
+    Simple,
+    Compound,
+    BatchOfSimple
+};
+
+// Streams a human-readable name for BlobKind (used in test case names).
+inline std::ostream& operator<<(std::ostream& os, BlobKind kind) {
+    switch (kind) {
+    case BlobKind::Simple:
+        return os << "Simple";
+    case BlobKind::Compound:
+        return os << "Compound";
+    case BlobKind::BatchOfSimple:
+        return os << "BatchOfSimple";
+    default:
+        THROW_IE_EXCEPTION << "Test does not support the blob kind";
+    }
+}
+
+// Creates a blob of the requested kind for the given tensor descriptor:
+// Simple - a regular filled blob; Compound - an empty CompoundBlob;
+// BatchOfSimple - a BatchedBlob built from dims[0] single-batch sub-blobs.
+inline InferenceEngine::Blob::Ptr makeBlobOfKind(const InferenceEngine::TensorDesc& td, BlobKind blobKind) {
+    using namespace ::InferenceEngine;
+    switch (blobKind) {
+    case BlobKind::Simple:
+        return createAndFillBlob(td);
+    case BlobKind::Compound:
+        return make_shared_blob<CompoundBlob>(std::vector<Blob::Ptr>{});
+    case BlobKind::BatchOfSimple: {
+        // NOTE(review): assumes dims[0] is the batch dimension — holds for the
+        // layouts BatchedBlob supports, but confirm for callers' descriptors.
+        const auto subBlobsNum = td.getDims()[0];
+        auto subBlobDesc = td;
+        subBlobDesc.getDims()[0] = 1;
+        std::vector<Blob::Ptr> subBlobs;
+        for (size_t i = 0; i < subBlobsNum; i++) {
+            subBlobs.push_back(makeBlobOfKind(subBlobDesc, BlobKind::Simple));
+        }
+        return make_shared_blob<BatchedBlob>(subBlobs);
+    }
+    default:
+        THROW_IE_EXCEPTION << "Test does not support the blob kind";
+    }
+}
+
} // namespace FuncTestUtils