From 32d7959b925388aa6e3c76316b41a05f0e73e15d Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Tue, 14 Jul 2020 10:27:10 +0300 Subject: [PATCH] Added U32 precision (#1297) --- inference-engine/ie_bridges/c/docs/api_overview.md | 2 ++ .../ie_bridges/c/include/c_api/ie_c_api.h | 1 + inference-engine/ie_bridges/c/src/ie_c_api.cpp | 6 +++++ .../src/openvino/inference_engine/constants.pyx | 2 +- .../src/openvino/inference_engine/ie_api.pyx | 3 ++- .../src/openvino/inference_engine/ie_api_impl.cpp | 5 ++-- inference-engine/include/ie_blob.h | 10 ++++---- inference-engine/include/ie_precision.hpp | 25 ++++++++++++-------- .../common/samples/classification_results.h | 1 + .../src/cldnn_engine/cldnn_program.cpp | 5 ++++ .../src/gna_plugin/gna_executable_network.hpp | 1 + .../src/inference_engine/blob_transform.cpp | 2 ++ inference-engine/src/inference_engine/ie_rtti.cpp | 10 ++++---- .../src/legacy_api/include/ie_ngraph_utils.hpp | 12 ++++++---- .../legacy_api/src/ie_cnn_layer_builder_ngraph.cpp | 9 +++++--- inference-engine/src/legacy_api/src/net_pass.cpp | 3 +++ .../src/network_helper.cpp | 4 ++++ .../src/mkldnn_plugin/mkldnn_exec_network.cpp | 1 + inference-engine/src/plugin_api/blob_factory.hpp | 27 +++++++++++----------- .../graph_transformer/src/frontend/frontend.cpp | 1 + .../functional/inference_engine/blob_copy_test.cpp | 27 ++++++++++++++++++---- .../inference_engine/ie_precision_test.cpp | 4 ++++ .../layer_transformation.cpp | 2 ++ .../util_const_infer_test.cpp | 7 ++++++ ngraph/test/runtime/ie/ie_executable.cpp | 1 + ngraph/test/util/engine/ie_engines.cpp | 3 +++ 26 files changed, 129 insertions(+), 45 deletions(-) diff --git a/inference-engine/ie_bridges/c/docs/api_overview.md b/inference-engine/ie_bridges/c/docs/api_overview.md index 67b3b11..f41d777 100644 --- a/inference-engine/ie_bridges/c/docs/api_overview.md +++ b/inference-engine/ie_bridges/c/docs/api_overview.md @@ -136,6 +136,8 @@ enum precision_e{ ​ U64 = 73, /**< 64bit unsigned integer value */ +​ 
U32 = 74, /**< 32bit unsigned integer value */ + ​ BIN = 71, /**< 1bit integer value */ ​ CUSTOM = 80 /**< custom precision has it's own name and size of elements */ diff --git a/inference-engine/ie_bridges/c/include/c_api/ie_c_api.h b/inference-engine/ie_bridges/c/include/c_api/ie_c_api.h index f124978..a7caa65 100644 --- a/inference-engine/ie_bridges/c/include/c_api/ie_c_api.h +++ b/inference-engine/ie_bridges/c/include/c_api/ie_c_api.h @@ -182,6 +182,7 @@ typedef enum { I32 = 70, /**< 32bit signed integer value */ I64 = 72, /**< 64bit signed integer value */ U64 = 73, /**< 64bit unsigned integer value */ + U32 = 74, /**< 32bit unsigned integer value */ BIN = 71, /**< 1bit integer value */ CUSTOM = 80 /**< custom precision has it's own name and size of elements */ }precision_e; diff --git a/inference-engine/ie_bridges/c/src/ie_c_api.cpp b/inference-engine/ie_bridges/c/src/ie_c_api.cpp index d814c32..ba87c7c 100644 --- a/inference-engine/ie_bridges/c/src/ie_c_api.cpp +++ b/inference-engine/ie_bridges/c/src/ie_c_api.cpp @@ -86,6 +86,7 @@ std::map precision_map = {{IE::Precision::UNSPECIFIE {IE::Precision::I8, precision_e::I8}, {IE::Precision::U16, precision_e::U16}, {IE::Precision::I32, precision_e::I32}, + {IE::Precision::U32, precision_e::U32}, {IE::Precision::I64, precision_e::I64}, {IE::Precision::U64, precision_e::U64}, {IE::Precision::BIN, precision_e::BIN}, @@ -1424,6 +1425,8 @@ IEStatusCode ie_blob_make_memory(const tensor_desc_t *tensorDesc, ie_blob_t **bl _blob->object = IE::make_shared_blob(tensor); } else if (prec == IE::Precision::I32) { _blob->object = IE::make_shared_blob(tensor); + } else if (prec == IE::Precision::U32) { + _blob->object = IE::make_shared_blob(tensor); } else if (prec == IE::Precision::I64) { _blob->object = IE::make_shared_blob(tensor); } else if (prec == IE::Precision::U64) { @@ -1490,6 +1493,9 @@ IEStatusCode ie_blob_make_memory_from_preallocated(const tensor_desc_t *tensorDe } else if (prec == IE::Precision::I32) { int32_t *p = 
reinterpret_cast(ptr); _blob->object = IE::make_shared_blob(tensor, p, size); + } else if (prec == IE::Precision::U32) { + uint32_t *p = reinterpret_cast(ptr); + _blob->object = IE::make_shared_blob(tensor, p, size); } else if (prec == IE::Precision::I64) { int64_t *p = reinterpret_cast(ptr); _blob->object = IE::make_shared_blob(tensor, p, size); diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/constants.pyx b/inference-engine/ie_bridges/python/src/openvino/inference_engine/constants.pyx index 305a692..2251033 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/constants.pyx +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/constants.pyx @@ -18,7 +18,7 @@ from .cimport ie_api_impl_defs as C import numpy as np from enum import Enum -supported_precisions = ["FP32", "FP16", "I64", "U64", "I32", "I16", "I8", "U16", "U8"] +supported_precisions = ["FP32", "FP16", "I64", "U64", "I32", "U32", "I16", "I8", "U16", "U8"] known_plugins = ['CPU', 'GPU', 'FPGA', 'MYRIAD', 'HETERO', 'HDDL', 'MULTI'] diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx index 98a0af3..9fa91a0 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx @@ -1687,8 +1687,9 @@ cdef class BlobBuffer: 'I8': 'b', # signed char 'I16': 'h', # signed short 'I32': 'i', # signed int + 'U32': 'I', # unsigned int 'I64': 'q', # signed long int - 'U64': 'Q', # signed long int + 'U64': 'Q', # unsigned long int } if name not in precision_to_format: raise ValueError("Unknown Blob precision: {}".format(name)) diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp index a417b31..00b0eb7 100644 --- 
a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp @@ -14,9 +14,10 @@ std::map precision_map = {{"FP32", Inf {"I16", InferenceEngine::Precision::I16}, {"I32", InferenceEngine::Precision::I32}, {"I64", InferenceEngine::Precision::I64}, - {"U64", InferenceEngine::Precision::U64}, + {"U8", InferenceEngine::Precision::U8}, {"U16", InferenceEngine::Precision::U16}, - {"U8", InferenceEngine::Precision::U8}}; + {"U32", InferenceEngine::Precision::U32}, + {"U64", InferenceEngine::Precision::U64}}; std::map layout_map = {{"ANY", InferenceEngine::Layout::ANY}, {"NCHW", InferenceEngine::Layout::NCHW}, diff --git a/inference-engine/include/ie_blob.h b/inference-engine/include/ie_blob.h index a6a17c4..205edde 100644 --- a/inference-engine/include/ie_blob.h +++ b/inference-engine/include/ie_blob.h @@ -761,14 +761,16 @@ protected: #ifdef __clang__ extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); -extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); -extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); -extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); +extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); +extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); +extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); +extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); -extern template class 
INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); +extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); +extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob); #endif // __clang__ /** diff --git a/inference-engine/include/ie_precision.hpp b/inference-engine/include/ie_precision.hpp index f0a05b5..e7f3cf7 100644 --- a/inference-engine/include/ie_precision.hpp +++ b/inference-engine/include/ie_precision.hpp @@ -34,6 +34,7 @@ public: I8 = 50, /**< 8bit signed integer value */ U16 = 60, /**< 16bit unsigned integer value */ I32 = 70, /**< 32bit signed integer value */ + U32 = 74, /**< 32bit unsigned integer value */ I64 = 72, /**< 64bit signed integer value */ U64 = 73, /**< 64bit unsigned integer value */ BIN = 71, /**< 1bit integer value */ @@ -108,13 +109,14 @@ public: CASE(FP32, float); CASE2(FP16, int16_t, uint16_t); CASE2(BF16, int16_t, uint16_t); + CASE(I8, int8_t); CASE(I16, int16_t); CASE(I32, int32_t); CASE(I64, int64_t); - CASE(U64, uint64_t); - CASE(U16, uint16_t); CASE(U8, uint8_t); - CASE(I8, int8_t); + CASE(U16, uint16_t); + CASE(U32, uint32_t); + CASE(U64, uint64_t); CASE(BOOL, uint8_t); CASE2(Q78, int16_t, uint16_t); CASE2(BIN, int8_t, uint8_t); @@ -182,10 +184,10 @@ public: static Precision FromStr(const std::string& str) { static std::unordered_map names = { #define PRECISION_NAME(s) {#s, s} - PRECISION_NAME(Q78), PRECISION_NAME(U8), PRECISION_NAME(I8), PRECISION_NAME(I16), - PRECISION_NAME(I32), PRECISION_NAME(I64), PRECISION_NAME(U64), PRECISION_NAME(U16), + PRECISION_NAME(Q78), PRECISION_NAME(BOOL), PRECISION_NAME(BF16), + PRECISION_NAME(I8), PRECISION_NAME(I16), PRECISION_NAME(I32), PRECISION_NAME(I64), + PRECISION_NAME(U8), PRECISION_NAME(U16), PRECISION_NAME(U32), PRECISION_NAME(U64), PRECISION_NAME(FP32), PRECISION_NAME(FP16), PRECISION_NAME(MIXED), PRECISION_NAME(BIN), - PRECISION_NAME(BOOL), PRECISION_NAME(BF16), #undef PRECISION_NAME }; auto i = names.find(str); @@ -263,13 +265,14 @@ protected: 
CASE(FP32); CASE(FP16); CASE(BF16); + CASE(I8); CASE(I16); CASE(I32); CASE(I64); - CASE(U64); - CASE(U16); CASE(U8); - CASE(I8); + CASE(U16); + CASE(U32); + CASE(U64); CASE(Q78); CASE(MIXED); CASE(BIN); @@ -330,6 +333,10 @@ struct PrecisionTrait { using value_type = int32_t; }; template <> +struct PrecisionTrait { + using value_type = uint32_t; +}; +template <> struct PrecisionTrait { using value_type = int64_t; }; diff --git a/inference-engine/samples/common/samples/classification_results.h b/inference-engine/samples/common/samples/classification_results.h index c685f19..e18eff5 100644 --- a/inference-engine/samples/common/samples/classification_results.h +++ b/inference-engine/samples/common/samples/classification_results.h @@ -103,6 +103,7 @@ private: TBLOB_TOP_RESULT(I8); TBLOB_TOP_RESULT(U16); TBLOB_TOP_RESULT(I32); + TBLOB_TOP_RESULT(U32); TBLOB_TOP_RESULT(U64); TBLOB_TOP_RESULT(I64); default: diff --git a/inference-engine/src/cldnn_engine/cldnn_program.cpp b/inference-engine/src/cldnn_engine/cldnn_program.cpp index 7f6e2a1..1ccef5a 100644 --- a/inference-engine/src/cldnn_engine/cldnn_program.cpp +++ b/inference-engine/src/cldnn_engine/cldnn_program.cpp @@ -4626,6 +4626,11 @@ void Program::CreateCumSumPrimitive(cldnn::topology& topology, InferenceEngine:: axis = data[0]; break; } + case InferenceEngine::Precision::U32: { + auto data = constantBlob->buffer().as(); + axis = static_cast(data[0]); + break; + } case InferenceEngine::Precision::U64: { auto data = constantBlob->buffer().as(); axis = static_cast(data[0]); diff --git a/inference-engine/src/gna_plugin/gna_executable_network.hpp b/inference-engine/src/gna_plugin/gna_executable_network.hpp index f259da2..3db0696 100644 --- a/inference-engine/src/gna_plugin/gna_executable_network.hpp +++ b/inference-engine/src/gna_plugin/gna_executable_network.hpp @@ -33,6 +33,7 @@ class GNAExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafe : plg(plg) { 
InferenceEngine::NetPass::ConvertPrecision(network, InferenceEngine::Precision::I64, InferenceEngine::Precision::I32); InferenceEngine::NetPass::ConvertPrecision(network, InferenceEngine::Precision::U64, InferenceEngine::Precision::I32); + InferenceEngine::NetPass::ConvertPrecision(network, InferenceEngine::Precision::U32, InferenceEngine::Precision::I32); plg->LoadNetwork(network); } diff --git a/inference-engine/src/inference_engine/blob_transform.cpp b/inference-engine/src/inference_engine/blob_transform.cpp index ede401f..99d6f8c 100644 --- a/inference-engine/src/inference_engine/blob_transform.cpp +++ b/inference-engine/src/inference_engine/blob_transform.cpp @@ -126,6 +126,7 @@ static inline void blob_copy_4d(Blob::Ptr src, Blob::Ptr dst) { switch (src->getTensorDesc().getPrecision()) { case Precision::FP32: case Precision::I32: + case Precision::U32: blob_copy_4d_t(src, dst); break; @@ -263,6 +264,7 @@ static inline void blob_copy_5d(Blob::Ptr src, Blob::Ptr dst) { switch (src->getTensorDesc().getPrecision()) { case Precision::FP32: case Precision::I32: + case Precision::U32: blob_copy_5d_t(src, dst); break; diff --git a/inference-engine/src/inference_engine/ie_rtti.cpp b/inference-engine/src/inference_engine/ie_rtti.cpp index 8a9481d..9d5bdb4 100644 --- a/inference-engine/src/inference_engine/ie_rtti.cpp +++ b/inference-engine/src/inference_engine/ie_rtti.cpp @@ -109,12 +109,14 @@ TBlob::~TBlob() { template class InferenceEngine::TBlob; template class InferenceEngine::TBlob; -template class InferenceEngine::TBlob; -template class InferenceEngine::TBlob; template class InferenceEngine::TBlob; template class InferenceEngine::TBlob; -template class InferenceEngine::TBlob; +template class InferenceEngine::TBlob; +template class InferenceEngine::TBlob; +template class InferenceEngine::TBlob; +template class InferenceEngine::TBlob; template class InferenceEngine::TBlob; template class InferenceEngine::TBlob; -template class InferenceEngine::TBlob; +template class 
InferenceEngine::TBlob; +template class InferenceEngine::TBlob; #endif // __clang__ diff --git a/inference-engine/src/legacy_api/include/ie_ngraph_utils.hpp b/inference-engine/src/legacy_api/include/ie_ngraph_utils.hpp index cabbba0..e850101 100644 --- a/inference-engine/src/legacy_api/include/ie_ngraph_utils.hpp +++ b/inference-engine/src/legacy_api/include/ie_ngraph_utils.hpp @@ -33,6 +33,8 @@ inline ::ngraph::element::Type convertPrecision(const Precision& precision) { return ::ngraph::element::Type(::ngraph::element::Type_t::i16); case Precision::I32: return ::ngraph::element::Type(::ngraph::element::Type_t::i32); + case Precision::U32: + return ::ngraph::element::Type(::ngraph::element::Type_t::u32); case Precision::I64: return ::ngraph::element::Type(::ngraph::element::Type_t::i64); case Precision::U64: @@ -103,14 +105,16 @@ inline Precision convertPrecision(const ::ngraph::element::Type& precision) { return Precision(Precision::I32); case ::ngraph::element::Type_t::i64: return Precision(Precision::I64); - case ::ngraph::element::Type_t::u64: - return Precision(Precision::U64); - case ::ngraph::element::Type_t::u1: - return Precision(Precision::BIN); case ::ngraph::element::Type_t::u8: return Precision(Precision::U8); case ::ngraph::element::Type_t::u16: return Precision(Precision::U16); + case ::ngraph::element::Type_t::u32: + return Precision(Precision::U32); + case ::ngraph::element::Type_t::u64: + return Precision(Precision::U64); + case ::ngraph::element::Type_t::u1: + return Precision(Precision::BIN); case ::ngraph::element::Type_t::boolean: return Precision(Precision::BOOL); default: diff --git a/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp b/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp index f474959..dfa6eba 100644 --- a/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp +++ b/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp @@ -401,15 +401,18 @@ CNNLayer::Ptr 
NodeConverter::createLayer(const std::shared_ case Precision::I64: precision_str = "I64"; break; - case Precision::U64: - precision_str = "U64"; - break; case Precision::U8: precision_str = "U8"; break; case Precision::U16: precision_str = "U16"; break; + case Precision::U32: + precision_str = "U32"; + break; + case Precision::U64: + precision_str = "U64"; + break; case Precision::BOOL: precision_str = "BOOL"; break; diff --git a/inference-engine/src/legacy_api/src/net_pass.cpp b/inference-engine/src/legacy_api/src/net_pass.cpp index bc811a4..aa02087 100644 --- a/inference-engine/src/legacy_api/src/net_pass.cpp +++ b/inference-engine/src/legacy_api/src/net_pass.cpp @@ -1471,6 +1471,9 @@ details::CNNSubnet GetInternalSubnet(const CNNLayerPtr &layer) { void ConvertPrecision(ICNNNetwork& net, Precision from, Precision to) { auto compare = getPrecisionMask(from, to); switch (compare) { + case getPrecisionMask(Precision::U32, Precision::I32): + convertPrecisionForAll(net); + break; case getPrecisionMask(Precision::U64, Precision::I32): convertPrecisionForAll(net); break; diff --git a/inference-engine/src/low_precision_transformations/src/network_helper.cpp b/inference-engine/src/low_precision_transformations/src/network_helper.cpp index b02c359..ab1167d 100644 --- a/inference-engine/src/low_precision_transformations/src/network_helper.cpp +++ b/inference-engine/src/low_precision_transformations/src/network_helper.cpp @@ -647,6 +647,9 @@ std::shared_ptr CNNNetworkHelper::getFloatData(const Blob::Ptr& srcBlob) } else if (precision == Precision::I32) { const auto* srcData = srcBlob->buffer().as::value_type*>(); std::copy(srcData, srcData + dataSize, floatPtr.get()); + } else if (precision == Precision::U32) { + const auto* srcData = srcBlob->buffer().as::value_type*>(); + std::copy(srcData, srcData + dataSize, floatPtr.get()); } else if (precision == Precision::I64) { const auto* srcData = srcBlob->buffer().as::value_type*>(); std::copy(srcData, srcData + dataSize, 
floatPtr.get()); @@ -666,6 +669,7 @@ bool CNNNetworkHelper::isBlobPrecisionSupported(const Precision precision) { (precision == Precision::I8) || (precision == Precision::U8) || (precision == Precision::I32) || + (precision == Precision::U32) || (precision == Precision::I64) || (precision == Precision::U64); } diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp index a4779cb..159a6d7 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp @@ -52,6 +52,7 @@ MKLDNNExecNetwork::MKLDNNExecNetwork(const InferenceEngine::ICNNNetwork &network // Default int64->int32 conversion is already applied in IE common module. NetPass::ConvertPrecision(*_clonedNetwork, Precision::I64, Precision::I32); NetPass::ConvertPrecision(*_clonedNetwork, Precision::U64, Precision::I32); + NetPass::ConvertPrecision(*_clonedNetwork, Precision::U32, Precision::I32); NetPass::ConvertPrecision(*_clonedNetwork, Precision::FP16, Precision::FP32); NetPass::ConvertPrecision(*_clonedNetwork, Precision::BOOL, Precision::U8); NetPass::ConvertPrecision(*_clonedNetwork, Precision::U16, Precision::I32); diff --git a/inference-engine/src/plugin_api/blob_factory.hpp b/inference-engine/src/plugin_api/blob_factory.hpp index bcddeab..8b77274 100644 --- a/inference-engine/src/plugin_api/blob_factory.hpp +++ b/inference-engine/src/plugin_api/blob_factory.hpp @@ -48,7 +48,7 @@ InferenceEngine::Blob::Ptr make_shared_blob2(Args&&... args) { /** * @brief Creates Blob::Ptr with precision. - * @ingroup ie_dev_api_memory + * @ingroup ie_dev_api_memory * * @param[in] desc The TensorDesc object * @return A Blob::Ptr pointer @@ -58,7 +58,7 @@ make_blob_with_precision(const InferenceEngine::TensorDesc& desc); /** * @brief Makes a blob with precision. 
- * @ingroup ie_dev_api_memory + * @ingroup ie_dev_api_memory * * @param[in] desc The TensorDesc object * @param ptr The pointer to a raw memory @@ -69,7 +69,7 @@ make_blob_with_precision(const InferenceEngine::TensorDesc& desc, void* ptr); /** * @brief Makes a blob with precision. - * @ingroup ie_dev_api_memory + * @ingroup ie_dev_api_memory * * @param[in] desc The description * @param[in] alloc The IAllocator object @@ -81,7 +81,7 @@ make_blob_with_precision(const InferenceEngine::TensorDesc& desc, /** * @brief Creates a plain Blob::Ptr - * @ingroup ie_dev_api_memory + * @ingroup ie_dev_api_memory * * @param[in] prec The Precision value * @param[in] dims The dims @@ -92,7 +92,7 @@ make_plain_blob(InferenceEngine::Precision prec, const InferenceEngine::SizeVect /** * @brief Creates Blob::Ptr with precision - * @ingroup ie_dev_api_memory + * @ingroup ie_dev_api_memory * * @param[in] precision The precision * @param args The arguments @@ -109,12 +109,13 @@ InferenceEngine::Blob::Ptr make_blob_with_precision(InferenceEngine::Precision p USE_FACTORY(FP32); USE_FACTORY(FP16); USE_FACTORY(Q78); - USE_FACTORY(I16); - USE_FACTORY(U8); USE_FACTORY(I8); - USE_FACTORY(U16); + USE_FACTORY(I16); USE_FACTORY(I32); USE_FACTORY(I64); + USE_FACTORY(U8); + USE_FACTORY(U16); + USE_FACTORY(U32); USE_FACTORY(U64); USE_FACTORY(BIN); USE_FACTORY(BF16); @@ -127,7 +128,7 @@ InferenceEngine::Blob::Ptr make_blob_with_precision(InferenceEngine::Precision p /** * @brief Create blob with custom precision - * @ingroup ie_dev_api_memory + * @ingroup ie_dev_api_memory * @tparam T - type off underlined elements * @tparam Args Variadic template type arguments * @param args Arguments @@ -140,7 +141,7 @@ InferenceEngine::Blob::Ptr make_custom_blob(Args&&... 
args) { /** * @brief Create blob with custom precision - * @ingroup ie_dev_api_memory + * @ingroup ie_dev_api_memory * @tparam T A type off underlined elements * @param layout A blob layout * @param size A blob size @@ -154,7 +155,7 @@ InferenceEngine::Blob::Ptr make_custom_blob(InferenceEngine::Layout layout, Infe /** * @brief Creates a TBlob<> object from a Data node - * @ingroup ie_dev_api_memory + * @ingroup ie_dev_api_memory * @param data A reference to a smart pointer of the Data node * @return Smart pointer to TBlob<> with the relevant C type to the precision of the data node */ @@ -162,10 +163,10 @@ INFERENCE_ENGINE_API_CPP(InferenceEngine::Blob::Ptr) CreateBlobFromData(const In /** * @brief Copy data from std::vector to Blob - * @ingroup ie_dev_api_memory + * @ingroup ie_dev_api_memory * @tparam T type of data in std::vector * @param outputBlob An output blob to copy to - * @param inputVector An input std::vector to copy from + * @param inputVector An input std::vector to copy from */ template void CopyVectorToBlob(const InferenceEngine::Blob::Ptr outputBlob, const std::vector& inputVector) { diff --git a/inference-engine/src/vpu/graph_transformer/src/frontend/frontend.cpp b/inference-engine/src/vpu/graph_transformer/src/frontend/frontend.cpp index beeb422..264f42f 100644 --- a/inference-engine/src/vpu/graph_transformer/src/frontend/frontend.cpp +++ b/inference-engine/src/vpu/graph_transformer/src/frontend/frontend.cpp @@ -394,6 +394,7 @@ ModelPtr FrontEnd::runCommonPasses(ie::ICNNNetwork& network, const UnsupportedLa } ie::NetPass::ConvertPrecision(*originalOrConvertNetwork, ie::Precision::I64, ie::Precision::I32); + ie::NetPass::ConvertPrecision(*originalOrConvertNetwork, ie::Precision::U32, ie::Precision::I32); ie::NetPass::ConvertPrecision(*originalOrConvertNetwork, ie::Precision::U64, ie::Precision::I32); ie::NetPass::ConvertPrecision(*originalOrConvertNetwork, ie::Precision::BOOL, ie::Precision::I32); diff --git 
a/inference-engine/tests/functional/inference_engine/blob_copy_test.cpp b/inference-engine/tests/functional/inference_engine/blob_copy_test.cpp index 28804f2..91c946c 100644 --- a/inference-engine/tests/functional/inference_engine/blob_copy_test.cpp +++ b/inference-engine/tests/functional/inference_engine/blob_copy_test.cpp @@ -52,6 +52,8 @@ InferenceEngine::Blob::Ptr createBlob(InferenceEngine::Precision precision, Size return make_shared_blob(tensorDesc); case InferenceEngine::Precision::I32: return make_shared_blob(tensorDesc); + case InferenceEngine::Precision::U32: + return make_shared_blob(tensorDesc); case InferenceEngine::Precision::I64: return make_shared_blob(tensorDesc); case InferenceEngine::Precision::U64: @@ -121,6 +123,8 @@ void FillBlob(Blob::Ptr& inputBlob) { return FillBlobRandom(inputBlob); case InferenceEngine::Precision::I32: return FillBlobRandom(inputBlob); + case InferenceEngine::Precision::U32: + return FillBlobRandom(inputBlob); case InferenceEngine::Precision::I64: return FillBlobRandom(inputBlob); case InferenceEngine::Precision::U64: @@ -206,7 +210,9 @@ bool IsCorrectBlobCopy(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) { case InferenceEngine::Precision::Q78: return IsCorrectBlobCopy_Impl(srcBlob, dstBlob); case InferenceEngine::Precision::I32: - IsCorrectBlobCopy_Impl(srcBlob, dstBlob); + return IsCorrectBlobCopy_Impl(srcBlob, dstBlob); + case InferenceEngine::Precision::U32: + return IsCorrectBlobCopy_Impl(srcBlob, dstBlob); case InferenceEngine::Precision::I64: return IsCorrectBlobCopy_Impl(srcBlob, dstBlob); case InferenceEngine::Precision::U64: @@ -286,7 +292,7 @@ std::vector BlobCopy_Dims = { }; // The 'blob_copy(4/5)_d' function is a template with the parameter-list -// FP32 is used for cases with the following accuracy: FP32, I32 +// FP32 is used for cases with the following accuracy: FP32, I32, U32 // FP16 is used for cases with the following accuracy: FP16, U16, I16 // U8 is used for cases with the following accuracy: U8, I8 // 
Cases with other precision are not supported @@ -294,6 +300,11 @@ std::vector BlobCopy_PrecisionParams = { InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16, InferenceEngine::Precision::U8, + InferenceEngine::Precision::I8, + InferenceEngine::Precision::U16, + InferenceEngine::Precision::I16, + InferenceEngine::Precision::U32, + InferenceEngine::Precision::I32, }; } // namespace @@ -325,17 +336,21 @@ bool IsEqualBlobCopy(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) { case InferenceEngine::Precision::I16: case InferenceEngine::Precision::Q78: return IsEqualBlobCopy_Impl(srcBlob, dstBlob); + case InferenceEngine::Precision::U32: + return IsEqualBlobCopy_Impl(srcBlob, dstBlob); case InferenceEngine::Precision::I32: IsEqualBlobCopy_Impl(srcBlob, dstBlob); + case InferenceEngine::Precision::U64: + return IsEqualBlobCopy_Impl(srcBlob, dstBlob); case InferenceEngine::Precision::I64: return IsEqualBlobCopy_Impl(srcBlob, dstBlob); - case InferenceEngine::Precision::U16: - return IsEqualBlobCopy_Impl(srcBlob, dstBlob); case InferenceEngine::Precision::I8: case InferenceEngine::Precision::BIN: return IsEqualBlobCopy_Impl(srcBlob, dstBlob); case InferenceEngine::Precision::U8: return IsEqualBlobCopy_Impl(srcBlob, dstBlob); + case InferenceEngine::Precision::U16: + return IsEqualBlobCopy_Impl(srcBlob, dstBlob); default: return false; } @@ -372,6 +387,10 @@ void copy3DBlobsAllBytesWithReLayoutWrapper(const Blob::Ptr& srcLayoutBlob, Blob return copy3DBlobsAllBytesWithReLayout(srcLayoutBlob, trgLayoutBlob); case InferenceEngine::Precision::I32: return copy3DBlobsAllBytesWithReLayout(srcLayoutBlob, trgLayoutBlob); + case InferenceEngine::Precision::U32: + return copy3DBlobsAllBytesWithReLayout(srcLayoutBlob, trgLayoutBlob); + case InferenceEngine::Precision::U64: + return copy3DBlobsAllBytesWithReLayout(srcLayoutBlob, trgLayoutBlob); case InferenceEngine::Precision::I64: return copy3DBlobsAllBytesWithReLayout(srcLayoutBlob, trgLayoutBlob); case InferenceEngine::Precision::U16:
diff --git a/inference-engine/tests/functional/inference_engine/ie_precision_test.cpp b/inference-engine/tests/functional/inference_engine/ie_precision_test.cpp index 7fa179a..5046067 100644 --- a/inference-engine/tests/functional/inference_engine/ie_precision_test.cpp +++ b/inference-engine/tests/functional/inference_engine/ie_precision_test.cpp @@ -19,6 +19,7 @@ TEST_F(PrecisionTests, ShowsCorrectPrecisionNames) { ASSERT_STREQ(Precision(Precision::FP32).name(), "FP32"); ASSERT_STREQ(Precision(Precision::I16).name(), "I16"); ASSERT_STREQ(Precision(Precision::I32).name(), "I32"); + ASSERT_STREQ(Precision(Precision::U32).name(), "U32"); ASSERT_STREQ(Precision(Precision::U16).name(), "U16"); ASSERT_STREQ(Precision(Precision::I8).name(), "I8"); ASSERT_STREQ(Precision(Precision::Q78).name(), "Q78"); @@ -35,6 +36,7 @@ TEST_F(PrecisionTests, sizeIsCorrect) { ASSERT_EQ(Precision(Precision::FP16).size(), 2); ASSERT_EQ(Precision(Precision::FP32).size(), 4); ASSERT_EQ(Precision(Precision::I32).size(), 4); + ASSERT_EQ(Precision(Precision::U32).size(), 4); ASSERT_EQ(Precision(Precision::I16).size(), 2); ASSERT_EQ(Precision(Precision::U16).size(), 2); ASSERT_EQ(Precision(Precision::I8).size(), 1); @@ -51,6 +53,7 @@ TEST_F(PrecisionTests, is_float) { ASSERT_FALSE(Precision(Precision::I64).is_float()); ASSERT_FALSE(Precision(Precision::U64).is_float()); ASSERT_FALSE(Precision(Precision::I32).is_float()); + ASSERT_FALSE(Precision(Precision::U32).is_float()); ASSERT_FALSE(Precision(Precision::I16).is_float()); ASSERT_FALSE(Precision(Precision::U16).is_float()); ASSERT_FALSE(Precision(Precision::I8).is_float()); @@ -68,6 +71,7 @@ TEST_F(PrecisionTests, constructFromSTR) { ASSERT_EQ(Precision(Precision::FP16), Precision::FromStr("FP16")); ASSERT_EQ(Precision(Precision::FP32), Precision::FromStr("FP32")); ASSERT_EQ(Precision(Precision::I32), Precision::FromStr("I32")); + ASSERT_EQ(Precision(Precision::U32), Precision::FromStr("U32")); ASSERT_EQ(Precision(Precision::I16), 
Precision::FromStr("I16")); ASSERT_EQ(Precision(Precision::U16), Precision::FromStr("U16")); ASSERT_EQ(Precision(Precision::I8), Precision::FromStr("I8")); diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/layer_transformation.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/layer_transformation.cpp index 213dad5..ff6a1a6 100644 --- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/layer_transformation.cpp +++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/layer_transformation.cpp @@ -93,6 +93,7 @@ InferenceEngine::CNNNetwork LayerTransformation::transform(InferenceEngine::deta InferenceEngine::NetPass::ConvertPrecision(*implNetwork, InferenceEngine::Precision::I64, InferenceEngine::Precision::I32); InferenceEngine::NetPass::ConvertPrecision(*implNetwork, InferenceEngine::Precision::U64, InferenceEngine::Precision::I32); + InferenceEngine::NetPass::ConvertPrecision(*implNetwork, InferenceEngine::Precision::U32, InferenceEngine::Precision::I32); InferenceEngine::NetPass::ConvertPrecision(*implNetwork, InferenceEngine::Precision::FP16, InferenceEngine::Precision::FP32); InferenceEngine::NetPass::ConvertPrecision(*implNetwork, InferenceEngine::Precision::BOOL, InferenceEngine::Precision::U8); @@ -115,6 +116,7 @@ InferenceEngine::CNNNetwork LayerTransformation::transform(const InferenceEngine InferenceEngine::NetPass::ConvertPrecision(*cnnNetworkImp, InferenceEngine::Precision::I64, InferenceEngine::Precision::I32); InferenceEngine::NetPass::ConvertPrecision(*cnnNetworkImp, InferenceEngine::Precision::U64, InferenceEngine::Precision::I32); + InferenceEngine::NetPass::ConvertPrecision(*cnnNetworkImp, InferenceEngine::Precision::U32, InferenceEngine::Precision::I32); InferenceEngine::NetPass::ConvertPrecision(*cnnNetworkImp, 
InferenceEngine::Precision::FP16, InferenceEngine::Precision::FP32); InferenceEngine::NetPass::ConvertPrecision(*cnnNetworkImp, InferenceEngine::Precision::BOOL, InferenceEngine::Precision::U8); diff --git a/inference-engine/tests_deprecated/unit/inference_engine_tests/util_const_infer_test.cpp b/inference-engine/tests_deprecated/unit/inference_engine_tests/util_const_infer_test.cpp index 1732b79..1bee229 100644 --- a/inference-engine/tests_deprecated/unit/inference_engine_tests/util_const_infer_test.cpp +++ b/inference-engine/tests_deprecated/unit/inference_engine_tests/util_const_infer_test.cpp @@ -170,6 +170,13 @@ IE::BlobMap RemoveLayerTests::fillConstDataDiffPrec (const std::vectorbuffer().as(); + for (int i = 0; i < blob->size(); i++) { + buffer[i] = i + 2; + } + break; + } case IE::Precision::I64: { auto *buffer = blob->buffer().as(); for (int i = 0; i < blob->size(); i++) { diff --git a/ngraph/test/runtime/ie/ie_executable.cpp b/ngraph/test/runtime/ie/ie_executable.cpp index dc82ec0..c4858ed 100644 --- a/ngraph/test/runtime/ie/ie_executable.cpp +++ b/ngraph/test/runtime/ie/ie_executable.cpp @@ -60,6 +60,7 @@ namespace case element::Type_t::i8: blob = MAKE_IE_TBLOB(int8_t, I8, shape, layout); break; case element::Type_t::u16: blob = MAKE_IE_TBLOB(uint16_t, U16, shape, layout); break; case element::Type_t::i32: blob = MAKE_IE_TBLOB(int32_t, I32, shape, layout); break; + case element::Type_t::u32: blob = MAKE_IE_TBLOB(uint32_t, U32, shape, layout); break; case element::Type_t::i64: blob = MAKE_IE_TBLOB(int64_t, I64, shape, layout); break; case element::Type_t::u64: blob = MAKE_IE_TBLOB(uint64_t, U64, shape, layout); break; case element::Type_t::boolean: blob = MAKE_IE_TBLOB(uint8_t, BOOL, shape, layout); break; diff --git a/ngraph/test/util/engine/ie_engines.cpp b/ngraph/test/util/engine/ie_engines.cpp index 5bab1d0..683bbc6 100644 --- a/ngraph/test/util/engine/ie_engines.cpp +++ b/ngraph/test/util/engine/ie_engines.cpp @@ -105,6 +105,9 @@ namespace case 
InferenceEngine::Precision::U16: return compare_blobs(computed, expected, tolerance_bits); break; + case InferenceEngine::Precision::U32: + return compare_blobs(computed, expected, tolerance_bits); + break; case InferenceEngine::Precision::U64: return compare_blobs(computed, expected, tolerance_bits); break; -- 2.7.4