Added U32 precision (#1297)
author Ilya Churaev <ilya.churaev@intel.com>
Tue, 14 Jul 2020 07:27:10 +0000 (10:27 +0300)
committer GitHub <noreply@github.com>
Tue, 14 Jul 2020 07:27:10 +0000 (10:27 +0300)
26 files changed:
inference-engine/ie_bridges/c/docs/api_overview.md
inference-engine/ie_bridges/c/include/c_api/ie_c_api.h
inference-engine/ie_bridges/c/src/ie_c_api.cpp
inference-engine/ie_bridges/python/src/openvino/inference_engine/constants.pyx
inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx
inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp
inference-engine/include/ie_blob.h
inference-engine/include/ie_precision.hpp
inference-engine/samples/common/samples/classification_results.h
inference-engine/src/cldnn_engine/cldnn_program.cpp
inference-engine/src/gna_plugin/gna_executable_network.hpp
inference-engine/src/inference_engine/blob_transform.cpp
inference-engine/src/inference_engine/ie_rtti.cpp
inference-engine/src/legacy_api/include/ie_ngraph_utils.hpp
inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp
inference-engine/src/legacy_api/src/net_pass.cpp
inference-engine/src/low_precision_transformations/src/network_helper.cpp
inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp
inference-engine/src/plugin_api/blob_factory.hpp
inference-engine/src/vpu/graph_transformer/src/frontend/frontend.cpp
inference-engine/tests/functional/inference_engine/blob_copy_test.cpp
inference-engine/tests/functional/inference_engine/ie_precision_test.cpp
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/low_precision_transformations/layer_transformation.cpp
inference-engine/tests_deprecated/unit/inference_engine_tests/util_const_infer_test.cpp
ngraph/test/runtime/ie/ie_executable.cpp
ngraph/test/util/engine/ie_engines.cpp

index 67b3b11..f41d777 100644 (file)
@@ -136,6 +136,8 @@ enum precision_e{
 
 ​    U64 = 73,   /**< 64bit unsigned integer value */
 
+​    U32 = 74,   /**< 32bit unsigned integer value */
+
 ​    BIN = 71,   /**< 1bit integer value */
 
 ​    CUSTOM = 80 /**< custom precision has it's own name and size of elements */
index f124978..a7caa65 100644 (file)
@@ -182,6 +182,7 @@ typedef enum {
     I32 = 70,   /**< 32bit signed integer value */
     I64 = 72,   /**< 64bit signed integer value */
     U64 = 73,   /**< 64bit unsigned integer value */
+    U32 = 74,   /**< 32bit unsigned integer value */
     BIN = 71,   /**< 1bit integer value */
     CUSTOM = 80 /**< custom precision has it's own name and size of elements */
 }precision_e;
index d814c32..ba87c7c 100644 (file)
@@ -86,6 +86,7 @@ std::map<IE::Precision, precision_e> precision_map = {{IE::Precision::UNSPECIFIE
                                                         {IE::Precision::I8, precision_e::I8},
                                                         {IE::Precision::U16, precision_e::U16},
                                                         {IE::Precision::I32, precision_e::I32},
+                                                        {IE::Precision::U32, precision_e::U32},
                                                         {IE::Precision::I64, precision_e::I64},
                                                         {IE::Precision::U64, precision_e::U64},
                                                         {IE::Precision::BIN, precision_e::BIN},
@@ -1424,6 +1425,8 @@ IEStatusCode ie_blob_make_memory(const tensor_desc_t *tensorDesc, ie_blob_t **bl
             _blob->object = IE::make_shared_blob<int16_t>(tensor);
         } else if (prec == IE::Precision::I32) {
             _blob->object = IE::make_shared_blob<int32_t>(tensor);
+        } else if (prec == IE::Precision::U32) {
+            _blob->object = IE::make_shared_blob<uint32_t>(tensor);
         } else if (prec == IE::Precision::I64) {
             _blob->object = IE::make_shared_blob<int64_t>(tensor);
         } else if (prec == IE::Precision::U64) {
@@ -1490,6 +1493,9 @@ IEStatusCode ie_blob_make_memory_from_preallocated(const tensor_desc_t *tensorDe
         } else if (prec == IE::Precision::I32) {
             int32_t *p = reinterpret_cast<int32_t *>(ptr);
             _blob->object = IE::make_shared_blob(tensor, p, size);
+        } else if (prec == IE::Precision::U32) {
+            uint32_t *p = reinterpret_cast<uint32_t *>(ptr);
+            _blob->object = IE::make_shared_blob(tensor, p, size);
         } else if (prec == IE::Precision::I64) {
             int64_t *p = reinterpret_cast<int64_t *>(ptr);
             _blob->object = IE::make_shared_blob(tensor, p, size);
index 305a692..2251033 100644 (file)
@@ -18,7 +18,7 @@ from .cimport ie_api_impl_defs as C
 import numpy as np
 from enum import Enum
 
-supported_precisions = ["FP32", "FP16", "I64", "U64", "I32", "I16", "I8", "U16", "U8"]
+supported_precisions = ["FP32", "FP16", "I64", "U64", "I32", "U32", "I16", "I8", "U16", "U8"]
 
 known_plugins = ['CPU', 'GPU', 'FPGA', 'MYRIAD', 'HETERO', 'HDDL', 'MULTI']
 
index 98a0af3..9fa91a0 100644 (file)
@@ -1687,8 +1687,9 @@ cdef class BlobBuffer:
             'I8': 'b',  # signed char
             'I16': 'h',  # signed short
             'I32': 'i',  # signed int
+            'U32': 'I',  # unsigned int
             'I64': 'q',  # signed long int
-            'U64': 'Q',  # signed long int
+            'U64': 'Q',  # unsigned long int
         }
         if name not in precision_to_format:
             raise ValueError("Unknown Blob precision: {}".format(name))
index a417b31..00b0eb7 100644 (file)
@@ -14,9 +14,10 @@ std::map <std::string, InferenceEngine::Precision> precision_map = {{"FP32", Inf
                                                                     {"I16",  InferenceEngine::Precision::I16},
                                                                     {"I32",  InferenceEngine::Precision::I32},
                                                                     {"I64",  InferenceEngine::Precision::I64},
-                                                                    {"U64",  InferenceEngine::Precision::U64},
+                                                                    {"U8",   InferenceEngine::Precision::U8},
                                                                     {"U16",  InferenceEngine::Precision::U16},
-                                                                    {"U8",   InferenceEngine::Precision::U8}};
+                                                                    {"U32",  InferenceEngine::Precision::U32},
+                                                                    {"U64",  InferenceEngine::Precision::U64}};
 
 std::map <std::string, InferenceEngine::Layout> layout_map = {{"ANY",     InferenceEngine::Layout::ANY},
                                                               {"NCHW",    InferenceEngine::Layout::NCHW},
index a6a17c4..205edde 100644 (file)
@@ -761,14 +761,16 @@ protected:
 #ifdef __clang__
 extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<float>);
 extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<double>);
-extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<int16_t>);
-extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<uint16_t>);
 extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<int8_t>);
 extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<uint8_t>);
-extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<int>);
+extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<int16_t>);
+extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<uint16_t>);
+extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<int32_t>);
+extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<uint32_t>);
 extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<long>);
 extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<long long>);
-extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<uint64_t>);
+extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<unsigned long>);
+extern template class INFERENCE_ENGINE_API_CLASS(InferenceEngine::TBlob<unsigned long long>);
 #endif  // __clang__
 
 /**
index f0a05b5..e7f3cf7 100644 (file)
@@ -34,6 +34,7 @@ public:
         I8 = 50,           /**< 8bit signed integer value */
         U16 = 60,          /**< 16bit unsigned integer value */
         I32 = 70,          /**< 32bit signed integer value */
+        U32 = 74,          /**< 32bit unsigned integer value */
         I64 = 72,          /**< 64bit signed integer value */
         U64 = 73,          /**< 64bit unsigned integer value */
         BIN = 71,          /**< 1bit integer value */
@@ -108,13 +109,14 @@ public:
                 CASE(FP32, float);
                 CASE2(FP16, int16_t, uint16_t);
                 CASE2(BF16, int16_t, uint16_t);
+                CASE(I8, int8_t);
                 CASE(I16, int16_t);
                 CASE(I32, int32_t);
                 CASE(I64, int64_t);
-                CASE(U64, uint64_t);
-                CASE(U16, uint16_t);
                 CASE(U8, uint8_t);
-                CASE(I8, int8_t);
+                CASE(U16, uint16_t);
+                CASE(U32, uint32_t);
+                CASE(U64, uint64_t);
                 CASE(BOOL, uint8_t);
                 CASE2(Q78, int16_t, uint16_t);
                 CASE2(BIN, int8_t, uint8_t);
@@ -182,10 +184,10 @@ public:
     static Precision FromStr(const std::string& str) {
         static std::unordered_map<std::string, ePrecision> names = {
 #define PRECISION_NAME(s) {#s, s}
-            PRECISION_NAME(Q78),  PRECISION_NAME(U8),    PRECISION_NAME(I8),    PRECISION_NAME(I16),
-            PRECISION_NAME(I32),  PRECISION_NAME(I64),   PRECISION_NAME(U64),    PRECISION_NAME(U16),
+            PRECISION_NAME(Q78),  PRECISION_NAME(BOOL),  PRECISION_NAME(BF16),
+            PRECISION_NAME(I8),   PRECISION_NAME(I16),   PRECISION_NAME(I32),  PRECISION_NAME(I64),
+            PRECISION_NAME(U8),   PRECISION_NAME(U16),   PRECISION_NAME(U32),  PRECISION_NAME(U64),
             PRECISION_NAME(FP32), PRECISION_NAME(FP16),  PRECISION_NAME(MIXED), PRECISION_NAME(BIN),
-            PRECISION_NAME(BOOL), PRECISION_NAME(BF16),
 #undef PRECISION_NAME
         };
         auto i = names.find(str);
@@ -263,13 +265,14 @@ protected:
             CASE(FP32);
             CASE(FP16);
             CASE(BF16);
+            CASE(I8);
             CASE(I16);
             CASE(I32);
             CASE(I64);
-            CASE(U64);
-            CASE(U16);
             CASE(U8);
-            CASE(I8);
+            CASE(U16);
+            CASE(U32);
+            CASE(U64);
             CASE(Q78);
             CASE(MIXED);
             CASE(BIN);
@@ -330,6 +333,10 @@ struct PrecisionTrait<Precision::I32> {
     using value_type = int32_t;
 };
 template <>
+struct PrecisionTrait<Precision::U32> {
+    using value_type = uint32_t;
+};
+template <>
 struct PrecisionTrait<Precision::I64> {
     using value_type = int64_t;
 };
index c685f19..e18eff5 100644 (file)
@@ -103,6 +103,7 @@ private:
             TBLOB_TOP_RESULT(I8);
             TBLOB_TOP_RESULT(U16);
             TBLOB_TOP_RESULT(I32);
+            TBLOB_TOP_RESULT(U32);
             TBLOB_TOP_RESULT(U64);
             TBLOB_TOP_RESULT(I64);
         default:
index 7f6e2a1..1ccef5a 100644 (file)
@@ -4626,6 +4626,11 @@ void Program::CreateCumSumPrimitive(cldnn::topology& topology, InferenceEngine::
                     axis = data[0];
                     break;
                 }
+                case InferenceEngine::Precision::U32: {
+                    auto data = constantBlob->buffer().as<uint32_t*>();
+                    axis = static_cast<int32_t>(data[0]);
+                    break;
+                }
                 case InferenceEngine::Precision::U64: {
                     auto data = constantBlob->buffer().as<uint64_t*>();
                     axis = static_cast<int32_t>(data[0]);
index f259da2..3db0696 100644 (file)
@@ -33,6 +33,7 @@ class GNAExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafe
         : plg(plg) {
         InferenceEngine::NetPass::ConvertPrecision(network, InferenceEngine::Precision::I64, InferenceEngine::Precision::I32);
         InferenceEngine::NetPass::ConvertPrecision(network, InferenceEngine::Precision::U64, InferenceEngine::Precision::I32);
+        InferenceEngine::NetPass::ConvertPrecision(network, InferenceEngine::Precision::U32, InferenceEngine::Precision::I32);
         plg->LoadNetwork(network);
     }
 
index ede401f..99d6f8c 100644 (file)
@@ -126,6 +126,7 @@ static inline void blob_copy_4d(Blob::Ptr src, Blob::Ptr dst) {
     switch (src->getTensorDesc().getPrecision()) {
     case Precision::FP32:
     case Precision::I32:
+    case Precision::U32:
         blob_copy_4d_t<Precision::FP32>(src, dst);
         break;
 
@@ -263,6 +264,7 @@ static inline void blob_copy_5d(Blob::Ptr src, Blob::Ptr dst) {
     switch (src->getTensorDesc().getPrecision()) {
     case Precision::FP32:
     case Precision::I32:
+    case Precision::U32:
         blob_copy_5d_t<Precision::FP32>(src, dst);
         break;
 
index 8a9481d..9d5bdb4 100644 (file)
@@ -109,12 +109,14 @@ TBlob<T, U>::~TBlob() {
 
 template class InferenceEngine::TBlob<float>;
 template class InferenceEngine::TBlob<double>;
-template class InferenceEngine::TBlob<int16_t>;
-template class InferenceEngine::TBlob<uint16_t>;
 template class InferenceEngine::TBlob<int8_t>;
 template class InferenceEngine::TBlob<uint8_t>;
-template class InferenceEngine::TBlob<int>;
+template class InferenceEngine::TBlob<int16_t>;
+template class InferenceEngine::TBlob<uint16_t>;
+template class InferenceEngine::TBlob<int32_t>;
+template class InferenceEngine::TBlob<uint32_t>;
 template class InferenceEngine::TBlob<long>;
 template class InferenceEngine::TBlob<long long>;
-template class InferenceEngine::TBlob<uint64_t>;
+template class InferenceEngine::TBlob<unsigned long>;
+template class InferenceEngine::TBlob<unsigned long long>;
 #endif  // __clang__
index cabbba0..e850101 100644 (file)
@@ -33,6 +33,8 @@ inline ::ngraph::element::Type convertPrecision(const Precision& precision) {
         return ::ngraph::element::Type(::ngraph::element::Type_t::i16);
     case Precision::I32:
         return ::ngraph::element::Type(::ngraph::element::Type_t::i32);
+    case Precision::U32:
+        return ::ngraph::element::Type(::ngraph::element::Type_t::u32);
     case Precision::I64:
         return ::ngraph::element::Type(::ngraph::element::Type_t::i64);
     case Precision::U64:
@@ -103,14 +105,16 @@ inline Precision convertPrecision(const ::ngraph::element::Type& precision) {
         return Precision(Precision::I32);
     case ::ngraph::element::Type_t::i64:
         return Precision(Precision::I64);
-    case ::ngraph::element::Type_t::u64:
-        return Precision(Precision::U64);
-    case ::ngraph::element::Type_t::u1:
-        return Precision(Precision::BIN);
     case ::ngraph::element::Type_t::u8:
         return Precision(Precision::U8);
     case ::ngraph::element::Type_t::u16:
         return Precision(Precision::U16);
+    case ::ngraph::element::Type_t::u32:
+        return Precision(Precision::U32);
+    case ::ngraph::element::Type_t::u64:
+        return Precision(Precision::U64);
+    case ::ngraph::element::Type_t::u1:
+        return Precision(Precision::BIN);
     case ::ngraph::element::Type_t::boolean:
         return Precision(Precision::BOOL);
     default:
index f474959..dfa6eba 100644 (file)
@@ -401,15 +401,18 @@ CNNLayer::Ptr NodeConverter<ngraph::op::Convert>::createLayer(const std::shared_
     case Precision::I64:
         precision_str = "I64";
         break;
-    case Precision::U64:
-        precision_str = "U64";
-        break;
     case Precision::U8:
         precision_str = "U8";
         break;
     case Precision::U16:
         precision_str = "U16";
         break;
+    case Precision::U32:
+        precision_str = "U32";
+        break;
+    case Precision::U64:
+        precision_str = "U64";
+        break;
     case Precision::BOOL:
         precision_str = "BOOL";
         break;
index bc811a4..aa02087 100644 (file)
@@ -1471,6 +1471,9 @@ details::CNNSubnet GetInternalSubnet(const CNNLayerPtr &layer) {
 void ConvertPrecision(ICNNNetwork& net, Precision from, Precision to) {
     auto compare = getPrecisionMask(from, to);
     switch (compare) {
+        case getPrecisionMask(Precision::U32, Precision::I32):
+            convertPrecisionForAll<Precision::U32, Precision::I32>(net);
+            break;
         case getPrecisionMask(Precision::U64, Precision::I32):
             convertPrecisionForAll<Precision::U64, Precision::I32>(net);
             break;
index b02c359..ab1167d 100644 (file)
@@ -647,6 +647,9 @@ std::shared_ptr<float> CNNNetworkHelper::getFloatData(const Blob::Ptr& srcBlob)
     } else if (precision == Precision::I32) {
         const auto* srcData = srcBlob->buffer().as<PrecisionTrait<Precision::I32>::value_type*>();
         std::copy(srcData, srcData + dataSize, floatPtr.get());
+    } else if (precision == Precision::U32) {
+        const auto* srcData = srcBlob->buffer().as<PrecisionTrait<Precision::U32>::value_type*>();
+        std::copy(srcData, srcData + dataSize, floatPtr.get());
     } else if (precision == Precision::I64) {
         const auto* srcData = srcBlob->buffer().as<PrecisionTrait<Precision::I64>::value_type*>();
         std::copy(srcData, srcData + dataSize, floatPtr.get());
@@ -666,6 +669,7 @@ bool CNNNetworkHelper::isBlobPrecisionSupported(const Precision precision) {
         (precision == Precision::I8) ||
         (precision == Precision::U8) ||
         (precision == Precision::I32) ||
+        (precision == Precision::U32) ||
         (precision == Precision::I64) ||
         (precision == Precision::U64);
 }
index a4779cb..159a6d7 100644 (file)
@@ -52,6 +52,7 @@ MKLDNNExecNetwork::MKLDNNExecNetwork(const InferenceEngine::ICNNNetwork &network
     // Default int64->int32 conversion is already applied in IE common module.
     NetPass::ConvertPrecision(*_clonedNetwork, Precision::I64, Precision::I32);
     NetPass::ConvertPrecision(*_clonedNetwork, Precision::U64, Precision::I32);
+    NetPass::ConvertPrecision(*_clonedNetwork, Precision::U32, Precision::I32);
     NetPass::ConvertPrecision(*_clonedNetwork, Precision::FP16, Precision::FP32);
     NetPass::ConvertPrecision(*_clonedNetwork, Precision::BOOL, Precision::U8);
     NetPass::ConvertPrecision(*_clonedNetwork, Precision::U16, Precision::I32);
index bcddeab..8b77274 100644 (file)
@@ -48,7 +48,7 @@ InferenceEngine::Blob::Ptr make_shared_blob2(Args&&... args) {
 
 /**
  * @brief      Creates Blob::Ptr with precision.
- * @ingroup    ie_dev_api_memory 
+ * @ingroup    ie_dev_api_memory
  *
  * @param[in]  desc  The TensorDesc object
  * @return     A Blob::Ptr pointer
@@ -58,7 +58,7 @@ make_blob_with_precision(const InferenceEngine::TensorDesc& desc);
 
 /**
  * @brief      Makes a blob with precision.
- * @ingroup    ie_dev_api_memory 
+ * @ingroup    ie_dev_api_memory
  *
  * @param[in]  desc  The TensorDesc object
  * @param      ptr   The pointer to a raw memory
@@ -69,7 +69,7 @@ make_blob_with_precision(const InferenceEngine::TensorDesc& desc, void* ptr);
 
 /**
  * @brief      Makes a blob with precision.
- * @ingroup    ie_dev_api_memory 
+ * @ingroup    ie_dev_api_memory
  *
  * @param[in]  desc   The description
  * @param[in]  alloc  The IAllocator object
@@ -81,7 +81,7 @@ make_blob_with_precision(const InferenceEngine::TensorDesc& desc,
 
 /**
  * @brief      Creates a plain Blob::Ptr
- * @ingroup    ie_dev_api_memory 
+ * @ingroup    ie_dev_api_memory
  *
  * @param[in]  prec  The Precision value
  * @param[in]  dims  The dims
@@ -92,7 +92,7 @@ make_plain_blob(InferenceEngine::Precision prec, const InferenceEngine::SizeVect
 
 /**
  * @brief      Creates Blob::Ptr with precision
- * @ingroup    ie_dev_api_memory 
+ * @ingroup    ie_dev_api_memory
  *
  * @param[in]  precision  The precision
  * @param      args       The arguments
@@ -109,12 +109,13 @@ InferenceEngine::Blob::Ptr make_blob_with_precision(InferenceEngine::Precision p
         USE_FACTORY(FP32);
         USE_FACTORY(FP16);
         USE_FACTORY(Q78);
-        USE_FACTORY(I16);
-        USE_FACTORY(U8);
         USE_FACTORY(I8);
-        USE_FACTORY(U16);
+        USE_FACTORY(I16);
         USE_FACTORY(I32);
         USE_FACTORY(I64);
+        USE_FACTORY(U8);
+        USE_FACTORY(U16);
+        USE_FACTORY(U32);
         USE_FACTORY(U64);
         USE_FACTORY(BIN);
         USE_FACTORY(BF16);
@@ -127,7 +128,7 @@ InferenceEngine::Blob::Ptr make_blob_with_precision(InferenceEngine::Precision p
 
 /**
  * @brief Create blob with custom precision
- * @ingroup ie_dev_api_memory 
+ * @ingroup ie_dev_api_memory
  * @tparam T - type off underlined elements
  * @tparam Args Variadic template type arguments
  * @param args Arguments
@@ -140,7 +141,7 @@ InferenceEngine::Blob::Ptr make_custom_blob(Args&&... args) {
 
 /**
  * @brief Create blob with custom precision
- * @ingroup ie_dev_api_memory 
+ * @ingroup ie_dev_api_memory
  * @tparam T A type off underlined elements
  * @param layout A blob layout
  * @param size A blob size
@@ -154,7 +155,7 @@ InferenceEngine::Blob::Ptr make_custom_blob(InferenceEngine::Layout layout, Infe
 
 /**
  * @brief Creates a TBlob<> object from a Data node
- * @ingroup ie_dev_api_memory 
+ * @ingroup ie_dev_api_memory
  * @param data A reference to a smart pointer of the Data node
  * @return Smart pointer to TBlob<> with the relevant C type to the precision of the data node
  */
@@ -162,10 +163,10 @@ INFERENCE_ENGINE_API_CPP(InferenceEngine::Blob::Ptr) CreateBlobFromData(const In
 
 /**
  * @brief Copy data from std::vector to Blob
- * @ingroup ie_dev_api_memory 
+ * @ingroup ie_dev_api_memory
  * @tparam T type of data in std::vector
  * @param outputBlob An output blob to copy to
- * @param inputVector An input std::vector to copy from 
+ * @param inputVector An input std::vector to copy from
  */
 template <typename T>
 void CopyVectorToBlob(const InferenceEngine::Blob::Ptr outputBlob, const std::vector<T>& inputVector) {
index beeb422..264f42f 100644 (file)
@@ -394,6 +394,7 @@ ModelPtr FrontEnd::runCommonPasses(ie::ICNNNetwork& network, const UnsupportedLa
         }
 
         ie::NetPass::ConvertPrecision(*originalOrConvertNetwork, ie::Precision::I64, ie::Precision::I32);
+        ie::NetPass::ConvertPrecision(*originalOrConvertNetwork, ie::Precision::U32, ie::Precision::I32);
         ie::NetPass::ConvertPrecision(*originalOrConvertNetwork, ie::Precision::U64, ie::Precision::I32);
         ie::NetPass::ConvertPrecision(*originalOrConvertNetwork, ie::Precision::BOOL, ie::Precision::I32);
 
index 28804f2..91c946c 100644 (file)
@@ -52,6 +52,8 @@ InferenceEngine::Blob::Ptr createBlob(InferenceEngine::Precision precision, Size
             return make_shared_blob<int16_t>(tensorDesc);
         case InferenceEngine::Precision::I32:
             return make_shared_blob<int32_t>(tensorDesc);
+        case InferenceEngine::Precision::U32:
+            return make_shared_blob<uint32_t>(tensorDesc);
         case InferenceEngine::Precision::I64:
             return make_shared_blob<int64_t>(tensorDesc);
         case InferenceEngine::Precision::U64:
@@ -121,6 +123,8 @@ void FillBlob(Blob::Ptr& inputBlob) {
             return FillBlobRandom<int16_t>(inputBlob);
         case InferenceEngine::Precision::I32:
             return FillBlobRandom<int32_t>(inputBlob);
+        case InferenceEngine::Precision::U32:
+            return FillBlobRandom<uint32_t>(inputBlob);
         case InferenceEngine::Precision::I64:
             return FillBlobRandom<int64_t>(inputBlob);
         case InferenceEngine::Precision::U64:
@@ -206,7 +210,9 @@ bool IsCorrectBlobCopy(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) {
         case InferenceEngine::Precision::Q78:
             return IsCorrectBlobCopy_Impl<int16_t>(srcBlob, dstBlob);
         case InferenceEngine::Precision::I32:
-            IsCorrectBlobCopy_Impl<int32_t>(srcBlob, dstBlob);
+            return IsCorrectBlobCopy_Impl<int32_t>(srcBlob, dstBlob);
+        case InferenceEngine::Precision::U32:
+            return IsCorrectBlobCopy_Impl<uint32_t>(srcBlob, dstBlob);
         case InferenceEngine::Precision::I64:
             return IsCorrectBlobCopy_Impl<int64_t >(srcBlob, dstBlob);
         case InferenceEngine::Precision::U64:
@@ -286,7 +292,7 @@ std::vector<Dims> BlobCopy_Dims = {
 };
 
 //  The 'blob_copy(4/5)_d' function is a template with the parameter-list  <InferenceEngine::Precision::ePrecision PRC>
-//  FP32 is used for cases with the following accuracy:  FP32, I32
+//  FP32 is used for cases with the following accuracy:  FP32, I32, U32
 //  FP16 is used for cases with the following accuracy:  FP16, U16, I16
 //  U8 is used for cases with the following accuracy:  U8, I8
 //  Cases with other precision are not supported
@@ -294,6 +300,11 @@ std::vector<PrecisionType> BlobCopy_PrecisionParams = {
         InferenceEngine::Precision::FP32,
         InferenceEngine::Precision::FP16,
         InferenceEngine::Precision::U8,
+        InferenceEngine::Precision::I8,
+        InferenceEngine::Precision::U16,
+        InferenceEngine::Precision::I16,
+        InferenceEngine::Precision::U32,
+        InferenceEngine::Precision::I32,
 };
 
 }  // namespace
@@ -325,17 +336,21 @@ bool IsEqualBlobCopy(Blob::Ptr& srcBlob, Blob::Ptr& dstBlob) {
     case InferenceEngine::Precision::I16:
     case InferenceEngine::Precision::Q78:
         return IsEqualBlobCopy_Impl<int16_t>(srcBlob, dstBlob);
+    case InferenceEngine::Precision::U32:
+        return IsEqualBlobCopy_Impl<uint32_t>(srcBlob, dstBlob);
     case InferenceEngine::Precision::I32:
         IsEqualBlobCopy_Impl<int32_t>(srcBlob, dstBlob);
+    case InferenceEngine::Precision::U64:
+        return IsEqualBlobCopy_Impl<uint64_t>(srcBlob, dstBlob);
     case InferenceEngine::Precision::I64:
         return IsEqualBlobCopy_Impl<int64_t>(srcBlob, dstBlob);
-    case InferenceEngine::Precision::U16:
-        return IsEqualBlobCopy_Impl<uint16_t>(srcBlob, dstBlob);
     case InferenceEngine::Precision::I8:
     case InferenceEngine::Precision::BIN:
         return IsEqualBlobCopy_Impl<int8_t>(srcBlob, dstBlob);
     case InferenceEngine::Precision::U8:
         return IsEqualBlobCopy_Impl<uint8_t>(srcBlob, dstBlob);
+    case InferenceEngine::Precision::U16:
+        return IsEqualBlobCopy_Impl<uint16_t>(srcBlob, dstBlob);
     default:
         return false;
     }
@@ -372,6 +387,10 @@ void copy3DBlobsAllBytesWithReLayoutWrapper(const Blob::Ptr& srcLayoutBlob, Blob
         return copy3DBlobsAllBytesWithReLayout<int16_t>(srcLayoutBlob, trgLayoutBlob);
     case InferenceEngine::Precision::I32:
         return copy3DBlobsAllBytesWithReLayout<int32_t>(srcLayoutBlob, trgLayoutBlob);
+    case InferenceEngine::Precision::U32:
+        return copy3DBlobsAllBytesWithReLayout<uint32_t>(srcLayoutBlob, trgLayoutBlob);
+    case InferenceEngine::Precision::U64:
+        return copy3DBlobsAllBytesWithReLayout<uint64_t>(srcLayoutBlob, trgLayoutBlob);
     case InferenceEngine::Precision::I64:
         return copy3DBlobsAllBytesWithReLayout<int64_t>(srcLayoutBlob, trgLayoutBlob);
     case InferenceEngine::Precision::U16:
index 7fa179a..5046067 100644 (file)
@@ -19,6 +19,7 @@ TEST_F(PrecisionTests, ShowsCorrectPrecisionNames) {
     ASSERT_STREQ(Precision(Precision::FP32).name(), "FP32");
     ASSERT_STREQ(Precision(Precision::I16).name(), "I16");
     ASSERT_STREQ(Precision(Precision::I32).name(), "I32");
+    ASSERT_STREQ(Precision(Precision::U32).name(), "U32");
     ASSERT_STREQ(Precision(Precision::U16).name(), "U16");
     ASSERT_STREQ(Precision(Precision::I8).name(), "I8");
     ASSERT_STREQ(Precision(Precision::Q78).name(), "Q78");
@@ -35,6 +36,7 @@ TEST_F(PrecisionTests, sizeIsCorrect) {
     ASSERT_EQ(Precision(Precision::FP16).size(), 2);
     ASSERT_EQ(Precision(Precision::FP32).size(), 4);
     ASSERT_EQ(Precision(Precision::I32).size(), 4);
+    ASSERT_EQ(Precision(Precision::U32).size(), 4);
     ASSERT_EQ(Precision(Precision::I16).size(), 2);
     ASSERT_EQ(Precision(Precision::U16).size(), 2);
     ASSERT_EQ(Precision(Precision::I8).size(), 1);
@@ -51,6 +53,7 @@ TEST_F(PrecisionTests, is_float) {
     ASSERT_FALSE(Precision(Precision::I64).is_float());
     ASSERT_FALSE(Precision(Precision::U64).is_float());
     ASSERT_FALSE(Precision(Precision::I32).is_float());
+    ASSERT_FALSE(Precision(Precision::U32).is_float());
     ASSERT_FALSE(Precision(Precision::I16).is_float());
     ASSERT_FALSE(Precision(Precision::U16).is_float());
     ASSERT_FALSE(Precision(Precision::I8).is_float());
@@ -68,6 +71,7 @@ TEST_F(PrecisionTests, constructFromSTR) {
     ASSERT_EQ(Precision(Precision::FP16), Precision::FromStr("FP16"));
     ASSERT_EQ(Precision(Precision::FP32), Precision::FromStr("FP32"));
     ASSERT_EQ(Precision(Precision::I32), Precision::FromStr("I32"));
+    ASSERT_EQ(Precision(Precision::U32), Precision::FromStr("U32"));
     ASSERT_EQ(Precision(Precision::I16), Precision::FromStr("I16"));
     ASSERT_EQ(Precision(Precision::U16), Precision::FromStr("U16"));
     ASSERT_EQ(Precision(Precision::I8), Precision::FromStr("I8"));
index 213dad5..ff6a1a6 100644 (file)
@@ -93,6 +93,7 @@ InferenceEngine::CNNNetwork LayerTransformation::transform(InferenceEngine::deta
 
     InferenceEngine::NetPass::ConvertPrecision(*implNetwork, InferenceEngine::Precision::I64, InferenceEngine::Precision::I32);
     InferenceEngine::NetPass::ConvertPrecision(*implNetwork, InferenceEngine::Precision::U64, InferenceEngine::Precision::I32);
+    InferenceEngine::NetPass::ConvertPrecision(*implNetwork, InferenceEngine::Precision::U32, InferenceEngine::Precision::I32);
     InferenceEngine::NetPass::ConvertPrecision(*implNetwork, InferenceEngine::Precision::FP16, InferenceEngine::Precision::FP32);
     InferenceEngine::NetPass::ConvertPrecision(*implNetwork, InferenceEngine::Precision::BOOL, InferenceEngine::Precision::U8);
 
@@ -115,6 +116,7 @@ InferenceEngine::CNNNetwork LayerTransformation::transform(const InferenceEngine
 
     InferenceEngine::NetPass::ConvertPrecision(*cnnNetworkImp, InferenceEngine::Precision::I64, InferenceEngine::Precision::I32);
     InferenceEngine::NetPass::ConvertPrecision(*cnnNetworkImp, InferenceEngine::Precision::U64, InferenceEngine::Precision::I32);
+    InferenceEngine::NetPass::ConvertPrecision(*cnnNetworkImp, InferenceEngine::Precision::U32, InferenceEngine::Precision::I32);
     InferenceEngine::NetPass::ConvertPrecision(*cnnNetworkImp, InferenceEngine::Precision::FP16, InferenceEngine::Precision::FP32);
     InferenceEngine::NetPass::ConvertPrecision(*cnnNetworkImp, InferenceEngine::Precision::BOOL, InferenceEngine::Precision::U8);
 
index 1732b79..1bee229 100644 (file)
@@ -170,6 +170,13 @@ IE::BlobMap RemoveLayerTests::fillConstDataDiffPrec (const std::vector<std::stri
                     }
                     break;
                 }
+                case IE::Precision::U32: {
+                    auto *buffer = blob->buffer().as<unsigned int *>();
+                    for (int i = 0; i < blob->size(); i++) {
+                        buffer[i] = i + 2;
+                    }
+                    break;
+                }
                 case IE::Precision::I64: {
                     auto *buffer = blob->buffer().as<long long int *>();
                     for (int i = 0; i < blob->size(); i++) {
index dc82ec0..c4858ed 100644 (file)
@@ -60,6 +60,7 @@ namespace
         case element::Type_t::i8: blob = MAKE_IE_TBLOB(int8_t, I8, shape, layout); break;
         case element::Type_t::u16: blob = MAKE_IE_TBLOB(uint16_t, U16, shape, layout); break;
         case element::Type_t::i32: blob = MAKE_IE_TBLOB(int32_t, I32, shape, layout); break;
+        case element::Type_t::u32: blob = MAKE_IE_TBLOB(uint32_t, U32, shape, layout); break;
         case element::Type_t::i64: blob = MAKE_IE_TBLOB(int64_t, I64, shape, layout); break;
         case element::Type_t::u64: blob = MAKE_IE_TBLOB(uint64_t, U64, shape, layout); break;
         case element::Type_t::boolean: blob = MAKE_IE_TBLOB(uint8_t, BOOL, shape, layout); break;
index 5bab1d0..683bbc6 100644 (file)
@@ -105,6 +105,9 @@ namespace
         case InferenceEngine::Precision::U16:
             return compare_blobs<uint16_t>(computed, expected, tolerance_bits);
             break;
+        case InferenceEngine::Precision::U32:
+            return compare_blobs<uint32_t>(computed, expected, tolerance_bits);
+            break;
         case InferenceEngine::Precision::U64:
             return compare_blobs<uint64_t>(computed, expected, tolerance_bits);
             break;