[GNA] Fixed conversion of fp16 networks to fp32 in the case of const blobs (#2446)
author    Eugene Smirnov <eugene.smirnov@intel.com>
          Tue, 29 Sep 2020 16:44:12 +0000 (19:44 +0300)
committer GitHub <noreply@github.com>
          Tue, 29 Sep 2020 16:44:12 +0000 (19:44 +0300)
inference-engine/src/gna_plugin/frontend/weights_converter.hpp
inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/fake_quantize.cpp

inference-engine/src/gna_plugin/frontend/weights_converter.hpp
index 040f7bb..064e849 100644
@@ -50,15 +50,10 @@ inline bool convertWeights(InferenceEngine::CNNLayer* lp) {
     for (auto& dataItem : lp->outData) {
         dataItem->setPrecision(InferenceEngine::Precision::FP32);
     }
-    InferenceEngine::BlobMap newBlobs;
     for (auto& blob_pair : lp->blobs) {
-        auto blob_name = blob_pair.first;
-        auto blob_ptr = blob_pair.second;
+        auto &blob_ptr = blob_pair.second;
         if (blob_ptr->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP16) {
-            auto new_blob = make_fp32_blob(blob_ptr);
-            newBlobs[blob_name] = new_blob;
-        } else {
-            newBlobs[blob_name] = blob_ptr;
+            blob_ptr = make_fp32_blob(blob_ptr);
         }
     }
 
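The rewritten loop binds blob_ptr by reference into lp->blobs, so the FP32 blob returned by make_fp32_blob replaces the FP16 one directly in the layer's blob map; the temporary newBlobs staging map is gone. make_fp32_blob itself is not shown in this hunk; below is a minimal sketch of what such a helper is assumed to do, built only on the public Inference Engine API (make_shared_blob, PrecisionUtils::f16tof32). make_fp32_blob_sketch is an illustrative name, not the plugin's actual function:

    // A minimal sketch, not the plugin's implementation: widen every FP16
    // scalar of a const blob into a freshly allocated FP32 blob that keeps
    // the same dims and layout.
    #include <ie_blob.h>
    #include <precision_utils.h>

    inline InferenceEngine::Blob::Ptr make_fp32_blob_sketch(
            const InferenceEngine::Blob::Ptr& fp16_blob) {
        using namespace InferenceEngine;
        const auto& src_desc = fp16_blob->getTensorDesc();
        // Same dims and layout as the source blob, FP32 element type.
        TensorDesc dst_desc(Precision::FP32, src_desc.getDims(), src_desc.getLayout());
        auto fp32_blob = make_shared_blob<float>(dst_desc);
        fp32_blob->allocate();

        const auto* src = fp16_blob->cbuffer().as<const ie_fp16*>();
        auto* dst = fp32_blob->buffer().as<float*>();
        for (size_t i = 0; i < fp16_blob->size(); ++i) {
            dst[i] = PrecisionUtils::f16tof32(src[i]);  // widen each scalar
        }
        return fp32_blob;
    }

In the loop above, each FP16 entry would then be rewritten in place as blob_ptr = make_fp32_blob_sketch(blob_ptr), with no copy-back step that could miss const blobs.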
inference-engine/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/fake_quantize.cpp
index bad19b0..9e5896e 100644
@@ -13,7 +13,7 @@ using namespace LayerTestsDefinitions;
 namespace {
 
 const std::vector<InferenceEngine::Precision> netPrecisions = {
-    InferenceEngine::Precision::FP32,
+    InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16
 };
 
 using ConfigType = std::map<std::string, std::string>;
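Adding FP16 here extends the GNA FakeQuantize tests to cover the precision path fixed above. For illustration, a self-contained sketch of how a precision vector like netPrecisions typically drives a value-parameterized GoogleTest suite; PrecisionSmokeTest is a hypothetical fixture, not the real FakeQuantizeLayerTest instantiated further down this file:

    #include <gtest/gtest.h>
    #include <ie_precision.hpp>
    #include <vector>

    // Hypothetical fixture for illustration only; the real suite takes many
    // more parameters (shapes, FQ levels, device config, etc.).
    class PrecisionSmokeTest
        : public ::testing::TestWithParam<InferenceEngine::Precision> {};

    TEST_P(PrecisionSmokeTest, ReportsFloatingPoint) {
        // Both FP32 and FP16 identify as floating-point precisions.
        EXPECT_TRUE(GetParam().is_float());
    }

    INSTANTIATE_TEST_CASE_P(NetPrecisions, PrecisionSmokeTest,
                            ::testing::ValuesIn(std::vector<InferenceEngine::Precision>{
                                InferenceEngine::Precision::FP32,
                                InferenceEngine::Precision::FP16}));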