[LPT] bfloat enabling fix (#2819)
author    Edward Shogulin <edward.shogulin@intel.com>
          Mon, 26 Oct 2020 13:02:11 +0000 (16:02 +0300)
committer GitHub <noreply@github.com>
          Mon, 26 Oct 2020 13:02:11 +0000 (16:02 +0300)
diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp
index e7bad8e..2d0ca6e 100644
--- a/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp
+++ b/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp
@@ -54,8 +54,8 @@ MKLDNNExecNetwork::MKLDNNExecNetwork(const InferenceEngine::ICNNNetwork &network
     // we are cloning network if we have statistics and we can transform network.
     _clonedNetwork = cloneNet(network);
 
-#ifdef USE_CNNNETWORK_LPT
     if (_cfg.lpTransformsMode == Config::LPTransformsMode::On) {
+#ifdef USE_CNNNETWORK_LPT
         auto params = LayerTransformation::Params(true,  // updatePrecisions
                                                     true,  // quantizeOutputs
                                                     true,  // weightsToConst
@@ -70,6 +70,7 @@ MKLDNNExecNetwork::MKLDNNExecNetwork(const InferenceEngine::ICNNNetwork &network
                 LayerTransformation::Params(params).setPrecisionsOnActivations({ Precision::U8 }),
                 "ScaleShift"));
         transformer.transform(*_clonedNetwork);
+#endif
 
         // Check if network is INT8 or Binary.
         // BF16 transformations were disabled since CPU plug-in doesn't support mixed precision execution:
@@ -98,7 +99,6 @@ MKLDNNExecNetwork::MKLDNNExecNetwork(const InferenceEngine::ICNNNetwork &network
             bf16Transformer.convertToFloat(cnnetwork);
         }
     }
-#endif
 
     MKLDNNGraph::ApplyUnrollPasses(static_cast<ICNNNetwork&>(*_clonedNetwork));
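
The net effect of the hunks above, reduced to control flow, is sketched below. This is a minimal sketch, not the verbatim plugin source: only USE_CNNNETWORK_LPT, Config::LPTransformsMode::On, LayerTransformation::Params, transformer.transform, and bf16Transformer.convertToFloat appear in the diff; the transformer's type name and all construction details are assumed and elided.

    if (_cfg.lpTransformsMode == Config::LPTransformsMode::On) {
    #ifdef USE_CNNNETWORK_LPT
        // Legacy CNNNetwork-based LPT pipeline: still compiled only when
        // USE_CNNNETWORK_LPT is defined.
        auto params = LayerTransformation::Params(/* ... as in the hunk ... */);
        LowPrecisionTransformer transformer(/* assumed: built from params */);
        transformer.transform(*_clonedNetwork);
    #endif

        // INT8/Binary detection and the BF16-to-float fallback: before this
        // patch the block sat inside the #ifdef above, so builds compiled
        // without USE_CNNNETWORK_LPT dropped it and bfloat enabling never
        // ran. It now executes whenever lpTransformsMode is On.
        // ...
        // bf16Transformer.convertToFloat(cnnetwork);
    }

In short, the fix narrows the preprocessor guard from the whole lpTransformsMode branch down to the LPT transformation itself: the #ifdef moves inside the if, a new #endif closes it right after transformer.transform, and the old #endif that used to close the entire branch is removed.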