IVGCVSW-3212 Refactor RefBatchNormalizationWorkload
author     Matteo Martincigh <matteo.martincigh@arm.com>
           Thu, 20 Jun 2019 11:46:43 +0000 (12:46 +0100)
committer  Matteo Martincigh <matteo.martincigh@arm.com>
           Thu, 20 Jun 2019 11:51:17 +0000 (12:51 +0100)
 * Refactor the reference batch normalization workload to avoid unnecessary function
   calls and to improve readability

Change-Id: I49d78dcac7bad36f57bd1eb196c12dbad01cc893
Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
src/backends/reference/workloads/RefBatchNormalizationWorkload.cpp

index b43b104..54e7d0d 100644
@@ -26,20 +26,20 @@ void RefBatchNormalizationWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefBatchNormalizationWorkload_Execute");
 
-    std::unique_ptr<Decoder<float>> meanDecoder = MakeDecoder<float>(GetTensorInfo(m_Mean.get()),
-                                                                     m_Mean.get()->Map(true));
-    std::unique_ptr<Decoder<float>> varianceDecoder = MakeDecoder<float>(GetTensorInfo(m_Variance.get()),
-                                                                         m_Variance.get()->Map(true));
-    std::unique_ptr<Decoder<float>> gammaDecoder = MakeDecoder<float>(GetTensorInfo(m_Gamma.get()),
-                                                                      m_Gamma.get()->Map(true));
-    std::unique_ptr<Decoder<float>> betaDecoder = MakeDecoder<float>(GetTensorInfo(m_Beta.get()),
-                                                                     m_Beta.get()->Map(true));
-    std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(m_Data.m_Inputs[0]),
-                                                                      m_Data.m_Inputs[0]->Map());
-    std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(m_Data.m_Outputs[0]),
-                                                                       m_Data.m_Outputs[0]->Map());
+    std::unique_ptr<Decoder<float>> meanDecoder     = MakeDecoder<float>(m_Mean->GetTensorInfo(),
+                                                                         m_Mean->Map(true));
+    std::unique_ptr<Decoder<float>> varianceDecoder = MakeDecoder<float>(m_Variance->GetTensorInfo(),
+                                                                         m_Variance->Map(true));
+    std::unique_ptr<Decoder<float>> gammaDecoder    = MakeDecoder<float>(m_Gamma->GetTensorInfo(),
+                                                                         m_Gamma->Map(true));
+    std::unique_ptr<Decoder<float>> betaDecoder     = MakeDecoder<float>(m_Beta->GetTensorInfo(),
+                                                                         m_Beta->Map(true));
+    std::unique_ptr<Decoder<float>> inputDecoder    = MakeDecoder<float>(GetTensorInfo(m_Data.m_Inputs[0]),
+                                                                         m_Data.m_Inputs[0]->Map());
+    std::unique_ptr<Encoder<float>> outputEncoder   = MakeEncoder<float>(GetTensorInfo(m_Data.m_Outputs[0]),
+                                                                         m_Data.m_Outputs[0]->Map());
 
     BatchNormImpl(m_Data, *meanDecoder, *varianceDecoder, *betaDecoder, *gammaDecoder, *inputDecoder, *outputEncoder);
 }
 
-} //namespace armnn
+} // namespace armnn
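
For reference, below is a minimal standalone sketch of the computation that BatchNormImpl performs on the decoded tensors, assuming the standard inference-time batch normalization formula y = gamma * (x - mean) / sqrt(variance + eps) + beta over NCHW data. The function name, parameter names, layout and epsilon default are illustrative assumptions; the actual workload iterates through the Decoder/Encoder abstractions shown in the diff rather than raw vectors.

#include <cmath>
#include <cstddef>
#include <vector>

// Illustrative per-channel batch normalization (inference only), NCHW layout.
// channelSize = height * width. Not the Arm NN implementation; a sketch of the math.
std::vector<float> BatchNormSketch(const std::vector<float>& input,
                                   const std::vector<float>& mean,
                                   const std::vector<float>& variance,
                                   const std::vector<float>& gamma,
                                   const std::vector<float>& beta,
                                   std::size_t numBatches,
                                   std::size_t numChannels,
                                   std::size_t channelSize,
                                   float eps = 1e-5f)
{
    std::vector<float> output(input.size());
    for (std::size_t n = 0; n < numBatches; ++n)
    {
        for (std::size_t c = 0; c < numChannels; ++c)
        {
            // The per-channel scale is computed once and reused for every
            // element in the channel, avoiding a sqrt per element.
            const float invStdDev = 1.0f / std::sqrt(variance[c] + eps);
            const std::size_t base = (n * numChannels + c) * channelSize;
            for (std::size_t i = 0; i < channelSize; ++i)
            {
                output[base + i] = gamma[c] * (input[base + i] - mean[c]) * invStdDev + beta[c];
            }
        }
    }
    return output;
}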