2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // See LICENSE file in the project root for full license information.
6 #include "RefBatchNormalizationUint8Workload.hpp"
8 #include "BatchNormImpl.hpp"
9 #include "RefWorkloadUtils.hpp"
11 #include "Profiling.hpp"
17 RefBatchNormalizationUint8Workload::RefBatchNormalizationUint8Workload(
18 const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info)
19 : Uint8Workload<BatchNormalizationQueueDescriptor>(descriptor, info),
20 m_Mean(std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Mean))),
21 m_Variance(std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Variance))),
22 m_Beta(std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Beta))),
23 m_Gamma(std::make_unique<ScopedCpuTensorHandle>(*(descriptor.m_Gamma))) {}
// Runs quantized (uint8) batch normalization on the reference backend.
// Strategy: dequantize the input and all four parameter tensors to float,
// delegate to the shared float implementation (BatchNormImpl), then quantize
// the float results back into the uint8 output tensor.
void RefBatchNormalizationUint8Workload::Execute() const
ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefBatchNormalizationUint8Workload_Execute");
// Each TensorInfo carries the quantization parameters used by
// Dequantize/Quantize below.
const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]);
const TensorInfo& varInfo = GetTensorInfo(m_Variance.get());
const TensorInfo& meanInfo = GetTensorInfo(m_Mean.get());
const TensorInfo& gammaInfo = GetTensorInfo(m_Gamma.get());
const TensorInfo& betaInfo = GetTensorInfo(m_Beta.get());
const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
// Dequantize returns float buffers; the parameter tensors are read from the
// handles copied out of the descriptor in the constructor.
auto input = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo0);
auto var = Dequantize(m_Variance->GetConstTensor<uint8_t>(), varInfo);
auto mean = Dequantize(m_Mean->GetConstTensor<uint8_t>(), meanInfo);
auto gamma = Dequantize(m_Gamma->GetConstTensor<uint8_t>(), gammaInfo);
auto beta = Dequantize(m_Beta->GetConstTensor<uint8_t>(), betaInfo);
// Scratch float buffer for the normalized result, sized to the output tensor.
std::vector<float> results(outputInfo.GetNumElements());
// NOTE(review): argument order (var, mean, gamma, beta, out, in) must match
// BatchNormImpl's signature in BatchNormImpl.hpp — verify when changing.
BatchNormImpl(m_Data, var.data(), mean.data(), gamma.data(), beta.data(), results.data(), input.data());
// Re-quantize the float results into the uint8 output using outputInfo's
// quantization scale/offset.
Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);