IVGCVSW-1946: Remove armnn/src from the include paths
[platform/upstream/armnn.git] / src / backends / neon / workloads / NeonBatchNormalizationFloatWorkload.cpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5
6 #include "NeonBatchNormalizationFloatWorkload.hpp"
7 #include <backendsCommon/CpuTensorHandle.hpp>
8 #include <aclCommon/ArmComputeTensorUtils.hpp>
9 #include <armnn/ArmNN.hpp>
10
11 namespace armnn
12 {
13 using namespace armcomputetensorutils;
14
15
16 arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo& input,
17                                                    const TensorInfo& output,
18                                                    const TensorInfo& mean,
19                                                    const TensorInfo& var,
20                                                    const TensorInfo& beta,
21                                                    const TensorInfo& gamma,
22                                                    const BatchNormalizationDescriptor& descriptor)
23 {
24     const DataLayout dataLayout = descriptor.m_DataLayout.GetDataLayout();
25
26     const arm_compute::TensorInfo aclInputInfo =
27           armcomputetensorutils::BuildArmComputeTensorInfo(input, dataLayout);
28     const arm_compute::TensorInfo aclOutputInfo =
29           armcomputetensorutils::BuildArmComputeTensorInfo(output, dataLayout);
30     const arm_compute::TensorInfo aclMeanInfo =
31           armcomputetensorutils::BuildArmComputeTensorInfo(mean, dataLayout);
32     const arm_compute::TensorInfo aclVarInfo =
33           armcomputetensorutils::BuildArmComputeTensorInfo(var, dataLayout);
34     const arm_compute::TensorInfo aclBetaInfo =
35           armcomputetensorutils::BuildArmComputeTensorInfo(beta, dataLayout);
36     const arm_compute::TensorInfo aclGammaInfo =
37           armcomputetensorutils::BuildArmComputeTensorInfo(gamma, dataLayout);
38
39     return arm_compute::NEBatchNormalizationLayer::validate(&aclInputInfo,
40                                                             &aclOutputInfo,
41                                                             &aclMeanInfo,
42                                                             &aclVarInfo,
43                                                             &aclBetaInfo,
44                                                             &aclGammaInfo,
45                                                             descriptor.m_Eps);
46 }
47
48 NeonBatchNormalizationFloatWorkload::NeonBatchNormalizationFloatWorkload(
49     const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info)
50     : FloatWorkload<BatchNormalizationQueueDescriptor>(descriptor, info)
51 {
52     m_Data.ValidateInputsOutputs("NeonBatchNormalizationFloatWorkload", 1, 1);
53
54     arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
55     arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
56
57     arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
58     input.info()->set_data_layout(aclDataLayout);
59     output.info()->set_data_layout(aclDataLayout);
60
61     m_Mean = std::make_unique<arm_compute::Tensor>();
62     BuildArmComputeTensor(*m_Mean, m_Data.m_Mean->GetTensorInfo());
63
64     m_Variance = std::make_unique<arm_compute::Tensor>();
65     BuildArmComputeTensor(*m_Variance, m_Data.m_Variance->GetTensorInfo());
66
67     m_Gamma = std::make_unique<arm_compute::Tensor>();
68     BuildArmComputeTensor(*m_Gamma, m_Data.m_Gamma->GetTensorInfo());
69
70     m_Beta = std::make_unique<arm_compute::Tensor>();
71     BuildArmComputeTensor(*m_Beta, m_Data.m_Beta->GetTensorInfo());
72
73     m_Layer.configure(&input,
74                       &output,
75                       m_Mean.get(),
76                       m_Variance.get(),
77                       m_Beta.get(),
78                       m_Gamma.get(),
79                       m_Data.m_Parameters.m_Eps);
80
81     InitializeArmComputeTensorData(*m_Mean, m_Data.m_Mean);
82     InitializeArmComputeTensorData(*m_Variance, m_Data.m_Variance);
83     InitializeArmComputeTensorData(*m_Gamma, m_Data.m_Gamma);
84     InitializeArmComputeTensorData(*m_Beta, m_Data.m_Beta);
85
86     // Force Compute Library to perform the necessary copying and reshaping, after which
87     // delete all the input tensors that will no longer be needed
88     m_Layer.prepare();
89     FreeUnusedTensors();
90 }
91
/// Runs the pre-configured batch-normalization layer. All setup (tensor
/// wiring, parameter upload, prepare()) was done in the constructor, so this
/// only dispatches the ACL kernel under a profiling scope.
void NeonBatchNormalizationFloatWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonBatchNormalizationFloatWorkload_Execute");
    m_Layer.run();
}
97
/// Releases the parameter tensors' backing memory once the layer no longer
/// needs them (called after m_Layer.prepare() has copied/reshaped the data).
/// FreeTensorIfUnused only frees a tensor that ACL is not still referencing.
void NeonBatchNormalizationFloatWorkload::FreeUnusedTensors()
{
    FreeTensorIfUnused(m_Mean);
    FreeTensorIfUnused(m_Variance);
    FreeTensorIfUnused(m_Gamma);
    FreeTensorIfUnused(m_Beta);
}
105
106 } //namespace armnn