//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonConvolution2dWorkload.hpp"

#include <backendsCommon/CpuTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <neon/NeonLayerSupport.hpp>

#include <armnn/Types.hpp>
#include <Half.hpp>

namespace armnn
{

using namespace armcomputetensorutils;

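// Checks whether the NEON backend can execute this convolution: the ArmNN tensor infos and
// descriptor are converted to their Arm Compute Library equivalents and passed to
// NEConvolutionLayer::validate.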
arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
    const TensorInfo& output,
    const Convolution2dDescriptor& descriptor,
    const TensorInfo& weights,
    const Optional<TensorInfo>& biases)
{
    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);

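    // ACL expects a null bias pointer when the layer has no bias, so the bias tensor info is
    // only built when the descriptor enables it.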
    arm_compute::TensorInfo aclBiasesInfo;
    arm_compute::TensorInfo *optionalAclBiasesInfo = nullptr;

    if (descriptor.m_BiasEnabled)
    {
        BOOST_ASSERT(biases.has_value());

        aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
        optionalAclBiasesInfo = &aclBiasesInfo;
    }

    arm_compute::PadStrideInfo layerInfo = BuildArmComputePadStrideInfo(descriptor);

    return arm_compute::NEConvolutionLayer::validate(&aclInputInfo,
                                                     &aclWeightsInfo,
                                                     optionalAclBiasesInfo,
                                                     &aclOutputInfo,
                                                     layerInfo);
}

NeonConvolution2dWorkload::NeonConvolution2dWorkload(
    const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info,
    std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
    : BaseWorkload<Convolution2dQueueDescriptor>(descriptor, info)
{
    using arm_compute::NEDirectConvolutionLayer;

    m_Data.ValidateInputsOutputs("NeonConvolution2dWorkload", 1, 1);

    // todo: check tensor shapes match.

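    // Get the underlying ACL tensors from the ArmNN tensor handles and force them to the
    // data layout (NCHW/NHWC) requested by the descriptor.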
    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
    input.info()->set_data_layout(aclDataLayout);
    output.info()->set_data_layout(aclDataLayout);

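    // Allocate ACL tensors for the weights and (optionally) the bias; their contents are
    // copied from the ArmNN constant tensor handles once the layer has been configured.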
    m_KernelTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_KernelTensor, m_Data.m_Weight->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);

    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        m_BiasTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
    }

    arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
                                             m_Data.m_Parameters.m_StrideY,
                                             m_Data.m_Parameters.m_PadLeft,
                                             m_Data.m_Parameters.m_PadRight,
                                             m_Data.m_Parameters.m_PadTop,
                                             m_Data.m_Parameters.m_PadBottom,
                                             arm_compute::DimensionRoundingType::FLOOR);

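    // Pick the ACL function to run: use NEDirectConvolutionLayer when the heuristic reports it
    // is preferable for these weights and parameters, otherwise fall back to the
    // general-purpose NEConvolutionLayer.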
    const bool preferDirectConvolution =
        IsNeonDirectConvolutionPreferred(m_Data.m_Weight->GetTensorInfo(),
                                         m_Data.m_Parameters);

    if (preferDirectConvolution)
    {
        auto directConvolutionLayer = std::make_unique<arm_compute::NEDirectConvolutionLayer>(memoryManager);
        directConvolutionLayer->configure(&input,
                                          m_KernelTensor.get(),
                                          m_BiasTensor.get(),
                                          &output,
                                          padStrideInfo);
        m_ConvolutionLayer.reset(directConvolutionLayer.release());
    }
    else
    {
        auto convolutionLayer = std::make_unique<arm_compute::NEConvolutionLayer>(memoryManager);
        convolutionLayer->configure(&input,
                                    m_KernelTensor.get(),
                                    m_BiasTensor.get(),
                                    &output,
                                    padStrideInfo);
        m_ConvolutionLayer.reset(convolutionLayer.release());
    }
    BOOST_ASSERT(m_ConvolutionLayer);

    InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);

    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        InitializeArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias);
    }

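    // prepare() lets ACL perform its one-off setup ahead of the first run; the local weight and
    // bias tensors can then be released if ACL no longer references them.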
    m_ConvolutionLayer->prepare();
    FreeUnusedTensors();
}

void NeonConvolution2dWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonConvolution2dWorkload_Execute");
    m_ConvolutionLayer->run();
}

void NeonConvolution2dWorkload::FreeUnusedTensors()
{
    FreeTensorIfUnused(m_KernelTensor);
    FreeTensorIfUnused(m_BiasTensor);
}

} //namespace armnn