// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT

#include "ClConvolution2dWorkload.hpp"

#include "ClWorkloadUtils.hpp"

#include <backends/cl/ClLayerSupport.hpp>
#include <backends/cl/ClTensorHandle.hpp>
#include <backends/aclCommon/ArmComputeUtils.hpp>
#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
#include <backends/CpuTensorHandle.hpp>

#include <arm_compute/runtime/CL/functions/CLConvolutionLayer.h>

namespace armnn
{
using namespace armcomputetensorutils;

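// Builds Compute Library tensor descriptions for the input, output, weights and optional bias,
// then asks CLConvolutionLayer whether this convolution configuration is supported on the CL backend.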
arm_compute::Status ClConvolution2dWorkloadValidate(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    const Convolution2dDescriptor& descriptor,
                                                    const TensorInfo& weights,
                                                    const Optional<TensorInfo>& biases)
{
    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weights, descriptor.m_DataLayout);

    arm_compute::TensorInfo aclBiasesInfo;
    arm_compute::TensorInfo *optionalAclBiasesInfo = nullptr;

    if (descriptor.m_BiasEnabled)
    {
        BOOST_ASSERT(biases.has_value());

        aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
        optionalAclBiasesInfo = &aclBiasesInfo;
    }

    arm_compute::PadStrideInfo layerInfo = BuildArmComputePadStrideInfo(descriptor);

    return arm_compute::CLConvolutionLayer::validate(&aclInputInfo,
                                                     &aclWeightsInfo,
                                                     optionalAclBiasesInfo,
                                                     &aclOutputInfo,
                                                     layerInfo);
}

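// Sets up the Compute Library convolution function: builds the staging CLTensors for the weights
// and optional bias, configures CLConvolutionLayer and uploads the constant tensor data.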
ClConvolution2dWorkload::ClConvolution2dWorkload(const Convolution2dQueueDescriptor& descriptor,
    const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
    : BaseWorkload<Convolution2dQueueDescriptor>(descriptor, info)
    , m_ConvolutionLayer(memoryManager)
{
    // TODO: check tensor shapes match.
    const TensorInfo& weightInfo = m_Data.m_Weight->GetTensorInfo();

    m_KernelTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_KernelTensor, weightInfo, m_Data.m_Parameters.m_DataLayout);

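    // Compute Library takes the stride and padding as a PadStrideInfo object, with the padding
    // given in the order left, right, top, bottom.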
    arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
                                             m_Data.m_Parameters.m_StrideY,
                                             m_Data.m_Parameters.m_PadLeft,
                                             m_Data.m_Parameters.m_PadRight,
                                             m_Data.m_Parameters.m_PadTop,
                                             m_Data.m_Parameters.m_PadBottom,
                                             arm_compute::DimensionRoundingType::FLOOR);

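    // The bias tensor is only created when the layer has a bias; otherwise m_BiasTensor stays
    // null and a null pointer is passed to CLConvolutionLayer::configure() below.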
    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        m_BiasTensor = std::make_unique<arm_compute::CLTensor>();
        BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
    }

    m_Data.ValidateInputsOutputs("ClConvolution2dWorkload", 1, 1);

    arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
    input.info()->set_data_layout(aclDataLayout);
    output.info()->set_data_layout(aclDataLayout);

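    // Wire the input, weight and (optional) bias tensors into the Compute Library convolution
    // function, together with the padding/stride information built above.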
    m_ConvolutionLayer.configure(&input,
                                 m_KernelTensor.get(),
                                 m_BiasTensor.get(),
                                 &output,
                                 padStrideInfo);

    InitializeArmComputeClTensorData(*m_KernelTensor, m_Data.m_Weight);

    if (m_BiasTensor)
    {
        InitializeArmComputeClTensorData(*m_BiasTensor, m_Data.m_Bias);
    }

    // Force Compute Library to perform the necessary copying and reshaping, after which the
    // staging tensors that are no longer needed can be freed.
    m_ConvolutionLayer.prepare();
    FreeUnusedTensors();
}

void ClConvolution2dWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_CL("ClConvolution2dWorkload_Execute");

    m_ConvolutionLayer.run();
}

void ClConvolution2dWorkload::FreeUnusedTensors()
{
    FreeTensorIfUnused(m_KernelTensor);
    FreeTensorIfUnused(m_BiasTensor);
}

} //namespace armnn