Release 18.05.02
[platform/upstream/armnn.git] src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionHelper.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//

#pragma once

#include <armnn/TypesUtils.hpp>
#include "backends/ClLayerSupport.hpp"
#include "backends/ArmComputeTensorUtils.hpp"
#include "backends/ClTensorHandle.hpp"

// Headers for symbols used directly below (CLDepthwiseConvolutionLayer*, BOOST_ASSERT,
// std::make_unique, std::string).
#include <arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h>

#include <boost/assert.hpp>

#include <memory>
#include <string>
namespace armnn
{

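// Shared initialisation for the CL depthwise convolution workloads (the Float32 and
// Uint8 variants both funnel through this helper). The WorkloadType is expected to
// provide:
//   - KernelDataType / BiasDataType typedefs giving the weight and bias element types,
//   - GetData() returning the depthwise convolution queue descriptor,
//   - m_KernelTensor and m_BiasTensor members of type arm_compute::CLTensor,
//   - m_pDepthwiseConvolutionLayer, a std::unique_ptr that will own the configured
//     ACL layer function.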
template <typename WorkloadType>
void InitClDepthwiseConvolutionWorkload(WorkloadType& workload)
{
    // Element types of the constant weight (kernel) and bias tensors.
    using T = typename WorkloadType::KernelDataType;
    using B = typename WorkloadType::BiasDataType;

    // Local aliases for the workload's queue descriptor and ACL tensor members.
    auto& m_Data = workload.GetData();
    auto& m_KernelTensor = workload.m_KernelTensor;
    auto& m_BiasTensor = workload.m_BiasTensor;
    auto& m_pDepthwiseConvolutionLayer = workload.m_pDepthwiseConvolutionLayer;

    auto& weightInfo = m_Data.m_Weight->GetTensorInfo();

    // Bail out early if the CL backend cannot handle this descriptor/weight combination.
    std::string reasonIfUnsupported;
    if (!IsClDepthwiseConvolution2dDescParamsSupported(&reasonIfUnsupported, m_Data.m_Parameters, weightInfo))
    {
        throw UnimplementedException(reasonIfUnsupported);
    }

    // Create the ACL tensor that will hold the constant kernel weights.
    armcomputetensorutils::BuildArmComputeTensor(m_KernelTensor, weightInfo);

    // The bias tensor is only built and passed to ACL when the descriptor enables it.
    arm_compute::CLTensor* optionalBias = nullptr;
    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        armcomputetensorutils::BuildArmComputeTensor(m_BiasTensor, m_Data.m_Bias->GetTensorInfo());
        optionalBias = &m_BiasTensor;
    }

    // Translate the ArmNN stride/padding parameters into ACL's PadStrideInfo.
    arm_compute::PadStrideInfo padStrideInfo(m_Data.m_Parameters.m_StrideX,
                                             m_Data.m_Parameters.m_StrideY,
                                             m_Data.m_Parameters.m_PadLeft,
                                             m_Data.m_Parameters.m_PadRight,
                                             m_Data.m_Parameters.m_PadTop,
                                             m_Data.m_Parameters.m_PadBottom,
                                             arm_compute::DimensionRoundingType::FLOOR);

    // Build a descriptive name (e.g. "ClDepthwiseConvolutionFloat32Workload") and check
    // that exactly one input and one output tensor were supplied.
    std::string name = std::string("ClDepthwiseConvolution") + GetDataTypeName(GetDataType<T>()) + "Workload";
    m_Data.ValidateInputsOutputs(name, 1, 1);

    arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    // Check for optimisation opportunities: use ACL's specialised
    // CLDepthwiseConvolutionLayer3x3 when the kernel's spatial dimensions are 3x3.
    bool use3x3Optimisation = (weightInfo.GetShape()[3] == 3) && (weightInfo.GetShape()[2] == 3);
    if (use3x3Optimisation)
    {
        m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::CLDepthwiseConvolutionLayer3x3>();
        static_cast<arm_compute::CLDepthwiseConvolutionLayer3x3*>(m_pDepthwiseConvolutionLayer.get())->configure(
            &input,
            &m_KernelTensor,
            optionalBias,
            &output,
            padStrideInfo);
    }
    else
    {
        // Fall back to the generic ACL depthwise convolution for all other kernel sizes.
        m_pDepthwiseConvolutionLayer = std::make_unique<arm_compute::CLDepthwiseConvolutionLayer>();
        static_cast<arm_compute::CLDepthwiseConvolutionLayer*>(m_pDepthwiseConvolutionLayer.get())->configure(
            &input,
            &m_KernelTensor,
            optionalBias,
            &output,
            padStrideInfo);
    }

    BOOST_ASSERT(m_pDepthwiseConvolutionLayer);

    // Copy the constant weight (and, if present, bias) data into the ACL CL tensors.
    InitialiseArmComputeClTensorData(m_KernelTensor, m_Data.m_Weight->template GetConstTensor<T>());

    if (optionalBias)
    {
        InitialiseArmComputeClTensorData(*optionalBias, m_Data.m_Bias->template GetConstTensor<B>());
    }
}

} //namespace armnn
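
// ---------------------------------------------------------------------------
// Illustrative sketch only (not part of the upstream header): the rough shape of a
// workload type that satisfies the interface used above. The struct name and GetData()
// plumbing are assumptions for illustration; the typedefs and member names are exactly
// the ones InitClDepthwiseConvolutionWorkload accesses.
//
//     struct ClDepthwiseConvolutionFloat32WorkloadSketch
//     {
//         using KernelDataType = float;
//         using BiasDataType   = float;
//
//         const DepthwiseConvolution2dQueueDescriptor& GetData() const { return m_Data; }
//
//         DepthwiseConvolution2dQueueDescriptor    m_Data;
//         arm_compute::CLTensor                    m_KernelTensor;
//         arm_compute::CLTensor                    m_BiasTensor;
//         std::unique_ptr<arm_compute::IFunction>  m_pDepthwiseConvolutionLayer;
//     };
//
//     // Typically called from the workload's constructor; the configured layer is then
//     // run during execution via m_pDepthwiseConvolutionLayer->run():
//     //     InitClDepthwiseConvolutionWorkload(*this);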