src/armnn/backends/NeonWorkloads/NeonPooling2dBaseWorkload.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//

#include "NeonPooling2dBaseWorkload.hpp"
#include "backends/NeonLayerSupport.hpp"
#include "backends/NeonTensorHandle.hpp"
#include "backends/ArmComputeUtils.hpp"
#include "backends/ArmComputeTensorUtils.hpp"

#include <boost/polymorphic_cast.hpp> // boost::polymorphic_downcast

namespace armnn
{
using namespace armcomputetensorutils;

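// Checks, via the Arm Compute Library's static validate entry point, whether
// NEPoolingLayer can handle the given tensor shapes and pooling parameters.
// This works purely on TensorInfo objects, without allocating tensors or
// configuring a function.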
arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo& input,
    const TensorInfo& output,
    const Pooling2dDescriptor& descriptor)
{
    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);

    arm_compute::PoolingLayerInfo layerInfo = BuildArmComputePoolingLayerInfo(descriptor);

    return arm_compute::NEPoolingLayer::validate(&aclInputInfo, &aclOutputInfo, layerInfo);
}

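// Base workload shared by the float and uint8 pooling workloads: it wires the
// ArmNN tensor handles into an ACL NEPoolingLayer at construction time, so the
// derived classes only need to run the already-configured function.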
template <armnn::DataType... dataTypes>
NeonPooling2dBaseWorkload<dataTypes...>::NeonPooling2dBaseWorkload(
    const Pooling2dQueueDescriptor& descriptor, const WorkloadInfo& info, const std::string& name)
    : TypedWorkload<Pooling2dQueueDescriptor, dataTypes...>(descriptor, info)
{
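    // Pooling takes exactly one input tensor and produces one output tensor.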
    m_Data.ValidateInputsOutputs(name, 1, 1);

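    // Unwrap the backend-specific tensor handles to get at the underlying ACL tensors.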
    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

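    // Translate the ArmNN pooling descriptor (pool type, size, stride, padding)
    // into the equivalent ACL PoolingLayerInfo.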
    arm_compute::PoolingLayerInfo layerInfo = BuildArmComputePoolingLayerInfo(m_Data.m_Parameters);

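    // configure() validates the arguments and prepares the kernel; the actual
    // computation happens when the derived workload calls run() in its Execute().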
    m_PoolingLayer.configure(&input, &output, layerInfo);
}

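// Explicitly instantiate the template for the data types the Neon backend supports.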
template class NeonPooling2dBaseWorkload<DataType::Float16, DataType::Float32>;
template class NeonPooling2dBaseWorkload<DataType::QuantisedAsymm8>;

} // namespace armnn