//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//
#include "NeonPooling2dBaseWorkload.hpp"
#include "backends/NeonLayerSupport.hpp"
#include "backends/NeonTensorHandle.hpp"
#include "backends/ArmComputeUtils.hpp"
#include "backends/ArmComputeTensorUtils.hpp"

namespace armnn
{
using namespace armcomputetensorutils;
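
// Validation helper: converts the armnn tensor infos and pooling descriptor to
// their Arm Compute Library equivalents and asks the NEON pooling layer whether
// it supports the configuration, without allocating or configuring anything.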
arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const Pooling2dDescriptor& descriptor)
{
    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);

    arm_compute::PoolingLayerInfo layerInfo = BuildArmComputePoolingLayerInfo(descriptor);

    return arm_compute::NEPoolingLayer::validate(&aclInputInfo, &aclOutputInfo, layerInfo);
}
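
// A minimal usage sketch (illustrative, not part of this file): backend support
// checks are assumed to call the validate function and inspect the returned
// status before creating a workload, e.g.
//
//     const arm_compute::Status aclStatus = NeonPooling2dWorkloadValidate(input, output, descriptor);
//     const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);

// Base workload shared by the typed pooling workloads: wires the armnn
// input/output tensor handles into an ACL NEPoolingLayer and configures it
// with the descriptor's pooling parameters.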
template <armnn::DataType... dataTypes>
NeonPooling2dBaseWorkload<dataTypes...>::NeonPooling2dBaseWorkload(
    const Pooling2dQueueDescriptor& descriptor, const WorkloadInfo& info, const std::string& name)
    : TypedWorkload<Pooling2dQueueDescriptor, dataTypes...>(descriptor, info)
{
    m_Data.ValidateInputsOutputs(name, 1, 1);

    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    arm_compute::PoolingLayerInfo layerInfo = BuildArmComputePoolingLayerInfo(m_Data.m_Parameters);

    m_PoolingLayer.configure(&input, &output, layerInfo);
}
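
// Explicit instantiations for the data types this workload supports:
// one covering Float16/Float32 and one for QuantisedAsymm8.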
template class NeonPooling2dBaseWorkload<DataType::Float16, DataType::Float32>;
template class NeonPooling2dBaseWorkload<DataType::QuantisedAsymm8>;

} //namespace armnn