* Renamed NeonResizeBilinearWorkload.* to NeonResizeWorkload.* and added support for ResizeNearestNeighbor
* Added CreateWorkload and LayerTests for the Neon backend
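
For reference, a minimal usage sketch (illustrative values, not part of this change)
showing how a caller selects the resize method through the unified ResizeDescriptor:

    armnn::ResizeDescriptor resizeDescriptor;
    resizeDescriptor.m_Method       = armnn::ResizeMethod::NearestNeighbor;
    resizeDescriptor.m_DataLayout   = armnn::DataLayout::NHWC;
    resizeDescriptor.m_TargetHeight = 2; // illustrative target size
    resizeDescriptor.m_TargetWidth  = 2;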
Signed-off-by: Ellen Norris-Thompson <ellen.norris-thompson@arm.com>
Change-Id: I72f5340608a0928f8b32a41d1915ee2c35849f18
#include "workloads/NeonPooling2dWorkload.hpp"
#include "workloads/NeonPreluWorkload.hpp"
#include "workloads/NeonQuantizeWorkload.hpp"
-#include "workloads/NeonResizeBilinearWorkload.hpp"
+#include "workloads/NeonResizeWorkload.hpp"
#include "workloads/NeonSoftmaxBaseWorkload.hpp"
#include "workloads/NeonSpaceToDepthWorkload.hpp"
#include "workloads/NeonSplitterWorkload.hpp"
const ResizeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- if (descriptor.m_Method == ResizeMethod::Bilinear)
- {
- FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeBilinearWorkloadValidate,
- reasonIfUnsupported,
- input,
- output);
- }
-
- return false;
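+ // Validation is now method-agnostic: the descriptor carries the resize method.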
+ FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeWorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ output,
+ descriptor);
}
bool NeonLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- FORWARD_WORKLOAD_VALIDATE_FUNC(NeonResizeBilinearWorkloadValidate,
- reasonIfUnsupported,
- input,
- output);
+ ResizeDescriptor descriptor;
+ descriptor.m_Method = ResizeMethod::Bilinear;
+ descriptor.m_DataLayout = DataLayout::NCHW;
+
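+ // The deprecated overload carries no layout information, so NCHW is
+ // assumed and the output shape is read as [N, C, H, W].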
+ const TensorShape& outputShape = output.GetShape();
+ descriptor.m_TargetHeight = outputShape[2];
+ descriptor.m_TargetWidth = outputShape[3];
+
+ return IsResizeSupported(input, output, descriptor, reasonIfUnsupported);
}
bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- if (descriptor.m_Parameters.m_Method == ResizeMethod::Bilinear)
- {
- ResizeBilinearQueueDescriptor resizeBilinearDescriptor;
- resizeBilinearDescriptor.m_Inputs = descriptor.m_Inputs;
- resizeBilinearDescriptor.m_Outputs = descriptor.m_Outputs;
-
- resizeBilinearDescriptor.m_Parameters.m_DataLayout = descriptor.m_Parameters.m_DataLayout;
- resizeBilinearDescriptor.m_Parameters.m_TargetWidth = descriptor.m_Parameters.m_TargetWidth;
- resizeBilinearDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
-
- return std::make_unique<NeonResizeBilinearWorkload>(resizeBilinearDescriptor, info);
- }
-
- return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+ return std::make_unique<NeonResizeWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateResizeBilinear(
const ResizeBilinearQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return std::make_unique<NeonResizeBilinearWorkload>(descriptor, info);
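+ // Repackage the deprecated ResizeBilinearQueueDescriptor as a
+ // ResizeQueueDescriptor and forward to the unified CreateResize.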
+ ResizeQueueDescriptor resizeDescriptor;
+ resizeDescriptor.m_Inputs = descriptor.m_Inputs;
+ resizeDescriptor.m_Outputs = descriptor.m_Outputs;
+
+ resizeDescriptor.m_Parameters.m_DataLayout = descriptor.m_Parameters.m_DataLayout;
+ resizeDescriptor.m_Parameters.m_TargetWidth = descriptor.m_Parameters.m_TargetWidth;
+ resizeDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
+
+ return CreateResize(resizeDescriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFakeQuantization(
workloads/NeonPreluWorkload.cpp \
workloads/NeonQuantizeWorkload.cpp \
workloads/NeonReshapeWorkload.cpp \
- workloads/NeonResizeBilinearWorkload.cpp \
+ workloads/NeonResizeWorkload.cpp \
workloads/NeonSoftmaxBaseWorkload.cpp \
workloads/NeonSoftmaxFloatWorkload.cpp \
workloads/NeonSoftmaxUint8Workload.cpp \
namespace
{
+boost::test_tools::predicate_result CompareIAclTensorHandleShape(IAclTensorHandle* tensorHandle,
+ std::initializer_list<unsigned int> expectedDimensions)
+{
+ return CompareTensorHandleShape<IAclTensorHandle>(tensorHandle, expectedDimensions);
+}
+
bool TestNeonTensorHandleInfo(armnn::IAclTensorHandle* handle, const armnn::TensorInfo& expectedInfo)
{
using namespace armnn::armcomputetensorutils;
NeonCreateReshapeWorkloadTest<DataType::QuantisedAsymm8>();
}
+template <typename ResizeWorkloadType, armnn::DataType DataType>
+static void NeonCreateResizeWorkloadTest(DataLayout dataLayout)
+{
+ Graph graph;
+ NeonWorkloadFactory factory =
+ NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
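+
+ // The shared helper builds a graph with a single resize layer that downscales
+ // a 4x4 spatial input to 2x2 (batch 2, 3 channels); the expected handle
+ // shapes below depend on the data layout.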
+ auto workload = CreateResizeBilinearWorkloadTest<ResizeWorkloadType, DataType>(factory, graph, dataLayout);
+
+ auto queueDescriptor = workload->GetData();
+
+ auto inputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
+
+ switch (dataLayout)
+ {
+ case DataLayout::NHWC:
+ BOOST_TEST(CompareIAclTensorHandleShape(inputHandle, { 2, 4, 4, 3 }));
+ BOOST_TEST(CompareIAclTensorHandleShape(outputHandle, { 2, 2, 2, 3 }));
+ break;
+ case DataLayout::NCHW:
+ default:
+ BOOST_TEST(CompareIAclTensorHandleShape(inputHandle, { 2, 3, 4, 4 }));
+ BOOST_TEST(CompareIAclTensorHandleShape(outputHandle, { 2, 3, 2, 2 }));
+ }
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeFloat32NchwWorkload)
+{
+ NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeUint8NchwWorkload)
+{
+ NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeFloat32NhwcWorkload)
+{
+ NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeUint8NhwcWorkload)
+{
+ NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
+}
+
template <typename SoftmaxWorkloadType, typename armnn::DataType DataType>
static void NeonCreateSoftmaxWorkloadTest()
{
ResizeBilinearMagTest<armnn::DataType::QuantisedAsymm8>,
armnn::DataLayout::NHWC)
+// Resize NearestNeighbor - NCHW
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighbor,
+ SimpleResizeNearestNeighborTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNop,
+ ResizeNearestNeighborNopTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMin,
+ ResizeNearestNeighborSqMinTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMin,
+ ResizeNearestNeighborMinTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag,
+ ResizeNearestNeighborMagTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
+
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8,
+ SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8,
+ ResizeNearestNeighborNopTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8,
+ ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8,
+ ResizeNearestNeighborMinTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
+ ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
+
+// Resize NearestNeighbor - NHWC
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopNhwc,
+ ResizeNearestNeighborNopTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborNhwc,
+ SimpleResizeNearestNeighborTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinNhwc,
+ ResizeNearestNeighborSqMinTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinNhwc,
+ ResizeNearestNeighborMinTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc,
+ ResizeNearestNeighborMagTest<armnn::DataType::Float32>,
+ armnn::DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
+
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc,
+ ResizeNearestNeighborNopTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc,
+ SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc,
+ ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc,
+ ResizeNearestNeighborMinTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
+ ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>,
+ armnn::DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
+
// Quantize
ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
ARMNN_AUTO_TEST_CASE(QuantizeClampUint8, QuantizeClampUint8Test)
NeonQuantizeWorkload.hpp
NeonReshapeWorkload.cpp
NeonReshapeWorkload.hpp
- NeonResizeBilinearWorkload.cpp
- NeonResizeBilinearWorkload.hpp
+ NeonResizeWorkload.cpp
+ NeonResizeWorkload.hpp
NeonSoftmaxBaseWorkload.cpp
NeonSoftmaxBaseWorkload.hpp
NeonSoftmaxFloatWorkload.cpp
+++ /dev/null
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonResizeBilinearWorkload.hpp"
-
-#include <aclCommon/ArmComputeUtils.hpp>
-#include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <backendsCommon/CpuTensorHandle.hpp>
-#include <neon/NeonTensorHandle.hpp>
-#include <neon/NeonLayerSupport.hpp>
-
-using namespace armnn::armcomputetensorutils;
-
-namespace armnn
-{
-
-arm_compute::Status NeonResizeBilinearWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
-{
- const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
- const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
-
- return arm_compute::NEScale::validate(&aclInputInfo,
- &aclOutputInfo,
- arm_compute::InterpolationPolicy::BILINEAR,
- arm_compute::BorderMode::REPLICATE,
- arm_compute::PixelValue(0.f),
- arm_compute::SamplingPolicy::TOP_LEFT);
-}
-
-NeonResizeBilinearWorkload::NeonResizeBilinearWorkload(const ResizeBilinearQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : BaseWorkload<ResizeBilinearQueueDescriptor>(descriptor, info)
-{
- m_Data.ValidateInputsOutputs("NeonResizeBilinearWorkload", 1, 1);
-
- arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
- arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
- input.info()->set_data_layout(aclDataLayout);
- output.info()->set_data_layout(aclDataLayout);
-
- m_ResizeBilinearLayer.configure(&input,
- &output,
- arm_compute::InterpolationPolicy::BILINEAR,
- arm_compute::BorderMode::REPLICATE,
- arm_compute::PixelValue(0.f),
- arm_compute::SamplingPolicy::TOP_LEFT);
-};
-
-void NeonResizeBilinearWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonResizeBilinearWorkload_Execute");
- m_ResizeBilinearLayer.run();
-}
-
-} //namespace armnn
+++ /dev/null
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <backendsCommon/Workload.hpp>
-
-#include <neon/workloads/NeonWorkloadUtils.hpp>
-
-#include <arm_compute/runtime/NEON/functions/NEScale.h>
-
-namespace armnn
-{
-
-arm_compute::Status NeonResizeBilinearWorkloadValidate(const TensorInfo& input, const TensorInfo& output);
-
-class NeonResizeBilinearWorkload : public BaseWorkload<ResizeBilinearQueueDescriptor>
-{
-public:
- NeonResizeBilinearWorkload(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info);
- void Execute() const override;
-
-private:
- mutable arm_compute::NEScale m_ResizeBilinearLayer;
-};
-
-} //namespace armnn
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonResizeWorkload.hpp"
+
+#include "NeonWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeUtils.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <neon/NeonTensorHandle.hpp>
+
+using namespace armnn::armcomputetensorutils;
+
+namespace armnn
+{
+
+arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const ResizeDescriptor& descriptor)
+{
+ arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
+ arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+
+ arm_compute::DataLayout aclDataLayout = ConvertDataLayout(descriptor.m_DataLayout);
+ aclInputInfo.set_data_layout(aclDataLayout);
+ aclOutputInfo.set_data_layout(aclDataLayout);
+
+ arm_compute::InterpolationPolicy aclInterpolationPolicy =
+ ConvertResizeMethodToAclInterpolationPolicy(descriptor.m_Method);
+
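+ // NEScale implements both interpolation policies, so a single workload
+ // covers Bilinear and NearestNeighbor.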
+ return arm_compute::NEScale::validate(&aclInputInfo,
+ &aclOutputInfo,
+ aclInterpolationPolicy,
+ arm_compute::BorderMode::REPLICATE,
+ arm_compute::PixelValue(0.f),
+ arm_compute::SamplingPolicy::TOP_LEFT);
+}
+
+NeonResizeWorkload::NeonResizeWorkload(const ResizeQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
+ : BaseWorkload<ResizeQueueDescriptor>(descriptor, info)
+{
+ m_Data.ValidateInputsOutputs("NeonResizeWorkload", 1, 1);
+
+ arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+ arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+ input.info()->set_data_layout(aclDataLayout);
+ output.info()->set_data_layout(aclDataLayout);
+
+ arm_compute::InterpolationPolicy aclInterpolationPolicy =
+ ConvertResizeMethodToAclInterpolationPolicy(descriptor.m_Parameters.m_Method);
+
+ m_ResizeLayer.configure(&input,
+ &output,
+ aclInterpolationPolicy,
+ arm_compute::BorderMode::REPLICATE,
+ arm_compute::PixelValue(0.f),
+ arm_compute::SamplingPolicy::TOP_LEFT);
+}
+
+void NeonResizeWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonResizeWorkload_Execute");
+ m_ResizeLayer.run();
+}
+
+} //namespace armnn
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/runtime/NEON/functions/NEScale.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonResizeWorkloadValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const ResizeDescriptor& descriptor);
+
+class NeonResizeWorkload : public BaseWorkload<ResizeQueueDescriptor>
+{
+public:
+ NeonResizeWorkload(const ResizeQueueDescriptor& descriptor, const WorkloadInfo& info);
+ void Execute() const override;
+
+private:
+ mutable arm_compute::NEScale m_ResizeLayer;
+};
+
+} //namespace armnn
#include "NeonPreluWorkload.hpp"
#include "NeonQuantizeWorkload.hpp"
#include "NeonReshapeWorkload.hpp"
-#include "NeonResizeBilinearWorkload.hpp"
+#include "NeonResizeWorkload.hpp"
#include "NeonSoftmaxFloatWorkload.hpp"
#include "NeonSoftmaxUint8Workload.hpp"
#include "NeonSpaceToDepthWorkload.hpp"