{
switch (type)
{
+ case LayerType::Abs: return "Abs";
case LayerType::Activation: return "Activation";
case LayerType::Addition: return "Addition";
case LayerType::BatchNormalization: return "BatchNormalization";
#if defined(ARMCOMPUTENEON_ENABLED)
#include <aclCommon/ArmComputeUtils.hpp>
+#include "workloads/NeonAbsWorkload.hpp"
#include "workloads/NeonAdditionWorkload.hpp"
#include "workloads/NeonActivationWorkload.hpp"
#include "workloads/NeonBatchNormalizationWorkload.hpp"
} // anonymous namespace
+// Returns true if the Neon backend can execute an Abs layer for the given
+// input/output tensors; delegates the check to NeonAbsWorkloadValidate and,
+// on failure, fills reasonIfUnsupported with the ACL error message.
+bool NeonLayerSupport::IsAbsSupported(const TensorInfo& input,
+                                      const TensorInfo& output,
+                                      Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonAbsWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output);
+}
+
bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
class NeonLayerSupport : public LayerSupportBase
{
public:
+ bool IsAbsSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsActivationSupported(const TensorInfo& input,
const TensorInfo& output,
const ActivationDescriptor& descriptor,
return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}
+// Factory hook: builds the Neon workload that computes element-wise
+// absolute value for the given queue descriptor.
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
+                                                          const WorkloadInfo& info) const
+{
+    return std::make_unique<NeonAbsWorkload>(descriptor, info);
+}
+
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
NeonTensorHandleFactory.cpp \
NeonTimer.cpp \
NeonWorkloadFactory.cpp \
+ workloads/NeonAbsWorkload.cpp \
workloads/NeonActivationWorkload.cpp \
workloads/NeonAdditionWorkload.cpp \
workloads/NeonBatchNormalizationWorkload.cpp \
#include <backendsCommon/test/EndToEndTestImpl.hpp>
+#include <backendsCommon/test/AbsEndToEndTestImpl.hpp>
#include <backendsCommon/test/ArithmeticTestImpl.hpp>
#include <backendsCommon/test/ConcatEndToEndTestImpl.hpp>
#include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuAcc};
+// Abs
+// End-to-end test: runs the shared AbsEndToEnd scenario on the CpuAcc
+// (Neon) backend with Float32 tensors.
+BOOST_AUTO_TEST_CASE(NeonAbsEndToEndTestFloat32)
+{
+    AbsEndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+// Constant
BOOST_AUTO_TEST_CASE(ConstantUsage_Neon_Float32)
{
BOOST_TEST(ConstantUsageFloat32Test(defaultBackends));
MultiChannelTransposeConvolution2dTest<DataType::QuantisedAsymm8, DataType::Signed32>,
DataLayout::NHWC)
+// Abs
+ARMNN_AUTO_TEST_CASE(Abs2d, Abs2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Abs3d, Abs3dTest<DataType::Float32>)
+
+ARMNN_AUTO_TEST_CASE(AbsZero, AbsZeroTest<DataType::Float32>)
+
#if defined(ARMNNREF_ENABLED)
// The ARMNN_COMPARE_REF_AUTO_TEST_CASE and the ARMNN_COMPARE_REF_FIXTURE_TEST_CASE test units are not available
#
list(APPEND armnnNeonBackendWorkloads_sources
+ NeonAbsWorkload.cpp
+ NeonAbsWorkload.hpp
NeonActivationWorkload.cpp
NeonActivationWorkload.hpp
NeonAdditionWorkload.cpp
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonAbsWorkload.hpp"
+
+#include "NeonWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorHandle.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <boost/cast.hpp>
+
+namespace armnn
+{
+
+// Asks the Arm Compute Library whether NEAbsLayer supports the given
+// input/output tensor shapes and data types; used by
+// NeonLayerSupport::IsAbsSupported before a workload is created.
+arm_compute::Status NeonAbsWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInput = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    return arm_compute::NEAbsLayer::validate(&aclInput, &aclOutput);
+}
+
+// Configures an ACL NEAbsLayer over the workload's tensors.
+// Expects exactly one input and one output tensor (enforced below).
+NeonAbsWorkload::NeonAbsWorkload(const AbsQueueDescriptor& descriptor, const WorkloadInfo& info)
+    : BaseWorkload<AbsQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("NeonAbsWorkload", 1, 1);
+
+    // polymorphic_downcast: the factory only ever hands Neon workloads
+    // ACL-backed tensor handles, so the downcast is checked in debug only.
+    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    m_AbsLayer.configure(&input, &output);
+}
+
+// Runs the configured ACL function; scoped event makes the run visible
+// to the ArmNN profiler.
+void NeonAbsWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonAbsWorkload_Execute");
+    m_AbsLayer.run();
+}
+
+} // namespace armnn
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/NEON/functions/NEElementwiseUnaryLayer.h>
+
+namespace armnn
+{
+
+// Validates that ACL's NEAbsLayer can handle the given tensor infos.
+arm_compute::Status NeonAbsWorkloadValidate(const TensorInfo& input, const TensorInfo& output);
+
+// Neon (CPU) workload computing element-wise absolute value via
+// arm_compute::NEAbsLayer.
+class NeonAbsWorkload : public BaseWorkload<AbsQueueDescriptor>
+{
+public:
+    NeonAbsWorkload(const AbsQueueDescriptor& descriptor, const WorkloadInfo& info);
+    virtual void Execute() const override;
+
+private:
+    // mutable: Execute() is const but NEAbsLayer::run() is not.
+    mutable arm_compute::NEAbsLayer m_AbsLayer;
+};
+
+} // namespace armnn
//
#pragma once
+#include "NeonAbsWorkload.hpp"
#include "NeonActivationWorkload.hpp"
#include "NeonAdditionWorkload.hpp"
#include "NeonBatchNormalizationWorkload.hpp"