#include "workloads/NeonConcatWorkload.hpp"
#include "workloads/NeonMinimumWorkload.hpp"
#include "workloads/NeonMultiplicationWorkload.hpp"
+#include "workloads/NeonDivisionWorkload.hpp"
#include "workloads/NeonNormalizationFloatWorkload.hpp"
#include "workloads/NeonFullyConnectedWorkload.hpp"
#include "workloads/NeonPadWorkload.hpp"
output);
}
+bool NeonLayerSupport::IsDivisionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDivisionWorkloadValidate,
+ reasonIfUnsupported,
+ input0,
+ input1,
+ output);
+}
+
bool NeonLayerSupport::IsNormalizationSupported(const TensorInfo& input,
const TensorInfo& output,
const NormalizationDescriptor& descriptor,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsDivisionSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsNormalizationSupported(const TensorInfo& input,
const TensorInfo& output,
const NormalizationDescriptor& descriptor,
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateDivision(
const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
- return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+ return std::make_unique<NeonDivisionWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor&
descriptor,
const WorkloadInfo& info) const
{
workloads/NeonMeanWorkload.cpp \
workloads/NeonMinimumWorkload.cpp \
workloads/NeonMultiplicationWorkload.cpp \
+ workloads/NeonDivisionWorkload.cpp \
workloads/NeonNormalizationFloatWorkload.cpp \
workloads/NeonPadWorkload.cpp \
workloads/NeonPermuteWorkload.cpp \
DataType::QAsymmU8>();
}
+BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkloadTest)
+{
+ NeonCreateElementwiseWorkloadTest<NeonDivisionWorkload,
+ DivisionQueueDescriptor,
+ DivisionLayer,
+ armnn::DataType::Float32>();
+}
+
template <typename BatchNormalizationWorkloadType, typename armnn::DataType DataType>
static void NeonCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
{
ARMNN_AUTO_TEST_CASE(SubBroadcastUint8, SubtractionBroadcastUint8Test)
ARMNN_AUTO_TEST_CASE(SubBroadcast1ElementUint8, SubtractionBroadcast1ElementUint8Test)
+// Div
+ARMNN_AUTO_TEST_CASE(SimpleDivision, DivisionTest)
+ARMNN_AUTO_TEST_CASE(DivisionByZero, DivisionByZeroTest)
+ARMNN_AUTO_TEST_CASE(DivisionBroadcast1Element, DivisionBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE(DivisionBroadcast1DVector, DivisionBroadcast1DVectorTest)
+
// Mul
ARMNN_AUTO_TEST_CASE(SimpleMultiplication, MultiplicationTest)
ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1Element, MultiplicationBroadcast1ElementTest)
NeonMinimumWorkload.hpp
NeonMultiplicationWorkload.cpp
NeonMultiplicationWorkload.hpp
+ NeonDivisionWorkload.cpp
+ NeonDivisionWorkload.hpp
NeonNormalizationFloatWorkload.cpp
NeonNormalizationFloatWorkload.hpp
NeonPadWorkload.cpp
--- /dev/null
+++ b/src/backends/neon/workloads/NeonDivisionWorkload.cpp
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonDivisionWorkload.hpp"
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+
+namespace armnn
+{
+
+arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output)
+{
+ const arm_compute::TensorInfo aclInput0 = armcomputetensorutils::BuildArmComputeTensorInfo(input0);
+ const arm_compute::TensorInfo aclInput1 = armcomputetensorutils::BuildArmComputeTensorInfo(input1);
+ const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
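+ // Defer to the Compute Library validate() call to check whether this input/output tensor combination is supported.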
+ return arm_compute::NEElementwiseDivision::validate(&aclInput0,
+ &aclInput1,
+ &aclOutput);
+}
+
+NeonDivisionWorkload::NeonDivisionWorkload(const DivisionQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
+ : BaseWorkload<DivisionQueueDescriptor>(descriptor, info)
+{
+ m_Data.ValidateInputsOutputs("NeonDivisionWorkload", 2, 1);
+
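+ // Extract the underlying ACL tensors from the ArmNN tensor handles.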
+ arm_compute::ITensor& input0 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
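+ // Configure the ACL elementwise division function with the NEON input and output tensors.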
+ m_DivLayer.configure(&input0, &input1, &output);
+}
+
+void NeonDivisionWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonDivisionWorkload_Execute");
+ m_DivLayer.run();
+}
+
+} //namespace armnn
--- /dev/null
+++ b/src/backends/neon/workloads/NeonDivisionWorkload.hpp
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <neon/workloads/NeonWorkloadUtils.hpp>
+
+#include <arm_compute/runtime/NEON/functions/NEElementwiseOperations.h>
+
+namespace armnn
+{
+
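+// Asks the Compute Library whether NEON elementwise division supports the given input and output tensor infos.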
+arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output);
+
+class NeonDivisionWorkload : public BaseWorkload<DivisionQueueDescriptor>
+{
+public:
+ NeonDivisionWorkload(const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info);
+
+ virtual void Execute() const override;
+
+private:
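+ // Mutable so the ACL function can be run from the const Execute() method.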
+ mutable arm_compute::NEElementwiseDivision m_DivLayer;
+};
+
+} //namespace armnn
#include "NeonAbsWorkload.hpp"
#include "NeonActivationWorkload.hpp"
#include "NeonAdditionWorkload.hpp"
+#include "NeonDivisionWorkload.hpp"
#include "NeonArgMinMaxWorkload.hpp"
#include "NeonBatchNormalizationWorkload.hpp"
#include "NeonConstantWorkload.hpp"