#include "workloads/NeonBatchNormalizationWorkload.hpp"
#include "workloads/NeonConvolution2dWorkload.hpp"
#include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
+#include "workloads/NeonDequantizeWorkload.hpp"
#include "workloads/NeonGreaterWorkload.hpp"
#include "workloads/NeonL2NormalizationFloatWorkload.hpp"
#include "workloads/NeonMaximumWorkload.hpp"
biases);
}
+// Reports whether the Neon backend can execute a Dequantize layer for the
+// given input/output tensor infos. Delegates to the ACL validate function
+// via FORWARD_WORKLOAD_VALIDATE_FUNC, which also fills reasonIfUnsupported
+// on failure.
+bool NeonLayerSupport::IsDequantizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDequantizeWorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ output);
+}
+
bool NeonLayerSupport::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsDequantizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
const TensorInfo& output,
const DepthwiseConvolution2dDescriptor& descriptor,
return std::make_unique<NeonDepthwiseConvolutionWorkload>(descriptor, info);
}
+// Factory hook: builds the Neon Dequantize workload for the given queue
+// descriptor. Invoked by the runtime when a Dequantize layer is assigned
+// to this backend.
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDequantize(const DequantizeQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::make_unique<NeonDequantizeWorkload>(descriptor, info);
+}
+
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDetectionPostProcess(
const armnn::DetectionPostProcessQueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const
{
std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
std::unique_ptr<IWorkload> CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
workloads/NeonConvertFp32ToFp16Workload.cpp \
workloads/NeonConvolution2dWorkload.cpp \
workloads/NeonDepthwiseConvolutionWorkload.cpp \
+ workloads/NeonDequantizeWorkload.cpp \
workloads/NeonFloorFloatWorkload.cpp \
workloads/NeonFullyConnectedWorkload.cpp \
workloads/NeonGreaterWorkload.cpp \
weightsInfo3x3, biasesInfo));
}
+// Dequantize
+ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8, DequantizeSimpleUint8Test)
+ARMNN_AUTO_TEST_CASE(DequantizeOffsetUint8, DequantizeOffsetUint8Test)
+
// Pooling
ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4, SimpleMaxPooling2dSize3x3Stride2x4Test, true)
ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize3x3Stride2x4Uint8, SimpleMaxPooling2dSize3x3Stride2x4Uint8Test, true)
NeonConvolution2dWorkload.hpp
NeonDepthwiseConvolutionWorkload.cpp
NeonDepthwiseConvolutionWorkload.hpp
+ NeonDequantizeWorkload.cpp
+ NeonDequantizeWorkload.hpp
NeonFloorFloatWorkload.cpp
NeonFloorFloatWorkload.hpp
NeonFullyConnectedWorkload.cpp
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonDequantizeWorkload.hpp"
+
+#include "NeonWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <neon/NeonTensorHandle.hpp>
+
+namespace armnn
+{
+
+using namespace armcomputetensorutils;
+
+// Maps the ArmNN tensor infos onto ACL tensor infos and asks ACL whether
+// NEDequantizationLayer supports this input/output combination. Returns the
+// ACL status (OK or an error with a description).
+arm_compute::Status NeonDequantizeWorkloadValidate(const TensorInfo& input,
+ const TensorInfo& output)
+{
+ const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
+ const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
+
+ return arm_compute::NEDequantizationLayer::validate(&aclInput, &aclOutput);
+}
+
+// Constructor: wires one quantized input tensor and one output tensor into
+// an ACL NEDequantizationLayer, then prepares the function eagerly so that
+// Execute() only has to call run().
+NeonDequantizeWorkload::NeonDequantizeWorkload(const DequantizeQueueDescriptor& descriptor, const WorkloadInfo& info)
+ : BaseWorkload<DequantizeQueueDescriptor>(descriptor, info)
+{
+ // Dequantize consumes exactly one input and produces exactly one output.
+ m_Data.ValidateInputsOutputs("NeonDequantizeWorkload", 1, 1);
+
+ arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+ // std::make_unique instead of reset(new ...): no raw new, exception-safe.
+ m_Layer = std::make_unique<arm_compute::NEDequantizationLayer>();
+ m_Layer->configure(&input, &output);
+ m_Layer->prepare();
+}
+
+// Runs the pre-configured ACL dequantization function. The layer was already
+// configured and prepared in the constructor, so this is just the kernel run.
+void NeonDequantizeWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonDequantizeWorkload_Execute");
+ m_Layer->run();
+}
+
+} //namespace armnn
+
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/runtime/NEON/NEFunctions.h>
+
+#include <functional>
+
+namespace armnn
+{
+
+arm_compute::Status NeonDequantizeWorkloadValidate(const TensorInfo& input,
+ const TensorInfo& output);
+
+// Neon workload that dequantizes one input tensor into one output tensor by
+// wrapping arm_compute::NEDequantizationLayer.
+class NeonDequantizeWorkload : public BaseWorkload<DequantizeQueueDescriptor>
+{
+public:
+ NeonDequantizeWorkload(const DequantizeQueueDescriptor& descriptor, const WorkloadInfo& info);
+
+ void Execute() const override;
+
+private:
+ // 'mutable' dropped: std::unique_ptr::operator-> is const-qualified, so the
+ // const Execute() can still invoke non-const members through m_Layer.
+ std::unique_ptr<arm_compute::NEDequantizationLayer> m_Layer;
+};
+
+} //namespace armnn
#include "NeonConvertFp32ToFp16Workload.hpp"
#include "NeonConvolution2dWorkload.hpp"
#include "NeonDepthwiseConvolutionWorkload.hpp"
+#include "NeonDequantizeWorkload.hpp"
#include "NeonFloorFloatWorkload.hpp"
#include "NeonFullyConnectedWorkload.hpp"
#include "NeonGreaterWorkload.hpp"