// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT

#include "NeonWorkloadFactory.hpp"
#include <armnn/Utils.hpp>
#include <backends/CpuTensorHandle.hpp>

#ifdef ARMCOMPUTENEON_ENABLED
#include <arm_compute/runtime/Allocator.h>

#include <backends/MemCopyWorkload.hpp>
#include "NeonTensorHandle.hpp"
#include "workloads/NeonWorkloadUtils.hpp"
#include "workloads/NeonWorkloads.hpp"

#include <memory/IPoolManager.hpp>
#endif

#include <backends/MakeWorkloadHelper.hpp>

#include <boost/polymorphic_cast.hpp>

namespace armnn
{
bool NeonWorkloadFactory::IsLayerSupported(const Layer& layer, boost::optional<DataType> dataType,
                                           std::string& outReasonIfUnsupported)
{
    return IWorkloadFactory::IsLayerSupported(Compute::CpuAcc, layer, dataType, outReasonIfUnsupported);
}
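// From here the file splits into two build flavours: the full implementation when ARMCOMPUTENEON_ENABLED
// is defined, and stub implementations (in the #else branch further down) when ArmNN is built without
// the Arm Compute library.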
#ifdef ARMCOMPUTENEON_ENABLED

NeonWorkloadFactory::NeonWorkloadFactory()
    : m_MemoryManager(std::make_unique<arm_compute::Allocator>(), BaseMemoryManager::MemoryAffinity::Offset)
{
}
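// A sub-tensor handle is a view into an already allocated parent tensor, typically used so that
// splitter/merger layers can operate on slices of a tensor without copying. The origin supplied by
// ArmNN is translated into Arm Compute's reversed dimension ordering below.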
std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateSubTensorHandle(ITensorHandle& parent,
                                                                          TensorShape const& subTensorShape,
                                                                          unsigned int const* subTensorOrigin) const
{
    BOOST_ASSERT(parent.GetType() == ITensorHandle::Neon);

    const arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);

    arm_compute::Coordinates coords;
    coords.set_num_dimensions(subTensorShape.GetNumDimensions());
    for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); i++)
    {
        // Arm compute indexes tensor coords in reverse order.
        unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
        coords.set(i, boost::numeric_cast<int>(subTensorOrigin[revertedIndex]));
    }

    return std::make_unique<NeonSubTensorHandle>(
        boost::polymorphic_downcast<INeonTensorHandle*>(&parent), shape, coords);
}
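// Plain tensor handles register themselves with the factory's inter-layer memory group, so their
// backing memory can be pooled and reused across layers once the memory manager is finalized.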
std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const
{
    auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo);
    tensorHandle->SetMemoryGroup(m_MemoryManager.GetInterLayerMemoryGroup());

    return tensorHandle;
}
std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                       DataLayout dataLayout) const
{
    auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo, dataLayout);
    tensorHandle->SetMemoryGroup(m_MemoryManager.GetInterLayerMemoryGroup());

    return tensorHandle;
}
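// MakeWorkload<FloatWorkload, Uint8Workload> (see MakeWorkloadHelper.hpp) instantiates the first template
// argument for float tensors and the second for quantised 8-bit tensors. Where NullWorkload appears, the
// Neon backend has no implementation for that data type and workload creation yields a null pointer.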
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkload<CopyMemGenericWorkload, CopyMemGenericWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return MakeWorkload<CopyMemGenericWorkload, CopyMemGenericWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
{
    return std::make_unique<NeonActivationWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return MakeWorkload<NeonSoftmaxFloatWorkload, NeonSoftmaxUint8Workload>(descriptor, info,
                                                                            m_MemoryManager.GetIntraLayerManager());
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return std::make_unique<NeonSplitterWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor,
                                                                    const WorkloadInfo& info) const
{
    return std::make_unique<NeonMergerWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateFullyConnected(
    const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return MakeWorkload<NeonFullyConnectedWorkload, NeonFullyConnectedWorkload>(descriptor, info,
                                                                                m_MemoryManager.GetIntraLayerManager());
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
                                                                     const WorkloadInfo& info) const
{
    return MakeWorkload<NeonPermuteFloatWorkload, NeonPermuteUint8Workload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
                                                                       const WorkloadInfo& info) const
{
    return MakeWorkload<NeonPooling2dFloatWorkload, NeonPooling2dUint8Workload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateConvolution2d(
    const Convolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return MakeWorkload<NeonConvolution2dFloatWorkload, NeonConvolution2dUint8Workload>(descriptor, info,
        m_MemoryManager.GetIntraLayerManager());
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDepthwiseConvolution2d(
    const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return MakeWorkload<NeonDepthwiseConvolutionFloatWorkload, NeonDepthwiseConvolutionUint8Workload>(
        descriptor, info);
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateNormalization(
    const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return MakeWorkload<NeonNormalizationFloatWorkload, NullWorkload>(descriptor, info,
                                                                      m_MemoryManager.GetIntraLayerManager());
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
                                                                      const WorkloadInfo& info) const
{
    return MakeWorkload<NeonAdditionFloatWorkload, NullWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMultiplication(
    const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return MakeWorkload<NeonMultiplicationFloatWorkload, NullWorkload>(descriptor, info);
}
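// Division has no Neon workload at this point; NullWorkload in both slots means the layer is
// reported as unsupported for every data type.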
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateDivision(
    const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateSubtraction(
    const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return MakeWorkload<NeonSubtractionFloatWorkload, NullWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateBatchNormalization(
    const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return MakeWorkload<NeonBatchNormalizationFloatWorkload, NullWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
                                                                     const WorkloadInfo& info) const
{
    if (descriptor.m_Inputs.empty() || !descriptor.m_Inputs[0])
    {
        throw InvalidArgumentException("NeonWorkloadFactory: Invalid null input for MemCopy workload");
    }

    return MakeWorkload<CopyMemGenericWorkload, CopyMemGenericWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateResizeBilinear(
    const ResizeBilinearQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return nullptr;
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFakeQuantization(
    const FakeQuantizationQueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return nullptr;
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                                      const WorkloadInfo& info) const
{
    return MakeWorkload<NeonL2NormalizationFloatWorkload, NullWorkload>(descriptor, info,
                                                                        m_MemoryManager.GetIntraLayerManager());
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return MakeWorkload<NeonConstantFloatWorkload, NeonConstantUint8Workload>(descriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return std::make_unique<NeonReshapeWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return MakeWorkload<NeonFloorFloatWorkload, NullWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return MakeWorkload<NeonLstmFloatWorkload, NullWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp16ToFp32(
    const ConvertFp16ToFp32QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<NeonConvertFp16ToFp32Workload>(descriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp32ToFp16(
    const ConvertFp32ToFp16QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return std::make_unique<NeonConvertFp32ToFp16Workload>(descriptor, info);
}
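// Mean and Pad likewise have no Neon implementation yet, so both resolve to NullWorkload.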
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
}
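// The lifecycle hooks below simply forward to the factory's memory manager, which owns the
// Arm Compute memory pools used by the tensor handles and workloads created above.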
void NeonWorkloadFactory::Finalize()
{
    m_MemoryManager.Finalize();
}

void NeonWorkloadFactory::Release()
{
    m_MemoryManager.Release();
}

void NeonWorkloadFactory::Acquire()
{
    m_MemoryManager.Acquire();
}
#else // Compiled without ArmCompute libs
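// Without the Compute Library the factory still has to compile and link, so every method below is a
// stub: tensor-handle and workload creation returns nullptr and the memory-manager hooks do nothing.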
NeonWorkloadFactory::NeonWorkloadFactory()
{
}

std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateSubTensorHandle(ITensorHandle& parent,
                                                                          TensorShape const& subTensorShape,
                                                                          unsigned int const* subTensorOrigin) const
{
    return nullptr;
}

std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const
{
    return nullptr;
}

std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                       DataLayout dataLayout) const
{
    return nullptr;
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return nullptr;
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return nullptr;
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const
{
    return nullptr;
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return nullptr;
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return nullptr;
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
{
    return nullptr;
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
                                                                     const WorkloadInfo& info) const
{
    return nullptr;
}

std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
                                                                     const WorkloadInfo& info) const
{
    return nullptr;
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
{
    return nullptr;
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
                                                                    const WorkloadInfo& info) const
{
    return nullptr;
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDepthwiseConvolution2d(
    const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return nullptr;
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor,
                                                                    const WorkloadInfo& info) const
{
    return nullptr;
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return nullptr;
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateBatchNormalization(const BatchNormalizationQueueDescriptor& data,
                                                                         const WorkloadInfo& info) const
{
    return nullptr;
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& data,
                                                                     const WorkloadInfo& info) const
{
    return nullptr;
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return nullptr;
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
                                                                     const WorkloadInfo& info) const
{
    return nullptr;
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFakeQuantization(
    const FakeQuantizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
    return nullptr;
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                                      const WorkloadInfo& info) const
{
    return nullptr;
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
{
    return nullptr;
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
{
    return nullptr;
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
{
    return nullptr;
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return nullptr;
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp16ToFp32(
    const ConvertFp16ToFp32QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return nullptr;
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateConvertFp32ToFp16(
    const ConvertFp32ToFp16QueueDescriptor& descriptor,
    const WorkloadInfo& info) const
{
    return nullptr;
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& data,
                                                               const WorkloadInfo& info) const
{
    return nullptr;
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& data,
                                                                  const WorkloadInfo& info) const
{
    return nullptr;
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
    return nullptr;
}

std::unique_ptr<IWorkload> NeonWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
                                                          const WorkloadInfo& info) const
{
    return nullptr;
}

void NeonWorkloadFactory::Finalize()
{}

void NeonWorkloadFactory::Release()
{}

void NeonWorkloadFactory::Acquire()
{}

#endif // ARMCOMPUTENEON_ENABLED

} // namespace armnn