ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
-
- ValidatePointer(m_Beta, descriptorName, "beta");
- ValidatePointer(m_Eps, descriptorName, "epsilon");
- ValidatePointer(m_Gamma, descriptorName, "gamma");
-
- const TensorInfo& beta = m_Beta->GetTensorInfo();
- const TensorInfo& epsilon = m_Eps->GetTensorInfo();
- const TensorInfo& gamma = m_Gamma->GetTensorInfo();
-
- ValidateTensorNumDimensions(beta, descriptorName, 1, "beta");
- ValidateTensorNumDimensions(epsilon, descriptorName, 1, "epsilon");
- ValidateTensorNumDimensions(gamma, descriptorName, 1, "gamma");
-
- ValidateTensorDataTypesMatch(inputTensorInfo, beta, descriptorName, "input", "beta");
- ValidateTensorDataTypesMatch(inputTensorInfo, epsilon, descriptorName, "input", "epsilon");
- ValidateTensorDataTypesMatch(inputTensorInfo, gamma, descriptorName, "input", "gamma");
}
void L2NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
struct InstanceNormalizationQueueDescriptor : QueueDescriptorWithParameters<InstanceNormalizationDescriptor>
{
- InstanceNormalizationQueueDescriptor()
- : m_Beta(nullptr)
- , m_Eps(nullptr)
- , m_Gamma(nullptr)
- {
- }
-
- const ConstCpuTensorHandle* m_Beta;
- const ConstCpuTensorHandle* m_Eps;
- const ConstCpuTensorHandle* m_Gamma;
+ // Beta, epsilon and gamma are no longer constant tensor handles; they are
+ // carried as scalar fields of m_Parameters (InstanceNormalizationDescriptor).
void Validate(const WorkloadInfo& workloadInfo) const;
};
test/layerTests/FullyConnectedTestImpl.cpp \
test/layerTests/GatherTestImpl.cpp \
test/layerTests/GreaterTestImpl.cpp \
+ test/layerTests/InstanceNormalizationTestImpl.cpp \
test/layerTests/L2NormalizationTestImpl.cpp \
test/layerTests/LstmTestImpl.cpp \
test/layerTests/MaximumTestImpl.cpp \
layerTests/GatherTestImpl.hpp
layerTests/GreaterTestImpl.cpp
layerTests/GreaterTestImpl.hpp
+ layerTests/InstanceNormalizationTestImpl.cpp
+ layerTests/InstanceNormalizationTestImpl.hpp
layerTests/L2NormalizationTestImpl.cpp
layerTests/L2NormalizationTestImpl.hpp
layerTests/LayerTestResult.hpp
#include <backendsCommon/test/layerTests/FullyConnectedTestImpl.hpp>
#include <backendsCommon/test/layerTests/GatherTestImpl.hpp>
#include <backendsCommon/test/layerTests/GreaterTestImpl.hpp>
+#include <backendsCommon/test/layerTests/InstanceNormalizationTestImpl.hpp>
#include <backendsCommon/test/layerTests/L2NormalizationTestImpl.hpp>
#include <backendsCommon/test/layerTests/LstmTestImpl.hpp>
#include <backendsCommon/test/layerTests/MaximumTestImpl.hpp>
--- /dev/null
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "InstanceNormalizationTestImpl.hpp"
+
+#include <ResolveType.hpp>
+
+#include <armnn/ArmNN.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <backendsCommon/test/DataLayoutUtils.hpp>
+#include <backendsCommon/test/QuantizeHelper.hpp>
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+namespace
+{
+
+// Shared driver for the instance normalization layer tests.
+//
+// Builds an InstanceNormalization workload from the supplied descriptor, runs
+// it over inputValues, and returns a LayerTestResult holding both the actual
+// output and the caller-supplied expectedOutputValues for comparison.
+//
+// qScale / qOffset are applied to both input and expected data; the defaults
+// leave floating-point types untouched.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> InstanceNormTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::TensorInfo& inputTensorInfo,
+    const armnn::TensorInfo& outputTensorInfo,
+    const std::vector<float>& inputValues,
+    const std::vector<float>& expectedOutputValues,
+    armnn::InstanceNormalizationQueueDescriptor descriptor,
+    float qScale = 0.0f,
+    int32_t qOffset = 0)
+{
+    // Not needed by this helper but kept so all layer test helpers share the
+    // same signature; the cast silences -Wunused-parameter on -Werror builds.
+    (void)memoryManager;
+
+    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputValues));
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+
+    result.outputExpected =
+        MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, expectedOutputValues));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::WorkloadInfo info;
+
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateInstanceNormalization(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+
+    return result;
+}
+
+// Checks instance normalization with default affine parameters (gamma = 1,
+// beta = 0): each batch x channel slice of the output should have zero mean
+// and unit variance, up to the epsilon term.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> InstanceNormTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::DataLayout dataLayout)
+{
+    // BatchSize: 2
+    // Height: 2
+    // Width: 2
+    // Channels: 2
+
+    const armnn::TensorShape inputOutputShape{ 2, 2, 2, 2 };
+
+    armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
+    armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
+
+    // Values are laid out NHWC here; permuted below if NCHW was requested.
+    std::vector<float> inputValues
+    {
+        // Batch 0, Height 0, Width 0 x Channel (2)
+        0.f,  1.f,
+        // Batch 0, Height 0, Width 1 x Channel (2)
+        0.f,  2.f,
+
+        // Batch 0, Height 1, Width 0 x Channel (2)
+        0.f,  2.f,
+        // Batch 0, Height 1, Width 1 x Channel (2)
+        0.f,  4.f,
+
+        // Batch 1, Height 0, Width 0 x Channel (2)
+        1.f, -1.f,
+        // Batch 1, Height 0, Width 1 x Channel (2)
+        -1.f,  2.f,
+
+        // Batch 1, Height 1, Width 0 x Channel (2)
+        -1.f, -2.f,
+        // Batch 1, Height 1, Width 1 x Channel (2)
+        1.f,  4.f
+    };
+
+    // Per batch/channel slice: (x - mean) / sqrt(variance + eps).
+    std::vector<float> expectedOutputValues
+    {
+        // Batch 0, Height 0, Width 0 x Channel (2)
+        0.f, -1.1470304f,
+        // Batch 0, Height 0, Width 1 x Channel (2)
+        0.f, -0.22940612f,
+        // Batch 0, Height 1, Width 0 x Channel (2)
+        0.f, -0.22940612f,
+        // Batch 0, Height 1, Width 1 x Channel (2)
+        0.f,  1.6058424f,
+
+        // Batch 1, Height 0, Width 0 x Channel (2)
+        0.99995005f, -0.7337929f,
+        // Batch 1, Height 0, Width 1 x Channel (2)
+        -0.99995005f,  0.52413774f,
+
+        // Batch 1, Height 1, Width 0 x Channel (2)
+        -0.99995005f, -1.1531031f,
+        // Batch 1, Height 1, Width 1 x Channel (2)
+        0.99995005f,  1.3627582f
+    };
+
+    if (dataLayout == armnn::DataLayout::NCHW)
+    {
+        PermuteTensorNhwcToNchw(inputTensorInfo, inputValues);
+        PermuteTensorNhwcToNchw(outputTensorInfo, expectedOutputValues);
+    }
+
+    armnn::InstanceNormalizationQueueDescriptor descriptor;
+    // Epsilon only guards the division; gamma = 1 / beta = 0 leave the
+    // normalised values unscaled and unshifted.
+    descriptor.m_Parameters.m_Eps = 0.0001f;
+    descriptor.m_Parameters.m_Beta = 0.0f;
+    descriptor.m_Parameters.m_Gamma = 1.0f;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+
+    return InstanceNormTestImpl<ArmnnType>(
+        workloadFactory,
+        memoryManager,
+        inputTensorInfo,
+        outputTensorInfo,
+        inputValues,
+        expectedOutputValues,
+        descriptor);
+}
+
+// Same input as InstanceNormTest but with non-default affine parameters
+// (gamma = 2, beta = 10); expected values are 10 + 2 * normalised input.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> InstanceNormTest2(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::DataLayout dataLayout)
+{
+    // BatchSize: 2
+    // Height: 2
+    // Width: 2
+    // Channels: 2
+
+    const armnn::TensorShape inputOutputShape{ 2, 2, 2, 2 };
+
+    armnn::TensorInfo inputTensorInfo(inputOutputShape, ArmnnType);
+    armnn::TensorInfo outputTensorInfo(inputOutputShape, ArmnnType);
+
+    // Values are laid out NHWC here; permuted below if NCHW was requested.
+    std::vector<float> inputValues
+    {
+        // Batch 0, Height 0, Width 0 x Channel (2)
+        0.f,  1.f,
+        // Batch 0, Height 0, Width 1 x Channel (2)
+        0.f,  2.f,
+
+        // Batch 0, Height 1, Width 0 x Channel (2)
+        0.f,  2.f,
+        // Batch 0, Height 1, Width 1 x Channel (2)
+        0.f,  4.f,
+
+        // Batch 1, Height 0, Width 0 x Channel (2)
+        1.f, -1.f,
+        // Batch 1, Height 0, Width 1 x Channel (2)
+        -1.f,  2.f,
+
+        // Batch 1, Height 1, Width 0 x Channel (2)
+        -1.f, -2.f,
+        // Batch 1, Height 1, Width 1 x Channel (2)
+        1.f,  4.f
+    };
+
+    // gamma * (x - mean) / sqrt(variance + eps) + beta, with gamma = 2, beta = 10.
+    std::vector<float> expectedOutputValues
+    {
+        // Batch 0, Height 0, Width 0 x Channel (2)
+        10.f,     7.7059393f,
+        // Batch 0, Height 0, Width 1 x Channel (2)
+        10.f,     9.541187f,
+
+        // Batch 0, Height 1, Width 0 x Channel (2)
+        10.f,     9.541187f,
+        // Batch 0, Height 1, Width 1 x Channel (2)
+        10.f,    13.211685f,
+
+        // Batch 1, Height 0, Width 0 x Channel (2)
+        11.9999f, 8.532414f,
+        // Batch 1, Height 0, Width 1 x Channel (2)
+        8.0001f, 11.048275f,
+
+        // Batch 1, Height 1, Width 0 x Channel (2)
+        8.0001f,  7.693794f,
+        // Batch 1, Height 1, Width 1 x Channel (2)
+        11.9999f, 12.725516f
+    };
+
+    if (dataLayout == armnn::DataLayout::NCHW)
+    {
+        PermuteTensorNhwcToNchw(inputTensorInfo, inputValues);
+        PermuteTensorNhwcToNchw(outputTensorInfo, expectedOutputValues);
+    }
+
+    armnn::InstanceNormalizationQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_Eps = 0.0001f;
+    descriptor.m_Parameters.m_Beta = 10.0f;
+    descriptor.m_Parameters.m_Gamma = 2.0f;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+
+    return InstanceNormTestImpl<ArmnnType>(
+        workloadFactory,
+        memoryManager,
+        inputTensorInfo,
+        outputTensorInfo,
+        inputValues,
+        expectedOutputValues,
+        descriptor);
+}
+
+} // anonymous namespace
+
+// Float32 entry point for the basic instance normalization test.
+LayerTestResult<float, 4> InstanceNormFloat32Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::DataLayout dataLayout)
+{
+    return InstanceNormTest<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
+}
+
+// Float16 entry point for the basic instance normalization test.
+LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::DataLayout dataLayout)
+{
+    return InstanceNormTest<armnn::DataType::Float16>(workloadFactory, memoryManager, dataLayout);
+}
+
+// Float32 entry point for the non-default gamma/beta instance normalization test.
+LayerTestResult<float, 4> InstanceNormFloat32Test2(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::DataLayout dataLayout)
+{
+    return InstanceNormTest2<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
+}
+
+// Float16 entry point for the non-default gamma/beta instance normalization test.
+LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test2(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::DataLayout dataLayout)
+{
+    return InstanceNormTest2<armnn::DataType::Float16>(workloadFactory, memoryManager, dataLayout);
+}
--- /dev/null
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <Half.hpp>
+
+#include <armnn/Types.hpp>
+
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+/// Instance normalization test, default parameters, Float32 precision.
+LayerTestResult<float, 4> InstanceNormFloat32Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::DataLayout dataLayout);
+
+/// Instance normalization test, default parameters, Float16 precision.
+LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::DataLayout dataLayout);
+
+/// Instance normalization test, non-default gamma/beta, Float32 precision.
+LayerTestResult<float, 4> InstanceNormFloat32Test2(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::DataLayout dataLayout);
+
+/// Instance normalization test, non-default gamma/beta, Float16 precision.
+LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test2(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::DataLayout dataLayout);
+
#include "workloads/ClDivisionFloatWorkload.hpp"
#include "workloads/ClFullyConnectedWorkload.hpp"
#include "workloads/ClGreaterWorkload.hpp"
+#include "workloads/ClInstanceNormalizationWorkload.hpp"
#include "workloads/ClL2NormalizationFloatWorkload.hpp"
#include "workloads/ClLstmFloatWorkload.hpp"
#include "workloads/ClMaximumWorkload.hpp"
return IsClBackendSupported(reasonIfUnsupported);
}
+// Delegates the support decision to the CL workload's own validation routine.
+bool ClLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
+                                                      const TensorInfo& output,
+                                                      const InstanceNormalizationDescriptor& descriptor,
+                                                      Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClInstanceNormalizationWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output,
+                                   descriptor);
+}
+
bool ClLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
const TensorInfo& output,
const L2NormalizationDescriptor& descriptor,
bool IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsInstanceNormalizationSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const InstanceNormalizationDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsL2NormalizationSupported(const TensorInfo& input,
const TensorInfo& output,
const L2NormalizationDescriptor& descriptor,
return tensorHandle;
}
-std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateSubTensorHandle(ITensorHandle& parent,
- TensorShape const& subTensorShape,
+std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateSubTensorHandle(ITensorHandle& parent,
+ TensorShape const& subTensorShape,
unsigned int const* subTensorOrigin) const
{
arm_compute::Coordinates coords;
boost::polymorphic_downcast<IClTensorHandle*>(&parent), shape, coords);
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
-}
-
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+ const WorkloadInfo& info) const
{
return MakeWorkload<ClAbsWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+ const WorkloadInfo& info) const
{
return MakeWorkload<ClActivationWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClSoftmaxFloatWorkload, ClSoftmaxUint8Workload>(descriptor, info,
- m_MemoryManager->GetIntraLayerManager());
+ return MakeWorkload<ClAdditionWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateBatchNormalization(
+ const BatchNormalizationQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClSplitterWorkload>(descriptor, info);
+ return MakeWorkload<ClBatchNormalizationFloatWorkload, NullWorkload>(descriptor, info);
}
-std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return CreateConcat(descriptor, info);
+ return MakeWorkload<ClBatchToSpaceNdWorkload>(descriptor, info);
}
-std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateFullyConnected(
- const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClFullyConnectedWorkload>(descriptor, info, m_MemoryManager->GetIntraLayerManager());
+ return MakeWorkload<ClConcatWorkload>(descriptor, info);
}
-std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClPermuteWorkload>(descriptor, info);
+ return MakeWorkload<ClConstantWorkload>(descriptor, info);
}
-std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConvertFp16ToFp32(
+ const ConvertFp16ToFp32QueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClPooling2dWorkload>(descriptor, info);
+ return MakeWorkload<ClConvertFp16ToFp32Workload>(descriptor, info);
}
-std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreatePrelu(const armnn::PreluQueueDescriptor &descriptor,
- const armnn::WorkloadInfo &info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConvertFp32ToFp16(
+ const ConvertFp32ToFp16QueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClPreluWorkload>(descriptor, info);
+ return MakeWorkload<ClConvertFp32ToFp16Workload>(descriptor, info);
}
-std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
return MakeWorkload<ClConvolution2dWorkload>(descriptor, info, m_MemoryManager->GetIntraLayerManager());
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDepthwiseConvolution2d(
- const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClDepthwiseConvolutionWorkload>(descriptor, info);
+ return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDetectionPostProcess(
- const armnn::DetectionPostProcessQueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkload<ClDepthToSpaceWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDepthwiseConvolution2d(
+ const DepthwiseConvolution2dQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return MakeWorkload<ClDepthwiseConvolutionWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDequantize(const DequantizeQueueDescriptor& descriptor,
return MakeWorkload<ClDequantizeWorkload>(descriptor, info);
}
-std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDetectionPostProcess(
+ const DetectionPostProcessQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClNormalizationFloatWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
}
-std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClAdditionWorkload>(descriptor, info);
+ return MakeWorkload<ClDivisionFloatWorkload, NullWorkload>(descriptor, info);
}
-std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateMultiplication(
- const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClMultiplicationWorkload>(descriptor, info);
+ return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
}
-std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateDivision(
- const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFakeQuantization(
+ const FakeQuantizationQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClDivisionFloatWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
}
-std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClSubtractionWorkload>(descriptor, info);
+ return MakeWorkload<ClFloorFloatWorkload, NullWorkload>(descriptor, info);
}
-std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateBatchNormalization(
- const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClBatchNormalizationFloatWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkload<ClFullyConnectedWorkload>(descriptor, info, m_MemoryManager->GetIntraLayerManager());
}
-std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateGather(const GatherQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- if (descriptor.m_Inputs.empty() || !descriptor.m_Inputs[0])
- {
- throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemCopy workload");
- }
-
- return MakeWorkload<CopyMemGenericWorkload>(descriptor, info);
+ return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
}
-std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- if (descriptor.m_Inputs.empty() || !descriptor.m_Inputs[0])
- {
- throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemImport workload");
- }
-
- return std::make_unique<ImportMemGenericWorkload>(descriptor, info);
+ return MakeWorkload<ClGreaterFloat32Workload, ClGreaterUint8Workload>(descriptor, info);
}
-std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateInput(const InputQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClResizeWorkload>(descriptor, info);
+ return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}
-std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateResizeBilinear(
- const ResizeBilinearQueueDescriptor& descriptor,
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateInstanceNormalization(
+ const InstanceNormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- ResizeQueueDescriptor resizeDescriptor;
- resizeDescriptor.m_Inputs = descriptor.m_Inputs;
- resizeDescriptor.m_Outputs = descriptor.m_Outputs;
-
- resizeDescriptor.m_Parameters.m_Method = ResizeMethod::Bilinear;
- resizeDescriptor.m_Parameters.m_DataLayout = descriptor.m_Parameters.m_DataLayout;
- resizeDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
- resizeDescriptor.m_Parameters.m_TargetWidth = descriptor.m_Parameters.m_TargetWidth;
+ return MakeWorkload<ClInstanceNormalizationWorkload>(descriptor, info);
+}
- return CreateResize(resizeDescriptor, info);
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return MakeWorkload<ClL2NormalizationFloatWorkload, NullWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFakeQuantization(
- const FakeQuantizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkload<ClLstmFloatWorkload, NullWorkload>(descriptor, info);
}
-std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClQuantizeWorkload>(descriptor, info);
+ return MakeWorkload<ClMaximumWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClL2NormalizationFloatWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkload<ClMeanWorkload>(descriptor, info);
}
-std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClConcatWorkload>(descriptor, info);
+ if (descriptor.m_Inputs.empty() || !descriptor.m_Inputs[0])
+ {
+ throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemCopy workload");
+ }
+
+ return MakeWorkload<CopyMemGenericWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClConstantWorkload>(descriptor, info);
+ if (descriptor.m_Inputs.empty() || !descriptor.m_Inputs[0])
+ {
+ throw InvalidArgumentException("ClWorkloadFactory: Invalid null input for MemImport workload");
+ }
+
+ return std::make_unique<ImportMemGenericWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClReshapeWorkload>(descriptor, info);
+ return CreateConcat(descriptor, info);
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClSpaceToBatchNdWorkload>(descriptor, info);
+ return MakeWorkload<ClMinimumWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClFloorFloatWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkload<ClMultiplicationWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClLstmFloatWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkload<ClNormalizationFloatWorkload, NullWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConvertFp16ToFp32(
- const ConvertFp16ToFp32QueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClConvertFp16ToFp32Workload>(descriptor, info);
+ return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateConvertFp32ToFp16(
- const ConvertFp32ToFp16QueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClConvertFp32ToFp16Workload>(descriptor, info);
+ return MakeWorkload<ClPadWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& descriptor,
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<ClMaximumWorkload>(descriptor, info);
+ return MakeWorkload<ClPermuteWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
{
- return MakeWorkload<ClMeanWorkload>(descriptor, info);
+ return MakeWorkload<ClPooling2dWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
-                                                        const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
+                                                                const WorkloadInfo& info) const
 {
-    return MakeWorkload<ClPadWorkload>(descriptor, info);
+    // No CL implementation exists for PreCompiled; NullWorkload is substituted for both data types.
+    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
 }
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
-                                                          const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePrelu(const PreluQueueDescriptor& descriptor,
+                                                          const WorkloadInfo& info) const
 {
-    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+    return MakeWorkload<ClPreluWorkload>(descriptor, info);
 }
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
-                                                                   const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
+                                                             const WorkloadInfo& info) const
 {
-    return MakeWorkload<ClBatchToSpaceNdWorkload>(descriptor, info);
+    return MakeWorkload<ClQuantizeWorkload>(descriptor, info);
 }
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
-                                                                 const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
+                                                                  const WorkloadInfo& info) const
 {
-    return MakeWorkload<ClStridedSliceWorkload>(descriptor, info);
+    return MakeWorkload<ClQuantizedLstmWorkload>(descriptor, info);
 }
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
 {
-    return MakeWorkload<ClMinimumWorkload>(descriptor, info);
+    return MakeWorkload<ClReshapeWorkload>(descriptor, info);
 }
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
-                                                            const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
+                                                           const WorkloadInfo& info) const
 {
-    return MakeWorkload<ClGreaterFloat32Workload, ClGreaterUint8Workload>(descriptor, info);
+    return MakeWorkload<ClResizeWorkload>(descriptor, info);
 }
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
-                                                          const WorkloadInfo& info) const
+// Deprecated entry point: translates the legacy ResizeBilinear descriptor into the
+// generic Resize descriptor (method forced to Bilinear) and forwards to CreateResize.
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
+                                                                   const WorkloadInfo& info) const
 {
-    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+    ResizeQueueDescriptor resizeDescriptor;
+    resizeDescriptor.m_Inputs  = descriptor.m_Inputs;
+    resizeDescriptor.m_Outputs = descriptor.m_Outputs;
+
+    resizeDescriptor.m_Parameters.m_Method       = ResizeMethod::Bilinear;
+    resizeDescriptor.m_Parameters.m_DataLayout   = descriptor.m_Parameters.m_DataLayout;
+    resizeDescriptor.m_Parameters.m_TargetHeight = descriptor.m_Parameters.m_TargetHeight;
+    resizeDescriptor.m_Parameters.m_TargetWidth  = descriptor.m_Parameters.m_TargetWidth;
+
+    return CreateResize(resizeDescriptor, info);
 }
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
return MakeWorkload<ClRsqrtWorkload>(descriptor, info);
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
-                                                                const WorkloadInfo& info) const
-{
-    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
-}
-
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateGather(const armnn::GatherQueueDescriptor& descriptor,
-                                                           const armnn::WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
+                                                            const WorkloadInfo& info) const
 {
-    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+    // Both softmax workload variants are handed the factory's intra-layer memory manager.
+    return MakeWorkload<ClSoftmaxFloatWorkload, ClSoftmaxUint8Workload>(descriptor, info,
+                                                                        m_MemoryManager->GetIntraLayerManager());
 }
-std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateTransposeConvolution2d(
-    const TransposeConvolution2dQueueDescriptor& descriptor,
-    const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
+                                                                   const WorkloadInfo& info) const
 {
-    return MakeWorkload<ClTransposeConvolution2dWorkload>(descriptor, info, m_MemoryManager->GetIntraLayerManager());
+    return MakeWorkload<ClSpaceToBatchNdWorkload>(descriptor, info);
 }
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
     return MakeWorkload<ClSpaceToDepthWorkload>(descriptor, info);
 }
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
-                                                                  const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
+                                                             const WorkloadInfo& info) const
 {
-    return MakeWorkload<ClQuantizedLstmWorkload>(descriptor, info);
+    return MakeWorkload<ClSplitterWorkload>(descriptor, info);
 }
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
     return MakeWorkload<ClStackWorkload>(descriptor, info);
 }
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
                                                                  const WorkloadInfo& info) const
 {
-    return MakeWorkload<ClDepthToSpaceWorkload>(descriptor, info);
+    return MakeWorkload<ClStridedSliceWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
+                                                                const WorkloadInfo& info) const
+{
+    return MakeWorkload<ClSubtractionWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateTransposeConvolution2d(
+    const TransposeConvolution2dQueueDescriptor& descriptor,
+    const WorkloadInfo& info) const
+{
+    // Transpose convolution also receives the factory's intra-layer memory manager.
+    return MakeWorkload<ClTransposeConvolution2dWorkload>(descriptor, info, m_MemoryManager->GetIntraLayerManager());
 }
} // namespace armnn
DataLayout dataLayout,
const bool IsMemoryManaged = true) const override;
-    std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor,
-                                            const WorkloadInfo& info) const override;
-
+    // NOTE(review): workload-creation declarations below are kept in alphabetical order by layer name.
     std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
                                          const WorkloadInfo& info) const override;
     std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
-                                             const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
+    std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
-    ARMNN_DEPRECATED_MSG("Use CreateConcat instead")
-    std::unique_ptr<IWorkload> CreateMerger(const MergerQueueDescriptor& descriptor,
-                                            const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor,
+                                                        const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
+    std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& descriptor,
-                                             const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
+                                            const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
-                                               const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
+                                              const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreatePrelu(const PreluQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
+                                                       const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
+                                                       const WorkloadInfo& info) const override;
     std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info) const override;
+
     std::unique_ptr<IWorkload> CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info) const override;
     std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor& descriptor,
-                                                          const WorkloadInfo& info) const override;
-
     std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor,
-                                                   const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor& descriptor,
+                                                          const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor,
+    std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
-                                                    const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor,
-                                                        const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
+                                                      const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
-                                             const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor,
-                                               const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
+                                                    const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
+    std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
                                             const WorkloadInfo& info) const override;
-    ARMNN_DEPRECATED_MSG("Use CreateResize instead")
-    std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
-                                                    const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
+                                             const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
-                                                      const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
-                                              const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor& descriptor,
+                                                           const WorkloadInfo& info) const override;
     std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
-                                            const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateLstm(const LstmQueueDescriptor& descriptor,
+                                          const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
-                                              const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateMaximum(const MaximumQueueDescriptor& descriptor,
+                                             const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
+    std::unique_ptr<IWorkload> CreateMean(const MeanQueueDescriptor& descriptor,
+                                          const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
-                                                    const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor,
+                                               const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override;
+    ARMNN_DEPRECATED_MSG("Use CreateConcat instead")
+    std::unique_ptr<IWorkload> CreateMerger(const MergerQueueDescriptor& descriptor,
+                                            const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateLstm(const LstmQueueDescriptor& descriptor,
-                                          const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
+                                             const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
-                                                       const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
+                                                    const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
-                                                       const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor,
+                                                   const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
-                                              const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor,
+                                            const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
-                                                 const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
+                                         const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateMaximum(const MaximumQueueDescriptor& descriptor,
+    std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateMean(const MeanQueueDescriptor& descriptor,
-                                          const WorkloadInfo& Info) const override;
+    std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
+                                               const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
-                                         const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
+                                                 const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
+    std::unique_ptr<IWorkload> CreatePrelu(const PreluQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
-                                                    const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
+                                              const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
-                                                  const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
+                                                   const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
+    std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
-                                             const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
+                                            const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override;
+    ARMNN_DEPRECATED_MSG("Use CreateResize instead")
+    std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
+                                                    const WorkloadInfo& info) const override;
     std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
-                                                 const WorkloadInfo& info) const override;
-
-    std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
-                                            const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
+                                             const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& descriptor,
-                                                            const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
+                                                    const WorkloadInfo& info) const override;
     std::unique_ptr<IWorkload> CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info) const override;
-    std::unique_ptr<IWorkload> CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
-                                                   const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
+                                              const WorkloadInfo& info) const override;
     std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
+    std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
+                                                  const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
+                                                 const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& descriptor,
+                                                            const WorkloadInfo& info) const override;
+
private:
template<typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
static std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor,
workloads/ClFloorFloatWorkload.cpp \
workloads/ClFullyConnectedWorkload.cpp \
workloads/ClGreaterWorkload.cpp \
+ workloads/ClInstanceNormalizationWorkload.cpp \
workloads/ClL2NormalizationFloatWorkload.cpp \
workloads/ClLstmFloatWorkload.cpp \
workloads/ClMaximumWorkload.cpp \
ARMNN_AUTO_TEST_CASE(BatchNormFloat32, BatchNormFloat32Test)
ARMNN_AUTO_TEST_CASE(BatchNormFloat32Nhwc, BatchNormFloat32NhwcTest)
+// InstanceNormalization
+// No trailing semicolons: ARMNN_AUTO_TEST_CASE expands to a complete test-case
+// definition, matching the convention of the surrounding registrations.
+ARMNN_AUTO_TEST_CASE(InstanceNormFloat32Nchw, InstanceNormFloat32Test, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(InstanceNormFloat16Nchw, InstanceNormFloat16Test, DataLayout::NCHW)
+
+ARMNN_AUTO_TEST_CASE(InstanceNormFloat32Nhwc, InstanceNormFloat32Test, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(InstanceNormFloat16Nhwc, InstanceNormFloat16Test, DataLayout::NHWC)
+
+ARMNN_AUTO_TEST_CASE(InstanceNormFloat32Nchw2, InstanceNormFloat32Test2, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(InstanceNormFloat16Nchw2, InstanceNormFloat16Test2, DataLayout::NCHW)
+
+ARMNN_AUTO_TEST_CASE(InstanceNormFloat32Nhwc2, InstanceNormFloat32Test2, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(InstanceNormFloat16Nhwc2, InstanceNormFloat16Test2, DataLayout::NHWC)
+
// L2 Normalization
ARMNN_AUTO_TEST_CASE(L2Normalization1d, L2Normalization1dTest, DataLayout::NCHW)
ARMNN_AUTO_TEST_CASE(L2Normalization2d, L2Normalization2dTest, DataLayout::NCHW)
ClFullyConnectedWorkload.hpp
ClGreaterWorkload.cpp
ClGreaterWorkload.hpp
+ ClInstanceNormalizationWorkload.cpp
+ ClInstanceNormalizationWorkload.hpp
ClL2NormalizationFloatWorkload.cpp
ClL2NormalizationFloatWorkload.hpp
ClLstmFloatWorkload.cpp
--- /dev/null
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClInstanceNormalizationWorkload.hpp"
+#include "ClWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <cl/ClTensorHandle.hpp>
+
+using namespace armnn::armcomputetensorutils;
+
+namespace armnn
+{
+
+// Asks ACL whether CLInstanceNormalizationLayer supports the given input/output
+// tensors together with the descriptor's gamma/beta/epsilon parameters.
+arm_compute::Status ClInstanceNormalizationWorkloadValidate(const TensorInfo& input,
+                                                            const TensorInfo& output,
+                                                            const InstanceNormalizationDescriptor& descriptor)
+{
+    const arm_compute::TensorInfo aclInputInfo  = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
+
+    return arm_compute::CLInstanceNormalizationLayer::validate(&aclInputInfo,
+                                                               &aclOutputInfo,
+                                                               descriptor.m_Gamma,
+                                                               descriptor.m_Beta,
+                                                               descriptor.m_Eps);
+}
+
+ClInstanceNormalizationWorkload::ClInstanceNormalizationWorkload(
+    const InstanceNormalizationQueueDescriptor& descriptor,
+    const WorkloadInfo& info)
+    : BaseWorkload<InstanceNormalizationQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("ClInstanceNormalizationWorkload", 1, 1);
+
+    arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    // Propagate the requested data layout (NCHW/NHWC) to the ACL tensor infos.
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+    input.info()->set_data_layout(aclDataLayout);
+    output.info()->set_data_layout(aclDataLayout);
+
+    m_Layer.configure(&input,
+                      &output,
+                      descriptor.m_Parameters.m_Gamma,
+                      descriptor.m_Parameters.m_Beta,
+                      descriptor.m_Parameters.m_Eps);
+}
+
+void ClInstanceNormalizationWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_CL("ClInstanceNormalizationWorkload_Execute");
+    RunClFunction(m_Layer, CHECK_LOCATION());
+}
+
+} // namespace armnn
--- /dev/null
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h>
+
+namespace armnn
+{
+
+// Checks via ACL whether CLInstanceNormalizationLayer supports the given
+// tensors and the descriptor's gamma/beta/epsilon parameters.
+arm_compute::Status ClInstanceNormalizationWorkloadValidate(const TensorInfo& input,
+                                                            const TensorInfo& output,
+                                                            const InstanceNormalizationDescriptor& descriptor);
+
+// CL backend workload wrapping arm_compute::CLInstanceNormalizationLayer.
+class ClInstanceNormalizationWorkload : public BaseWorkload<InstanceNormalizationQueueDescriptor>
+{
+public:
+    ClInstanceNormalizationWorkload(const InstanceNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info);
+    void Execute() const override;
+
+private:
+    // mutable so the const Execute() can run the ACL function object.
+    mutable arm_compute::CLInstanceNormalizationLayer m_Layer;
+};
+
+} // namespace armnn
#include "ClFloorFloatWorkload.hpp"
#include "ClFullyConnectedWorkload.hpp"
#include "ClGreaterWorkload.hpp"
+#include "ClInstanceNormalizationWorkload.hpp"
#include "ClL2NormalizationFloatWorkload.hpp"
#include "ClLstmFloatWorkload.hpp"
#include "ClConcatWorkload.hpp"