IVGCVSW-2325 Reduce duplication in LayerTests by reusing the ElementwiseTestHelper
author    Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
          Tue, 27 Aug 2019 17:14:26 +0000 (18:14 +0100)
committer Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
          Tue, 27 Aug 2019 17:16:18 +0000 (18:16 +0100)
* Refactored tests for element-wise operations to use ElementwiseTestHelper
* Moved the tests for each operation into a separate file under backendsCommon/test/layerTests

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: Icefb6b35df78b9619f69378229789505bf92670e
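
For context, here is a minimal sketch of the pattern this change introduces. It is illustrative only: the helper's exact signature lives in layerTests/ElementwiseTestImpl.hpp, the function name SimpleAdditionTest is hypothetical, and the argument list is abbreviated relative to the real helper (which also deals with quantization scales and offsets).

    #include "layerTests/ElementwiseTestImpl.hpp"

    // Hypothetical refactored test. Assumed: the helper takes the tensor rank,
    // the workload descriptor type, and the input/output data types as template
    // parameters, and dispatches workload creation through the
    // CreateWorkload<Descriptor> specialisations shown in the diff below.
    LayerTestResult<float, 4> SimpleAdditionTest(
        armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
    {
        const unsigned int shape[] = { 1, 1, 2, 2 };

        std::vector<float> input0 = { 1.f, 2.f, 3.f, 4.f };
        std::vector<float> input1 = { 5.f, 6.f, 7.f, 8.f };
        std::vector<float> output = { 6.f, 8.f, 10.f, 12.f };

        // One shared implementation replaces the per-test boilerplate of
        // creating tensor handles, copying data in, executing the workload
        // and copying the result back out.
        return ElementwiseTestHelper<4,
                                     armnn::AdditionQueueDescriptor,
                                     armnn::DataType::Float32,
                                     armnn::DataType::Float32>(
            workloadFactory, memoryManager,
            shape, input0,
            shape, input1,
            shape, output);
    }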

22 files changed:
src/backends/backendsCommon/common.mk
src/backends/backendsCommon/test/CMakeLists.txt
src/backends/backendsCommon/test/LayerTests.cpp
src/backends/backendsCommon/test/LayerTests.hpp
src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp [new file with mode: 0644]
src/backends/backendsCommon/test/layerTests/AdditionTestImpl.hpp [new file with mode: 0644]
src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp [new file with mode: 0644]
src/backends/backendsCommon/test/layerTests/DivisionTestImpl.hpp [new file with mode: 0644]
src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp [new file with mode: 0644]
src/backends/backendsCommon/test/layerTests/EqualTestImpl.cpp [new file with mode: 0644]
src/backends/backendsCommon/test/layerTests/EqualTestImpl.hpp [new file with mode: 0644]
src/backends/backendsCommon/test/layerTests/GreaterTestImpl.cpp [new file with mode: 0644]
src/backends/backendsCommon/test/layerTests/GreaterTestImpl.hpp [new file with mode: 0644]
src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp [new file with mode: 0644]
src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp [new file with mode: 0644]
src/backends/backendsCommon/test/layerTests/MaximumTestImpl.hpp [new file with mode: 0644]
src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp [new file with mode: 0644]
src/backends/backendsCommon/test/layerTests/MinimumTestImpl.hpp [new file with mode: 0644]
src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp [new file with mode: 0644]
src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.hpp [new file with mode: 0644]
src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp [new file with mode: 0644]
src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.hpp [new file with mode: 0644]

src/backends/backendsCommon/common.mk
index d7fe9b1..9f09743 100644
@@ -33,7 +33,15 @@ COMMON_TEST_SOURCES := \
     test/CommonTestUtils.cpp \
     test/JsonPrinterTestImpl.cpp \
     test/LayerTests.cpp \
-    test/TensorCopyUtils.cpp
+    test/TensorCopyUtils.cpp \
+    test/layerTests/AdditionTestImpl.cpp \
+    test/layerTests/DivisionTestImpl.cpp \
+    test/layerTests/EqualTestImpl.cpp \
+    test/layerTests/GreaterTestImpl.cpp \
+    test/layerTests/MaximumTestImpl.cpp \
+    test/layerTests/MinimumTestImpl.cpp \
+    test/layerTests/MultiplicationTestImpl.cpp \
+    test/layerTests/SubtractionTestImpl.cpp
 
 ifeq ($(ARMNN_COMPUTE_REF_ENABLED),1)
 COMMON_TEST_SOURCES += \
src/backends/backendsCommon/test/CMakeLists.txt
index 7357473..7dca047 100644
@@ -57,6 +57,24 @@ list(APPEND armnnBackendsCommonUnitTests_sources
     TensorCopyUtils.hpp
     WorkloadFactoryHelper.hpp
     WorkloadTestUtils.hpp
+    layerTests/AdditionTestImpl.cpp
+    layerTests/AdditionTestImpl.hpp
+    layerTests/DivisionTestImpl.cpp
+    layerTests/DivisionTestImpl.hpp
+    layerTests/ElementwiseTestImpl.hpp
+    layerTests/EqualTestImpl.cpp
+    layerTests/EqualTestImpl.hpp
+    layerTests/GreaterTestImpl.cpp
+    layerTests/GreaterTestImpl.hpp
+    layerTests/LayerTestResult.hpp
+    layerTests/MaximumTestImpl.cpp
+    layerTests/MaximumTestImpl.hpp
+    layerTests/MinimumTestImpl.cpp
+    layerTests/MinimumTestImpl.hpp
+    layerTests/MultiplicationTestImpl.cpp
+    layerTests/MultiplicationTestImpl.hpp
+    layerTests/SubtractionTestImpl.cpp
+    layerTests/SubtractionTestImpl.hpp
 )
 
 if (ARMNNREF)
src/backends/backendsCommon/test/LayerTests.cpp
index 5fd8f3e..2d71e60 100644
@@ -2789,4977 +2789,3056 @@ LayerTestResult<float,3> ConcatTest(
     return ret;
 }
 
-LayerTestResult<float,4> AdditionTest(
+LayerTestResult<float,4> CompareBatchNormTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::IWorkloadFactory& refWorkloadFactory)
 {
-    unsigned int batchSize = 2;
-    unsigned int channels  = 2;
-    unsigned int height    = 2;
-    unsigned int width     = 3;
+    const unsigned int width     = 2;
+    const unsigned int height    = 3;
+    const unsigned int channels  = 5;
+    const unsigned int batchSize = 3;
 
-    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
+    armnn::TensorInfo inputTensorInfo;
     armnn::TensorInfo outputTensorInfo;
+    armnn::TensorInfo tensorInfo;
 
-    unsigned int shape[] = {batchSize, channels, height, width};
+    constexpr unsigned int shape[]       = {batchSize, channels, height, width};
+    constexpr unsigned int tensorShape[] = {channels};
 
-    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
-    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
     outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);
 
+    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);
 
-    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
-        {
-            0.0f, 2.0f, 1.0f,
-            0.2f, 1.0f, 2.0f,
-
-            1.0f, 2.0f, 1.0f,
-            0.2f, 1.0f, 2.0f,
-
-            0.0f, 2.0f, 1.0f,
-            4.2f, 1.0f, 2.0f,
-
-            0.0f, 0.0f, 1.0f,
-            0.2f, 1.0f, 2.0f,
-        }));
-
-    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
-        {
-            1.0f, 2.0f, 1.0f,
-            0.0f, 1.0f, 2.0f,
-
-            1.0f, 2.0f, -2.0f,
-            0.2f, 1.0f, 2.0f,
-
-            0.0f, 2.0f, 1.0f,
-            4.2f, 0.0f, -3.0f,
-
-            0.0f, 0.0f, 1.0f,
-            0.7f, 1.0f, 5.0f,
-        }));
+    auto mean     = MakeRandomTensor<float, 1>(tensorInfo, 123);
+    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
+    auto beta     = MakeRandomTensor<float, 1>(tensorInfo, 123);
+    auto gamma    = MakeRandomTensor<float, 1>(tensorInfo, 345);
 
     LayerTestResult<float,4> ret(outputTensorInfo);
-    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
-        {
-            1.0f, 4.0f, 2.0f,
-            0.2f, 2.0f, 4.0f,
 
-            2.0f, 4.0f, -1.0f,
-            0.4f, 2.0f, 4.0f,
+    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
-            0.0f, 4.0f, 2.0f,
-            8.4f, 1.0f, -1.0f,
+    std::unique_ptr<armnn::ITensorHandle> inputHandleRef  = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
 
-            0.0f, 0.0f, 2.0f,
-            0.9f, 2.0f, 7.0f,
-        }));
+    armnn::BatchNormalizationQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
+    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
+    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
+    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
-    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
+    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
+    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
+    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
 
-    armnn::AdditionQueueDescriptor data;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
-    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+    data.m_Mean             = &meanTensor;
+    data.m_Variance         = &varianceTensor;
+    data.m_Beta             = &betaTensor;
+    data.m_Gamma            = &gammaTensor;
+    data.m_Parameters.m_Eps = 0.01f;
+
+    armnn::BatchNormalizationQueueDescriptor refData = data;
+    armnn::WorkloadInfo refInfo = info;
+    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
+    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
+    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);
 
-    inputHandle1->Allocate();
-    inputHandle2->Allocate();
+    inputHandle->Allocate();
     outputHandle->Allocate();
+    inputHandleRef->Allocate();
+    outputHandleRef->Allocate();
 
-    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
-    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
 
     workload->PostAllocationConfigure();
     workload->Execute();
+    workloadRef->PostAllocationConfigure();
+    workloadRef->Execute();
 
     CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
 
     return ret;
 }
 
-LayerTestResult<float, 5> Addition5dTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+template<typename T>
+void PermuteTensorData(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::PermutationVector& mappings,
+        armnn::TensorInfo & inputTensorInfo,
+        const T * inputData,
+        std::vector<T>& outputData)
 {
-    unsigned int depth     = 2;
-    unsigned int batchSize = 2;
-    unsigned int channels  = 2;
-    unsigned int height    = 2;
-    unsigned int width     = 3;
-
-    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
-    armnn::TensorInfo outputTensorInfo;
-
-    unsigned int shape[] = {depth, batchSize, channels, height, width};
-
-    inputTensorInfo1 = armnn::TensorInfo(5, shape, armnn::DataType::Float32);
-    inputTensorInfo2 = armnn::TensorInfo(5, shape, armnn::DataType::Float32);
-    outputTensorInfo = armnn::TensorInfo(5, shape, armnn::DataType::Float32);
-
-
-    auto input1 = MakeTensor<float, 5>(inputTensorInfo1, std::vector<float>(
-        {
-            2.6f, 4.0f, 4.4f,  2.7f, 4.6f, 2.8f,
-            2.3f, 1.9f, 3.4f,  2.9f, 2.2f, 4.5f,
-
-            2.8f, 1.9f, 2.3f,  2.6f, 4.7f, 3.5f,
-            0.4f, 1.5f, 2.1f,  0.7f, 5.0f, 1.1f,
-
-
-            1.0f, 2.7f, 0.0f,  0.6f, 0.8f, 0.9f,
-            1.0f, 2.6f, 0.4f,  3.8f, 0.4f, 0.8f,
-
-            0.5f, 4.3f, 3.1f,  4.4f, 0.7f, 1.4f,
-            0.4f, 4.4f, 0.7f,  0.6f, 4.7f, 1.2f,
-
-        }));
-
-    auto input2 = MakeTensor<float, 5>(inputTensorInfo2, std::vector<float>(
-        {
-            4.4f, 3.0f, 1.0f,  0.0f, 3.9f, 3.1f,
-            1.7f, 2.9f, 1.3f,  0.4f, 0.4f, 4.3f,
-
-            4.5f, 0.2f, 2.2f,  4.1f, 3.9f, 3.0f,
-            0.1f, 2.5f, 4.1f,  4.6f, 1.5f, 0.0f,
-
-
-            0.5f, 4.9f, 2.5f,  1.5f, 3.4f, 4.5f,
-            2.0f, 3.0f, 4.9f,  1.6f, 2.4f, 3.4f,
-
-            3.6f, 1.8f, 1.3f,  2.6f, 2.1f, 4.8f,
-            2.0f, 4.3f, 4.0f,  0.2f, 0.6f, 4.4f,
-        }));
-
-    LayerTestResult<float, 5> ret(outputTensorInfo);
-    ret.outputExpected = MakeTensor<float, 5>(outputTensorInfo, std::vector<float>(
-        {
-            7.0f, 7.0f, 5.4f,  2.7f, 8.5f, 5.9f,
-            4.0f, 4.8f, 4.7f,  3.3f, 2.6f, 8.8f,
-
-            7.3f, 2.1f, 4.5f,  6.7f, 8.6f, 6.5f,
-            0.5f, 4.0f, 6.2f,  5.3f, 6.5f, 1.1f,
-
-
-            1.5f, 7.6f, 2.5f,  2.1f, 4.2f, 5.4f,
-            3.0f, 5.6f, 5.3f,  5.4f, 2.8f, 4.2f,
+    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
+    if (inputData == nullptr)
+    {
+        // Nullptr is an error in the test. By returning without doing the permutation
+        // I expect the caller to fail the test. It still makes sense to report this as
+        // an assert for Debug builds.
+        return;
+    }
 
-            4.1f, 6.1f, 4.4f,  7.0f, 2.8f, 6.2f,
-            2.4f, 8.7f, 4.7f,  0.8f, 5.3f, 5.6f,
-        }));
+    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
-    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
-    armnn::AdditionQueueDescriptor data;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
-    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+    armnn::PermuteQueueDescriptor queueDescriptor;
+    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
+    armnn::WorkloadInfo workloadInfo;
+    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
 
-    inputHandle1->Allocate();
-    inputHandle2->Allocate();
+    inputHandle->Allocate();
     outputHandle->Allocate();
 
-    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0][0]);
-    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle.get(), inputData);
 
     workload->PostAllocationConfigure();
     workload->Execute();
 
-    CopyDataFromITensorHandle(&ret.output[0][0][0][0][0], outputHandle.get());
-
-    return ret;
+    outputData.resize(outputTensorInfo.GetNumElements());
+    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
+    inputTensorInfo = outputTensorInfo;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> AdditionBroadcastTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset)
+armnn::OriginsDescriptor CreateDescriptorForConcatenation(
+        const std::vector<armnn::TensorInfo> & inputTensorInfos,
+        unsigned int concatDim)
 {
-    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
-    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
-    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
-
-    if (armnn::IsQuantizedType<T>())
+    std::vector<armnn::TensorShape> shapes;
+    shapes.reserve(inputTensorInfos.size());
+    for (const armnn::TensorInfo& it: inputTensorInfos)
     {
-        inputTensorInfo1.SetQuantizationScale(qScale);
-        inputTensorInfo1.SetQuantizationOffset(qOffset);
-        inputTensorInfo2.SetQuantizationScale(qScale);
-        inputTensorInfo2.SetQuantizationOffset(qOffset);
-        outputTensorInfo.SetQuantizationScale(qScale);
-        outputTensorInfo.SetQuantizationOffset(qOffset);
+        shapes.push_back(it.GetShape());
     }
 
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
-        {
-            0.0f,
-            1.0f,
+    return armnn::CreateDescriptorForConcatenation(shapes.begin(),
+                                                   shapes.end(),
+                                                   concatDim);
+}
 
-            2.0f,
-            3.0f,
+//
+// Concatenation is only supported for the N and C dimensions for NCHW and the innermost dimension.
+// In case of fewer than 4 dimensions we need to make sure that the concat dimension is at least
+// the 3rd slowest iterating one or the innermost dimension.
+//
 
-            4.0f,
-            5.0f,
-        }));
+bool NeedPermuteForConcat(
+        const std::vector<armnn::TensorInfo> & inputTensorInfos,
+        unsigned int concatDim)
+{
+    // See note above. Additionally we expect the input shapes to have the
+    // same number of dimensions.
+    unsigned int nDimensions = 0;
 
-    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
+    // Determine the number of dimensions as well as sanity check them
+    // against test implementation issues.
+    for (auto && tensorInfo : inputTensorInfos)
+    {
+        if (!nDimensions)
         {
-            0.5f, 1.5f, 2.5f,
-            3.5f, 4.5f, 5.5f,
-        }));
-
-    LayerTestResult<T,4> ret(outputTensorInfo);
-    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
+            nDimensions = tensorInfo.GetShape().GetNumDimensions();
+        }
+        else
         {
-            0.5f, 1.5f, 2.5f,
-            4.5f, 5.5f, 6.5f,
-
-            2.5f, 3.5f, 4.5f,
-            6.5f, 7.5f, 8.5f,
-
-            4.5f, 5.5f, 6.5f,
-            8.5f, 9.5f, 10.5f,
-        }));
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
-    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::AdditionQueueDescriptor data;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
-    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
-
-    inputHandle1->Allocate();
-    inputHandle2->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
-    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
+                "Input shapes must have the same number of dimensions");
+        }
+    }
 
-    return ret;
+    return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset)
+armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
 {
-    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
-    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
-    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
-
-    if (armnn::IsQuantizedType<T>())
+    unsigned int numDims = inputShape.GetNumDimensions();
+    if (numDims >= 3)
     {
-        inputTensorInfo1.SetQuantizationScale(qScale);
-        inputTensorInfo1.SetQuantizationOffset(qOffset);
-        inputTensorInfo2.SetQuantizationScale(qScale);
-        inputTensorInfo2.SetQuantizationOffset(qOffset);
-        outputTensorInfo.SetQuantizationScale(qScale);
-        outputTensorInfo.SetQuantizationOffset(qOffset);
+        // Nothing to do if the inputShape has at least 3 dimensions.
+        return inputShape;
     }
 
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
-        {
-             0.0f,  1.0f,  2.0f,
-             3.0f,  4.0f,  5.0f,
-             6.0f,  7.0f,  8.0f,
-             9.0f, 10.0f, 11.0f,
-            12.0f, 13.0f, 14.0f,
-            15.0f, 16.0f, 17.0f,
-        }));
-
-    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
-        {
-            0.5f,
-        }));
-
-    LayerTestResult<T,4> ret(outputTensorInfo);
-    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
-        {
-             0.5f,  1.5f,  2.5f,
-             3.5f,  4.5f,  5.5f,
-             6.5f,  7.5f,  8.5f,
-             9.5f, 10.5f, 11.5f,
-            12.5f, 13.5f, 14.5f,
-            15.5f, 16.5f, 17.5f,
-        }));
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
-    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::AdditionQueueDescriptor data;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
-    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
-
-    inputHandle1->Allocate();
-    inputHandle2->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
-    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
-
-    return ret;
+    std::vector<unsigned int> newDims(size_t(3), 1u);
+    unsigned int expandedBy = 3 - numDims;
+    for (unsigned int i=0; i<numDims; ++i)
+    {
+        newDims[expandedBy+i] = inputShape[i];
+    }
+    return armnn::TensorShape(3u, &newDims[0]);
 }
 
-LayerTestResult<float, 4> AdditionBroadcastTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+void Generate3dPermuteVectorForConcat(
+        unsigned int numDimensions,
+        unsigned int & concatDim,
+        std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
 {
-    return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
-        workloadFactory, memoryManager, 0.0f, 0);
-}
+    BOOST_ASSERT_MSG(numDimensions <= 3,
+       "Only dimensions 1,2 and 3 are supported by this helper");
+    unsigned int expandedBy = 3 - numDimensions;
+    unsigned int expandedConcatAxis = concatDim + expandedBy;
 
-LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
-        workloadFactory, memoryManager, 2.f, 0);
-}
-
-LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
-        workloadFactory, memoryManager, 2.f, 0);
-}
-
-LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
-        workloadFactory, memoryManager, 0.0f, 0);
-}
-
-LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
-        workloadFactory, memoryManager, 0.1333333f, 128);
+    if (expandedConcatAxis == 2)
+    {
+        concatDim = 0;
+        armnn::PermutationVector forwardPermutation({1, 2, 0});
+        armnn::PermutationVector reversePermutation({2, 0, 1});
+        permutations = std::make_pair(forwardPermutation, reversePermutation);
+    }
+    else if (expandedConcatAxis == 1)
+    {
+        concatDim = 0;
+        armnn::PermutationVector forwardPermutation({2, 0, 1});
+        armnn::PermutationVector reversePermutation({1, 2, 0});
+        permutations = std::make_pair(forwardPermutation, reversePermutation);
+    }
+    else
+    {
+        BOOST_ASSERT(expandedConcatAxis == 0);
+        concatDim = 0;
+    }
 }
 
-LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+//
+// Permute the input tensors so we can do a supported concatenation.
+// Also treat tensors with fewer than 3 dimensions as 3d by adding dummy
+// 1-sized dimensions at the front. Finally, this function reports what the
+// output shape of the permuted, concatenated tensor is going to be.
+//
+template <typename T>
+void PermuteInputsForConcat(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        std::vector<armnn::TensorInfo> & inputTensorInfos,
+        std::vector<T *> & inputData,
+        std::vector<std::vector<T>> & inputDataStorage,
+        armnn::PermutationVector & permuteVector,
+        unsigned int & concatDim,
+        armnn::TensorInfo & outputTensorInfo)
 {
-    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
-        workloadFactory, memoryManager, 0.1333333f, 0);
-}
+    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
+        "Expecting more than one tensor to be concatenated here");
 
-LayerTestResult<float,4> CompareAdditionTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::IWorkloadFactory& refWorkloadFactory)
-{
-    unsigned int batchSize = 4;
-    unsigned int channels  = 1;
-    unsigned int height    = 2;
-    unsigned int width     = 3;
+    unsigned int numDims = 0;
+    unsigned int nthInput = 0;
+    const armnn::PermutationVector identity({0, 1, 2});
 
-    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
-    armnn::TensorInfo outputTensorInfo;
+    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
+        std::make_pair(identity, identity);
 
-    unsigned int shape[] = {batchSize, channels, height, width};
+    inputDataStorage.resize(inputData.size());
 
-    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
-    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
-    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+    for (auto && tensorInfo : inputTensorInfos)
+    {
+        if (numDims == 0)
+        {
+            numDims = tensorInfo.GetShape().GetNumDimensions();
+            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);
 
-    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
-    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
+            // Store the reverse permutation.
+            permuteVector = permutations.second;
+            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
+                "Test logic error, we don't need permutation, so we shouldn't arrive here");
+        }
+        else
+        {
+            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
+                "All inputs must have the same number of dimensions");
+        }
 
-    LayerTestResult<float,4> ret(outputTensorInfo);
+        armnn::TensorInfo newTensorInfo = tensorInfo;
+        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
-    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+        PermuteTensorData<T>(workloadFactory,
+                             memoryManager,
+                             permutations.first,
+                             newTensorInfo,
+                             inputData[nthInput],
+                             inputDataStorage[nthInput]);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
-    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
-    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
+        inputData[nthInput] = inputDataStorage[nthInput].data();
+        inputTensorInfos[nthInput] = newTensorInfo;
 
-    armnn::AdditionQueueDescriptor data;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
-    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+        ++nthInput;
+    }
 
-    armnn::AdditionQueueDescriptor refData = data;
-    armnn::WorkloadInfo refInfo = info;
-    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
-    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
-    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
+    outputTensorInfo.SetShape(
+        armnnUtils::Permuted(
+            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
+            permutations.first));
+}
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
-    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
 
-    inputHandle1->Allocate();
-    inputHandle2->Allocate();
-    outputHandle->Allocate();
-    inputHandle1Ref->Allocate();
-    inputHandle2Ref->Allocate();
-    outputHandleRef->Allocate();
+//
+// This is the counterpart of PermuteInputsForConcat(...): it permutes back
+// the output of the concatenation so we can check it against an expected
+// output.
+//
+template <typename T>
+void PermuteOutputForConcat(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::TensorInfo & tensorInfo,
+        const armnn::PermutationVector & permuteVector,
+        std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
+        T * data)
+{
+    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
+    if (data == nullptr)
+    {
+        // Nullptr is an error in the test. By returning without doing the permutation
+        // I expect the caller to fail the test. It still makes sense to report this as
+        // an assert for Debug builds.
+        return;
+    }
 
-    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
-    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
-    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
-    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
+    armnn::TensorInfo resultTensorInfo = tensorInfo;
+    std::vector<T> inputData(tensorInfo.GetNumElements());
+    std::vector<T> outputData;
 
-    workload->PostAllocationConfigure();
-    workload->Execute();
-    workloadRef->PostAllocationConfigure();
-    workloadRef->Execute();
+    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
 
-    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
-    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
+    PermuteTensorData<T>(workloadFactory,
+                         memoryManager,
+                         permuteVector,
+                         resultTensorInfo,
+                         &inputData[0],
+                         outputData);
 
-    return ret;
+    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
 }
 
-namespace {
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> DivisionTestHelper(
+template <typename T>
+void Concatenate(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const unsigned int shape0[4],
-    const std::vector<T>& values0,
-    float scale0,
-    int32_t offset0,
-    const unsigned int shape1[4],
-    const std::vector<T> & values1,
-    float scale1,
-    int32_t offset1,
-    const unsigned int outShape[4],
-    const std::vector<T> & outValues,
-    float outScale,
-    int32_t outOffset)
+    std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
+    std::initializer_list<T *> inputsOrig,
+    const armnn::TensorInfo& outputTensorInfoOrig,
+    T * output,
+    unsigned int concatDim,
+    bool useSubtensor)
 {
-    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
-    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
-    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
+    BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
+    if (output == nullptr)
+    {
+        // Nullptr is an error in the test. By returning without doing the concatenation
+        // I expect the caller to fail the test. It still makes sense to report this as
+        // an assert for Debug builds.
+        return;
+    }
 
-    inputTensorInfo0.SetQuantizationScale(scale0);
-    inputTensorInfo0.SetQuantizationOffset(offset0);
+    // Saves a copy of the parameters which we might need to change.
+    std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
+    std::vector<T *> inputs            = inputsOrig;
+    armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
 
-    inputTensorInfo1.SetQuantizationScale(scale1);
-    inputTensorInfo1.SetQuantizationOffset(offset1);
+    armnn::PermutationVector permuteVector{0, 1, 2};
 
-    outputTensorInfo.SetQuantizationScale(outScale);
-    outputTensorInfo.SetQuantizationOffset(outOffset);
+    // Holds and automatically releases memory for the reshaped input data.
+    std::vector<std::vector<T>> tmpInputDataStorage;
 
-    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
+    const size_t inputCount = inputTensorInfos.size();
 
-    LayerTestResult<T, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
+    bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+    if (needPermuteForConcat)
+    {
+        //
+        // We need to permute the inputs, because concatenation along
+        // the requested axis is not supported.
+        //
+        PermuteInputsForConcat<T>(workloadFactory,
+                                  memoryManager,
+                                  inputTensorInfos,
+                                  inputs,
+                                  tmpInputDataStorage,
+                                  permuteVector,
+                                  concatDim,
+                                  outputTensorInfo);
+    }
 
-    armnn::DivisionQueueDescriptor data;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(data,  info, inputTensorInfo0, inputHandle0.get());
-    AddInputToWorkload(data,  info, inputTensorInfo1, inputHandle1.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+    armnn::WorkloadInfo workloadInfo;
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);
+    std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
+    inputHandles.reserve(inputCount);
 
-    inputHandle0->Allocate();
-    inputHandle1->Allocate();
-    outputHandle->Allocate();
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
-    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
-    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
+    armnn::ConcatQueueDescriptor queueDescriptor;
+    armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcatenation(inputTensorInfos, concatDim);
+    queueDescriptor.m_Parameters = viewsDescriptor;
 
-    workload->PostAllocationConfigure();
-    workload->Execute();
+    if (useSubtensor)
+    {
+        queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
+        for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
+        {
+            queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
+                viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
+        }
 
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+        outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
-    return result;
-}
-} // anonymous namespace
+        const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
+        for (unsigned int i = 0; i < inputCount; ++i)
+        {
+            const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
+            std::unique_ptr<armnn::ITensorHandle> inputHandle =
+                subTensorsSupported ?
+                    workloadFactory.CreateSubTensorHandle(*outputHandle,
+                                                          inputTensorInfo.GetShape(),
+                                                          queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
+                    workloadFactory.CreateTensorHandle(inputTensorInfo);
 
-LayerTestResult<float,4> DivisionByZeroTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int width = 2;
-    const unsigned int height = 2;
-    const unsigned int channelCount = 2;
-    const unsigned int batchSize = 2;
+            inputHandles.emplace_back(std::move(inputHandle));
+        }
 
-    unsigned int shape[] = { batchSize, channelCount, height, width };
+    }
+    else
+    {
+        for (unsigned int i = 0; i < inputCount; ++i)
+        {
+            std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
+            inputHandles.emplace_back(std::move(inputHandle));
+        }
+    }
 
-    std::vector<float> input0({
-                                1.f,  1.f,  1.f,  1.f,  0.f, 0.f, 0.f, 0.f,
-                               -1.f, -1.f, -1.f, -1.f,  5.f, 5.f, 5.f, 5.f });
+    for (unsigned int i = 0; i < inputCount; ++i)
+    {
+        AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
+    }
 
-    std::vector<float> input1({
-                               0.f, 0.f, -0.f, -0.f,  0.f, 0.f, -0.f, -0.f,
-                               0.f, 0.f, -0.f, -0.f,  5.f, 5.f,  5.f,  5.f });
+    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
 
-    std::vector<float> output({
-                               INFINITY, INFINITY, -INFINITY, -INFINITY,  NAN, NAN, -NAN, -NAN,
-                               -INFINITY, -INFINITY, INFINITY, INFINITY,  1, 1, 1, 1 });
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
 
-    return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
-                                                        memoryManager,
-                                                        shape, input0, 1.0f, 0,
-                                                        shape, input1, 1.0f, 0,
-                                                        shape, output, 1.0f, 0);
-}
-
-LayerTestResult<float,4> DivisionTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int width = 2;
-    const unsigned int height = 2;
-    const unsigned int channelCount = 2;
-    const unsigned int batchSize = 2;
-
-    unsigned int shape[] = { batchSize, channelCount, height, width };
-
-    std::vector<float> input0({
-                                      2,  2,  2,  2,    3,  3,  3,  3,
-                                      4,  4,  4,  4,    5,  5,  5,  5 });
+    for (auto& inputHandle : inputHandles)
+    {
+        inputHandle->Allocate();
+    }
 
-    std::vector<float> input1({
-                                      1,  1,  1,  1,    2,  2,  2,  2,
-                                      4,  4,  4,  4,    4,  4,  4,  4 });
+    outputHandle->Allocate();
 
-    std::vector<float> output({
-                                      2,  2,  2,  2,    1.5,  1.5,  1.5,  1.5,
-                                      1, 1, 1, 1,  1.25, 1.25, 1.25, 1.25 });
+    unsigned int nextInputId = 0;
+    for (auto& inputHandle : inputHandles)
+    {
+        CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
+        ++nextInputId;
+    }
 
+    workload->PostAllocationConfigure();
+    workload->Execute();
 
-    return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
-                                                        memoryManager,
-                                                        shape, input0, 1.0f, 0,
-                                                        shape, input1, 1.0f, 0,
-                                                        shape, output, 1.0f, 0);
+    if (needPermuteForConcat)
+    {
+        PermuteOutputForConcat<T>(workloadFactory,
+                                  memoryManager,
+                                  outputTensorInfo,
+                                  permuteVector,
+                                  std::move(outputHandle),
+                                  output);
+    }
+    else
+    {
+        CopyDataFromITensorHandle(output, outputHandle.get());
+    }
 }
 
-LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 1> Concatenation1dTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset)
 {
-    unsigned int shape0[] = { 1, 2, 2, 2 };
-    std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
+    armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
 
-    unsigned int shape1[] = { 1, 1, 1, 1 };
-    std::vector<float> input1({ 2 });
+    auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
+    auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
+    auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
+
+    armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
+
+    LayerTestResult<T, 1> result(outputTensorInfo);
 
-    std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
+    std::vector<T> output;
+    output.resize(outputTensorInfo.GetNumElements());
+    Concatenate<T>(workloadFactory, memoryManager,
+                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
+                   { input0.data(), input1.data(), input2.data() },
+                   outputTensorInfo,
+                   output.data(),
+                   0,
+                   true);
 
+    result.output = MakeTensor<T, 1>(outputTensorInfo, output);
+    result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
+    }));
 
-    return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
-                                                        memoryManager,
-                                                        shape0, input0, 1.0f, 0,
-                                                        shape1, input1, 1.0f, 0,
-                                                        shape0, output, 1.0f, 0);
+    return result;
 }
 
-LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
+LayerTestResult<float, 1> Concatenation1dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    unsigned int shape0[] = { 1, 3, 3, 2 };
-    std::vector<float> input0({
-                                      1,   4,       3,  8,      5, 12,
-                                      7,   16,      9, 20,     11, 24,
-                                      13,  28,     15, 32,     17, 36});
-
-    unsigned int shape1[] = { 1, 1, 1, 2 };
-    std::vector<float> input1({ 1, 2 });
-
-    std::vector<float> output({
-                                      1,   2,      3,  4,      5,  6,
-                                      7,   8,      9, 10,     11, 12,
-                                      13, 14,     15, 16,     17, 18});
-
-    return DivisionTestHelper<armnn::DataType::Float32>(workloadFactory,
-                                                        memoryManager,
-                                                        shape0, input0, 1.0f, 0,
-                                                        shape1, input1, 1.0f, 0,
-                                                        shape0, output, 1.0f, 0);
+    return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
-LayerTestResult<uint8_t,4> DivisionUint8Test(
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> Concatenation2dTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::TensorInfo& outputTensorInfo,
+    unsigned int dimension,
+    const float qScale,
+    const int32_t qOffset)
 {
-    const unsigned int width = 2;
-    const unsigned int height = 2;
-    const unsigned int channelCount = 2;
-    const unsigned int batchSize = 2;
-
-    unsigned int shape[] = { batchSize, channelCount, height, width };
+    armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
 
-    std::vector<uint8_t> input0({2,  2,  2,  2,    3,  3,  3,  3,
-                                 4,  4,  4,  4,    5,  5,  5,  5 });
+    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        1.0f, 2.0f, 3.0f,
 
-    std::vector<uint8_t> input1({1,  1,  1,  1,    2,  2,  2,  2,
-                                 4,  4,  4,  4,    4,  4,  4,  4 });
+        // Batch 1
+        10.0f, 11.0f, 12.0f,
+    }));
 
-    std::vector<uint8_t> output({8,  8,  8,  8,    6,  6,  6,  6,
-                                 4,  4,  4,  4,    5,  5,  5,  5});
+    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        4.0f, 5.0f, 6.0f,
 
+        // Batch 1
+        13.0f, 14.0f, 15.0f,
+    }));
 
-    return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
-                                                                memoryManager,
-                                                                shape, input0, 1.0f,  0,
-                                                                shape, input1, 1.0f,  0,
-                                                                shape, output, 0.25f, 0);
-}
+    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        7.0f, 8.0f, 9.0f,
 
-LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    unsigned int shape0[] = { 1, 2, 2, 2 };
-    std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
+        // Batch 1
+        16.0f, 17.0f, 18.0f,
+    }));
 
-    unsigned int shape1[] = { 1, 1, 1, 1 };
-    std::vector<uint8_t> input1({ 2 });
+    LayerTestResult<T, 2> result(outputTensorInfo);
 
-    std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
+    std::vector<T> output;
+    output.resize(outputTensorInfo.GetNumElements());
+    Concatenate<T>(workloadFactory, memoryManager,
+                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
+                   { input0.data(), input1.data(), input2.data() },
+                   outputTensorInfo,
+                   output.data(),
+                   dimension,
+                   true);
 
-    return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
-                                                                memoryManager,
-                                                                shape0, input0, 1.0f, 0,
-                                                                shape1, input1, 1.0f, 0,
-                                                                shape0, output, 1.0f, 0);
+    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
+    return result;
 }
 
-LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset)
 {
-    unsigned int shape0[] = { 1, 3, 3, 2 };
-    std::vector<uint8_t> input0({1,   4,     3,  8,      5,  12,
-                                 7,   16,    9,  20,     11, 24,
-                                 13,  28,    15, 32,     17, 36});
-
-    unsigned int shape1[] = { 1, 1, 1, 2 };
-    std::vector<uint8_t> input1({ 1, 2 });
+    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
 
-    std::vector<uint8_t> output({1,   2,      3,  4,      5,  6,
-                                 7,   8,      9, 10,     11, 12,
-                                 13, 14,     15, 16,     17, 18});
+    LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
+        workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
 
-    return DivisionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
-                                                                memoryManager,
-                                                                shape0, input0, 1.0f, 0,
-                                                                shape1, input1, 1.0f, 0,
-                                                                shape0, output, 1.0f, 0);
-}
+    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        1.0f, 2.0f, 3.0f,
 
-LayerTestResult<int16_t,4> DivisionInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    unsigned int shape[] = { 2, 2, 2, 2 };
+        // Batch 1
+        10.0f, 11.0f, 12.0f,
 
-    std::vector<int16_t> input0({2,  2,  2,  2,    3,  3,  3,  3,
-                                 4,  4,  4,  4,    5,  5,  5,  5 });
+        // Batch 2
+        4.0f, 5.0f, 6.0f,
 
-    std::vector<int16_t> input1({1,  1,  1,  1,    2,  2,  2,  2,
-                                 4,  4,  4,  4,    4,  4,  4,  4 });
+        // Batch 3
+        13.0f, 14.0f, 15.0f,
 
-    std::vector<int16_t> output({8,  8,  8,  8,    6,  6,  6,  6,
-                                 4,  4,  4,  4,    5,  5,  5,  5});
+        // Batch 4
+        7.0f, 8.0f, 9.0f,
 
+        // Batch 5
+        16.0f, 17.0f, 18.0f,
+    }));
 
-    return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
-                                                                memoryManager,
-                                                                shape, input0, 1.0f,  0,
-                                                                shape, input1, 1.0f,  0,
-                                                                shape, output, 0.25f, 0);
+    return result;
 }
 
-LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
+LayerTestResult<float, 2> Concatenation2dDim0Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    unsigned int shape0[] = { 1, 2, 2, 2 };
-    std::vector<int16_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
-
-    unsigned int shape1[] = { 1, 1, 1, 1 };
-    std::vector<int16_t> input1({ 2 });
-
-    std::vector<int16_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
-
-    return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
-                                                                memoryManager,
-                                                                shape0, input0, 1.0f, 0,
-                                                                shape1, input1, 1.0f, 0,
-                                                                shape0, output, 1.0f, 0);
+    return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
-LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset)
 {
-    unsigned int shape0[] = { 1, 3, 3, 2 };
-    std::vector<int16_t> input0({1,   4,     3,  8,      5,  12,
-                                 7,   16,    9,  20,     11, 24,
-                                 13,  28,    15, 32,     17, 36});
-
-    unsigned int shape1[] = { 1, 1, 1, 2 };
-    std::vector<int16_t> input1({ 1, 2 });
+    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
 
-    std::vector<int16_t> output({1,   2,      3,  4,      5,  6,
-                                 7,   8,      9, 10,     11, 12,
-                                 13, 14,     15, 16,     17, 18});
+    LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
+        workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
 
-    return DivisionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
-                                                                memoryManager,
-                                                                shape0, input0, 1.0f, 0,
-                                                                shape1, input1, 1.0f, 0,
-                                                                shape0, output, 1.0f, 0);
-}
+    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
 
-template<typename DescriptorType>
-std::unique_ptr<armnn::IWorkload> CreateWorkload(
-    const armnn::IWorkloadFactory& workloadFactory,
-    const armnn::WorkloadInfo& info,
-    const DescriptorType& descriptor)
-{
-    return CreateWorkload(workloadFactory, info, descriptor);
-};
+        // Batch 1
+        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
+    }));
 
-template<>
-std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
-    const armnn::IWorkloadFactory& workloadFactory,
-    const armnn::WorkloadInfo& info,
-    const armnn::MaximumQueueDescriptor& descriptor)
-{
-    return workloadFactory.CreateMaximum(descriptor, info);
+    return result;
 }
 
-template<>
-std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
-    const armnn::IWorkloadFactory& workloadFactory,
-    const armnn::WorkloadInfo& info,
-    const armnn::MinimumQueueDescriptor& descriptor)
+LayerTestResult<float, 2> Concatenation2dDim1Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return workloadFactory.CreateMinimum(descriptor, info);
+    return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
-template<>
-std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
-        const armnn::IWorkloadFactory& workloadFactory,
-        const armnn::WorkloadInfo& info,
-        const armnn::EqualQueueDescriptor& descriptor)
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset)
 {
-    return workloadFactory.CreateEqual(descriptor, info);
-}
+    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
+    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        1.0f, 2.0f, 3.0f,
 
-template<>
-std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
-        const armnn::IWorkloadFactory& workloadFactory,
-        const armnn::WorkloadInfo& info,
-        const armnn::GreaterQueueDescriptor& descriptor)
-{
-    return workloadFactory.CreateGreater(descriptor, info);
-}
+        // Batch 1
+        10.0f, 11.0f, 12.0f,
+    }));
 
-namespace {
+    armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
+    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        4.0f, 5.0f, 6.0f,
 
-template <typename Descriptor,
-          armnn::DataType ArmnnTypeInput,
-          armnn::DataType ArmnnTypeOutput,
-          typename TInput = armnn::ResolveType<ArmnnTypeInput>,
-          typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
-LayerTestResult<TOutput, 4> ElementwiseTestHelper(
-    armnn::IWorkloadFactory & workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
-    const unsigned int shape0[4], std::vector<TInput> values0,
-    const unsigned int shape1[4], std::vector<TInput> values1,
-    const unsigned int outShape[4], std::vector<TOutput> outValues,
-    float qScale = 0.0f, int qOffset = 0)
-{
-    const uint32_t dimensionCount = 4;
-    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnTypeInput};
-    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnTypeInput};
-    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnTypeOutput};
-
-    auto input0 = MakeTensor<TInput, 4>(inputTensorInfo0, values0);
-    auto input1 = MakeTensor<TInput, 4>(inputTensorInfo1, values1);
-
-    if (armnn::IsQuantizedType<TInput>())
-    {
-        inputTensorInfo0.SetQuantizationScale(qScale);
-        inputTensorInfo0.SetQuantizationOffset(qOffset);
+        // Batch 1
+        13.0f, 14.0f, 15.0f,
 
-        inputTensorInfo1.SetQuantizationScale(qScale);
-        inputTensorInfo1.SetQuantizationOffset(qOffset);
+        // Batch 2
+        7.0f, 8.0f, 9.0f,
+    }));
 
-        outputTensorInfo.SetQuantizationScale(qScale);
-        outputTensorInfo.SetQuantizationOffset(qOffset);
-    }
+    armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
+    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        16.0f, 17.0f, 18.0f,
+    }));
 
-    LayerTestResult<TOutput,4> ret(outputTensorInfo);
+    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
+    LayerTestResult<T, 2> result(outputTensorInfo);
 
-    if(ArmnnTypeOutput == armnn::DataType::Boolean)
-    {
-        ret.compareBoolean = true;
-    }
+    std::vector<T> output;
+    output.resize(outputTensorInfo.GetNumElements());
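+    // The last two arguments are the concatenation axis (0, the batch axis) and useSubtensor.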
+    Concatenate<T>(workloadFactory, memoryManager,
+                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
+                   { input0.data(), input1.data(), input2.data() },
+                   outputTensorInfo,
+                   output.data(),
+                   0,
+                   true);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
+    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        1.0f, 2.0f, 3.0f,
 
-    Descriptor data;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
-    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-    auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);
+        // Batch 1
+        10.0f, 11.0f, 12.0f,
 
-    inputHandle0->Allocate();
-    inputHandle1->Allocate();
-    outputHandle->Allocate();
+        // Batch 2
+        4.0f, 5.0f, 6.0f,
 
-    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
-    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
+        // Batch 3
+        13.0f, 14.0f, 15.0f,
 
-    workload->PostAllocationConfigure();
-    ExecuteWorkload(*workload, memoryManager);
+        // Batch 4
+        7.0f, 8.0f, 9.0f,
 
-    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+        // Batch 5
+        16.0f, 17.0f, 18.0f,
+    }));
 
-    ret.outputExpected = MakeTensor<TOutput, 4>(outputTensorInfo, outValues);
-    return ret;
+    return result;
 }
 
-template <typename Descriptor, armnn::DataType ArmnnT, typename T = armnn::ResolveType<ArmnnT>>
-LayerTestResult<T, 4> ElementwiseTestHelper(
-    armnn::IWorkloadFactory & workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
-    const unsigned int shape0[4], std::vector<T> values0,
-    const unsigned int shape1[4], std::vector<T> values1,
-    const unsigned int outShape[4], std::vector<T> outValues,
-    float qScale = 0.0f, int qOffset = 0)
+LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return ElementwiseTestHelper<Descriptor, ArmnnT, ArmnnT>
-        (workloadFactory,
-         memoryManager,
-         shape0,
-         values0,
-         shape1,
-         values1,
-         outShape,
-         outValues,
-         qScale,
-         qOffset);
-}
+    return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
+        workloadFactory, memoryManager, 0.0f, 0);
 }
 
-LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
-                                            const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset)
 {
-    const unsigned int width = 2;
-    const unsigned int height = 2;
-    const unsigned int channelCount = 2;
-    const unsigned int batchSize = 2;
-
-    unsigned int shape[] = { batchSize, channelCount, height, width };
-
-    std::vector<float> input0({ 1, 1, 1, 1,  5, 5, 5, 5,
-                                3, 3, 3, 3,  4, 4, 4, 4 });
-
-    std::vector<float> input1({ 1, 1, 1, 1,  3, 3, 3, 3,
-                                5, 5, 5, 5,  4, 4, 4, 4 });
-
-    std::vector<uint8_t> output({ 1, 1, 1, 1,  0, 0, 0, 0,
-                                  0, 0, 0, 0,  1, 1, 1, 1 });
+    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
+    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        1.0f, 2.0f, 3.0f,
 
-    return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
-        workloadFactory,
-        memoryManager,
-        shape,
-        input0,
-        shape,
-        input1,
-        shape,
-        output);
-}
+        // Batch 1
+        10.0f, 11.0f, 12.0f,
+    }));
 
-LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    unsigned int shape0[] = { 1, 2, 2, 2 };
-    std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
+    armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
+    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
 
-    unsigned int shape1[] = { 1, 1, 1, 1 };
-    std::vector<float> input1({ 1 });
+        // Batch 1
+        13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
+    }));
 
-    std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
+    armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
+    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        9.0f,
 
-    return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
-        workloadFactory,
-        memoryManager,
-        shape0,
-        input0,
-        shape1,
-        input1,
-        shape0,
-        output);
-}
+        // Batch 1
+        18.0f
+    }));
 
-LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 2, 2, 3 };
-    const unsigned int shape1[] = { 1, 1, 1, 3 };
+    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
+    LayerTestResult<T, 2> result(outputTensorInfo);
 
-    std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
-                                7, 8, 9, 10, 11, 12 });
+    std::vector<T> output;
+    output.resize(outputTensorInfo.GetNumElements());
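+    // Concatenation along dimension 1: widths of 3, 5 and 1 make up the 9 output columns.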
+    Concatenate<T>(workloadFactory, memoryManager,
+                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
+                   { input0.data(), input1.data(), input2.data() },
+                   outputTensorInfo,
+                   output.data(),
+                   1,
+                   true);
 
-    std::vector<float> input1({ 1, 2, 3});
+    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
+    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
 
-    std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
-                                  0, 0, 0, 0, 0, 0 });
+        // Batch 1
+        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
+    }));
 
-    return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
-        workloadFactory,
-        memoryManager,
-        shape0,
-        input0,
-        shape1,
-        input1,
-        shape0,
-        output);
+    return result;
 }
 
-LayerTestResult<uint8_t, 4> EqualUint8Test(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    unsigned int shape[] = { 2, 2, 2, 2 };
-
-    // See dequantized values to the right.
-    std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
-                                  3, 3, 3, 3, 7, 7, 7, 7 });
-
-    std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
-                                  3, 3, 3, 3, 5, 5, 5, 5 });
-
-    std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
-                                  1, 1, 1, 1, 0, 0, 0, 0 });
-
-    return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
-                                 armnn::DataType::QuantisedAsymm8,
-                                 armnn::DataType::Boolean>(
-        workloadFactory,
-        memoryManager,
-        shape,
-        input0,
-        shape,
-        input1,
-        shape,
-        output,
-        1.0f,
-        0);
+    return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
+        workloadFactory, memoryManager, 0.0f, 0);
 }
 
-LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> Concatenation3dTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::TensorInfo& outputTensorInfo,
+    unsigned int dimension,
+    bool useSubtensor,
+    float qScale,
+    int32_t qOffset)
 {
-    const unsigned int shape0[] = { 1, 2, 2, 3 };
-    const unsigned int shape1[] = { 1, 1, 1, 1 };
-
-    std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
-                                  7, 8, 9, 10, 11, 12 });
-
-    std::vector<uint8_t> input1({ 1 });
-
-    std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
-                                  0, 0, 0, 0, 0, 0 });
+    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
 
-    return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
-                                 armnn::DataType::QuantisedAsymm8,
-                                 armnn::DataType::Boolean>(
-        workloadFactory,
-        memoryManager,
-        shape0,
-        input0,
-        shape1,
-        input1,
-        shape0,
-        output,
-        1.0f,
-        0);
-}
+    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        1.0f, 2.0f,
 
-LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 2, 2, 3 };
-    const unsigned int shape1[] = { 1, 1, 1, 3 };
+        // Batch 0, Channel 1
+        3.0f, 4.0f,
 
-    std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
-                                  7, 8, 9, 10, 11, 12 });
+        // Batch 0, Channel 2
+        5.0f, 6.0f,
 
-    std::vector<uint8_t> input1({ 1, 1, 3});
+        // Batch 1, Channel 0
+        19.0f, 20.0f,
 
-    std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
-                                  0, 0, 0, 0, 0, 0 });
+        // Batch 1, Channel 1
+        21.0f, 22.0f,
 
-    return ElementwiseTestHelper<armnn::EqualQueueDescriptor,
-                                 armnn::DataType::QuantisedAsymm8,
-                                 armnn::DataType::Boolean>(
-        workloadFactory,
-        memoryManager,
-        shape0,
-        input0,
-        shape1,
-        input1,
-        shape0,
-        output,
-        1.0f,
-        0);
-}
+        // Batch 1, Channel 2
+        23.0f, 24.0f
+    }));
 
-LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
-                                            const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int width = 2;
-    const unsigned int height = 2;
-    const unsigned int channelCount = 2;
-    const unsigned int batchSize = 2;
+    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        7.0f, 8.0f,
 
-    unsigned int shape[] = { batchSize, channelCount, height, width };
+        // Batch 0, Channel 1
+        9.0f, 10.0f,
 
-    std::vector<float> input0({ 1, 1, 1, 1,  5, 5, 5, 5,
-                                3, 3, 3, 3,  4, 4, 4, 4 });
+        // Batch 0, Channel 2
+        11.0f, 12.0f,
 
-    std::vector<float> input1({ 1, 1, 1, 1,  3, 3, 3, 3,
-                                5, 5, 5, 5,  4, 4, 4, 4 });
+        // Batch 1, Channel 0
+        25.0f, 26.0f,
 
-    std::vector<uint8_t> output({ 0, 0, 0, 0,  1, 1, 1, 1,
-                                  0, 0, 0, 0,  0, 0, 0, 0 });
+        // Batch 1, Channel 1
+        27.0f, 28.0f,
 
-    return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
-        workloadFactory,
-        memoryManager,
-        shape,
-        input0,
-        shape,
-        input1,
-        shape,
-        output);
-}
+        // Batch 1, Channel 2
+        29.0f, 30.0f
+    }));
 
-LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    unsigned int shape0[] = { 1, 2, 2, 2 };
-    std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
+    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        13.0f, 14.0f,
 
-    unsigned int shape1[] = { 1, 1, 1, 1 };
-    std::vector<float> input1({ 1 });
+        // Batch 0, Channel 1
+        15.0f, 16.0f,
 
-    std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1, 1, 1});
+        // Batch 0, Channel 2
+        17.0f, 18.0f,
 
-    return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
-        workloadFactory,
-        memoryManager,
-        shape0,
-        input0,
-        shape1,
-        input1,
-        shape0,
-        output);
-}
+        // Batch 1, Channel 0
+        31.0f, 32.0f,
 
-LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 2, 2, 3 };
-    const unsigned int shape1[] = { 1, 1, 1, 3 };
+        // Batch 1, Channel 1
+        33.0f, 34.0f,
 
-    std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
-                                7, 8, 9, 10, 11, 12 });
+        // Batch 1, Channel 2
+        35.0f, 36.0f
+    }));
 
-    std::vector<float> input1({ 1, 3, 2});
+    LayerTestResult<T, 3> result(outputTensorInfo);
 
-    std::vector<uint8_t> output({ 0, 0, 1, 1, 1, 1,
-                                  1, 1, 1, 1, 1, 1 });
+    std::vector<T> output;
+    output.resize(outputTensorInfo.GetNumElements());
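+    // The caller-supplied 'dimension' selects the concatenation axis; 'useSubtensor' is forwarded unchanged.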
+    Concatenate<T>(workloadFactory, memoryManager,
+                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
+                   { input0.data(), input1.data(), input2.data() },
+                   outputTensorInfo,
+                   output.data(),
+                   dimension,
+                   useSubtensor);
 
-    return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
-        workloadFactory,
-        memoryManager,
-        shape0,
-        input0,
-        shape1,
-        input1,
-        shape0,
-        output);
+    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
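+    // outputExpected is deliberately left unset; each per-axis test impl fills in its own reference data.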
+    return result;
 }
 
-LayerTestResult<uint8_t, 4> GreaterUint8Test(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset)
 {
-    unsigned int shape[] = { 2, 2, 2, 2 };
+    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
 
-    // See dequantized values to the right.
-    std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
-                                  3, 3, 3, 3, 5, 5, 5, 5 });
+    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
+        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
 
-    std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
-                                  2, 2, 2, 2, 5, 5, 5, 5 });
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        1.0f, 2.0f,
 
-    std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
-                                  1, 1, 1, 1, 0, 0, 0, 0 });
+        // Batch 0, Channel 1
+        3.0f, 4.0f,
 
-    return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
-                                 armnn::DataType::QuantisedAsymm8,
-                                 armnn::DataType::Boolean>(
-        workloadFactory,
-        memoryManager,
-        shape,
-        input0,
-        shape,
-        input1,
-        shape,
-        output,
-        1.0f,
-        0);
-}
+        // Batch 0, Channel 2
+        5.0f, 6.0f,
 
-LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 2, 2, 3 };
-    const unsigned int shape1[] = { 1, 1, 1, 1 };
+        // Batch 1, Channel 0
+        19.0f, 20.0f,
 
-    std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
-                                  7, 8, 9, 10, 11, 12 });
+        // Batch 1, Channel 1
+        21.0f, 22.0f,
 
-    std::vector<uint8_t> input1({ 1 });
+        // Batch 1, Channel 2
+        23.0f, 24.0f,
 
-    std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
-                                  1, 1, 1, 1, 1, 1 });
+        // Batch 2, Channel 0
+        7.0f, 8.0f,
 
-    return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
-                                 armnn::DataType::QuantisedAsymm8,
-                                 armnn::DataType::Boolean>(
-        workloadFactory,
-        memoryManager,
-        shape0,
-        input0,
-        shape1,
-        input1,
-        shape0,
-        output,
-        1.0f,
-        0);
-}
+        // Batch 2, Channel 1
+        9.0f, 10.0f,
 
-LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 2, 2, 3 };
-    const unsigned int shape1[] = { 1, 1, 1, 3 };
+        // Batch 2, Channel 2
+        11.0f, 12.0f,
 
-    std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
-                                  7, 8, 9, 10, 11, 12 });
+        // Batch 3, Channel 0
+        25.0f, 26.0f,
 
-    std::vector<uint8_t> input1({ 1, 1, 3});
+        // Batch 3, Channel 1
+        27.0f, 28.0f,
 
-    std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
-                                  1, 1, 1, 1, 1, 1 });
+        // Batch 3, Channel 2
+        29.0f, 30.0f,
 
-    return ElementwiseTestHelper<armnn::GreaterQueueDescriptor,
-                                 armnn::DataType::QuantisedAsymm8,
-                                 armnn::DataType::Boolean>(
-        workloadFactory,
-        memoryManager,
-        shape0,
-        input0,
-        shape1,
-        input1,
-        shape0,
-        output,
-        1.0f,
-        0);
-}
+        // Batch 4, Channel 0
+        13.0f, 14.0f,
 
-LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
-                                           const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int width = 2;
-    const unsigned int height = 2;
-    const unsigned int channelCount = 2;
-    const unsigned int batchSize = 2;
+        // Batch 4, Channel 1
+        15.0f, 16.0f,
 
-    unsigned int shape[] = { batchSize, channelCount, height, width };
+        // Batch 4, Channel 2
+        17.0f, 18.0f,
 
-    std::vector<float> input0({ 1, 1, 1, 1,  5, 5, 5, 5,
-                                3, 3, 3, 3,  4, 4, 4, 4 });
+        // Batch 5, Channel 0
+        31.0f, 32.0f,
 
-    std::vector<float> input1({ 2, 2, 2, 2,  3, 3, 3, 3,
-                                4, 4, 4, 4,  5, 5, 5, 5 });
+        // Batch 5, Channel 1
+        33.0f, 34.0f,
 
-    std::vector<float> output({ 2, 2, 2, 2,  5, 5, 5, 5,
-                                4, 4, 4, 4,  5, 5, 5, 5 });
+        // Batch 5, Channel 2
+        35.0f, 36.0f
+    }));
 
-    return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
-        workloadFactory,
-        memoryManager,
-        shape,
-        input0,
-        shape,
-        input1,
-        shape,
-        output);
+    return result;
 }
 
-LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+LayerTestResult<float, 3> Concatenation3dDim0Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    unsigned int shape0[] = { 1, 2, 2, 2 };
-    std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
-
-    unsigned int shape1[] = { 1, 1, 1, 1 };
-    std::vector<float> input1({ 2 });
-
-    std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
-
-    return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
-        workloadFactory,
-        memoryManager,
-        shape0,
-        input0,
-        shape1,
-        input1,
-        shape0,
-        output);
+    return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
-LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset)
 {
-    const unsigned int shape0[] = { 1, 2, 2, 3 };
-    const unsigned int shape1[] = { 1, 1, 1, 3 };
+    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);
 
-    std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
-                                  7, 8, 9, 10, 11, 12 });
+    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
+        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
 
-    std::vector<float> input1({ 1, 2, 3});
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        1.0f, 2.0f,
 
-    std::vector<float> output({ 1, 2, 3, 4, 5, 6,
-                                7, 8, 9, 10, 11, 12 });
+        // Batch 0, Channel 1
+        3.0f, 4.0f,
 
-    return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
-        workloadFactory,
-        memoryManager,
-        shape0,
-        input0,
-        shape1,
-        input1,
-        shape0,
-        output);
-}
+        // Batch 0, Channel 2
+        5.0f, 6.0f,
 
-LayerTestResult<uint8_t, 4> MaximumUint8Test(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    unsigned int shape[] = { 2, 2, 2, 2 };
+        // Batch 0, Channel 3
+        7.0f, 8.0f,
 
-    // See dequantized values to the right.
-    std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
-                                  3, 3, 3, 3, 4, 4, 4, 4 });
+        // Batch 0, Channel 4
+        9.0f, 10.0f,
 
-    std::vector<uint8_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
-                                  4, 4, 4, 4, 5, 5, 5, 5 });
+        // Batch 0, Channel 5
+        11.0f, 12.0f,
 
-    std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
-                                  4, 4, 4, 4, 5, 5, 5, 5 });
+        // Batch 0, Channel 6
+        13.0f, 14.0f,
 
-    return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
-        workloadFactory,
-        memoryManager,
-        shape,
-        input0,
-        shape,
-        input1,
-        shape,
-        output,
-        1.0f,
-        0);
-}
+        // Batch 0, Channel 7
+        15.0f, 16.0f,
 
-LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 2, 2, 3 };
-    const unsigned int shape1[] = { 1, 1, 1, 1 };
+        // Batch 0, Channel 8
+        17.0f, 18.0f,
 
-    std::vector<uint8_t> input0({ 1, 2, 3, 4,  5, 6,
-                                  7, 8, 9, 10, 11, 12 });
+        // Batch 1, Channel 0
+        19.0f, 20.0f,
 
-    std::vector<uint8_t> input1({2});
+        // Batch 1, Channel 1
+        21.0f, 22.0f,
 
-    std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
-                                  7, 8, 9, 10, 11, 12 });
+        // Batch 1, Channel 2
+        23.0f, 24.0f,
 
-    return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
-        workloadFactory,
-        memoryManager,
-        shape0,
-        input0,
-        shape1,
-        input1,
-        shape0,
-        output,
-        1.0f,
-        0);
-}
+        // Batch 1, Channel 3
+        25.0f, 26.0f,
 
-LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 2, 2, 3 };
-    const unsigned int shape1[] = { 1, 1, 1, 3 };
+        // Batch 1, Channel 4
+        27.0f, 28.0f,
 
-    std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
-                                  7, 8, 9, 10, 11, 12 });
+        // Batch 1, Channel 5
+        29.0f, 30.0f,
 
-    std::vector<uint8_t> input1({ 1, 10, 3});
+        // Batch 1, Channel 6
+        31.0f, 32.0f,
 
-    std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
-                                  7, 10, 9, 10, 11, 12 });
+        // Batch 1, Channel 7
+        33.0f, 34.0f,
 
-    return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
-        workloadFactory,
-        memoryManager,
-        shape0,
-        input0,
-        shape1,
-        input1,
-        shape0,
-        output,
-        1.0f,
-        0);
+        // Batch 1, Channel 8
+        35.0f, 36.0f
+    }));
+
+    return result;
 }
 
-LayerTestResult<int16_t, 4> MaximumInt16Test(
+LayerTestResult<float, 3> Concatenation3dDim1Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    unsigned int shape[] = { 2, 2, 2, 2 };
-
-    std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
-                                  3, 3, 3, 3, 4, 4, 4, 4 });
-
-    std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
-                                  4, 4, 4, 4, 5, 5, 5, 5 });
-
-    std::vector<int16_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
-                                  4, 4, 4, 4, 5, 5, 5, 5 });
-
-    return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
-        workloadFactory,
-        memoryManager,
-        shape,
-        input0,
-        shape,
-        input1,
-        shape,
-        output,
-        1.0f,
-        0);
+    return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
-LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    bool useSubtensor,
+    float qScale,
+    int32_t qOffset)
 {
-    const unsigned int shape0[] = { 1, 2, 2, 3 };
-    const unsigned int shape1[] = { 1, 1, 1, 1 };
-
-    std::vector<int16_t> input0({ 1, 2, 3, 4,  5, 6,
-                                  7, 8, 9, 10, 11, 12 });
+    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
 
-    std::vector<int16_t> input1({2});
+    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
+        workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
 
-    std::vector<int16_t> output({ 2, 2, 3, 4, 5, 6,
-                                  7, 8, 9, 10, 11, 12 });
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
 
-    return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
-        workloadFactory,
-        memoryManager,
-        shape0,
-        input0,
-        shape1,
-        input1,
-        shape0,
-        output,
-        1.0f,
-        0);
-}
+        // Batch 0, Channel 1
+        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
 
-LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 2, 2, 3 };
-    const unsigned int shape1[] = { 1, 1, 1, 3 };
+        // Batch 0, Channel 2
+        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
 
-    std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
-                                  7, 8, 9, 10, 11, 12 });
+        // Batch 1, Channel 0
+        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
 
-    std::vector<int16_t> input1({ 1, 10, 3});
+        // Batch 1, Channel 1
+        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
 
-    std::vector<int16_t> output({ 1, 10, 3, 4, 10, 6,
-                                  7, 10, 9, 10, 11, 12 });
+        // Batch 1, Channel 2
+        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
+    }));
 
-    return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
-        workloadFactory,
-        memoryManager,
-        shape0,
-        input0,
-        shape1,
-        input1,
-        shape0,
-        output,
-        1.0f,
-        0);
+    return result;
 }
 
-LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
+LayerTestResult<float, 3> Concatenation3dDim2Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    bool useSubtensor)
 {
-    unsigned int shape0[] = { 1, 2, 2, 2 };
-    std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
-
-    unsigned int shape1[] = { 1, 1, 1, 1 };
-    std::vector<float> input1({ 2 });
-
-    std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
-
-    return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
-        workloadFactory,
-        memoryManager,
-        shape0,
-        input0,
-        shape1,
-        input1,
-        shape0,
-        output);
+    return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
+        workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
 }
 
-
-LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset)
 {
-    unsigned int shape0[] = { 1, 2, 2, 2 };
-    std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10});
+    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
+    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+            // Batch 0, Channel 0
+            1.0f, 2.0f,
 
-    unsigned int shape1[] = { 1, 1, 1, 1 };
-    std::vector<float> input1({ 5 });
+            // Batch 0, Channel 1
+            3.0f, 4.0f,
 
-    std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
+            // Batch 0, Channel 2
+            5.0f, 6.0f,
 
-    return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
-        workloadFactory,
-        memoryManager,
-        shape0,
-        input0,
-        shape1,
-        input1,
-        shape0,
-        output);
-}
+            // Batch 1, Channel 0
+            19.0f, 20.0f,
 
-LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
-    armnn::IWorkloadFactory & workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
-{
-    const unsigned int shape0[] = { 1, 2, 2, 3 };
-    const unsigned int shape1[] = { 1, 1, 1, 3 };
+            // Batch 1, Channel 1
+            21.0f, 22.0f,
 
-    std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
-                                  7, 1, 2, 3, 4, 5 });
+            // Batch 1, Channel 2
+            23.0f, 24.0f
+    }));
 
-    std::vector<uint8_t> input1({ 1, 2, 3});
+    armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType, qScale, qOffset);
+    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+            // Batch 0, Channel 0
+            7.0f, 8.0f,
 
-    std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
-                                  1, 1, 2, 1, 2, 3 });
+            // Batch 0, Channel 1
+            9.0f, 10.0f,
 
-    return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
-        workloadFactory,
-        memoryManager,
-        shape0,
-        input0,
-        shape1,
-        input1,
-        shape0,
-        output,
-        1.0f,
-        0);
-}
+            // Batch 0, Channel 2
+            11.0f, 12.0f,
+    }));
 
-LayerTestResult<int16_t, 4> MinimumInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    unsigned int shape[] = { 2, 2, 2, 2 };
+    armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType, qScale, qOffset);
+    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+            // Batch 0, Channel 0
+            25.0f, 26.0f,
 
-    std::vector<int16_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
-                                  3, 3, 3, 3, 4, 4, 4, 4 });
+            // Batch 0, Channel 1
+            27.0f, 28.0f,
 
-    std::vector<int16_t> input1({ 2, 2, 2, 2, 3, 3, 3, 3,
-                                  4, 4, 4, 4, 5, 5, 5, 5 });
+            // Batch 0, Channel 2
+            29.0f, 30.0f,
 
-    std::vector<int16_t> output({ 1, 1, 1, 1, 3, 3, 3, 3,
-                                  3, 3, 3, 3, 4, 4, 4, 4 });
+            // Batch 1, Channel 0
+            13.0f, 14.0f,
 
-    return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
-        workloadFactory,
-        memoryManager,
-        shape,
-        input0,
-        shape,
-        input1,
-        shape,
-        output,
-        1.0f,
-        0);
-}
+            // Batch 1, Channel 1
+            15.0f, 16.0f,
 
-LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 2, 2, 3 };
-    const unsigned int shape1[] = { 1, 1, 1, 1 };
+            // Batch 1, Channel 2
+            17.0f, 18.0f,
 
-    std::vector<int16_t> input0({ 1, 2, 3, 4,  5, 6,
-                                  7, 8, 9, 10, 11, 12 });
+            // Batch 2, Channel 0
+            31.0f, 32.0f,
 
-    std::vector<int16_t> input1({2});
+            // Batch 2, Channel 1
+            33.0f, 34.0f,
 
-    std::vector<int16_t> output({ 1, 2, 2, 2, 2, 2,
-                                  2, 2, 2, 2, 2, 2 });
+            // Batch 2, Channel 2
+            35.0f, 36.0f
+    }));
 
-    return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
-        workloadFactory,
-        memoryManager,
-        shape0,
-        input0,
-        shape1,
-        input1,
-        shape0,
-        output,
-        1.0f,
-        0);
-}
+    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
+    LayerTestResult<T, 3> result(outputTensorInfo);
 
-LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 2, 2, 3 };
-    const unsigned int shape1[] = { 1, 1, 1, 3 };
+    std::vector<T> output;
+    output.resize(outputTensorInfo.GetNumElements());
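+    // Concatenation along dimension 0: batch counts of 2, 1 and 3 make up the 6 output batches.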
+    Concatenate<T>(workloadFactory, memoryManager,
+                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
+                   { input0.data(), input1.data(), input2.data() },
+                   outputTensorInfo,
+                   output.data(),
+                   0,
+                   true);
 
-    std::vector<int16_t> input0({ 1, 2, 3, 4, 5, 6,
-                                  7, 8, 9, 10, 11, 12 });
+    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        1.0f, 2.0f,
 
-    std::vector<int16_t> input1({ 1, 10, 3});
+        // Batch 0, Channel 1
+        3.0f, 4.0f,
 
-    std::vector<int16_t> output({ 1, 2, 3, 1, 5, 3,
-                                  1, 8, 3, 1, 10, 3 });
+        // Batch 0, Channel 2
+        5.0f, 6.0f,
 
-    return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
-        workloadFactory,
-        memoryManager,
-        shape0,
-        input0,
-        shape1,
-        input1,
-        shape0,
-        output,
-        1.0f,
-        0);
-}
+        // Batch 1, Channel 0
+        19.0f, 20.0f,
 
-namespace {
-template<std::size_t NumDims>
-LayerTestResult<float,NumDims> MultiplicationTestHelper(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const unsigned int shape0[NumDims],
-    const std::vector<float> & values0,
-    const unsigned int shape1[NumDims],
-    const std::vector<float> & values1,
-    const unsigned int outShape[NumDims],
-    const std::vector<float> & outValues)
-{
-    armnn::TensorInfo inputTensorInfo0{NumDims, shape0, armnn::DataType::Float32};
-    armnn::TensorInfo inputTensorInfo1{NumDims, shape1, armnn::DataType::Float32};
-    armnn::TensorInfo outputTensorInfo{NumDims, outShape, armnn::DataType::Float32};
+        // Batch 1, Channel 1
+        21.0f, 22.0f,
 
-    auto input0 = MakeTensor<float, NumDims>(inputTensorInfo0, values0);
-    auto input1 = MakeTensor<float, NumDims>(inputTensorInfo1, values1);
+        // Batch 1, Channel 2
+        23.0f, 24.0f,
 
-    LayerTestResult<float,NumDims> ret(outputTensorInfo);
+        // Batch 2, Channel 0
+        7.0f, 8.0f,
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+        // Batch 2, Channel 1
+        9.0f, 10.0f,
 
-    armnn::MultiplicationQueueDescriptor data;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
-    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+        // Batch 2, Channel 2
+        11.0f, 12.0f,
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
+        // Batch 3, Channel 0
+        25.0f, 26.0f,
 
-    inputHandle0->Allocate();
-    inputHandle1->Allocate();
-    outputHandle->Allocate();
+        // Batch 3, Channel 1
+        27.0f, 28.0f,
 
-    CopyDataToITensorHandle(inputHandle0.get(), input0.origin());
-    CopyDataToITensorHandle(inputHandle1.get(), input1.origin());
+        // Batch 3, Channel 2
+        29.0f, 30.0f,
 
-    workload->PostAllocationConfigure();
-    workload->Execute();
+        // Batch 4, Channel 0
+        13.0f, 14.0f,
 
-    CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
+        // Batch 4, Channel 1
+        15.0f, 16.0f,
 
-    ret.outputExpected = MakeTensor<float, NumDims>(outputTensorInfo, outValues);
-    return ret;
-}
-} // anonymous namespace
-
-
-LayerTestResult<float,4> MultiplicationTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int width = 2;
-    const unsigned int height = 2;
-    const unsigned int channelCount = 2;
-    const unsigned int batchSize = 2;
-
-    unsigned int shape[] = { batchSize, channelCount, height, width };
+        // Batch 4, Channel 2
+        17.0f, 18.0f,
 
-    std::vector<float> input0({
-        1,  1,  1,  1,    2,  2,  2,  2,
-        3,  3,  3,  3,    4,  4,  4,  4 });
+        // Batch 5, Channel 0
+        31.0f, 32.0f,
 
-    std::vector<float> input1({
-        2,  2,  2,  2,    3,  3,  3,  3,
-        4,  4,  4,  4,    5,  5,  5,  5 });
+        // Batch 5, Channel 1
+        33.0f, 34.0f,
 
-    std::vector<float> output({
-        2,  2,  2,  2,    6,  6,  6,  6,
-        12, 12, 12, 12,  20, 20, 20, 20 });
+        // Batch 5, Channel 2
+        35.0f, 36.0f
+    }));
 
-    return MultiplicationTestHelper<4>(workloadFactory,
-                                       memoryManager,
-                                       shape,
-                                       input0,
-                                       shape,
-                                       input1,
-                                       shape,
-                                       output);
+    return result;
 }
 
-LayerTestResult<float,5> Multiplication5dTest(
+LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    const unsigned int width = 3;
-    const unsigned int height = 2;
-    const unsigned int channelCount = 2;
-    const unsigned int batchSize = 2;
-    const unsigned int depth = 2;
-
-    unsigned int shape[] = { depth, batchSize, channelCount, height, width };
-
-    std::vector<float> input0({
-        1.80f, 0.20f, 2.30f,  1.30f, 2.10f, 1.00f,
-        2.60f, 0.60f, 2.10f,  2.30f, 2.30f, 2.00f,
-
-        2.50f, 1.00f, 2.90f,  3.10f, 1.50f, 2.40f,
-        2.80f, 1.10f, 1.00f,  3.20f, 1.00f, 2.30f,
-
+    return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
+        workloadFactory, memoryManager, 0.0f, 0);
+}
 
-        0.30f, 2.20f, 1.00f,  0.20f, 1.60f, 1.40f,
-        0.80f, 3.20f, 0.10f,  0.10f, 3.10f, 2.10f,
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset)
+{
+    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
+    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        1.0f, 2.0f,
 
-        1.50f, 2.40f, 1.40f,  0.70f, 2.40f, 1.40f,
-        1.60f, 1.20f, 1.90f,  0.80f, 0.00f, 0.10f,
-    });
+        // Batch 0, Channel 1
+        3.0f, 4.0f,
 
-    std::vector<float> input1({
-        0.70f, 1.00f, 2.90f,  2.20f, 3.10f, 2.80f,
-        1.80f, 2.00f, 0.50f,  2.30f, 1.20f, 2.70f,
+        // Batch 0, Channel 2
+        5.0f, 6.0f,
 
-        2.40f, 0.20f, 3.20f,  1.60f, 0.20f, 2.50f,
-        2.30f, 0.70f, 2.70f,  1.80f, 2.90f, 2.70f,
+        // Batch 1, Channel 0
+        19.0f, 20.0f,
 
+        // Batch 1, Channel 1
+        21.0f, 22.0f,
 
-        3.20f, 3.20f, 0.70f,  1.90f, 2.70f, 2.50f,
-        2.40f, 0.90f, 2.30f,  1.80f, 2.50f, 2.00f,
+        // Batch 1, Channel 2
+        23.0f, 24.0f
+    }));
 
-        1.60f, 2.20f, 1.60f,  2.00f, 0.30f, 3.20f,
-        0.40f, 3.00f, 2.60f,  0.30f, 0.00f, 2.50f,
-    });
+    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
+    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        7.0f, 8.0f,
 
-    std::vector<float> output({
-        1.26f, 0.20f, 6.67f,  2.86f, 6.51f, 2.80f,
-        4.68f, 1.20f, 1.05f,  5.29f, 2.76f, 5.40f,
+        // Batch 0, Channel 1
+        9.0f, 10.0f,
 
-        6.00f, 0.20f, 9.28f,  4.96f, 0.30f, 6.00f,
-        6.44f, 0.77f, 2.70f,  5.76f, 2.90f, 6.21f,
+        // Batch 0, Channel 2
+        11.0f, 12.0f,
 
+        // Batch 0, Channel 3
+        25.0f, 26.0f,
 
-        0.96f, 7.04f, 0.70f,  0.38f, 4.32f, 3.50f,
-        1.92f, 2.88f, 0.23f,  0.18f, 7.75f, 4.20f,
+        // Batch 1, Channel 0
+        27.0f, 28.0f,
 
-        2.40f, 5.28f, 2.24f,  1.40f, 0.72f, 4.48f,
-        0.64f, 3.60f, 4.94f,  0.24f, 0.00f, 0.25f,
-    });
+        // Batch 1, Channel 1
+        29.0f, 30.0f,
 
-    return MultiplicationTestHelper<5>(workloadFactory,
-                                       memoryManager,
-                                       shape,
-                                       input0,
-                                       shape,
-                                       input1,
-                                       shape,
-                                       output);
-}
+        // Batch 1, Channel 2
+        13.0f, 14.0f,
 
-LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    unsigned int shape0[] = { 1, 2, 2, 2 };
-    std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
+        // Batch 1, Channel 3
+        15.0f, 16.0f,
+    }));
 
-    unsigned int shape1[] = { 1, 1, 1, 1 };
-    std::vector<float> input1({ 2 });
+    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
+    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        17.0f, 18.0f,
 
-    std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
+        // Batch 1, Channel 0
+        31.0f, 32.0f,
+    }));
 
-    return MultiplicationTestHelper<4>(workloadFactory,
-                                       memoryManager,
-                                       shape0,
-                                       input0,
-                                       shape1,
-                                       input1,
-                                       shape0,
-                                       output);
-}
+    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
+    LayerTestResult<T, 3> result(outputTensorInfo);
 
-LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    unsigned int shape0[] = { 1, 3, 3, 2 };
-    std::vector<float> input0({
-        1,   2,      3,  4,      5,  6,
-        7,   8,      9, 10,     11, 12,
-        13, 14,     15, 16,     17, 18});
+    std::vector<T> output;
+    output.resize(outputTensorInfo.GetNumElements());
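+    // Concatenation along dimension 1: channel counts of 3, 4 and 1 make up the 8 output channels.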
+    Concatenate<T>(workloadFactory, memoryManager,
+                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
+                   { input0.data(), input1.data(), input2.data() },
+                   outputTensorInfo,
+                   output.data(),
+                   1,
+                   true);
 
-    unsigned int shape1[] = { 1, 1, 1, 2 };
-    std::vector<float> input1({ 1, 2 });
+    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        1.0f, 2.0f,
 
-    std::vector<float> output({
-        1,   4,       3,  8,      5, 12,
-        7,   16,      9, 20,     11, 24,
-        13,  28,     15, 32,     17, 36});
+        // Batch 0, Channel 1
+        3.0f, 4.0f,
 
-    return MultiplicationTestHelper<4>(workloadFactory,
-                                       memoryManager,
-                                       shape0,
-                                       input0,
-                                       shape1,
-                                       input1,
-                                       shape0,
-                                       output);
-}
+        // Batch 0, Channel 2
+        5.0f, 6.0f,
 
-LayerTestResult<float,4> CompareMultiplicationTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::IWorkloadFactory& refWorkloadFactory)
-{
-    const unsigned int width = 16;
-    const unsigned int height = 32;
-    const unsigned int channelCount = 2;
-    const unsigned int batchSize = 5;
+        // Batch 0, Channel 3
+        7.0f, 8.0f,
 
-    armnn::TensorInfo inputTensorInfo0;
-    armnn::TensorInfo inputTensorInfo1;
-    armnn::TensorInfo outputTensorInfo;
+        // Batch 0, Channel 4
+        9.0f, 10.0f,
 
-    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };
+        // Batch 0, Channel 5
+        11.0f, 12.0f,
 
-    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
-    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
-    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+        // Batch 0, Channel 6
+        25.0f, 26.0f,
 
-    LayerTestResult<float,4> comparisonResult(outputTensorInfo);
+        // Batch 0, Channel 7
+        17.0f, 18.0f,
 
-    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
-    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);
+        // Batch 1, Channel 0
+        19.0f, 20.0f,
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+        // Batch 1, Channel 1
+        21.0f, 22.0f,
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
-    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
+        // Batch 1, Channel 2
+        23.0f, 24.0f,
 
-    armnn::MultiplicationQueueDescriptor data;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
-    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+        // Batch 1, Channel 3
+        27.0f, 28.0f,
 
-    armnn::MultiplicationQueueDescriptor refData = data;
-    armnn::WorkloadInfo refInfo = info;
-    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
-    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
-    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
+        // Batch 1, Channel 4
+        29.0f, 30.0f,
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
-    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);
+        // Batch 1, Channel 5
+        13.0f, 14.0f,
 
-    inputHandle0->Allocate();
-    inputHandle1->Allocate();
-    outputHandle->Allocate();
-    inputHandle0Ref->Allocate();
-    inputHandle1Ref->Allocate();
-    outputHandleRef->Allocate();
+        // Batch 1, Channel 6
+        15.0f, 16.0f,
 
-    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
-    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
-    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
-    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
+        // Batch 1, Channel 7
+        31.0f, 32.0f,
+    }));
 
-    workload->PostAllocationConfigure();
-    workload->Execute();
-    workloadRef->PostAllocationConfigure();
-    workloadRef->Execute();
-    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
-    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
+    return result;
+}
 
-    return comparisonResult;
+LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
+        workloadFactory, memoryManager, 0.0f, 0);
 }
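The test above concatenates inputs whose sizes differ on the concatenated axis; the rule being exercised is that the output takes the sum of the inputs' sizes on that axis, while every other axis must match. Below is a minimal standalone sketch of that shape rule; the { 2, 3, 2 } and { 2, 4, 2 } shapes are hypothetical stand-ins for input0 and input1, which are defined above this hunk (only input2, { 2, 1, 2 }, and the { 2, 8, 2 } output are visible here).

#include <cassert>
#include <vector>

// Output shape of a concatenation: copy the first input's shape, then replace
// the concat axis with the sum of all inputs' sizes on that axis.
std::vector<unsigned int> ConcatShape(const std::vector<std::vector<unsigned int>>& shapes,
                                      unsigned int axis)
{
    std::vector<unsigned int> result = shapes.front();
    result[axis] = 0;
    for (const auto& shape : shapes)
    {
        result[axis] += shape[axis];
    }
    return result;
}

int main()
{
    // Hypothetical channel counts 3 + 4 + 1 sum to the 8 channels of the
    // output tensor used by the test above.
    assert((ConcatShape({ { 2, 3, 2 }, { 2, 4, 2 }, { 2, 1, 2 } }, 1)
            == std::vector<unsigned int>({ 2, 8, 2 })));
}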
 
-LayerTestResult<float,4> CompareBatchNormTest(
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::IWorkloadFactory& refWorkloadFactory)
+    bool useSubtensor,
+    float qScale,
+    int32_t qOffset)
 {
-    const unsigned int width     = 2;
-    const unsigned int height    = 3;
-    const unsigned int channels  = 5;
-    const unsigned int batchSize = 3;
+    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
+    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        1.0f, 2.0f,
 
-    armnn::TensorInfo inputTensorInfo;
-    armnn::TensorInfo outputTensorInfo;
-    armnn::TensorInfo tensorInfo;
+        // Batch 0, Channel 1
+        3.0f, 4.0f,
 
-    constexpr unsigned int shape[]       = {batchSize, channels, height, width};
-    constexpr unsigned int tensorShape[] = {channels};
+        // Batch 0, Channel 2
+        5.0f, 6.0f,
 
-    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
-    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
-    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);
+        // Batch 1, Channel 0
+        19.0f, 20.0f,
 
-    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);
+        // Batch 1, Channel 1
+        21.0f, 22.0f,
 
-    auto mean     = MakeRandomTensor<float, 1>(tensorInfo, 123);
-    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
-    auto beta     = MakeRandomTensor<float, 1>(tensorInfo, 123);
-    auto gamma    = MakeRandomTensor<float, 1>(tensorInfo, 345);
+        // Batch 1, Channel 2
+        23.0f, 24.0f
+    }));
 
-    LayerTestResult<float,4> ret(outputTensorInfo);
+    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
+    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        7.0f,
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+        // Batch 0, Channel 1
+        9.0f,
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandleRef  = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
+        // Batch 0, Channel 2
+        11.0f,
 
-    armnn::BatchNormalizationQueueDescriptor data;
-    armnn::WorkloadInfo info;
-    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
-    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
-    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
-    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
+        // Batch 1, Channel 0
+        25.0f,
 
-    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
-    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
-    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
-    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
+        // Batch 1, Channel 1
+        27.0f,
 
-    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-    data.m_Mean             = &meanTensor;
-    data.m_Variance         = &varianceTensor;
-    data.m_Beta             = &betaTensor;
-    data.m_Gamma            = &gammaTensor;
-    data.m_Parameters.m_Eps = 0.01f;
+        // Batch 1, Channel 2
+        29.0f
+    }));
 
-    armnn::BatchNormalizationQueueDescriptor refData = data;
-    armnn::WorkloadInfo refInfo = info;
-    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
-    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
+    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
+    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        13.0f, 14.0f, 50.0f,
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
-    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);
+        // Batch 0, Channel 1
+        15.0f, 16.0f, 51.0f,
 
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    inputHandleRef->Allocate();
-    outputHandleRef->Allocate();
+        // Batch 0, Channel 2
+        17.0f, 18.0f, 52.0f,
 
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
+        // Batch 1, Channel 0
+        31.0f, 32.0f, 53.0f,
 
-    workload->PostAllocationConfigure();
-    workload->Execute();
-    workloadRef->PostAllocationConfigure();
-    workloadRef->Execute();
-
-    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
-    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
+        // Batch 1, Channel 1
+        33.0f, 34.0f, 54.0f,
 
-    return ret;
-}
+        // Batch 1, Channel 2
+        35.0f, 36.0f, 55.0f,
+    }));
 
-template<typename T>
-void PermuteTensorData(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        const armnn::PermutationVector& mappings,
-        armnn::TensorInfo & inputTensorInfo,
-        const T * inputData,
-        std::vector<T>& outputData)
-{
-    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
-    if (inputData == nullptr)
-    {
-        // Nullptr is an error in the test. By returning without doing the permutation
-        // I expect the caller to fail the test. It still makes sense to report this as
-        // an assert for Debug builds.
-        return;
-    }
+    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
+    LayerTestResult<T, 3> result(outputTensorInfo);
 
-    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
+    std::vector<T> output;
+    output.resize(outputTensorInfo.GetNumElements());
+    Concatenate<T>(workloadFactory, memoryManager,
+                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
+                   { input0.data(), input1.data(), input2.data() },
+                   outputTensorInfo,
+                   output.data(),
+                   2,
+                   useSubtensor);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
 
-    armnn::PermuteQueueDescriptor queueDescriptor;
-    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
-    armnn::WorkloadInfo workloadInfo;
-    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
+        // Batch 0, Channel 1
+        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
+        // Batch 0, Channel 2
+        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
 
-    inputHandle->Allocate();
-    outputHandle->Allocate();
+        // Batch 1, Channel 0
+        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
 
-    CopyDataToITensorHandle(inputHandle.get(), inputData);
+        // Batch 1, Channel 1
+        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
 
-    workload->PostAllocationConfigure();
-    workload->Execute();
+        // Batch 1, Channel 2
+        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
+    }));
 
-    outputData.resize(outputTensorInfo.GetNumElements());
-    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
-    inputTensorInfo = outputTensorInfo;
+    return result;
 }
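Concatenation along the innermost axis, as tested above, amounts to a row-by-row join, which is exactly how the expected data is laid out. A minimal sketch, independent of the ArmNN helpers:

#include <vector>

// Join the corresponding rows of each input along the innermost axis.
std::vector<float> ConcatRow(const std::vector<std::vector<float>>& rows)
{
    std::vector<float> joined;
    for (const auto& row : rows)
    {
        joined.insert(joined.end(), row.begin(), row.end());
    }
    return joined;
}

// Batch 0, Channel 0 of the three inputs above:
//   { 1, 2 } ++ { 7 } ++ { 13, 14, 50 }  ->  { 1, 2, 7, 13, 14, 50 },
// which is the first row of the expected output tensor.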
 
-armnn::OriginsDescriptor CreateDescriptorForConcatenation(
-        const std::vector<armnn::TensorInfo> & inputTensorInfos,
-        unsigned int concatDim)
+LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    bool useSubtensor)
 {
-    std::vector<armnn::TensorShape> shapes;
-    shapes.reserve(inputTensorInfos.size());
-    for (const armnn::TensorInfo& it: inputTensorInfos)
-    {
-        shapes.push_back(it.GetShape());
-    }
-
-    return armnn::CreateDescriptorForConcatenation(shapes.begin(),
-                                                   shapes.end(),
-                                                   concatDim);
+    return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
+        workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
 }
 
-//
-// Concatenation is only supported for N and C dimensions for NCHW and the innermost dimension.
-// In case of <4 dimensions we need to make sure that the concat dimension is at least
-// the 3rd slowest iterating one or the innermost dimension.
-//
-
-bool NeedPermuteForConcat(
-        const std::vector<armnn::TensorInfo> & inputTensorInfos,
-        unsigned int concatDim)
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> Concatenation4dTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::TensorInfo& outputTensorInfo,
+    unsigned int dimension,
+    bool useSubtensor,
+    float qScale,
+    int32_t qOffset)
 {
-    // See note above. Additionally we expect the input shapes to have the
-    // same number of dimensions.
-    unsigned int nDimensions = 0;
+    armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
 
-    // Determine the number of dimensions and sanity-check them
-    // against test implementation issues.
-    for (auto && tensorInfo : inputTensorInfos)
-    {
-        if (!nDimensions)
-        {
-            nDimensions = tensorInfo.GetShape().GetNumDimensions();
-        }
-        else
-        {
-            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
-                "Input shapes must have the same number of dimensions");
-        }
-    }
+    auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        1.0f, 2.0f,
+        3.0f, 4.0f,
+        5.0f, 6.0f,
+        7.0f, 8.0f,
+        9.0f, 10.0f,
+        11.0f, 12.0f
+    }));
 
-    return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
-}
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        11.0f, 12.0f,
+        13.0f, 14.0f,
+        15.0f, 16.0f,
+        17.0f, 18.0f,
+        19.0f, 20.0f,
+        21.0f, 22.0f
+    }));
 
-armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
-{
-    unsigned int numDims = inputShape.GetNumDimensions();
-    if (numDims >= 3)
-    {
-        // Nothing to do if the inputShape has at least 3 dimensions.
-        return inputShape;
-    }
+    auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        21.0f, 22.0f,
+        23.0f, 24.0f,
+        25.0f, 26.0f,
+        27.0f, 28.0f,
+        29.0f, 30.0f,
+        31.0f, 32.0f
+    }));
 
-    std::vector<unsigned int> newDims(size_t(3), 1u);
-    unsigned int expandedBy = 3 - numDims;
-    for (unsigned int i=0; i<numDims; ++i)
-    {
-        newDims[expandedBy+i] = inputShape[i];
-    }
-    return armnn::TensorShape(3u, &newDims[0]);
-}
+    LayerTestResult<T, 4> result(outputTensorInfo);
 
-void Generate3dPermuteVectorForConcat(
-        unsigned int numDimensions,
-        unsigned int & concatDim,
-        std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
-{
-    BOOST_ASSERT_MSG(numDimensions <= 3,
-       "Only dimensions 1,2 and 3 are supported by this helper");
-    unsigned int expandedBy = 3 - numDimensions;
-    unsigned int expandedConcatAxis = concatDim + expandedBy;
+    std::vector<T> output;
+    output.resize(outputTensorInfo.GetNumElements());
 
-    if (expandedConcatAxis == 2)
-    {
-        concatDim = 0;
-        armnn::PermutationVector forwardPermutation({1, 2, 0});
-        armnn::PermutationVector reversePermutation({2, 0, 1});
-        permutations = std::make_pair(forwardPermutation, reversePermutation);
-    }
-    else if (expandedConcatAxis == 1)
-    {
-        concatDim = 0;
-        armnn::PermutationVector forwardPermutation({2, 0, 1});
-        armnn::PermutationVector reversePermutation({1, 2, 0});
-        permutations = std::make_pair(forwardPermutation, reversePermutation);
-    }
-    else
-    {
-        BOOST_ASSERT(expandedConcatAxis == 0);
-        concatDim = 0;
-    }
+    Concatenate<T>(workloadFactory,
+                   memoryManager,
+                   {inputTensorInfo, inputTensorInfo, inputTensorInfo},
+                   {input0.data(), input1.data(), input2.data()},
+                   outputTensorInfo,
+                   output.data(),
+                   dimension,
+                   useSubtensor);
+
+    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
+    return result;
 }
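Each Concatenation4dDim*Test below feeds this helper the same three { 1, 3, 2, 2 } inputs, so the output shape is always the input shape with the chosen axis tripled. A small self-contained sketch of that relationship:

#include <cassert>
#include <vector>

// With three identical inputs, concatenation triples the chosen axis.
std::vector<unsigned int> TripleAxis(std::vector<unsigned int> shape, unsigned int axis)
{
    shape[axis] *= 3;
    return shape;
}

int main()
{
    // These are the output shapes passed in by the Dim0..Dim3 wrappers below.
    assert((TripleAxis({ 1, 3, 2, 2 }, 0) == std::vector<unsigned int>({ 3, 3, 2, 2 })));
    assert((TripleAxis({ 1, 3, 2, 2 }, 1) == std::vector<unsigned int>({ 1, 9, 2, 2 })));
    assert((TripleAxis({ 1, 3, 2, 2 }, 2) == std::vector<unsigned int>({ 1, 3, 6, 2 })));
    assert((TripleAxis({ 1, 3, 2, 2 }, 3) == std::vector<unsigned int>({ 1, 3, 2, 6 })));
}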
 
-//
-// Permute the input tensors so we can do a supported concatenation.
-// Also treat lower than 3d tensors as 3d by adding dummy 1 dimensions
-// at the front. Finally this function tells what the output shape
-// of the permuted concatenated tensor is going to be.
-//
-template <typename T>
-void PermuteInputsForConcat(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        std::vector<armnn::TensorInfo> & inputTensorInfos,
-        std::vector<T *> & inputData,
-        std::vector<std::vector<T>> & inputDataStorage,
-        armnn::PermutationVector & permuteVector,
-        unsigned int & concatDim,
-        armnn::TensorInfo & outputTensorInfo)
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset)
 {
-    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
-        "Expecting more than one tensor to be concatenated here");
-
-    unsigned int numDims = 0;
-    unsigned int nthInput = 0;
-    const armnn::PermutationVector identity({0, 1, 2});
+    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
 
-    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
-        std::make_pair(identity, identity);
+    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
+        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
 
-    inputDataStorage.resize(inputData.size());
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        1.0f, 2.0f,
+        3.0f, 4.0f,
+        5.0f, 6.0f,
+        7.0f, 8.0f,
+        9.0f, 10.0f,
+        11.0f, 12.0f,
 
-    for (auto && tensorInfo : inputTensorInfos)
-    {
-        if (numDims == 0)
-        {
-            numDims = tensorInfo.GetShape().GetNumDimensions();
-            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);
+        11.0f, 12.0f,
+        13.0f, 14.0f,
+        15.0f, 16.0f,
+        17.0f, 18.0f,
+        19.0f, 20.0f,
+        21.0f, 22.0f,
 
-            // Store the reverse permutation.
-            permuteVector = permutations.second;
-            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
-                "Test logic error, we don't need permutation, so we shouldn't arrive here");
-        }
-        else
-        {
-            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
-                "All inputs must have the same number of dimensions");
-        }
+        21.0f, 22.0f,
+        23.0f, 24.0f,
+        25.0f, 26.0f,
+        27.0f, 28.0f,
+        29.0f, 30.0f,
+        31.0f, 32.0f
+    }));
+    return result;
+}
 
-        armnn::TensorInfo newTensorInfo = tensorInfo;
-        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));
+LayerTestResult<float, 4> Concatenation4dDim0Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+}
 
-        PermuteTensorData<T>(workloadFactory,
-                             memoryManager,
-                             permutations.first,
-                             newTensorInfo,
-                             inputData[nthInput],
-                             inputDataStorage[nthInput]);
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset)
+{
+    armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);
 
-        inputData[nthInput] = inputDataStorage[nthInput].data();
-        inputTensorInfos[nthInput] = newTensorInfo;
+    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
+        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
 
-        ++nthInput;
-    }
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        1.0f, 2.0f,
+        3.0f, 4.0f,
+        5.0f, 6.0f,
+        7.0f, 8.0f,
+        9.0f, 10.0f,
+        11.0f, 12.0f,
 
-    outputTensorInfo.SetShape(
-        armnnUtils::Permuted(
-            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
-            permutations.first));
+        11.0f, 12.0f,
+        13.0f, 14.0f,
+        15.0f, 16.0f,
+        17.0f, 18.0f,
+        19.0f, 20.0f,
+        21.0f, 22.0f,
+
+        21.0f, 22.0f,
+        23.0f, 24.0f,
+        25.0f, 26.0f,
+        27.0f, 28.0f,
+        29.0f, 30.0f,
+        31.0f, 32.0f
+    }));
+
+    return result;
 }
 
+LayerTestResult<float, 4> Concatenation4dDim1Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+}
 
-//
-// This is the pair of PermuteInputsForConcat(...) which permutes back
-// the output of the concatenation so we can check it against an expected
-// output.
-//
-template <typename T>
-void PermuteOutputForConcat(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        const armnn::TensorInfo & tensorInfo,
-        const armnn::PermutationVector & permuteVector,
-        std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
-        T * data)
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset)
 {
-    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
-    if (data == nullptr)
-    {
-        // Nullptr is an error in the test. By returning without doing the permutation
-        // I expect the caller to fail the test. It still makes sense to report this as
-        // an assert for Debug builds.
-        return;
-    }
+    armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);
 
-    armnn::TensorInfo resultTensorInfo = tensorInfo;
-    std::vector<T> inputData(tensorInfo.GetNumElements());
-    std::vector<T> outputData;
+    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
+        workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
 
-    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        1.0f, 2.0f,
+        3.0f, 4.0f,
+        11.0f, 12.0f,
+        13.0f, 14.0f,
+        21.0f, 22.0f,
+        23.0f, 24.0f,
 
-    PermuteTensorData<T>(workloadFactory,
-                         memoryManager,
-                         permuteVector,
-                         resultTensorInfo,
-                         &inputData[0],
-                         outputData);
+        5.0f, 6.0f,
+        7.0f, 8.0f,
+        15.0f, 16.0f,
+        17.0f, 18.0f,
+        25.0f, 26.0f,
+        27.0f, 28.0f,
 
-    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
+        9.0f, 10.0f,
+        11.0f, 12.0f,
+        19.0f, 20.0f,
+        21.0f, 22.0f,
+        29.0f, 30.0f,
+        31.0f, 32.0f
+    }));
+
+    return result;
 }
 
-template <typename T>
-void Concatenate(
+LayerTestResult<float, 4> Concatenation4dDim2Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
-    std::initializer_list<T *> inputsOrig,
-    const armnn::TensorInfo& outputTensorInfoOrig,
-    T * output,
-    unsigned int concatDim,
+    float qScale,
+    int32_t qOffset,
     bool useSubtensor)
 {
-    BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
-    if (output == nullptr)
-    {
-        // Nullptr is an error in the test. By returning without doing the concatenation
-        // I expect the caller to fail the test. It still makes sense to report this as
-        // an assert for Debug builds.
-        return;
-    }
-
-    // Saves a copy of the parameters which we might need to change.
-    std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
-    std::vector<T *> inputs            = inputsOrig;
-    armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
-
-    armnn::PermutationVector permuteVector{0, 1, 2};
-
-    // Holds and automatically releases memory for the reshaped input data.
-    std::vector<std::vector<T>> tmpInputDataStorage;
+    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);
 
-    const size_t inputCount = inputTensorInfos.size();
+    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
+        workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
 
-    bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        1.0f, 2.0f,
+        11.0f, 12.0f,
+        21.0f, 22.0f,
+        3.0f, 4.0f,
+        13.0f, 14.0f,
+        23.0f, 24.0f,
 
-    if (needPermuteForConcat)
-    {
-        //
-        // We need to permute the inputs, because concatenation along
-        // the requested axis is not supported.
-        //
-        PermuteInputsForConcat<T>(workloadFactory,
-                                  memoryManager,
-                                  inputTensorInfos,
-                                  inputs,
-                                  tmpInputDataStorage,
-                                  permuteVector,
-                                  concatDim,
-                                  outputTensorInfo);
-    }
+        5.0f, 6.0f,
+        15.0f, 16.0f,
+        25.0f, 26.0f,
+        7.0f, 8.0f,
+        17.0f, 18.0f,
+        27.0f, 28.0f,
 
-    armnn::WorkloadInfo workloadInfo;
+        9.0f, 10.0f,
+        19.0f, 20.0f,
+        29.0f, 30.0f,
+        11.0f, 12.0f,
+        21.0f, 22.0f,
+        31.0f, 32.0f
+    }));
 
-    std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
-    inputHandles.reserve(inputCount);
+    return result;
+}
 
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+LayerTestResult<float, 4> Concatenation4dDim3Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    bool useSubtensor)
+{
+    return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
+        workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
+}
 
-    armnn::ConcatQueueDescriptor queueDescriptor;
-    armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcatenation(inputTensorInfos, concatDim);
-    queueDescriptor.m_Parameters = viewsDescriptor;
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset)
+{
+    unsigned int dimension = 0;
+    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
 
-    if (useSubtensor)
-    {
-        queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
-        for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
-        {
-            queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
-                viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
-        }
+    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
+        1.0f, 2.0f,
+        3.0f, 4.0f,
+        5.0f, 6.0f,
+        7.0f, 8.0f,
+        9.0f, 10.0f,
+        11.0f, 12.0f
+    }));
 
-        outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+    armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);
 
-        const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
-        for (unsigned int i = 0; i < inputCount; ++i)
-        {
-            const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
-            std::unique_ptr<armnn::ITensorHandle> inputHandle =
-                subTensorsSupported ?
-                    workloadFactory.CreateSubTensorHandle(*outputHandle,
-                                                          inputTensorInfo.GetShape(),
-                                                          queueDescriptor.m_ViewOrigins[i].m_Origin.data()) :
-                    workloadFactory.CreateTensorHandle(inputTensorInfo);
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
+        11.0f, 12.0f,
+        13.0f, 14.0f,
+        15.0f, 16.0f,
+        17.0f, 18.0f,
+        19.0f, 20.0f,
+        21.0f, 22.0f,
 
-            inputHandles.emplace_back(std::move(inputHandle));
-        }
+        21.0f, 22.0f,
+        23.0f, 24.0f,
+        25.0f, 26.0f,
+        27.0f, 28.0f,
+        29.0f, 30.0f,
+        31.0f, 32.0f
 
-    }
-    else
-    {
-        for (unsigned int i = 0; i < inputCount; ++i)
-        {
-            std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
-            inputHandles.emplace_back(std::move(inputHandle));
-        }
-    }
+    }));
 
-    for (unsigned int i = 0; i < inputCount; ++i)
-    {
-        AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
-    }
+    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
 
-    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
+    LayerTestResult<T, 4> result(outputTensorInfo);
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
+    std::vector<T> output;
+    output.resize(outputTensorInfo.GetNumElements());
+    Concatenate<T>(workloadFactory,
+                   memoryManager,
+                   {inputTensorInfo0, inputTensorInfo1},
+                   {input0.data(), input1.data()},
+                   outputTensorInfo,
+                   output.data(),
+                   dimension,
+                   true);
 
-    for (auto& inputHandle : inputHandles)
-    {
-        inputHandle->Allocate();
-    }
+    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        1.0f, 2.0f,
+        3.0f, 4.0f,
+        5.0f, 6.0f,
+        7.0f, 8.0f,
+        9.0f, 10.0f,
+        11.0f, 12.0f,
 
-    outputHandle->Allocate();
+        11.0f, 12.0f,
+        13.0f, 14.0f,
+        15.0f, 16.0f,
+        17.0f, 18.0f,
+        19.0f, 20.0f,
+        21.0f, 22.0f,
 
-    unsigned int nextInputId = 0;
-    for (auto& inputHandle : inputHandles)
-    {
-        CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
-        ++nextInputId;
-    }
+        21.0f, 22.0f,
+        23.0f, 24.0f,
+        25.0f, 26.0f,
+        27.0f, 28.0f,
+        29.0f, 30.0f,
+        31.0f, 32.0f
+    }));
 
-    workload->PostAllocationConfigure();
-    workload->Execute();
+    return result;
+}
 
-    if (needPermuteForConcat)
-    {
-        PermuteOutputForConcat<T>(workloadFactory,
-                                  memoryManager,
-                                  outputTensorInfo,
-                                  permuteVector,
-                                  std::move(outputHandle),
-                                  output);
-    }
-    else
-    {
-        CopyDataFromITensorHandle(output, outputHandle.get());
-    }
+LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
+        workloadFactory, memoryManager, 0.0f, 0);
 }
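The Concatenate helper removed above binds each input either as a sub-tensor view into the output tensor or as a standalone handle (the new code still calls Concatenate<T>, so the helper is presumably defined elsewhere after this refactor). A condensed restatement of that selection logic, using only the factory calls visible in the removed code; MakeConcatInputHandle is a hypothetical name, and the snippet assumes the workload-factory and tensor-handle headers this file already includes:

// Sketch: choose how an input is bound for concatenation. When sub-tensors
// are requested and supported, the input becomes a view into the output at
// its concatenation origin, so no separate copy into the output is needed;
// otherwise it gets its own standalone tensor handle.
std::unique_ptr<armnn::ITensorHandle> MakeConcatInputHandle(
    armnn::IWorkloadFactory& workloadFactory,
    armnn::ITensorHandle& outputHandle,
    const armnn::TensorInfo& inputTensorInfo,
    const unsigned int* viewOrigin,
    bool useSubtensor)
{
    if (useSubtensor && workloadFactory.SupportsSubTensors())
    {
        return workloadFactory.CreateSubTensorHandle(outputHandle,
                                                     inputTensorInfo.GetShape(),
                                                     viewOrigin);
    }
    return workloadFactory.CreateTensorHandle(inputTensorInfo);
}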
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 1> Concatenation1dTestImpl(
+LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
+    unsigned int dimension = 1;
+    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
 
-    auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
-    auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
-    auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
+    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
+        1.0f, 2.0f,
+        3.0f, 4.0f,
+        5.0f, 6.0f,
+        7.0f, 8.0f,
+        9.0f, 10.0f,
+        11.0f, 12.0f
+    }));
 
-    armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
+    armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);
 
-    LayerTestResult<T, 1> result(outputTensorInfo);
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
+        11.0f, 12.0f,
+        13.0f, 14.0f,
+        15.0f, 16.0f,
+        17.0f, 18.0f
+    }));
+
+    armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
 
     std::vector<T> output;
     output.resize(outputTensorInfo.GetNumElements());
-    Concatenate<T>(workloadFactory, memoryManager,
-                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
-                   { input0.data(), input1.data(), input2.data() },
+    Concatenate<T>(workloadFactory,
+                   memoryManager,
+                   {inputTensorInfo0, inputTensorInfo1},
+                   {input0.data(), input1.data()},
                    outputTensorInfo,
                    output.data(),
-                   0,
+                   dimension,
                    true);
 
-    result.output = MakeTensor<T, 1>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
+    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        1.0f, 2.0f,
+        3.0f, 4.0f,
+        5.0f, 6.0f,
+        7.0f, 8.0f,
+        9.0f, 10.0f,
+        11.0f, 12.0f,
+        11.0f, 12.0f,
+        13.0f, 14.0f,
+        15.0f, 16.0f,
+        17.0f, 18.0f
     }));
 
     return result;
 }
 
-LayerTestResult<float, 1> Concatenation1dTest(
+LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
+        workloadFactory, memoryManager, 0.0f, 0);
 }
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 2> Concatenation2dTestImpl(
+LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::TensorInfo& outputTensorInfo,
-    unsigned int dimension,
-    const float qScale,
-    const int32_t qOffset)
+    float qScale,
+    int32_t qOffset)
 {
-    armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
-
-    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        1.0f, 2.0f, 3.0f,
+    unsigned int dimension = 2;
+    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
 
-        // Batch 1
-        10.0f, 11.0f, 12.0f,
+    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
+        1.0f, 2.0f,
+        3.0f, 4.0f,
+        5.0f, 6.0f,
+        7.0f, 8.0f,
+        9.0f, 10.0f,
+        11.0f, 12.0f
     }));
 
-    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        4.0f, 5.0f, 6.0f,
+    armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);
 
-        // Batch 1
-        13.0f, 14.0f, 15.0f,
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
+        11.0f, 12.0f,
+        13.0f, 14.0f,
+        15.0f, 16.0f,
+        17.0f, 18.0f,
+        19.0f, 20.0f,
+        21.0f, 22.0f,
+        23.0f, 24.0f,
+        25.0f, 26.0f,
+        27.0f, 28.0f
     }));
 
-    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        7.0f, 8.0f, 9.0f,
-
-        // Batch 1
-        16.0f, 17.0f, 18.0f,
-    }));
+    armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);
 
-    LayerTestResult<T, 2> result(outputTensorInfo);
+    LayerTestResult<T, 4> result(outputTensorInfo);
 
     std::vector<T> output;
     output.resize(outputTensorInfo.GetNumElements());
-    Concatenate<T>(workloadFactory, memoryManager,
-                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
-                   { input0.data(), input1.data(), input2.data() },
+    Concatenate<T>(workloadFactory,
+                   memoryManager,
+                   {inputTensorInfo0, inputTensorInfo1},
+                   {input0.data(), input1.data()},
                    outputTensorInfo,
                    output.data(),
                    dimension,
                    true);
 
-    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
-    return result;
-}
+    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        1.0f, 2.0f,
+        3.0f, 4.0f,
+        11.0f, 12.0f,
+        13.0f, 14.0f,
+        15.0f, 16.0f,
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset)
-{
-    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
-
-    LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
-        workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
-
-    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        1.0f, 2.0f, 3.0f,
-
-        // Batch 1
-        10.0f, 11.0f, 12.0f,
-
-        // Batch 2
-        4.0f, 5.0f, 6.0f,
-
-        // Batch 3
-        13.0f, 14.0f, 15.0f,
-
-        // Batch 4
-        7.0f, 8.0f, 9.0f,
+        5.0f, 6.0f,
+        7.0f, 8.0f,
+        17.0f, 18.0f,
+        19.0f, 20.0f,
+        21.0f, 22.0f,
 
-        // Batch 5
-        16.0f, 17.0f, 18.0f,
+        9.0f, 10.0f,
+        11.0f, 12.0f,
+        23.0f, 24.0f,
+        25.0f, 26.0f,
+        27.0f, 28.0f
     }));
 
     return result;
 }
 
-LayerTestResult<float, 2> Concatenation2dDim0Test(
+LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
+        workloadFactory, memoryManager, 0.0f, 0);
 }
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
+LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
-    int32_t qOffset)
+    int32_t qOffset,
+    bool useSubtensor)
 {
-    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
-
-    LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
-        workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
-
-    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
+    unsigned int dimension = 3;
+    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
 
-        // Batch 1
-        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
+    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
+        1.0f, 2.0f,
+        3.0f, 4.0f,
+        5.0f, 6.0f,
+        7.0f, 8.0f,
+        9.0f, 10.0f,
+        11.0f, 12.0f
     }));
 
-    return result;
-}
-
-LayerTestResult<float, 2> Concatenation2dDim1Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset)
-{
-    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
-    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        1.0f, 2.0f, 3.0f,
-
-        // Batch 1
-        10.0f, 11.0f, 12.0f,
-    }));
+    armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);
 
-    armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
-    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        4.0f, 5.0f, 6.0f,
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
+        11.0f, 12.0f, 13.0f,
+        14.0f, 15.0f, 16.0f,
 
-        // Batch 1
-        13.0f, 14.0f, 15.0f,
+        17.0f, 18.0f, 19.0f,
+        20.0f, 21.0f, 22.0f,
 
-        // Batch 0
-        7.0f, 8.0f, 9.0f,
+        23.0f, 24.0f, 25.0f,
+        26.0f, 27.0f, 28.0f
     }));
 
-    armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
-    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 1
-        16.0f, 17.0f, 18.0f,
-    }));
+    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);
 
-    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
-    LayerTestResult<T, 2> result(outputTensorInfo);
+    LayerTestResult<T, 4> result(outputTensorInfo);
 
     std::vector<T> output;
     output.resize(outputTensorInfo.GetNumElements());
-    Concatenate<T>(workloadFactory, memoryManager,
-                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
-                   { input0.data(), input1.data(), input2.data() },
+    Concatenate<T>(workloadFactory,
+                   memoryManager,
+                   {inputTensorInfo0, inputTensorInfo1},
+                   {input0.data(), input1.data()},
                    outputTensorInfo,
                    output.data(),
-                   0,
-                   true);
-
-    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        1.0f, 2.0f, 3.0f,
-
-        // Batch 1
-        10.0f, 11.0f, 12.0f,
-
-        // Batch 2
-        4.0f, 5.0f, 6.0f,
-
-        // Batch 3
-        13.0f, 14.0f, 15.0f,
-
-        // Batch 4
-        7.0f, 8.0f, 9.0f,
+                   dimension,
+                   useSubtensor);
 
-        // Batch 5
-        16.0f, 17.0f, 18.0f,
+    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
+        3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
+        5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
+        7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
+        9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
+        11.0f, 12.0f, 26.0f, 27.0f, 28.0f
     }));
 
     return result;
 }
 
-LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(
+LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    bool useSubtensor)
 {
-    return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
-        workloadFactory, memoryManager, 0.0f, 0);
+    return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
+        workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
 }
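These Float32 entry points are what the per-backend suites register. As an illustration, hypothetical wiring in a backend test file such as RefLayerTests.cpp, assuming the ARMNN_AUTO_TEST_CASE registration macro those suites conventionally use (trailing arguments are forwarded to the test function):

// Hypothetical registration; the macro name and the forwarding of the
// useSubtensor argument are assumptions based on the existing backend suites.
ARMNN_AUTO_TEST_CASE(Concatenation4dDiffShapeDim3, Concatenation4dDiffShapeDim3Test, true)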
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
+LayerTestResult<float, 2> FakeQuantizationTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
-    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        1.0f, 2.0f, 3.0f,
+    constexpr unsigned int width = 2;
+    constexpr unsigned int height = 3;
 
-        // Batch 1
-        10.0f, 11.0f, 12.0f,
+    const armnn::TensorInfo tensorInfo({ height, width }, armnn::DataType::Float32);
+    auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
+       -10.0f,  -5.0f,
+         0.0f,   5.0f,
+        10.0f,  10.0f
     }));
 
-    armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
-    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
+    LayerTestResult<float, 2> ret(tensorInfo);
 
-        // Batch 1
-        13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
-    }));
+    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(tensorInfo);
 
-    armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
-    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        9.0f,
+    std::unique_ptr<armnn::ITensorHandle> outputHandle  = workloadFactory.CreateTensorHandle(tensorInfo);
 
-        // Batch 1
-        18.0f
-    }));
+    armnn::FakeQuantizationQueueDescriptor data;
+    armnn::WorkloadInfo info;
 
-    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
-    LayerTestResult<T, 2> result(outputTensorInfo);
+    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
+    float min = -10.f;
+    float max = 10.f;
 
-    std::vector<T> output;
-    output.resize(outputTensorInfo.GetNumElements());
-    Concatenate<T>(workloadFactory, memoryManager,
-                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
-                   { input0.data(), input1.data(), input2.data() },
-                   outputTensorInfo,
-                   output.data(),
-                   1,
-                   true);
+    data.m_Parameters.m_Min = min;
+    data.m_Parameters.m_Max = max;
 
-    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
+    armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
+    armnn::FakeQuantizationQueueDescriptor refData = data;
+    armnn::WorkloadInfo refInfo = info;
+    SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
 
-        // Batch 1
-        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
-    }));
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
 
-    return result;
-}
+    inputHandle->Allocate();
+    outputHandle->Allocate();
 
-LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
-        workloadFactory, memoryManager, 0.0f, 0);
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
+
+    ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
+        0.0f,     63.0f,
+        128.0f,   191.0f,
+        255.0f,   255.0f
+    }));
+    return ret;
 }
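For orientation, FakeQuantizationTest maps [min, max] = [-10, 10] onto the uint8 range. A minimal sketch of that affine quantization, assuming the usual scale/offset formulation; the reference workload's offset rounding can land individual in-between values one step away from this naive version:

#include <algorithm>
#include <cmath>

// Naive fake quantization onto 8 bits: divide [min, max] into 255 steps,
// snap the input to the nearest step and clamp to [0, 255]. The exact
// offset rounding in the reference workload may differ by one step.
float FakeQuantizeSketch(float x, float min, float max)
{
    const float scale  = (max - min) / 255.0f;  // 20 / 255 for this test
    const float offset = -min / scale;          // nominal zero point: 127.5
    const float q      = std::round(x / scale + offset);
    return std::min(255.0f, std::max(0.0f, q));
}

// FakeQuantizeSketch(-10.0f, -10.0f, 10.0f) == 0.0f
// FakeQuantizeSketch( 10.0f, -10.0f, 10.0f) == 255.0f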
 
+namespace
+{
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 3> Concatenation3dTestImpl(
+LayerTestResult<T, 4> L2NormalizationTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::TensorInfo& outputTensorInfo,
-    unsigned int dimension,
-    bool useSubtensor,
-    float qScale,
-    int32_t qOffset)
+    const armnn::TensorShape& inputOutputTensorShape,
+    float scale,
+    int32_t offset,
+    const std::vector<float>& inputValues,
+    float outScale,
+    int32_t outOffset,
+    const std::vector<float>& expectedOutputValues,
+    const armnn::DataLayout layout,
+    float epsilon = 1e-12f)
 {
-    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
-
-    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f,
-
-        // Batch 0, Channel 1
-        3.0f, 4.0f,
-
-        // Batch 0, Channel 2
-        5.0f, 6.0f,
-
-        // Batch 1, Channel 0
-        19.0f, 20.0f,
-
-        // Batch 1, Channel 1
-        21.0f, 22.0f,
-
-        // Batch 1, Channel 2
-        23.0f, 24.0f
-    }));
-
-    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        7.0f, 8.0f,
-
-        // Batch 0, Channel 1
-        9.0f, 10.0f,
+    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
+    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);
 
-        // Batch 0, Channel 2
-        11.0f, 12.0f,
+    // At this point, permute the input data if required.
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    std::vector<float> inputData = inputValues;
+    if (layout == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
+        inputData = tmp;
+    }
 
-        // Batch 1, Channel 0
-        25.0f, 26.0f,
+    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
+                                                         inputTensorInfo.GetQuantizationScale(),
+                                                         inputTensorInfo.GetQuantizationOffset(),
+                                                         inputData));
 
-        // Batch 1, Channel 1
-        27.0f, 28.0f,
+    std::vector<float> expectedOutputData = expectedOutputValues;
+    if (layout == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(expectedOutputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
+                            sizeof(float));
+        expectedOutputData = tmp;
+    }
 
-        // Batch 1, Channel 2
-        29.0f, 30.0f
-    }));
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+                                                               outputTensorInfo.GetQuantizationScale(),
+                                                               outputTensorInfo.GetQuantizationOffset(),
+                                                               expectedOutputData));
 
-    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        13.0f, 14.0f,
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
-        // Batch 0, Channel 1
-        15.0f, 16.0f,
+    armnn::L2NormalizationQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_Eps = epsilon;
+    descriptor.m_Parameters.m_DataLayout = layout;
+    armnn::WorkloadInfo info;
 
-        // Batch 0, Channel 2
-        17.0f, 18.0f,
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-        // Batch 1, Channel 0
-        31.0f, 32.0f,
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
 
-        // Batch 1, Channel 1
-        33.0f, 34.0f,
+    inputHandle->Allocate();
+    outputHandle->Allocate();
 
-        // Batch 1, Channel 2
-        35.0f, 36.0f
-    }));
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
 
-    LayerTestResult<T, 3> result(outputTensorInfo);
+    workload->PostAllocationConfigure();
+    ExecuteWorkload(*workload, memoryManager);
 
-    std::vector<T> output;
-    output.resize(outputTensorInfo.GetNumElements());
-    Concatenate<T>(workloadFactory, memoryManager,
-                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
-                   { input0.data(), input1.data(), input2.data() },
-                   outputTensorInfo,
-                   output.data(),
-                   dimension,
-                   useSubtensor);
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
 
-    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
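+// Returns the reciprocal L2 norm, 1 / sqrt(sum of squares), of the given values.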
+float CalcInvL2Norm(std::initializer_list<float> elements)
+{
+    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
+        [](float acc, float element) { return acc + element * element; });
+    return 1.0f / sqrtf(reduction);
+}
+
+} // anonymous namespace
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> Pad2dTestCommon(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
-    int32_t qOffset)
+    int32_t qOffset,
+    const float customPaddingValue)
 {
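+    // Pads a 3x3 input with two elements of customPaddingValue on every edge,
+    // producing a 7x7 output.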
-    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
-
-    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
-        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
-
-    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f,
-
-        // Batch 0, Channel 1
-        3.0f, 4.0f,
-
-        // Batch 0, Channel 2
-        5.0f, 6.0f,
+    const armnn::TensorShape inputShape{ 3, 3 };
+    const armnn::TensorShape outputShape{ 7, 7 };
 
-        // Batch 1, Channel 0
-        19.0f, 20.0f,
+    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
 
-        // Batch 1, Channel 1
-        21.0f, 22.0f,
+    std::vector<T> inputValues(
+    QuantizedVector<T>(qScale, qOffset,
+    {
+      // Height (3) x Width (3)
+      4, 8, 6,
+      7, 4, 4,
+      3, 2, 4
+    }));
 
-        // Batch 1, Channel 2
-        23.0f, 24.0f,
+    const float p = customPaddingValue;
+    std::vector<T> expectedOutputValues(
+    QuantizedVector<T>(qScale, qOffset,
+    {
+      p, p, p, p, p, p, p,
+      p, p, p, p, p, p, p,
+      p, p, 4, 8, 6, p, p,
+      p, p, 7, 4, 4, p, p,
+      p, p, 3, 2, 4, p, p,
+      p, p, p, p, p, p, p,
+      p, p, p, p, p, p, p
+    }));
 
-        // Batch 2, Channel 0
-        7.0f, 8.0f,
+    auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
 
-        // Batch 2, Channel 1
-        9.0f, 10.0f,
+    LayerTestResult<T, 2> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
 
-        // Batch 2, Channel 2
-        11.0f, 12.0f,
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
-        // Batch 3, Channel 0
-        25.0f, 26.0f,
+    armnn::PadQueueDescriptor descriptor;
 
-        // Batch 3, Channel 1
-        27.0f, 28.0f,
+    std::vector<std::pair<unsigned int, unsigned int>> padList;
+    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
 
-        // Batch 3, Channel 2
-        29.0f, 30.0f,
+    descriptor.m_Parameters.m_PadList = padList;
+    descriptor.m_Parameters.m_PadValue = customPaddingValue;
+    armnn::WorkloadInfo info;
 
-        // Batch 4, Channel 0
-        13.0f, 14.0f,
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-        // Batch 4, Channel 1
-        15.0f, 16.0f,
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
 
-        // Batch 4, Channel 2
-        17.0f, 18.0f,
+    inputHandle->Allocate();
+    outputHandle->Allocate();
 
-        // Batch 5, Channel 0
-        31.0f, 32.0f,
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
 
-        // Batch 5, Channel 1
-        33.0f, 34.0f,
+    workload->PostAllocationConfigure();
+    workload->Execute();
 
-        // Batch 5, Channel 2
-        35.0f, 36.0f
-    }));
+    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
 
     return result;
 }
 
-LayerTestResult<float, 3> Concatenation3dDim0Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 3> Pad3dTestCommon(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
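+    // Pads a 2x2x2 input with (before, after) counts of (0,1), (2,1) and (2,2)
+    // zero elements per dimension, producing a 3x5x6 output.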
-    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);
+    const armnn::TensorShape inputShape{ 2, 2, 2 };
+    const armnn::TensorShape outputShape{ 3, 5, 6 };
 
-    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
-        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
+    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
 
-    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f,
+    std::vector<T> inputValues(
+      QuantizedVector<T>(qScale, qOffset,
+    {
+        // Channel 0, Height (2) x Width (2)
+        0, 4,
+        2, 5,
 
-        // Batch 0, Channel 1
-        3.0f, 4.0f,
+        // Channel 1, Height (2) x Width (2)
+        6, 1,
+        5, 2
+    }));
 
-        // Batch 0, Channel 2
-        5.0f, 6.0f,
+    std::vector<T> expectedOutputValues(
+      QuantizedVector<T>(qScale, qOffset,
+    {
 
-        // Batch 0, Channel 3
-        7.0f, 8.0f,
+        0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0,
+        0, 0, 0, 4, 0, 0,
+        0, 0, 2, 5, 0, 0,
+        0, 0, 0, 0, 0, 0,
 
-        // Batch 0, Channel 4
-        9.0f, 10.0f,
+        0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0,
+        0, 0, 6, 1, 0, 0,
+        0, 0, 5, 2, 0, 0,
+        0, 0, 0, 0, 0, 0,
 
-        // Batch 0, Channel 5
-        11.0f, 12.0f,
+        0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0
 
-        // Batch 0, Channel 6
-        13.0f, 14.0f,
+    }));
 
-        // Batch 0, Channel 7
-        15.0f, 16.0f,
+    auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
 
-        // Batch 0, Channel 8
-        17.0f, 18.0f,
+    LayerTestResult<T, 3> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
 
-        // Batch 1, Channel 0
-        19.0f, 20.0f,
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
-        // Batch 1, Channel 1
-        21.0f, 22.0f,
+    armnn::PadQueueDescriptor descriptor;
 
-        // Batch 1, Channel 2
-        23.0f, 24.0f,
+    std::vector<std::pair<unsigned int, unsigned int>> padList;
+    padList.push_back(std::pair<unsigned int, unsigned int>(0,1));
+    padList.push_back(std::pair<unsigned int, unsigned int>(2,1));
+    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
 
-        // Batch 1, Channel 3
-        25.0f, 26.0f,
+    descriptor.m_Parameters.m_PadList = padList;
+    armnn::WorkloadInfo info;
 
-        // Batch 1, Channel 4
-        27.0f, 28.0f,
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-        // Batch 1, Channel 5
-        29.0f, 30.0f,
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
 
-        // Batch 1, Channel 6
-        31.0f, 32.0f,
+    inputHandle->Allocate();
+    outputHandle->Allocate();
 
-        // Batch 1, Channel 7
-        33.0f, 34.0f,
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
 
-        // Batch 1, Channel 8
-        35.0f, 36.0f
-    }));
+    workload->PostAllocationConfigure();
+    workload->Execute();
 
-    return result;
-}
+    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
 
-LayerTestResult<float, 3> Concatenation3dDim1Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> Pad4dTestCommon(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    bool useSubtensor,
     float qScale,
     int32_t qOffset)
 {
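+    // Pads a 2x2x3x2 input with (before, after) counts of (1,1), (2,1), (3,1) and (1,1)
+    // zero elements per dimension, producing a 4x5x7x4 output.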
-    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
-
-    LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
-        workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
-
-    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
+    const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
+    const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
 
-        // Batch 0, Channel 1
-        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
+    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
 
-        // Batch 0, Channel 2
-        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
+    std::vector<T> inputValues(
+      QuantizedVector<T>(qScale, qOffset,
+    {
+        // Batch 0, Channel 0, Height (3) x Width (2)
+        0, 1,
+        2, 3,
+        4, 5,
 
-        // Batch 1, Channel 0
-        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
+        // Batch 0, Channel 1, Height (3) x Width (2)
+        6, 7,
+        8, 9,
+        10, 11,
 
-        // Batch 1, Channel 1
-        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
+        // Batch 1, Channel 0, Height (3) x Width (2)
+        12, 13,
+        14, 15,
+        16, 17,
 
-        // Batch 1, Channel 2
-        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
+        // Batch 1, Channel 1, Height (3) x Width (2)
+        18, 19,
+        20, 21,
+        22, 23
     }));
 
-    return result;
-}
+    std::vector<T> expectedOutputValues(
+      QuantizedVector<T>(qScale, qOffset,
+    {
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
 
-LayerTestResult<float, 3> Concatenation3dDim2Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    bool useSubtensor)
-{
-    return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
-        workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
-}
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset)
-{
-    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
-    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-            // Batch 0, Channel 0
-            1.0f, 2.0f,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
 
-            // Batch 0, Channel 1
-            3.0f, 4.0f,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
 
-            // Batch 0, Channel 2
-            5.0f, 6.0f,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
 
-            // Batch 1, Channel 0
-            19.0f, 20.0f,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
 
-            // Batch 1, Channel 1
-            21.0f, 22.0f,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
 
-            // Batch 1, Channel 2
-            23.0f, 24.0f
-    }));
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 1, 0,
+        0, 2, 3, 0,
+        0, 4, 5, 0,
+        0, 0, 0, 0,
 
-    armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
-    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-            // Batch 0, Channel 0
-            7.0f, 8.0f,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 6, 7, 0,
+        0, 8, 9, 0,
+        0, 10, 11, 0,
+        0, 0, 0, 0,
 
-            // Batch 0, Channel 1
-            9.0f, 10.0f,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
 
-            // Batch 0, Channel 2
-            11.0f, 12.0f,
-    }));
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
 
-    armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
-    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-            // Batch 0, Channel 0
-            25.0f, 26.0f,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
 
-            // Batch 0, Channel 1
-            27.0f, 28.0f,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 12, 13, 0,
+        0, 14, 15, 0,
+        0, 16, 17, 0,
+        0, 0, 0, 0,
 
-            // Batch 0, Channel 2
-            29.0f, 30.0f,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 18, 19, 0,
+        0, 20, 21, 0,
+        0, 22, 23, 0,
+        0, 0, 0, 0,
 
-            // Batch 1, Channel 0
-            13.0f, 14.0f,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
 
-            // Batch 1, Channel 1
-            15.0f, 16.0f,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
 
-            // Batch 1, Channel 2
-            17.0f, 18.0f,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
 
-            // Batch 2, Channel 0
-            31.0f, 32.0f,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
 
-            // Batch 2, Channel 1
-            33.0f, 34.0f,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
 
-            // Batch 2, Channel 2
-            35.0f, 36.0f
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0
     }));
 
-    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
-    LayerTestResult<T, 3> result(outputTensorInfo);
-
-    std::vector<T> output;
-    output.resize(outputTensorInfo.GetNumElements());
-    Concatenate<T>(workloadFactory, memoryManager,
-                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
-                   { input0.data(), input1.data(), input2.data() },
-                   outputTensorInfo,
-                   output.data(),
-                   0,
-                   true);
-
-    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f,
-
-        // Batch 0, Channel 1
-        3.0f, 4.0f,
-
-        // Batch 0, Channel 2
-        5.0f, 6.0f,
-
-        // Batch 1, Channel 0
-        19.0f, 20.0f,
-
-        // Batch 1, Channel 1
-        21.0f, 22.0f,
-
-        // Batch 1, Channel 2
-        23.0f, 24.0f,
-
-        // Batch 2, Channel 0
-        7.0f, 8.0f,
+    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
 
-        // Batch 2, Channel 1
-        9.0f, 10.0f,
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
 
-        // Batch 2, Channel 2
-        11.0f, 12.0f,
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
-        // Batch 3, Channel 0
-        25.0f, 26.0f,
+    armnn::PadQueueDescriptor descriptor;
 
-        // Batch 3, Channel 1
-        27.0f, 28.0f,
+    std::vector<std::pair<unsigned int, unsigned int>> padList;
+    padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+    padList.push_back(std::pair<unsigned int, unsigned int>(2,1));
+    padList.push_back(std::pair<unsigned int, unsigned int>(3,1));
+    padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
 
-        // Batch 3, Channel 2
-        29.0f, 30.0f,
+    descriptor.m_Parameters.m_PadList = padList;
+    armnn::WorkloadInfo info;
 
-        // Batch 4, Channel 0
-        13.0f, 14.0f,
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-        // Batch 4, Channel 1
-        15.0f, 16.0f,
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
 
-        // Batch 4, Channel 2
-        17.0f, 18.0f,
+    inputHandle->Allocate();
+    outputHandle->Allocate();
 
-        // Batch 5, Channel 0
-        31.0f, 32.0f,
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
 
-        // Batch 5, Channel 1
-        33.0f, 34.0f,
+    workload->PostAllocationConfigure();
+    workload->Execute();
 
-        // Batch 5, Channel 2
-        35.0f, 36.0f
-    }));
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
 
     return result;
 }
 
-LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(
+LayerTestResult<uint8_t, 2> PadUint82dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
-        workloadFactory, memoryManager, 0.0f, 0);
+    return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
+LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
-    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f,
+    return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0, 1.0f);
+}
 
-        // Batch 0, Channel 1
-        3.0f, 4.0f,
+LayerTestResult<uint8_t, 3> PadUint83dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
+}
 
-        // Batch 0, Channel 2
-        5.0f, 6.0f,
+LayerTestResult<uint8_t, 4> PadUint84dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
+}
 
-        // Batch 1, Channel 0
-        19.0f, 20.0f,
 
-        // Batch 1, Channel 1
-        21.0f, 22.0f,
+template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
+Pad2dTestCommon<armnn::DataType::QuantisedSymm16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset,
+    const float customPaddingValue);
 
-        // Batch 1, Channel 2
-        23.0f, 24.0f
-    }));
+template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
+Pad3dTestCommon<armnn::DataType::QuantisedSymm16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset);
 
-    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
-    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        7.0f, 8.0f,
+template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
+Pad4dTestCommon<armnn::DataType::QuantisedSymm16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset);
 
-        // Batch 0, Channel 1
-        9.0f, 10.0f,
+LayerTestResult<float, 2> PadFloat322dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+}
 
-        // Batch 0, Channel 2
-        11.0f, 12.0f,
+LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, 1.0f);
+}
 
-        // Batch 0, Channel 3
-        25.0f, 26.0f,
+LayerTestResult<float, 3> PadFloat323dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+}
 
-        // Batch 1, Channel 0
-        27.0f, 28.0f,
+LayerTestResult<float, 4> PadFloat324dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+}
 
-        // Batch 1, Channel 1
-        29.0f, 30.0f,
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        float scale,
+        int32_t offset,
+        float outScale,
+        int32_t outOffset,
+        const armnn::DataLayout layout,
+        float epsilon)
+{
+    // Width: 1
+    // Height: 1
+    // Channels: 3
+    // BatchSize: 1
+    unsigned int numberOfBatches = 1;
+    unsigned int numberOfChannels = 3;
+    unsigned int height = 1;
+    unsigned int width = 1;
 
-        // Batch 1, Channel 2
-        13.0f, 14.0f,
+    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
+            numberOfBatches, numberOfChannels, height, width, layout);
 
-        // Batch 1, Channel 3
-        15.0f, 16.0f,
-    }));
+    // 0.00000001^2 + 0.00000002^2 + 0.00000003^2 < 1e-12,
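+    // so the test expects every output to be the input scaled by 1 / sqrt(epsilon).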
+    std::vector<float> inputValues
+    {
+        // Batch 0, Channel 0, Height (1) x Width (1)
+        0.00000001f,
 
-    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
-    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        17.0f, 18.0f,
+        // Batch 0, Channel 1, Height (1) x Width (1)
+        0.00000002f,
 
-        // Batch 1, Channel 0
-        31.0f, 32.0f,
-    }));
+        // Batch 0, Channel 2, Height (1) x Width (1)
+        0.00000003f,
+    };
 
-    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
-    LayerTestResult<T, 3> result(outputTensorInfo);
+    const float approxInvL2Norm = 1.f / sqrtf(epsilon);
+    std::vector<float> expectedOutputValues
+    {
+        // Batch 0, Channel 0, Height (1) x Width (1)
+        0.00000001f * approxInvL2Norm,
+        0.00000002f * approxInvL2Norm,
+        0.00000003f * approxInvL2Norm,
+    };
 
-    std::vector<T> output;
-    output.resize(outputTensorInfo.GetNumElements());
-    Concatenate<T>(workloadFactory, memoryManager,
-                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
-                   { input0.data(), input1.data(), input2.data() },
-                   outputTensorInfo,
-                   output.data(),
-                   1,
-                   true);
+    return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
+                                              inputValues, outScale, outOffset, expectedOutputValues, layout,
+                                              epsilon);
+}
 
-    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f,
 
-        // Batch 0, Channel 1
-        3.0f, 4.0f,
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> L2Normalization1dTestCommon(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        float scale,
+        int32_t offset,
+        float outScale,
+        int32_t outOffset,
+        const armnn::DataLayout layout)
+{
+    // Width: 1
+    // Height: 1
+    // Channels: 10
+    // BatchSize: 1
+    unsigned int numberOfBatches = 1;
+    unsigned int numberOfChannels = 10;
+    unsigned int height = 1;
+    unsigned int width = 1;
 
-        // Batch 0, Channel 2
-        5.0f, 6.0f,
 
-        // Batch 0, Channel 3
-        7.0f, 8.0f,
+    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
+            numberOfBatches, numberOfChannels, height, width, layout);
+    std::vector<float> inputValues
+    {
+        // Batch 0, Channel 0, Height (1) x Width (1)
+        1.0f,
 
-        // Batch 0, Channel 4
-        9.0f, 10.0f,
+        // Batch 0, Channel 1, Height (1) x Width (1)
+        2.0f,
 
-        // Batch 0, Channel 5
-        11.0f, 12.0f,
+        // Batch 0, Channel 2, Height (1) x Width (1)
+        3.0f,
 
-        // Batch 0, Channel 6
-        25.0f, 26.0f,
+        // Batch 0, Channel 3, Height (1) x Width (1)
+        4.0f,
 
-        // Batch 0, Channel 7
-        17.0f, 18.0f,
+        // Batch 0, Channel 4, Height (1) x Width (1)
+        5.0f,
 
-        // Batch 1, Channel 0
-        19.0f, 20.0f,
+        // Batch 0, Channel 5, Height (1) x Width (1)
+        6.0f,
 
-        // Batch 1, Channel 1
-        21.0f, 22.0f,
+        // Batch 0, Channel 6, Height (1) x Width (1)
+        7.0f,
 
-        // Batch 1, Channel 2
-        23.0f, 24.0f,
+        // Batch 0, Channel 7, Height (1) x Width (1)
+        8.0f,
 
-        // Batch 1, Channel 3
-        27.0f, 28.0f,
+        // Batch 0, Channel 8, Height (1) x Width (1)
+        9.0f,
 
-        // Batch 1, Channel 4
-        29.0f, 30.0f,
+        // Batch 0, Channel 9, Height (1) x Width (1)
+        10.0f
+    };
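+    // 1 / sqrt(1^2 + 2^2 + ... + 10^2) = 1 / sqrt(385)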
+    const float approxInvL2Norm = 0.050964719f;
+    std::vector<float> expectedOutputValues
+    {
+        // Batch 0, Channel 0, Height (1) x Width (1)
+        1.0f * approxInvL2Norm,
+        2.0f * approxInvL2Norm,
+        3.0f * approxInvL2Norm,
+        4.0f * approxInvL2Norm,
+        5.0f * approxInvL2Norm,
+        6.0f * approxInvL2Norm,
+        7.0f * approxInvL2Norm,
+        8.0f * approxInvL2Norm,
+        9.0f * approxInvL2Norm,
+        10.0f * approxInvL2Norm
+    };
 
-        // Batch 1, Channel 5
-        13.0f, 14.0f,
 
-        // Batch 1, Channel 6
-        15.0f, 16.0f,
+    return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
+                                              inputValues, outScale, outOffset, expectedOutputValues, layout);
+}
 
-        // Batch 1, Channel 7
-        31.0f, 32.0f,
-    }));
+LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout layout)
+{
+    // Dummy descriptor to get the default value of epsilon.
+    armnn::L2NormalizationDescriptor descriptor;
 
-    return result;
+    return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
+                                                                      layout, descriptor.m_Eps);
 }
 
-LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(
+LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout layout)
+{
+    return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
+                                                                      layout, 1e-9f);
+}
+
+LayerTestResult<float, 4> L2Normalization1dTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout layout)
 {
-    return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
-        workloadFactory, memoryManager, 0.0f, 0);
+    return L2Normalization1dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
+                                                                 layout);
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
+LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    bool useSubtensor,
-    float qScale,
-    int32_t qOffset)
+    const armnn::DataLayout layout)
 {
-    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
-    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f,
+    return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
+                                                                         layout);
+}
 
-        // Batch 0, Channel 1
-        3.0f, 4.0f,
+LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout layout)
+{
+    return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
+                                                                         1.f/128, 128, layout);
+}
 
-        // Batch 0, Channel 2
-        5.0f, 6.0f,
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> L2Normalization2dTestCommon(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float scale,
+    int32_t offset,
+    float outScale,
+    int32_t outOffset,
+    const armnn::DataLayout layout)
+{
+    // Width: 5
+    // Height: 1
+    // Channels: 2
+    // BatchSize: 1
+    unsigned int numberOfBatches = 1;
+    unsigned int numberOfChannels = 2;
+    unsigned int height = 1;
+    unsigned int width = 5;
 
-        // Batch 1, Channel 0
-        19.0f, 20.0f,
+    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
+            numberOfBatches, numberOfChannels, height, width, layout);
+    std::vector<float> inputValues
+    {
+        // Batch 0, Channel 0, Height (1) x Width (5)
+        1.0f, 3.0f, 5.0f, 7.0f,  9.0f,
 
-        // Batch 1, Channel 1
-        21.0f, 22.0f,
+        // Batch 0, Channel 1, Height (1) x Width (5)
+        2.0f, 4.0f, 6.0f, 8.0f, 10.0f
+    };
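+    // Normalization runs across the channel dimension, so each output is the input
+    // scaled by the inverse L2 norm of the two channel values at that position.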
+    std::vector<float> expectedOutputValues
+    {
+        // Batch 0, Channel 0, Height (1) x Width (5)
+        1.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
+        3.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
+        5.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
+        7.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
+        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
 
-        // Batch 1, Channel 2
-        23.0f, 24.0f
-    }));
+        // Batch 0, Channel 1, Height (1) x Width (5)
+        2.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
+        4.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
+        6.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
+        8.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
+        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
+    };
 
-    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
-    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        7.0f,
+    return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
+                                              inputValues, outScale, outOffset, expectedOutputValues, layout);
+}
 
-        // Batch 0, Channel 1
-        9.0f,
+LayerTestResult<float, 4> L2Normalization2dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout layout)
+{
+    return L2Normalization2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
+                                                                 layout);
+}
 
-        // Batch 0, Channel 2
-        11.0f,
+LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout layout)
+{
+    return L2Normalization2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
+                                                                         layout);
+}
 
-        // Batch 1, Channel 0
-        25.0f,
+LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout layout)
+{
+    return L2Normalization2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
+                                                                         1.f/128, 128, layout);
+}
 
-        // Batch 1, Channel 1
-        27.0f,
+LayerTestResult<float, 2> L2Normalization2dShapeTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
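+    // Checks that L2 normalization accepts a rank-2 input: with NHWC the last
+    // dimension is treated as channels, so each pair of values is normalized together.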
+    const armnn::DataLayout layout = armnn::DataLayout::NHWC;
+    const armnn::TensorShape inputOutputTensorShape = armnn::TensorShape({ 5, 2 });
 
-        // Batch 1, Channel 2
-        29.0f
-    }));
+    std::vector<float> inputData
+    {
+        1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f
+    };
+    std::vector<float> expectedOutputData
+    {
+        1.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
+        2.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
+        3.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
+        4.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
+        5.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
+        6.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
+        7.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
+        8.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
+        9.0f  * CalcInvL2Norm({ 9.0f, 10.0f }),
+        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
+    };
 
-    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
-    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        13.0f, 14.0f, 50.0f,
+    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
+    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
 
-        // Batch 0, Channel 1
-        15.0f, 16.0f, 51.0f,
+    auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, QuantizedVector<float>(
+                                                             inputTensorInfo.GetQuantizationScale(),
+                                                             inputTensorInfo.GetQuantizationOffset(),
+                                                             inputData));
 
-        // Batch 0, Channel 2
-        17.0f, 18.0f, 52.0f,
+    LayerTestResult<float, 2> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, QuantizedVector<float>(
+                                                                   outputTensorInfo.GetQuantizationScale(),
+                                                                   outputTensorInfo.GetQuantizationOffset(),
+                                                                   expectedOutputData));
 
-        // Batch 1, Channel 0
-        31.0f, 32.0f, 53.0f,
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
-        // Batch 1, Channel 1
-        33.0f, 34.0f, 54.0f,
+    armnn::L2NormalizationQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_Eps = 1e-12f;
+    descriptor.m_Parameters.m_DataLayout = layout;
+    armnn::WorkloadInfo info;
 
-        // Batch 1, Channel 2
-        35.0f, 36.0f, 55.0f,
-    }));
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
-    LayerTestResult<T, 3> result(outputTensorInfo);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
 
-    std::vector<T> output;
-    output.resize(outputTensorInfo.GetNumElements());
-    Concatenate<T>(workloadFactory, memoryManager,
-                   { input0TensorInfo, input1TensorInfo, input2TensorInfo },
-                   { input0.data(), input1.data(), input2.data() },
-                   outputTensorInfo,
-                   output.data(),
-                   2,
-                   useSubtensor);
+    inputHandle->Allocate();
+    outputHandle->Allocate();
 
-    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
 
-        // Batch 0, Channel 1
-        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
+    workload->PostAllocationConfigure();
+    ExecuteWorkload(*workload, memoryManager);
 
-        // Batch 0, Channel 2
-        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
+    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
 
-        // Batch 1, Channel 0
-        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
+    return result;
+}
 
-        // Batch 1, Channel 1
-        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> L2Normalization3dTestCommon(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float scale,
+    int32_t offset,
+    float outScale,
+    int32_t outOffset,
+    const armnn::DataLayout layout)
+{
+    // Width: 3
+    // Height: 4
+    // Channels: 2
+    // BatchSize: 1
+    unsigned int numberOfBatches = 1;
+    unsigned int numberOfChannels = 2;
+    unsigned int height = 4;
+    unsigned int width = 3;
 
-        // Batch 1, Channel 2
-        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
-    }));
+    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
+            numberOfBatches, numberOfChannels, height, width, layout);
+    std::vector<float> inputValues
+    {
+        // Batch 0, Channel 0, Height (4) x Width (3)
+        119.0f,  21.0f, 150.0f,
+        149.0f,  32.0f, 179.0f,
+         15.0f, 227.0f, 141.0f,
+        147.0f, 199.0f, 220.0f,
 
-    return result;
+        // Batch 0, Channel 1, Height (4) x Width (3)
+        110.0f, 140.0f,  73.0f,
+        211.0f, 212.0f,  89.0f,
+         24.0f, 138.0f, 188.0f,
+        162.0f,  12.0f, 161.0f
+    };
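+    // Each (height, width) position is normalized by the L2 norm of the two
+    // channel values at that position.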
+    std::vector<float> expectedOutputValues
+    {
+        // Batch 0, Channel 0, Height (4) x Width (3)
+        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
+         21.0f * CalcInvL2Norm({  21.0f, 140.0f }),
+        150.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
+        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
+         32.0f * CalcInvL2Norm({  32.0f, 212.0f }),
+        179.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
+         15.0f * CalcInvL2Norm({  15.0f,  24.0f }),
+        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
+        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
+        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
+        199.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
+        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
+
+        // Batch 0, Channel 1, Height (4) x Width (3)
+        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
+        140.0f * CalcInvL2Norm({  21.0f, 140.0f }),
+         73.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
+        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
+        212.0f * CalcInvL2Norm({  32.0f, 212.0f }),
+         89.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
+         24.0f * CalcInvL2Norm({  15.0f,  24.0f }),
+        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
+        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
+        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
+         12.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
+        161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
+    };
+
+    return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
+                                              inputValues, outScale, outOffset, expectedOutputValues, layout);
 }
 
-LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(
+LayerTestResult<float, 4> L2Normalization3dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    bool useSubtensor)
+    const armnn::DataLayout layout)
 {
-    return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
-        workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
+    return L2Normalization3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
+                                                                 layout);
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> Concatenation4dTestImpl(
+LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::TensorInfo& outputTensorInfo,
-    unsigned int dimension,
-    bool useSubtensor,
-    float qScale,
-    int32_t qOffset)
+    const armnn::DataLayout layout)
 {
-    armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
-
-    auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f
-    }));
+    return L2Normalization3dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
+                                                                         layout);
+}
 
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f
-    }));
-
-    auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        21.0f, 22.0f,
-        23.0f, 24.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f,
-        29.0f, 30.0f,
-        31.0f, 32.0f
-    }));
-
-    LayerTestResult<T, 4> result(outputTensorInfo);
-
-    std::vector<T> output;
-    output.resize(outputTensorInfo.GetNumElements());
-
-    Concatenate<T>(workloadFactory,
-                   memoryManager,
-                   {inputTensorInfo, inputTensorInfo, inputTensorInfo},
-                   {input0.data(), input1.data(), input2.data()},
-                   outputTensorInfo,
-                   output.data(),
-                   dimension,
-                   useSubtensor);
-
-    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
-    return result;
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset)
-{
-    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
-
-    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
-        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
-
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f,
-
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f,
-
-        21.0f, 22.0f,
-        23.0f, 24.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f,
-        29.0f, 30.0f,
-        31.0f, 32.0f
-    }));
-    return result;
-}
-
-LayerTestResult<float, 4> Concatenation4dDim0Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset)
-{
-    armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);
-
-    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
-        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
-
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f,
-
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f,
-
-        21.0f, 22.0f,
-        23.0f, 24.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f,
-        29.0f, 30.0f,
-        31.0f, 32.0f
-    }));
-
-    return result;
-}
-
-LayerTestResult<float, 4> Concatenation4dDim1Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset)
-{
-    armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);
-
-    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
-        workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
-
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        21.0f, 22.0f,
-        23.0f, 24.0f,
-
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f,
-
-        9.0f, 10.0f,
-        11.0f, 12.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f,
-        29.0f, 30.0f,
-        31.0f, 32.0f
-    }));
-
-    return result;
-}
-
-LayerTestResult<float, 4> Concatenation4dDim2Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset,
-    bool useSubtensor)
-{
-    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);
-
-    LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
-        workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
-
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        11.0f, 12.0f,
-        21.0f, 22.0f,
-        3.0f, 4.0f,
-        13.0f, 14.0f,
-        23.0f, 24.0f,
-
-        5.0f, 6.0f,
-        15.0f, 16.0f,
-        25.0f, 26.0f,
-        7.0f, 8.0f,
-        17.0f, 18.0f,
-        27.0f, 28.0f,
-
-        9.0f, 10.0f,
-        19.0f, 20.0f,
-        29.0f, 30.0f,
-        11.0f, 12.0f,
-        21.0f, 22.0f,
-        31.0f, 32.0f
-    }));
-
-    return result;
-}
-
-LayerTestResult<float, 4> Concatenation4dDim3Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    bool useSubtensor)
-{
-    return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
-        workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset)
-{
-    unsigned int dimension = 0;
-    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
-
-    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f
-    }));
-
-    armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);
-
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f,
-
-        21.0f, 22.0f,
-        23.0f, 24.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f,
-        29.0f, 30.0f,
-        31.0f, 32.0f
-
-    }));
-
-    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
-
-    LayerTestResult<T, 4> result(outputTensorInfo);
-
-    std::vector<T> output;
-    output.resize(outputTensorInfo.GetNumElements());
-    Concatenate<T>(workloadFactory,
-                   memoryManager,
-                   {inputTensorInfo0, inputTensorInfo1},
-                   {input0.data(), input1.data()},
-                   outputTensorInfo,
-                   output.data(),
-                   dimension,
-                   true);
-
-    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f,
-
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f,
-
-        21.0f, 22.0f,
-        23.0f, 24.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f,
-        29.0f, 30.0f,
-        31.0f, 32.0f
-    }));
-
-    return result;
-}
-
-LayerTestResult<float, 4> Concatenation4dDiffShapeDim0Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
-        workloadFactory, memoryManager, 0.0f, 0);
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset)
-{
-    unsigned int dimension = 1;
-    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
-
-    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f
-    }));
-
-    armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);
-
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f
-    }));
-
-    armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);
-
-    LayerTestResult<T, 4> result(outputTensorInfo);
-
-    std::vector<T> output;
-    output.resize(outputTensorInfo.GetNumElements());
-    Concatenate<T>(workloadFactory,
-                   memoryManager,
-                   {inputTensorInfo0, inputTensorInfo1},
-                   {input0.data(), input1.data()},
-                   outputTensorInfo,
-                   output.data(),
-                   dimension,
-                   true);
-
-    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f,
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f
-    }));
-
-    return result;
-}
-
-LayerTestResult<float, 4> Concatenation4dDiffShapeDim1Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
-        workloadFactory, memoryManager, 0.0f, 0);
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset)
-{
-    unsigned int dimension = 2;
-    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
-
-    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f
-    }));
-
-    armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);
-
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f,
-        23.0f, 24.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f
-    }));
-
-    armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);
-
-    LayerTestResult<T, 4> result(outputTensorInfo);
-
-    std::vector<T> output;
-    output.resize(outputTensorInfo.GetNumElements());
-    Concatenate<T>(workloadFactory,
-                   memoryManager,
-                   {inputTensorInfo0, inputTensorInfo1},
-                   {input0.data(), input1.data()},
-                   outputTensorInfo,
-                   output.data(),
-                   dimension,
-                   true);
-
-    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        17.0f, 18.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f,
-
-        9.0f, 10.0f,
-        11.0f, 12.0f,
-        23.0f, 24.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f
-    }));
-
-    return result;
-}
-
-LayerTestResult<float, 4> Concatenation4dDiffShapeDim2Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
-        workloadFactory, memoryManager, 0.0f, 0);
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset,
-    bool useSubtensor)
-{
-    unsigned int dimension = 3;
-    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
-
-    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f
-    }));
-
-    armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);
-
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
-        11.0f, 12.0f, 13.0f,
-        14.0f, 15.0f, 16.0f,
-
-        17.0f, 18.0f, 19.0f,
-        20.0f, 21.0f, 22.0f,
-
-        23.0f, 24.0f, 25.0f,
-        26.0f, 27.0f, 28.0f
-    }));
-
-    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);
-
-    LayerTestResult<T, 4> result(outputTensorInfo);
-
-    std::vector<T> output;
-    output.resize(outputTensorInfo.GetNumElements());
-    Concatenate<T>(workloadFactory,
-                   memoryManager,
-                   {inputTensorInfo0, inputTensorInfo1},
-                   {input0.data(), input1.data()},
-                   outputTensorInfo,
-                   output.data(),
-                   dimension,
-                   useSubtensor);
-
-    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
-        3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
-        5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
-        7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
-        9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
-        11.0f, 12.0f, 26.0f, 27.0f, 28.0f
-    }));
-
-    return result;
-}
-
-LayerTestResult<float, 4> Concatenation4dDiffShapeDim3Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    bool useSubtensor)
-{
-    return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
-        workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
-}
-
-LayerTestResult<float, 2> FakeQuantizationTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    constexpr unsigned int width = 2;
-    constexpr unsigned int height = 3;
-
-    const armnn::TensorInfo tensorInfo({ height, width }, armnn::DataType::Float32);
-    auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
-       -10.0f,  -5.0f,
-         0.0f,   5.0f,
-        10.0f,  10.0f
-    }));
-
-    LayerTestResult<float, 2> ret(tensorInfo);
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
-
-    armnn::FakeQuantizationQueueDescriptor data;
-    armnn::WorkloadInfo info;
-
-    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
-    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
-    float min = -10.f;
-    float max = 10.f;
-
-    data.m_Parameters.m_Min = min;
-    data.m_Parameters.m_Max = max;
-
-    armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
-    armnn::FakeQuantizationQueueDescriptor refData = data;
-    armnn::WorkloadInfo refInfo = info;
-    SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
-
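-    // With m_Min/m_Max set to [-10, 10], the input range maps onto the full quantised output range [0, 255].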
-    ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
-        0.0f,     63.0f,
-        128.0f,   191.0f,
-        255.0f,   255.0f
-    }));
-    return ret;
-}
-
-namespace
-{
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> L2NormalizationTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::TensorShape& inputOutputTensorShape,
-    float scale,
-    int32_t offset,
-    const std::vector<float>& inputValues,
-    float outScale,
-    int32_t outOffset,
-    const std::vector<float>& expectedOutputValues,
-    const armnn::DataLayout layout,
-    float epsilon = 1e-12f)
-{
-    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
-    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);
-
-    // At this point, permute the input data if the requested layout requires it.
-    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
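-    // The permutation vector maps each source dimension i to destination dimension NCHWToNHWC[i].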
-    std::vector<float> inputData = inputValues;
-    if (layout == armnn::DataLayout::NHWC)
-    {
-        std::vector<float> tmp(inputData.size());
-        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
-        inputData = tmp;
-    }
-
-    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
-                                                         inputTensorInfo.GetQuantizationScale(),
-                                                         inputTensorInfo.GetQuantizationOffset(),
-                                                         inputData));
-
-    std::vector<float> expectedOutputData = expectedOutputValues;
-    if (layout == armnn::DataLayout::NHWC)
-    {
-        std::vector<float> tmp(expectedOutputData.size());
-        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
-                            sizeof(float));
-        expectedOutputData = tmp;
-    }
-
-    LayerTestResult<T, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
-                                                               outputTensorInfo.GetQuantizationScale(),
-                                                               outputTensorInfo.GetQuantizationOffset(),
-                                                               expectedOutputData));
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::L2NormalizationQueueDescriptor descriptor;
-    descriptor.m_Parameters.m_Eps = epsilon;
-    descriptor.m_Parameters.m_DataLayout = layout;
-    armnn::WorkloadInfo info;
-
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    ExecuteWorkload(*workload, memoryManager);
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-
-    return result;
-}
-
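-// Returns the reciprocal of the L2 (Euclidean) norm of the given values;
-// for example, CalcInvL2Norm({ 3.0f, 4.0f }) == 1.0f / sqrtf(9.0f + 16.0f) == 0.2f.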
-float CalcInvL2Norm(std::initializer_list<float> elements)
-{
-    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
-        [](float acc, float element) { return acc + element * element; });
-    return 1.0f / sqrtf(reduction);
-}
-
-} // anonymous namespace
-
-template<armnn::DataType ArmnnType, typename T>
-LayerTestResult<T, 2> Pad2dTestCommon(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset,
-    const float customPaddingValue)
-{
-    const armnn::TensorShape inputShape{ 3, 3 };
-    const armnn::TensorShape outputShape{ 7, 7 };
-
-    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
-    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
-
-    std::vector<T> inputValues(
-      QuantizedVector<T>(qScale, qOffset,
-    {
-      // Height (3) x Width (3)
-      4, 8, 6,
-      7, 4, 4,
-      3, 2, 4
-    }));
-
-    auto p = customPaddingValue;
-    std::vector<T> expectedOutputValues(
-      QuantizedVector<T>(qScale, qOffset,
-    {
-      p, p, p, p, p, p, p,
-      p, p, p, p, p, p, p,
-      p, p, 4, 8, 6, p, p,
-      p, p, 7, 4, 4, p, p,
-      p, p, 3, 2, 4, p, p,
-      p, p, p, p, p, p, p,
-      p, p, p, p, p, p, p
-    }));
-
-    auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
-
-    LayerTestResult<T, 2> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::PadQueueDescriptor descriptor;
-
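-    // Each pair is the amount of padding added (before, after) the corresponding dimension: 3x3 input -> 7x7 output.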
-    std::vector<std::pair<unsigned int, unsigned int>> padList;
-    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
-    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
-
-    descriptor.m_Parameters.m_PadList = padList;
-    descriptor.m_Parameters.m_PadValue = customPaddingValue;
-    armnn::WorkloadInfo info;
-
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
-
-    return result;
-}
-
-template<armnn::DataType ArmnnType, typename T>
-LayerTestResult<T, 3> Pad3dTestCommon(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset)
-{
-    const armnn::TensorShape inputShape{ 2, 2, 2 };
-    const armnn::TensorShape outputShape{ 3, 5, 6 };
-
-    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
-    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
-
-    std::vector<T> inputValues(
-      QuantizedVector<T>(qScale, qOffset,
-    {
-        // Channel 0, Height (2) x Width (2)
-        0, 4,
-        2, 5,
-
-        // Channel 1, Height (2) x Width (2)
-        6, 1,
-        5, 2
-    }));
-
-    std::vector<T> expectedOutputValues(
-      QuantizedVector<T>(qScale, qOffset,
-    {
-        0, 0, 0, 0, 0, 0,
-        0, 0, 0, 0, 0, 0,
-        0, 0, 0, 4, 0, 0,
-        0, 0, 2, 5, 0, 0,
-        0, 0, 0, 0, 0, 0,
-
-        0, 0, 0, 0, 0, 0,
-        0, 0, 0, 0, 0, 0,
-        0, 0, 6, 1, 0, 0,
-        0, 0, 5, 2, 0, 0,
-        0, 0, 0, 0, 0, 0,
-
-        0, 0, 0, 0, 0, 0,
-        0, 0, 0, 0, 0, 0,
-        0, 0, 0, 0, 0, 0,
-        0, 0, 0, 0, 0, 0,
-        0, 0, 0, 0, 0, 0
-    }));
-
-    auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
-
-    LayerTestResult<T, 3> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::PadQueueDescriptor descriptor;
-
-    std::vector<std::pair<unsigned int, unsigned int>> padList;
-    padList.push_back(std::pair<unsigned int, unsigned int>(0,1));
-    padList.push_back(std::pair<unsigned int, unsigned int>(2,1));
-    padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
-
-    descriptor.m_Parameters.m_PadList = padList;
-    armnn::WorkloadInfo info;
-
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
-
-    return result;
-}
-
-template<armnn::DataType ArmnnType, typename T>
-LayerTestResult<T, 4> Pad4dTestCommon(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset)
-{
-    const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
-    const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
-
-    const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
-    const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
-
-    std::vector<T> inputValues(
-      QuantizedVector<T>(qScale, qOffset,
-    {
-        // Batch 0, Channel 0, Height (3) x Width (2)
-        0, 1,
-        2, 3,
-        4, 5,
-
-        // Batch 0, Channel 1, Height (3) x Width (2)
-        6, 7,
-        8, 9,
-        10, 11,
-
-        // Batch 1, Channel 0, Height (3) x Width (2)
-        12, 13,
-        14, 15,
-        16, 17,
-
-        // Batch 1, Channel 1, Height (3) x Width (2)
-        18, 19,
-        20, 21,
-        22, 23
-    }));
-
-    std::vector<T> expectedOutputValues(
-      QuantizedVector<T>(qScale, qOffset,
-    {
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 1, 0,
-        0, 2, 3, 0,
-        0, 4, 5, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 6, 7, 0,
-        0, 8, 9, 0,
-        0, 10, 11, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 12, 13, 0,
-        0, 14, 15, 0,
-        0, 16, 17, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 18, 19, 0,
-        0, 20, 21, 0,
-        0, 22, 23, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0
-    }));
-
-    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
-
-    LayerTestResult<T, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::PadQueueDescriptor descriptor;
-
-    std::vector<std::pair<unsigned int, unsigned int>> padList;
-    padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
-    padList.push_back(std::pair<unsigned int, unsigned int>(2,1));
-    padList.push_back(std::pair<unsigned int, unsigned int>(3,1));
-    padList.push_back(std::pair<unsigned int, unsigned int>(1,1));
-
-    descriptor.m_Parameters.m_PadList = padList;
-    armnn::WorkloadInfo info;
-
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-
-    return result;
-}
-
-LayerTestResult<uint8_t, 2> PadUint82dTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
-}
-
-LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0, 1.0f);
-}
-
-LayerTestResult<uint8_t, 3> PadUint83dTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
-}
-
-LayerTestResult<uint8_t, 4> PadUint84dTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
-}
-
-template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 2>
-Pad2dTestCommon<armnn::DataType::QuantisedSymm16>(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset,
-    const float customPaddingValue);
-
-template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
-Pad3dTestCommon<armnn::DataType::QuantisedSymm16>(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset);
-
-template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 4>
-Pad4dTestCommon<armnn::DataType::QuantisedSymm16>(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset);
-
-LayerTestResult<float, 2> PadFloat322dTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
-}
-
-LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, 1.0f);
-}
-
-LayerTestResult<float, 3> PadFloat323dTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
-}
-
-LayerTestResult<float, 4> PadFloat324dTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        float scale,
-        int32_t offset,
-        float outScale,
-        int32_t outOffset,
-        const armnn::DataLayout layout,
-        float epsilon)
-{
-    // Width: 1
-    // Height: 1
-    // Channels: 3
-    // BatchSize: 1
-    unsigned int numberOfBatches = 1;
-    unsigned int numberOfChannels = 3;
-    unsigned int height = 1;
-    unsigned int width = 1;
-
-    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
-            numberOfBatches, numberOfChannels, height, width, layout);
-
-    // 0.00000001^2 + 0.00000002^2 + 0.00000003^2 < 1e-12
-    std::vector<float> inputValues
-    {
-        // Batch 0, Channel 0, Height (1) x Width (1)
-        0.00000001f,
-
-        // Batch 0, Channel 1, Height (1) x Width (1)
-        0.00000002f,
-
-        // Batch 0, Channel 2, Height (1) x Width (1)
-        0.00000003f,
-    };
-
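-    // Since the sum of squares is below epsilon, the L2 norm denominator clamps to sqrt(epsilon).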
-    const float approxInvL2Norm = 1.f / sqrtf(epsilon);
-    std::vector<float> expectedOutputValues
-    {
-        // Batch 0, Channel 0, Height (1) x Width (1)
-        0.00000001f * approxInvL2Norm,
-        0.00000002f * approxInvL2Norm,
-        0.00000003f * approxInvL2Norm,
-    };
-
-    return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
-                                              inputValues, outScale, outOffset, expectedOutputValues, layout,
-                                              epsilon);
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> L2Normalization1dTestCommon(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        float scale,
-        int32_t offset,
-        float outScale,
-        int32_t outOffset,
-        const armnn::DataLayout layout)
-{
-    // Width: 1
-    // Height: 1
-    // Channels: 10
-    // BatchSize: 1
-    unsigned int numberOfBatches = 1;
-    unsigned int numberOfChannels = 10;
-    unsigned int height = 1;
-    unsigned int width = 1;
-
-    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
-            numberOfBatches, numberOfChannels, height, width, layout);
-    std::vector<float> inputValues
-    {
-        // Batch 0, Channel 0, Height (1) x Width (1)
-        1.0f,
-
-        // Batch 0, Channel 1, Height (1) x Width (1)
-        2.0f,
-
-        // Batch 0, Channel 2, Height (1) x Width (1)
-        3.0f,
-
-        // Batch 0, Channel 3, Height (1) x Width (1)
-        4.0f,
-
-        // Batch 0, Channel 4, Height (1) x Width (1)
-        5.0f,
-
-        // Batch 0, Channel 5, Height (1) x Width (1)
-        6.0f,
-
-        // Batch 0, Channel 6, Height (1) x Width (1)
-        7.0f,
-
-        // Batch 0, Channel 7, Height (1) x Width (1)
-        8.0f,
-
-        // Batch 0, Channel 8, Height (1) x Width (1)
-        9.0f,
-
-        // Batch 0, Channel 9, Height (1) x Width (1)
-        10.0f
-    };
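-    // 1 / sqrt(1^2 + 2^2 + ... + 10^2) = 1 / sqrt(385) ~= 0.050964719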
-    const float approxInvL2Norm = 0.050964719f;
-    std::vector<float> expectedOutputValues
-    {
-        // Batch 0, Channel 0, Height (1) x Width (1)
-        1.0f * approxInvL2Norm,
-        2.0f * approxInvL2Norm,
-        3.0f * approxInvL2Norm,
-        4.0f * approxInvL2Norm,
-        5.0f * approxInvL2Norm,
-        6.0f * approxInvL2Norm,
-        7.0f * approxInvL2Norm,
-        8.0f * approxInvL2Norm,
-        9.0f * approxInvL2Norm,
-        10.0f * approxInvL2Norm
-    };
-
-    return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
-                                              inputValues, outScale, outOffset, expectedOutputValues, layout);
-}
-
-LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        const armnn::DataLayout layout)
-{
-    // Dummy descriptor to get the default value of epsilon.
-    armnn::L2NormalizationDescriptor descriptor;
-
-    return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
-                                                                      layout, descriptor.m_Eps);
-}
-
-LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-        const armnn::DataLayout layout)
-{
-    return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
-                                                                      layout, 1e-9f);
-}
-
-LayerTestResult<float, 4> L2Normalization1dTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout layout)
-{
-    return L2Normalization1dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
-                                                                 layout);
-}
-
-LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout layout)
-{
-    return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
-                                                                         layout);
-}
-
-LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout layout)
-{
-    return L2Normalization1dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
-                                                                         1.f/128, 128, layout);
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> L2Normalization2dTestCommon(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float scale,
-    int32_t offset,
-    float outScale,
-    int32_t outOffset,
-    const armnn::DataLayout layout)
-{
-    // Width: 5
-    // Height: 1
-    // Channels: 2
-    // BatchSize: 1
-    unsigned int numberOfBatches = 1;
-    unsigned int numberOfChannels = 2;
-    unsigned int height = 1;
-    unsigned int width = 5;
-
-    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
-            numberOfBatches, numberOfChannels, height, width, layout);
-    std::vector<float> inputValues
-    {
-        // Batch 0, Channel 0, Height (1) x Width (5)
-        1.0f, 3.0f, 5.0f, 7.0f,  9.0f,
-
-        // Batch 0, Channel 1, Height (1) x Width (5)
-        2.0f, 4.0f, 6.0f, 8.0f, 10.0f
-    };
-    std::vector<float> expectedOutputValues
-    {
-        // Batch 0, Channel 0, Height (1) x Width (5)
-        1.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
-        3.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
-        5.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
-        7.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
-        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
-
-        // Batch 0, Channel 1, Height (1) x Width (5)
-        2.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
-        4.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
-        6.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
-        8.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
-        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
-    };
-
-    return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
-                                              inputValues, outScale, outOffset, expectedOutputValues, layout);
-}
-
-LayerTestResult<float, 4> L2Normalization2dTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout layout)
-{
-    return L2Normalization2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
-                                                                 layout);
-}
-
-LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout layout)
-{
-    return L2Normalization2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
-                                                                         layout);
-}
-
-LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout layout)
-{
-    return L2Normalization2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
-                                                                         1.f/128, 128, layout);
-}
-
-LayerTestResult<float, 2> L2Normalization2dShapeTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const armnn::DataLayout layout = armnn::DataLayout::NHWC;
-    const armnn::TensorShape inputOutputTensorShape = armnn::TensorShape({ 5, 2 });
-
-    std::vector<float> inputData
-    {
-        1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f
-    };
-    std::vector<float> expectedOutputData
-    {
-        1.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
-        2.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
-        3.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
-        4.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
-        5.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
-        6.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
-        7.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
-        8.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
-        9.0f  * CalcInvL2Norm({ 9.0f, 10.0f }),
-        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
-    };
-
-    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
-    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
-
-    auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, QuantizedVector<float>(
-                                                             inputTensorInfo.GetQuantizationScale(),
-                                                             inputTensorInfo.GetQuantizationOffset(),
-                                                             inputData));
-
-    LayerTestResult<float, 2> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, QuantizedVector<float>(
-                                                                   outputTensorInfo.GetQuantizationScale(),
-                                                                   outputTensorInfo.GetQuantizationOffset(),
-                                                                   expectedOutputData));
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::L2NormalizationQueueDescriptor descriptor;
-    descriptor.m_Parameters.m_Eps = 1e-12f;
-    descriptor.m_Parameters.m_DataLayout = layout;
-    armnn::WorkloadInfo info;
-
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
-
-    workload->PostAllocationConfigure();
-    ExecuteWorkload(*workload, memoryManager);
-
-    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
-
-    return result;
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> L2Normalization3dTestCommon(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float scale,
-    int32_t offset,
-    float outScale,
-    int32_t outOffset,
-    const armnn::DataLayout layout)
-{
-    // Width: 3
-    // Height: 4
-    // Channels: 2
-    // BatchSize: 1
-    unsigned int numberOfBatches = 1;
-    unsigned int numberOfChannels = 2;
-    unsigned int height = 4;
-    unsigned int width = 3;
-
-    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
-            numberOfBatches, numberOfChannels, height, width, layout);
-    std::vector<float> inputValues
-    {
-        // Batch 0, Channel 0, Height (4) x Width (3)
-        119.0f,  21.0f, 150.0f,
-        149.0f,  32.0f, 179.0f,
-        15.0f, 227.0f, 141.0f,
-        147.0f, 199.0f, 220.0f,
-
-        // Batch 0, Channel 1, Height (4) x Width (3)
-        110.0f, 140.0f,  73.0f,
-        211.0f, 212.0f,  89.0f,
-        24.0f, 138.0f, 188.0f,
-        162.0f,  12.0f, 161.0f
-    };
-    std::vector<float> expectedOutputValues
-    {
-        // Batch 0, Channel 0, Height (4) x Width (3)
-        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
-        21.0f * CalcInvL2Norm({  21.0f, 140.0f }),
-        150.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
-        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
-        32.0f * CalcInvL2Norm({  32.0f, 212.0f }),
-        179.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
-        15.0f * CalcInvL2Norm({  15.0f,  24.0f }),
-        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
-        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
-        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
-        199.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
-        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
-
-        // Batch 0, Channel 1, Height (4) x Width (3)
-        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
-        140.0f * CalcInvL2Norm({  21.0f, 140.0f }),
-        73.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
-        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
-        212.0f * CalcInvL2Norm({  32.0f, 212.0f }),
-        89.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
-        24.0f * CalcInvL2Norm({  15.0f,  24.0f }),
-        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
-        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
-        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
-        12.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
-        161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
-    };
-
-    return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
-                                              inputValues, outScale, outOffset, expectedOutputValues, layout);
-}
-
-LayerTestResult<float, 4> L2Normalization3dTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout layout)
-{
-    return L2Normalization3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
-                                                                 layout);
-}
-
-LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout layout)
-{
-    return L2Normalization3dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
-                                                                         layout);
-}
-
-LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout layout)
-{
-    return L2Normalization3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
-                                                                         1.f/128, 128, layout);
-}
+LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout layout)
+{
+    return L2Normalization3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
+                                                                         1.f/128, 128, layout);
+}
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<T, 4> L2Normalization4dTestCommon(
@@ -7767,574 +5846,295 @@ LayerTestResult<T, 4> L2Normalization4dTestCommon(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float scale,
     int32_t offset,
-    float outScale,
-    int32_t outOffset,
-    const armnn::DataLayout layout)
-{
-    // Width: 3
-    // Height: 4
-    // Channels: 3
-    // BatchSize: 2
-    unsigned int numberOfBatches = 2;
-    unsigned int numberOfChannels = 3;
-    unsigned int height = 4;
-    unsigned int width = 3;
-
-    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
-            numberOfBatches, numberOfChannels, height, width, layout);
-    std::vector<float> inputValues
-    {
-        // Batch 0, Channel 0, Height (4) x Width (3)
-        235.0f,  46.0f, 178.0f,
-        100.0f, 123.0f,  19.0f,
-        172.0f,  74.0f, 250.0f,
-        6.0f, 195.0f,  80.0f,
-
-        // Batch 0, Channel 1, Height (4) x Width (3)
-        113.0f,  95.0f, 202.0f,
-        77.0f, 114.0f,  71.0f,
-        122.0f, 246.0f, 166.0f,
-        82.0f,  28.0f,  37.0f,
-
-        // Batch 0, Channel 2, Height (4) x Width (3)
-        56.0f, 170.0f, 162.0f,
-        194.0f,  89.0f, 254.0f,
-        12.0f, 209.0f, 200.0f,
-        1.0f,  64.0f,  54.0f,
-
-        // Batch 1, Channel 0, Height (4) x Width (3)
-        67.0f,  90.0f,  49.0f,
-        7.0f, 163.0f,  18.0f,
-        25.0f, 117.0f, 103.0f,
-        247.0f,  59.0f, 189.0f,
-
-        // Batch 1, Channel 1, Height (4) x Width (3)
-        239.0f, 104.0f, 199.0f,
-        17.0f, 124.0f, 153.0f,
-        222.0f, 217.0f, 75.0f,
-        32.0f, 126.0f, 21.0f,
-
-        // Batch 1, Channel 2, Height (4) x Width (3)
-        97.0f, 145.0f, 215.0f,
-        115.0f, 116.0f, 238.0f,
-        226.0f,  16.0f, 132.0f,
-        92.0f, 125.0f,  88.0f
-    };
-    std::vector<float> expectedOutputValues
-    {
-        // Batch 0, Channel 0, Height (4) x Width (3)
-        235.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
-        46.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
-        178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
-        100.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
-        123.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
-        19.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
-        172.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
-        74.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
-        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
-        6.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
-        195.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
-        80.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
-
-        // Batch 0, Channel 1, Height (4) x Width (3)
-        113.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
-        95.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
-        202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
-        77.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
-        114.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
-        71.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
-        122.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
-        246.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
-        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
-        82.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
-        28.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
-        37.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
-
-        // Batch 0, Channel 2, Height (4) x Width (3)
-        56.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
-        170.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
-        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
-        194.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
-        89.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
-        254.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
-        12.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
-        209.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
-        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
-        1.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
-        64.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
-        54.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
-
-        // Batch 1, Channel 0, Height (4) x Width (3)
-        67.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
-        90.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
-        49.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
-        7.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
-        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
-        18.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
-        25.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
-        117.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
-        103.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
-        247.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
-        59.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
-        189.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),
-
-        // Batch 1, Channel 1, Height (4) x Width (3)
-        239.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
-        104.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
-        199.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
-        17.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
-        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
-        153.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
-        222.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
-        217.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
-        75.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
-        32.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
-        126.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
-        21.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),
-
-        // Batch 1, Channel 2, Height (4) x Width (3)
-        97.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
-        145.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
-        215.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
-        115.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
-        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
-        238.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
-        226.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
-        16.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
-        132.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
-        92.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
-        125.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
-        88.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f })
-    };
-
-    return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
-                                              inputValues, outScale, outOffset, expectedOutputValues, layout);
-}
-
-LayerTestResult<float, 4> L2Normalization4dTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout layout)
-{
-    return L2Normalization4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
-                                                                 layout);
-}
-
-LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout layout)
-{
-    return L2Normalization4dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
-                                                                         layout);
-}
-
-LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout layout)
-{
-    return L2Normalization4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
-                                                                         1.f/128, 128, layout);
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> ConstantTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset)
-{
-    constexpr unsigned int inputWidth = 3;
-    constexpr unsigned int inputHeight = 4;
-    constexpr unsigned int inputChannels = 3;
-    constexpr unsigned int inputBatchSize = 2;
-
-    constexpr unsigned int outputWidth = inputWidth;
-    constexpr unsigned int outputHeight = inputHeight;
-    constexpr unsigned int outputChannels = inputChannels;
-    constexpr unsigned int outputBatchSize = inputBatchSize;
-
-    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-                                        ArmnnType, qScale, qOffset);
-
-    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-                                         ArmnnType, qScale, qOffset);
+    float outScale,
+    int32_t outOffset,
+    const armnn::DataLayout layout)
+{
+    // Width: 3
+    // Height: 4
+    // Channels: 3
+    // BatchSize: 2
+    unsigned int numberOfBatches = 2;
+    unsigned int numberOfChannels = 3;
+    unsigned int height = 4;
+    unsigned int width = 3;
 
-    // Set quantization parameters if the requested type is a quantized type.
-    if(armnn::IsQuantizedType<T>())
+    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
+            numberOfBatches, numberOfChannels, height, width, layout);
+    std::vector<float> inputValues
     {
-        inputTensorInfo.SetQuantizationScale(qScale);
-        inputTensorInfo.SetQuantizationOffset(qOffset);
-        outputTensorInfo.SetQuantizationScale(qScale);
-        outputTensorInfo.SetQuantizationOffset(qOffset);
-    }
-
-    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
+        // Batch 0, Channel 0, Height (4) x Width (3)
         235.0f,  46.0f, 178.0f,
         100.0f, 123.0f,  19.0f,
         172.0f,  74.0f, 250.0f,
-          6.0f, 195.0f,  80.0f,
+        6.0f, 195.0f,  80.0f,
 
-        // Batch 0, Channel 1
+        // Batch 0, Channel 1, Height (4) x Width (3)
         113.0f,  95.0f, 202.0f,
-         77.0f, 114.0f,  71.0f,
+        77.0f, 114.0f,  71.0f,
         122.0f, 246.0f, 166.0f,
-         82.0f,  28.0f,  37.0f,
+        82.0f,  28.0f,  37.0f,
 
-        // Batch 0, Channel 2
-         56.0f, 170.0f, 162.0f,
+        // Batch 0, Channel 2, Height (4) x Width (3)
+        56.0f, 170.0f, 162.0f,
         194.0f,  89.0f, 254.0f,
-         12.0f, 209.0f, 200.0f,
-          1.0f,  64.0f,  54.0f,
+        12.0f, 209.0f, 200.0f,
+        1.0f,  64.0f,  54.0f,
 
-        // Batch 1, Channel 0
-         67.0f,  90.0f,  49.0f,
-          7.0f, 163.0f,  18.0f,
-         25.0f, 117.0f, 103.0f,
+        // Batch 1, Channel 0, Height (4) x Width (3)
+        67.0f,  90.0f,  49.0f,
+        7.0f, 163.0f,  18.0f,
+        25.0f, 117.0f, 103.0f,
         247.0f,  59.0f, 189.0f,
 
-        // Batch 1, Channel 1
+        // Batch 1, Channel 1, Height (4) x Width (3)
         239.0f, 104.0f, 199.0f,
-         17.0f, 124.0f, 153.0f,
+        17.0f, 124.0f, 153.0f,
         222.0f, 217.0f, 75.0f,
-         32.0f, 126.0f, 21.0f,
+        32.0f, 126.0f, 21.0f,
 
-        // Batch 1, Channel 2
-         97.0f, 145.0f, 215.0f,
+        // Batch 1, Channel 2, Height (4) x Width (3)
+        97.0f, 145.0f, 215.0f,
         115.0f, 116.0f, 238.0f,
         226.0f,  16.0f, 132.0f,
-         92.0f, 125.0f,  88.0f,
-    })));
-
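-    // A Constant workload has no inputs, so its output is expected to reproduce the embedded tensor exactly.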
-    LayerTestResult<T, 4> result(outputTensorInfo);
-    result.outputExpected = input;
-
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
-    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
-
-    armnn::ConstantQueueDescriptor descriptor;
-    descriptor.m_LayerOutput = &constantTensor;
-
-    armnn::WorkloadInfo info;
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
-
-    outputHandle->Allocate();
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-    return result;
-}
-
-LayerTestResult<float, 4> ConstantTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
-}
-
-LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
-}
-
-LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
-}
-
-LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    unsigned int outputWidth = 3;
-    unsigned int outputHeight = 6;
-    unsigned int outputChannels = 3;
-
-    unsigned int inputWidth1 = 3;
-    unsigned int inputHeight1 = 6;
-    unsigned int inputChannels1 = 2;
-
-    unsigned int inputWidth2 = 3;
-    unsigned int inputHeight2 = 6;
-    unsigned int inputChannels2 = 1;
-
-    // Defines the tensor descriptors.
-    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
-    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
-    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
-
-    // Quantized input1 tensor. Range [-3, 1]
-    const float inputScale1 = 0.015686f;
-    const int32_t inputOffset1 = 192;
-
-    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
-    {
-        1, 2, 3,
-        4, 5, 6,
-        7, 8, 9,
-        10, 11, 12,
-        13, 14, 15,
-        16, 17, 18,
-
-        19, 20, 21,
-        22, 23, 24,
-        25, 26, 27,
-        28, 29, 30,
-        31, 32, 33,
-        34, 35, 36,
-    })
-    );
-
-    // Quatized input2 tensor. Range [-1, 4]
-    const float inputScale2 = 0.019608f;
-    const int32_t inputOffset2 = 50;
-
-    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
-    {
-        37, 38, 39,
-        40, 41, 42,
-        43, 44, 45,
-        46, 47, 48,
-        49, 50, 51,
-        52, 53, 54,
-    })
-    );
-
-    // Output has the same quantization parameters than input1,
-    // so that only the requantization of input2 is required
-    const float outputScale = 0.015686f;
-    const int32_t outputOffset = 192;
-
-    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
-
-    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
+        92.0f, 125.0f,  88.0f
+    };
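+    // Each expected value below is the input element scaled by the reciprocal of the
+    // L2 norm taken across the channel dimension at the same (batch, height, width)
+    // position; e.g. the first element works out as
+    // 235.0f / sqrt(235^2 + 113^2 + 56^2) ~= 0.881.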
+    std::vector<float> expectedOutputValues
     {
-        1, 2, 3,
-        4, 5, 6,
-        7, 8, 9,
-        10, 11, 12,
-        13, 14, 15,
-        16, 17, 18,
-
-        19, 20, 21,
-        22, 23, 24,
-        25, 26, 27,
-        28, 29, 30,
-        31, 32, 33,
-        34, 35, 36,
-
-        176, 177, 178,
-        179, 181, 182,
-        183, 184, 186,
-        187, 188, 189,
-        191, 192, 193,
-        195, 196, 197,
-    })
-    );
-
-    outputTensorInfo.SetQuantizationScale(outputScale);
-    outputTensorInfo.SetQuantizationOffset(outputOffset);
-    inputTensorInfo1.SetQuantizationScale(inputScale1);
-    inputTensorInfo1.SetQuantizationOffset(inputOffset1);
-    inputTensorInfo2.SetQuantizationScale(inputScale2);
-    inputTensorInfo2.SetQuantizationOffset(inputOffset2);
-
-    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
-    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
-
-    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
-    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
-
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    bool subTensorsSupported = workloadFactory.SupportsSubTensors();
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
-            subTensorsSupported ?
-            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
-            workloadFactory.CreateTensorHandle(inputTensorInfo1);
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
-            subTensorsSupported ?
-            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
-            workloadFactory.CreateTensorHandle(inputTensorInfo2);
-
-    armnn::ConcatQueueDescriptor data;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
-    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
-    data.m_ViewOrigins.push_back(window1);
-    data.m_ViewOrigins.push_back(window2);
+        // Batch 0, Channel 0, Height (4) x Width (3)
+        235.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
+        46.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
+        178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
+        100.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
+        123.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
+        19.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
+        172.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
+        74.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
+        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
+        6.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
+        195.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
+        80.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
+        // Batch 0, Channel 1, Height (4) x Width (3)
+        113.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
+        95.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
+        202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
+        77.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
+        114.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
+        71.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
+        122.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
+        246.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
+        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
+        82.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
+        28.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
+        37.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
 
-    inputHandle1->Allocate();
-    inputHandle2->Allocate();
-    outputHandle->Allocate();
+        // Batch 0, Channel 2, Height (4) x Width (3)
+        56.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
+        170.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
+        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
+        194.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
+        89.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
+        254.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
+        12.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
+        209.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
+        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
+        1.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
+        64.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
+        54.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
 
-    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
-    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
+        // Batch 1, Channel 0, Height (4) x Width (3)
+        67.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
+        90.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
+        49.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
+        7.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
+        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
+        18.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
+        25.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
+        117.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
+        103.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
+        247.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
+        59.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
+        189.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),
 
-    workload->PostAllocationConfigure();
-    workload->Execute();
+        // Batch 1, Channel 1, Height (4) x Width (3)
+        239.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
+        104.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
+        199.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
+        17.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
+        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
+        153.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
+        222.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
+        217.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
+        75.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
+        32.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
+        126.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
+        21.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),
 
-    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
+        // Batch 1, Channel 2, Height (4) x Width (3)
+        97.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
+        145.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
+        215.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
+        115.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
+        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
+        238.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
+        226.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
+        16.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
+        132.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
+        92.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
+        125.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
+        88.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f })
+    };
 
-    return ret;
+    return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, inputOutputShape, scale, offset,
+                                              inputValues, outScale, outOffset, expectedOutputValues, layout);
 }
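+// For reference, the expected values above are built with CalcInvL2Norm; a minimal
+// sketch of its assumed reduce-and-invert shape (the actual helper lives elsewhere
+// in the test sources) would be:
+//
+//     float CalcInvL2Norm(std::initializer_list<float> elements)
+//     {
+//         float reduction = 0.0f;
+//         for (float v : elements)
+//         {
+//             reduction += v * v;
+//         }
+//         return 1.0f / sqrtf(reduction);
+//     }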
 
-LayerTestResult<uint8_t, 3> ConcatUint8Test(
+LayerTestResult<float, 4> L2Normalization4dTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout layout)
 {
-    unsigned int outputWidth = 3;
-    unsigned int outputHeight = 6;
-    unsigned int outputChannels = 3;
-
-    unsigned int inputWidth1 = 3;
-    unsigned int inputHeight1 = 6;
-    unsigned int inputChannels1 = 2;
-
-    unsigned int inputWidth2 = 3;
-    unsigned int inputHeight2 = 6;
-    unsigned int inputChannels2 = 1;
-
-    // Defines the tensor descriptors.
-    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
-    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
-    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
+    return L2Normalization4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, 0.f, 0,
+                                                                 layout);
+}
 
-    // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
-    const float scale = 0.13497836f;
-    const int32_t offset = -7;
+LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout layout)
+{
+    return L2Normalization4dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0, 1.f, 0,
+                                                                         layout);
+}
 
-    outputTensorInfo.SetQuantizationScale(scale);
-    outputTensorInfo.SetQuantizationOffset(offset);
-    inputTensorInfo1.SetQuantizationScale(scale);
-    inputTensorInfo1.SetQuantizationOffset(offset);
-    inputTensorInfo2.SetQuantizationScale(scale);
-    inputTensorInfo2.SetQuantizationOffset(offset);
+LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::DataLayout layout)
+{
+    return L2Normalization4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.f, 0,
+                                                                         1.f/128, 128, layout);
+}
 
-    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> ConstantTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset)
+{
+    constexpr unsigned int inputWidth = 3;
+    constexpr unsigned int inputHeight = 4;
+    constexpr unsigned int inputChannels = 3;
+    constexpr unsigned int inputBatchSize = 2;
 
-    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
-        {
-            1, 2, 3,
-            4, 5, 6,
-            7, 8, 9,
-            10, 11, 12,
-            13, 14, 15,
-            16, 17, 18,
+    constexpr unsigned int outputWidth = inputWidth;
+    constexpr unsigned int outputHeight = inputHeight;
+    constexpr unsigned int outputChannels = inputChannels;
+    constexpr unsigned int outputBatchSize = inputBatchSize;
 
-            19, 20, 21,
-            22, 23, 24,
-            25, 26, 27,
-            28, 29, 30,
-            31, 32, 33,
-            34, 35, 36,
+    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
+                                        ArmnnType, qScale, qOffset);
 
-            37, 38, 39,
-            40, 41, 42,
-            43, 44, 45,
-            46, 47, 48,
-            49, 50, 51,
-            52, 53, 54,
-        })
-    );
+    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
+                                         ArmnnType, qScale, qOffset);
 
-    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
     {
-        1, 2, 3,
-        4, 5, 6,
-        7, 8, 9,
-        10, 11, 12,
-        13, 14, 15,
-        16, 17, 18,
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
 
-        19, 20, 21,
-        22, 23, 24,
-        25, 26, 27,
-        28, 29, 30,
-        31, 32, 33,
-        34, 35, 36,
-    })
-    );
+    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
+        QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        235.0f,  46.0f, 178.0f,
+        100.0f, 123.0f,  19.0f,
+        172.0f,  74.0f, 250.0f,
+          6.0f, 195.0f,  80.0f,
 
-    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
-    {
-        37, 38, 39,
-        40, 41, 42,
-        43, 44, 45,
-        46, 47, 48,
-        49, 50, 51,
-        52, 53, 54,
-    })
-    );
+        // Batch 0, Channel 1
+        113.0f,  95.0f, 202.0f,
+         77.0f, 114.0f,  71.0f,
+        122.0f, 246.0f, 166.0f,
+         82.0f,  28.0f,  37.0f,
 
-    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
-    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
+        // Batch 0, Channel 2
+         56.0f, 170.0f, 162.0f,
+        194.0f,  89.0f, 254.0f,
+         12.0f, 209.0f, 200.0f,
+          1.0f,  64.0f,  54.0f,
 
-    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
-    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
+        // Batch 1, Channel 0
+         67.0f,  90.0f,  49.0f,
+          7.0f, 163.0f,  18.0f,
+         25.0f, 117.0f, 103.0f,
+        247.0f,  59.0f, 189.0f,
 
+        // Batch 1, Channel 1
+        239.0f, 104.0f, 199.0f,
+         17.0f, 124.0f, 153.0f,
+        222.0f, 217.0f, 75.0f,
+         32.0f, 126.0f, 21.0f,
 
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+        // Batch 1, Channel 2
+         97.0f, 145.0f, 215.0f,
+        115.0f, 116.0f, 238.0f,
+        226.0f,  16.0f, 132.0f,
+         92.0f, 125.0f,  88.0f,
+    })));
 
-    bool subTensorsSupported = workloadFactory.SupportsSubTensors();
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = input;
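+    // A Constant layer takes no inputs; the workload writes m_LayerOutput straight
+    // into its output tensor, so the expected output is simply the input data above.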
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
-        subTensorsSupported ?
-            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
-            workloadFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
-        subTensorsSupported ?
-            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
-            workloadFactory.CreateTensorHandle(inputTensorInfo2);
+    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
+    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
 
+    armnn::ConstantQueueDescriptor descriptor;
+    descriptor.m_LayerOutput = &constantTensor;
 
-    armnn::ConcatQueueDescriptor data;
     armnn::WorkloadInfo info;
-    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
-    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
-    data.m_ViewOrigins.push_back(window1);
-    data.m_ViewOrigins.push_back(window2);
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
 
-    inputHandle1->Allocate();
-    inputHandle2->Allocate();
     outputHandle->Allocate();
 
-    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
-    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
-
     workload->PostAllocationConfigure();
     workload->Execute();
 
-    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
 
-    return ret;
+LayerTestResult<float, 4> ConstantTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
-LayerTestResult<uint16_t, 3> ConcatUint16Test(
+LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
+}
+
+LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
+}
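+
+// With a quantization scale of 1.0f and an offset of 0, quantization is effectively
+// the identity mapping (up to rounding), so the quantized Constant tests above can
+// reuse the float input values unchanged.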
+
+LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
@@ -8351,24 +6151,15 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
     unsigned int inputChannels2 = 1;
 
     // Defines the tensor descriptors.
-    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16);
-    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16);
-    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16);
-
-    // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
-    const float scale = 0.13497836f;
-    const int32_t offset = -7;
-
-    outputTensorInfo.SetQuantizationScale(scale);
-    outputTensorInfo.SetQuantizationOffset(offset);
-    inputTensorInfo1.SetQuantizationScale(scale);
-    inputTensorInfo1.SetQuantizationOffset(offset);
-    inputTensorInfo2.SetQuantizationScale(scale);
-    inputTensorInfo2.SetQuantizationOffset(offset);
+    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
+    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
+    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
 
-    LayerTestResult<uint16_t, 3> ret(outputTensorInfo);
+    // Quantized input1 tensor. Range [-3, 1]
+    const float inputScale1 = 0.015686f;
+    const int32_t inputOffset1 = 192;
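+    // (The scale follows from the range: (1 - (-3)) / 255 ~= 0.015686; the offset of
+    //  192 sits near 3 / scale, so real values recover as scale * (q - offset).)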
 
-    ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
+    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
     {
         1, 2, 3,
         4, 5, 6,
@@ -8383,16 +6174,32 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
         28, 29, 30,
         31, 32, 33,
         34, 35, 36,
+    })
+    );
+
+    // Quantized input2 tensor. Range [-1, 4]
+    const float inputScale2 = 0.019608f;
+    const int32_t inputOffset2 = 50;
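+    // (Likewise: (4 - (-1)) / 255 ~= 0.019608, with the offset of 50 roughly 1 / scale.)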
 
+    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
+    {
         37, 38, 39,
         40, 41, 42,
         43, 44, 45,
         46, 47, 48,
         49, 50, 51,
         52, 53, 54,
-    }));
+    })
+    );
 
-    auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
+    // Output has the same quantization parameters as input1,
+    // so that only the requantization of input2 is required.
+    const float outputScale = 0.015686f;
+    const int32_t outputOffset = 192;
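+    // (Worked example of the requantization: input2's first element, 37, dequantizes
+    //  to 0.019608f * (37 - 50) ~= -0.255 and requantizes to -0.255 / 0.015686f + 192
+    //  ~= 176, the first value of the third output channel in the expected data below.)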
+
+    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
+
+    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
     {
         1, 2, 3,
         4, 5, 6,
@@ -8407,17 +6214,22 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
         28, 29, 30,
         31, 32, 33,
         34, 35, 36,
-    }));
 
-    auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
-    {
-        37, 38, 39,
-        40, 41, 42,
-        43, 44, 45,
-        46, 47, 48,
-        49, 50, 51,
-        52, 53, 54,
-    }));
+        176, 177, 178,
+        179, 181, 182,
+        183, 184, 186,
+        187, 188, 189,
+        191, 192, 193,
+        195, 196, 197,
+    })
+    );
+
+    outputTensorInfo.SetQuantizationScale(outputScale);
+    outputTensorInfo.SetQuantizationOffset(outputOffset);
+    inputTensorInfo1.SetQuantizationScale(inputScale1);
+    inputTensorInfo1.SetQuantizationOffset(inputOffset1);
+    inputTensorInfo2.SetQuantizationScale(inputScale2);
+    inputTensorInfo2.SetQuantizationOffset(inputOffset2);
 
     std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
     armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
@@ -8425,7 +6237,6 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
     std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
     armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
 
-
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
     bool subTensorsSupported = workloadFactory.SupportsSubTensors();
@@ -8440,7 +6251,6 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo2);
 
-
     armnn::ConcatQueueDescriptor data;
     armnn::WorkloadInfo info;
     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
@@ -8467,662 +6277,273 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
     return ret;
 }
 
-namespace
-{
-template <typename T>
-LayerTestResult<T, 4> AdditionQuantizeTestHelper(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const unsigned int shape0[4],
-    const std::vector<T>& values0,
-    float scale0,
-    int32_t offset0,
-    const unsigned int shape1[4],
-    const std::vector<T> & values1,
-    float scale1,
-    int32_t offset1,
-    const unsigned int outShape[4],
-    const std::vector<T> & outValues,
-    float outScale,
-    int32_t outOffset)
-{
-    auto dataType = (std::is_same<T, uint8_t>::value ?
-                     armnn::DataType::QuantisedAsymm8 :
-                     armnn::DataType::QuantisedSymm16);
-
-    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
-    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
-    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
-
-    inputTensorInfo0.SetQuantizationScale(scale0);
-    inputTensorInfo0.SetQuantizationOffset(offset0);
-
-    inputTensorInfo1.SetQuantizationScale(scale1);
-    inputTensorInfo1.SetQuantizationOffset(offset1);
-
-    outputTensorInfo.SetQuantizationScale(outScale);
-    outputTensorInfo.SetQuantizationOffset(outOffset);
-
-    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
-
-    LayerTestResult<T, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::AdditionQueueDescriptor data;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(data,  info, inputTensorInfo0, inputHandle0.get());
-    AddInputToWorkload(data,  info, inputTensorInfo1, inputHandle1.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
-
-    inputHandle0->Allocate();
-    inputHandle1->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
-    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-
-    return result;
-}
-} // anonymous namespace
-
-LayerTestResult<uint8_t, 4> AdditionUint8Test(
+LayerTestResult<uint8_t, 3> ConcatUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    const unsigned int shape0[] = { 1, 2, 2, 3 };
-    const unsigned int shape1[] = { 1, 2, 2, 3 };
-
-    std::vector<uint8_t> input0(
-    {
-        63,  35,  77,  70,  56, 112, //  420, 224,  518,  469,  371, 763
-        203,  28, 252, 168, 245,  91  // 1400, 175, 1743, 1155, 1694, 616
-    });
+    unsigned int outputWidth = 3;
+    unsigned int outputHeight = 6;
+    unsigned int outputChannels = 3;
 
-    std::vector<uint8_t> input1(
-    {
-        21,   7, 175, 231, 175, 210, // 126,   28, 1204, 1596, 1204, 1449
-        126, 161,  63,  21, 105, 126  // 861, 1106,  420,  126,  714,  861
-    });
+    unsigned int inputWidth1 = 3;
+    unsigned int inputHeight1 = 6;
+    unsigned int inputChannels1 = 2;
 
-    std::vector<uint8_t> output(
-    {
-        81,  39, 249, 255, 228, 255, //  546,  252, 1722, 2065(clamped), 1575, 2212(clamped)
-        255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
-    });
+    unsigned int inputWidth2 = 3;
+    unsigned int inputHeight2 = 6;
+    unsigned int inputChannels2 = 1;
 
-    return AdditionQuantizeTestHelper(workloadFactory,
-                                      memoryManager,
-                                      shape0, input0, 7.0f, 3,
-                                      shape1, input1, 7.0f, 3,
-                                      shape0, output, 7.0f, 3);
-}
+    // Defines the tensor descriptors.
+    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
+    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
+    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
 
-LayerTestResult<int16_t, 4> AdditionInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 2, 2, 3 };
-    const unsigned int shape1[] = { 1, 2, 2, 3 };
+    // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
+    const float scale = 0.13497836f;
+    const int32_t offset = -7;
 
-    std::vector<int16_t> input0(
-        {
-            63,  35,  77,  70,  56, 112, //  441, 245,  539,  490,  392, 184
-            203,  28, 252, 168, 245,  91  // 1421, 196, 1764, 1176, 1715, 637
-        });
+    outputTensorInfo.SetQuantizationScale(scale);
+    outputTensorInfo.SetQuantizationOffset(offset);
+    inputTensorInfo1.SetQuantizationScale(scale);
+    inputTensorInfo1.SetQuantizationOffset(offset);
+    inputTensorInfo2.SetQuantizationScale(scale);
+    inputTensorInfo2.SetQuantizationOffset(offset);
 
-    std::vector<int16_t> input1(
-        {
-            21,   7, 175, 231, 175, 210, // 126,   28, 1204, 1596, 1204, 1449
-            126, 161,  63,  21, 105, 126  // 861, 1106,  420,  126,  714,  861
-        });
+    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
 
-    std::vector<int16_t> output(
+    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
         {
-            84,  42, 252, 301, 231, 322, //  588,  294, 1764, 2107(clamped), 1617, 2254(clamped)
-            329, 189, 315, 189, 350, 217, // 2303(clamped), 1323, 2205(clamped), 1323, 2450(clamped), 1519
-        });
-
-    return AdditionQuantizeTestHelper(workloadFactory,
-                                      memoryManager,
-                                      shape0, input0, 7.0f, 0,
-                                      shape1, input1, 7.0f, 0,
-                                      shape0, output, 7.0f, 0);
-}
-
-namespace
-{
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> MultiplicationQuantizeTestHelper(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const unsigned int shape0[4],
-    const std::vector<T> & values0,
-    float scale0,
-    int32_t offset0,
-    const unsigned int shape1[4],
-    const std::vector<T> & values1,
-    float scale1,
-    int32_t offset1,
-    const unsigned int outShape[4],
-    const std::vector<T> & outValues,
-    float outScale,
-    int32_t outOffset)
-{
-    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
-    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
-    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
-
-    inputTensorInfo0.SetQuantizationScale(scale0);
-    inputTensorInfo0.SetQuantizationOffset(offset0);
-
-    inputTensorInfo1.SetQuantizationScale(scale1);
-    inputTensorInfo1.SetQuantizationOffset(offset1);
-
-    outputTensorInfo.SetQuantizationScale(outScale);
-    outputTensorInfo.SetQuantizationOffset(outOffset);
-
-    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
-
-    LayerTestResult<T, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::MultiplicationQueueDescriptor data;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(data,  info, inputTensorInfo0, inputHandle0.get());
-    AddInputToWorkload(data,  info, inputTensorInfo1, inputHandle1.get());
-    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
-
-    inputHandle0->Allocate();
-    inputHandle1->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
-    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-
-    return result;
-}
-} // anonymous namespace
-
-LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    unsigned int batchSize = 1;
-    unsigned int channels = 2;
-    unsigned int height = 2;
-    unsigned int width = 3;
-    const unsigned int shape[] = { batchSize, channels, height, width };
-
-    // See dequantized values to the right.
-    std::vector<uint8_t> input0({
-         62,  37,   3, 172,  13, 111, // 244, 144,   8, 684,  48, 440,
-        188,  20,  73,  31,  23,  31  // 748,  76, 288, 120,  88, 120
-    });
-
-    // See dequantized values to the right.
-    std::vector<uint8_t> input1({
-        126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
-         48, 115, 151,  79,  78,  97  // 150, 351, 459, 243, 240, 297
-    });
-
-    // See dequantized values to the right.
-    std::vector<uint8_t> output(
-    {
-         64,  72,   0, 255,   8, 236, //  93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
-         77,  15,  92,  16,  10,  21, // 112200,  26676,        132192,           29160, 21120,  35640
-    });
-
-    // Scale/offset chosen to have output values out of range.
-    return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
-                                                                              memoryManager,
-                                                                              shape,
-                                                                              input0,
-                                                                              4.0f,
-                                                                              1,
-                                                                              shape,
-                                                                              input1,
-                                                                              3.0f,
-                                                                              -2,
-                                                                              shape,
-                                                                              output,
-                                                                              1366.255f,
-                                                                              -5);
-}
-
-LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 2, 2, 3 };
-    const unsigned int shape1[] = { 1, 1, 1, 1 };
-
-    std::vector<uint8_t> input0({
-        1, 2, 3,    4,  5,  6,
-        7, 8, 9,   10, 11, 12
-    });
-
-    std::vector<uint8_t> input1({2});
-
-    std::vector<uint8_t> output({
-        2,  4,   6,     8, 10, 12,
-        14, 16, 18,    20, 22, 24
-    });
-
-    return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
-                                                                              memoryManager,
-                                                                              shape0,
-                                                                              input0,
-                                                                              1.0f,
-                                                                              0,
-                                                                              shape1,
-                                                                              input1,
-                                                                              1.0f,
-                                                                              0,
-                                                                              shape0,
-                                                                              output,
-                                                                              1.0f,
-                                                                              0);
-}
-
-LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 2, 2, 3 };
-    const unsigned int shape1[] = { 1, 1, 1, 3 };
-
-    std::vector<uint8_t> input0({
-        1, 2, 3,    4,  5,  6,
-        7, 8, 9,   10, 11, 12
-    });
-
-    std::vector<uint8_t> input1({1, 2, 3});
-
-    std::vector<uint8_t> output({
-        1,  4,   9,     4, 10, 18,
-        7, 16,  27,    10, 22, 36
-    });
-
-    return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
-                                                                              memoryManager,
-                                                                              shape0,
-                                                                              input0,
-                                                                              1.0f,
-                                                                              0,
-                                                                              shape1,
-                                                                              input1,
-                                                                              1.0f,
-                                                                              0,
-                                                                              shape0,
-                                                                              output,
-                                                                              1.0f,
-                                                                              0);
-}
-
-LayerTestResult<int16_t, 4> MultiplicationInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape[] = { 1, 2, 2, 3 };
-
-    std::vector<int16_t> input0(
-    {
-        6,   7,  8,  9, 10, 11,
-        12, 13, 14, 15, 16, 17
-    });
-
-    std::vector<int16_t> input1(
-    {
-        1, 2, 3,  4,  5,  6,
-        7, 8, 9, 10, 11, 12
-    });
-
-    std::vector<int16_t> output(
-    {
-        6,   14,  24,  36,  50,  66,
-        84, 104, 126, 150, 176, 204
-    });
-
-    return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
-                                                                              memoryManager,
-                                                                              shape,
-                                                                              input0,
-                                                                              1.0f,
-                                                                              0,
-                                                                              shape,
-                                                                              input1,
-                                                                              1.0f,
-                                                                              0,
-                                                                              shape,
-                                                                              output,
-                                                                              1.0f,
-                                                                              0);
-}
-
-LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 2, 2, 3 };
-    const unsigned int shape1[] = { 1, 1, 1, 1 };
+            1, 2, 3,
+            4, 5, 6,
+            7, 8, 9,
+            10, 11, 12,
+            13, 14, 15,
+            16, 17, 18,
 
-    std::vector<int16_t> input0(
-    {
-        1, 2, 3,  4,  5,  6,
-        7, 8, 9, 10, 11, 12
-    });
+            19, 20, 21,
+            22, 23, 24,
+            25, 26, 27,
+            28, 29, 30,
+            31, 32, 33,
+            34, 35, 36,
 
-    std::vector<int16_t> input1({2});
+            37, 38, 39,
+            40, 41, 42,
+            43, 44, 45,
+            46, 47, 48,
+            49, 50, 51,
+            52, 53, 54,
+        })
+    );
 
-    std::vector<int16_t> output(
+    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
     {
-        2,   4,  6,  8, 10, 12,
-        14, 16, 18, 20, 22, 24
-    });
-
-    return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
-                                                                              memoryManager,
-                                                                              shape0,
-                                                                              input0,
-                                                                              1.0f,
-                                                                              0,
-                                                                              shape1,
-                                                                              input1,
-                                                                              1.0f,
-                                                                              0,
-                                                                              shape0,
-                                                                              output,
-                                                                              1.0f,
-                                                                              0);
-}
+        1, 2, 3,
+        4, 5, 6,
+        7, 8, 9,
+        10, 11, 12,
+        13, 14, 15,
+        16, 17, 18,
 
-LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 2, 2, 3 };
-    const unsigned int shape1[] = { 1, 1, 1, 3 };
+        19, 20, 21,
+        22, 23, 24,
+        25, 26, 27,
+        28, 29, 30,
+        31, 32, 33,
+        34, 35, 36,
+    })
+    );
 
-    std::vector<int16_t> input0(
+    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
     {
-        1, 2, 3,  4,  5,  6,
-        7, 8, 9, 10, 11, 12
-    });
-
-    std::vector<int16_t> input1({1, 2, 3});
+        37, 38, 39,
+        40, 41, 42,
+        43, 44, 45,
+        46, 47, 48,
+        49, 50, 51,
+        52, 53, 54,
+    })
+    );
 
-    std::vector<int16_t> output(
-    {
-        1,  4,  9,  4, 10, 18,
-        7, 16, 27, 10, 22, 36
-    });
-
-    return MultiplicationQuantizeTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
-                                                                              memoryManager,
-                                                                              shape0,
-                                                                              input0,
-                                                                              1.0f,
-                                                                              0,
-                                                                              shape1,
-                                                                              input1,
-                                                                              1.0f,
-                                                                              0,
-                                                                              shape0,
-                                                                              output,
-                                                                              1.0f,
-                                                                              0);
-}
+    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; // Extent of the window is defined by the size of input[0].
+    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
 
-namespace
-{
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 4> SubtractionTestHelper(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const unsigned int shape0[4],
-    const std::vector<T>& values0,
-    float scale0,
-    int32_t offset0,
-    const unsigned int shape1[4],
-    const std::vector<T> & values1,
-    float scale1,
-    int32_t offset1,
-    const unsigned int outShape[4],
-    const std::vector<T> & outValues,
-    float outScale,
-    int32_t outOffset)
-{
-    armnn::TensorInfo inputTensorInfo0(4, shape0, ArmnnType);
-    armnn::TensorInfo inputTensorInfo1(4, shape1, ArmnnType);
-    armnn::TensorInfo outputTensorInfo(4, outShape, ArmnnType);
+    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; // Extent of the window is defined by the size of input[1].
+    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
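+    // (input1, with 2 channels, fills channels 0-1 of the 3-channel output; the
+    //  { 2, 0, 0 } origin places input2's single channel at channel 2.)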
 
-    inputTensorInfo0.SetQuantizationScale(scale0);
-    inputTensorInfo0.SetQuantizationOffset(offset0);
 
-    inputTensorInfo1.SetQuantizationScale(scale1);
-    inputTensorInfo1.SetQuantizationOffset(offset1);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
-    outputTensorInfo.SetQuantizationScale(outScale);
-    outputTensorInfo.SetQuantizationOffset(outOffset);
+    bool subTensorsSupported = workloadFactory.SupportsSubTensors();
 
-    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
+        subTensorsSupported ?
+            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
+            workloadFactory.CreateTensorHandle(inputTensorInfo1);
 
-    LayerTestResult<T, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
+        subTensorsSupported ?
+            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
+            workloadFactory.CreateTensorHandle(inputTensorInfo2);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
-    armnn::SubtractionQueueDescriptor data;
+    armnn::ConcatQueueDescriptor data;
     armnn::WorkloadInfo info;
-    AddInputToWorkload(data,  info, inputTensorInfo0, inputHandle0.get());
-    AddInputToWorkload(data,  info, inputTensorInfo1, inputHandle1.get());
+    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
+    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);
+    data.m_ViewOrigins.push_back(window1);
+    data.m_ViewOrigins.push_back(window2);
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
 
-    inputHandle0->Allocate();
     inputHandle1->Allocate();
+    inputHandle2->Allocate();
     outputHandle->Allocate();
 
-    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
-    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
+    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
 
     workload->PostAllocationConfigure();
     workload->Execute();
 
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
 
-    return result;
+    return ret;
 }
-} // anonymous namespace
 
-LayerTestResult<uint8_t, 4> SubtractionUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+LayerTestResult<uint16_t, 3> ConcatUint16Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    const unsigned int shape0[] = { 1, 1, 2, 2 };
-    const unsigned int shape1[] = { 1, 1, 2, 2 };
+    unsigned int outputWidth = 3;
+    unsigned int outputHeight = 6;
+    unsigned int outputChannels = 3;
+
+    unsigned int inputWidth1 = 3;
+    unsigned int inputHeight1 = 6;
+    unsigned int inputChannels1 = 2;
 
-    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
-    std::vector<uint8_t> input1({ 1, 2, 1, 2 });
-    std::vector<uint8_t> output({ 3, 3, 5, 5 });
+    unsigned int inputWidth2 = 3;
+    unsigned int inputHeight2 = 6;
+    unsigned int inputChannels2 = 1;
 
-    return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
-                                                                   memoryManager,
-                                                                   shape0, input0, 0.5f, 2,
-                                                                   shape1, input1, 1.0f, 0,
-                                                                   shape0, output, 1.0f, 0);
-}
+    // Defines the tensor descriptors.
+    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16);
+    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16);
+    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16);
 
-LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 1, 2, 2 };
-    const unsigned int shape1[] = { 1, 1, 1, 1 };
+    // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
+    const float scale = 0.13497836f;
+    const int32_t offset = -7;
 
-    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
-    std::vector<uint8_t> input1({ 2 });
-    std::vector<uint8_t> output({ 5, 6, 7, 8 });
+    outputTensorInfo.SetQuantizationScale(scale);
+    outputTensorInfo.SetQuantizationOffset(offset);
+    inputTensorInfo1.SetQuantizationScale(scale);
+    inputTensorInfo1.SetQuantizationOffset(offset);
+    inputTensorInfo2.SetQuantizationScale(scale);
+    inputTensorInfo2.SetQuantizationOffset(offset);
 
-    return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
-                                                                   memoryManager,
-                                                                   shape0, input0, 0.5f, 2,
-                                                                   shape1, input1, 1.0f, 0,
-                                                                   shape0, output, 1.0f, 3);
-}
+    LayerTestResult<uint16_t, 3> ret(outputTensorInfo);
 
-LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 1, 2, 2 };
-    const unsigned int shape1[] = { 1, 1, 2, 1 };
+    ret.outputExpected = MakeTensor<uint16_t, 3>(outputTensorInfo, std::vector<uint16_t>(
+    {
+        1, 2, 3,
+        4, 5, 6,
+        7, 8, 9,
+        10, 11, 12,
+        13, 14, 15,
+        16, 17, 18,
 
-    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
-    std::vector<uint8_t> input1({ 2, 1 });
-    std::vector<uint8_t> output({ 8, 11, 12, 15 });
+        19, 20, 21,
+        22, 23, 24,
+        25, 26, 27,
+        28, 29, 30,
+        31, 32, 33,
+        34, 35, 36,
 
-    return SubtractionTestHelper<armnn::DataType::QuantisedAsymm8>(workloadFactory,
-                                                                   memoryManager,
-                                                                   shape0, input0, 1.0f, 0,
-                                                                   shape1, input1, 1.0f, 0,
-                                                                   shape0, output, 1.0f, 0);
-}
+        37, 38, 39,
+        40, 41, 42,
+        43, 44, 45,
+        46, 47, 48,
+        49, 50, 51,
+        52, 53, 54,
+    }));
 
-LayerTestResult<float, 4> SubtractionTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 1, 2, 2 };
-    const unsigned int shape1[] = { 1, 1, 2, 2 };
+    auto input1 = MakeTensor<uint16_t, 3>(inputTensorInfo1, std::vector<uint16_t>(
+    {
+        1, 2, 3,
+        4, 5, 6,
+        7, 8, 9,
+        10, 11, 12,
+        13, 14, 15,
+        16, 17, 18,
 
-    std::vector<float> input0({ 1,  2, 3, 4 });
-    std::vector<float> input1({ 1, -1, 0, 2 });
-    std::vector<float> output({ 0,  3, 3, 2 });
+        19, 20, 21,
+        22, 23, 24,
+        25, 26, 27,
+        28, 29, 30,
+        31, 32, 33,
+        34, 35, 36,
+    }));
 
-    return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
-                                                           memoryManager,
-                                                           shape0, input0, 1.0f, 0,
-                                                           shape1, input1, 1.0f, 0,
-                                                           shape0, output, 1.0f, 0);
-}
+    auto input2 = MakeTensor<uint16_t, 3>(inputTensorInfo2, std::vector<uint16_t>(
+    {
+        37, 38, 39,
+        40, 41, 42,
+        43, 44, 45,
+        46, 47, 48,
+        49, 50, 51,
+        52, 53, 54,
+    }));
 
-LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 1, 2, 2 };
-    const unsigned int shape1[] = { 1, 1, 1, 1 };
+    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; // Extent of the window is defined by the size of input[0].
+    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
 
-    std::vector<float> input0({ 1,  2, 3, 4 });
-    std::vector<float> input1({ 10 });
-    std::vector<float> output({ -9,  -8, -7, -6 });
+    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; // Extent of the window is defined by the size of input[1].
+    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
 
-    return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
-                                                           memoryManager,
-                                                           shape0, input0, 1.0f, 0,
-                                                           shape1, input1, 1.0f, 0,
-                                                           shape0, output, 1.0f, 0);
-}
 
-LayerTestResult<float, 4> SubtractionBroadcastTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 1, 2, 2 };
-    const unsigned int shape1[] = { 1, 1, 1, 2 };
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
-    std::vector<float> input0({ 1,  2, 3, 4 });
-    std::vector<float> input1({ 10, -5 });
-    std::vector<float> output({ -9,  7, -7, 9 });
+    bool subTensorsSupported = workloadFactory.SupportsSubTensors();
 
-    return SubtractionTestHelper<armnn::DataType::Float32>(workloadFactory,
-                                                           memoryManager,
-                                                           shape0, input0, 1.0f, 0,
-                                                           shape1, input1, 1.0f, 0,
-                                                           shape0, output, 1.0f, 0);
-}
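+    // When the backend supports sub-tensors, the input handles are created as views
+    // into the output tensor, so each input aliases its region of the output's memory.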
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
+            subTensorsSupported ?
+            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
+            workloadFactory.CreateTensorHandle(inputTensorInfo1);
 
-LayerTestResult<int16_t, 4> SubtractionInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 1, 2, 2 };
-    const unsigned int shape1[] = { 1, 1, 2, 2 };
+    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
+            subTensorsSupported ?
+            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
+            workloadFactory.CreateTensorHandle(inputTensorInfo2);
 
-    std::vector<int16_t> input0({ 10, 12, 14, 16 });
-    std::vector<int16_t> input1({ 1, 2, 1, 2 });
-    std::vector<int16_t> output({ 3, 3, 5, 5 });
 
-    return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
-                                                                   memoryManager,
-                                                                   shape0, input0, 0.5f, 0,
-                                                                   shape1, input1, 1.0f, 0,
-                                                                   shape0, output, 1.0f, 0);
-}
+    armnn::ConcatQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
+    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
 
-LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 1, 2, 2 };
-    const unsigned int shape1[] = { 1, 1, 1, 1 };
+    data.m_ViewOrigins.push_back(window1);
+    data.m_ViewOrigins.push_back(window2);
 
-    std::vector<int16_t> input0({ 10, 12, 14, 16 });
-    std::vector<int16_t> input1({ 2 });
-    std::vector<int16_t> output({ 3, 4, 5, 6 });
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
 
-    return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
-                                                                   memoryManager,
-                                                                   shape0, input0, 0.5f, 0,
-                                                                   shape1, input1, 1.0f, 0,
-                                                                   shape0, output, 1.0f, 0);
-}
+    inputHandle1->Allocate();
+    inputHandle2->Allocate();
+    outputHandle->Allocate();
 
-LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    const unsigned int shape0[] = { 1, 1, 2, 2 };
-    const unsigned int shape1[] = { 1, 1, 2, 1 };
+    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
+    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
 
-    std::vector<int16_t> input0({ 10, 12, 14, 16 });
-    std::vector<int16_t> input1({ 2, 1 });
-    std::vector<int16_t> output({ 8, 11, 12, 15 });
+    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
 
-    return SubtractionTestHelper<armnn::DataType::QuantisedSymm16>(workloadFactory,
-                                                                   memoryManager,
-                                                                   shape0, input0, 1.0f, 0,
-                                                                   shape1, input1, 1.0f, 0,
-                                                                   shape0, output, 1.0f, 0);
+    return ret;
 }
 
 LayerTestResult<float, 4> BatchNormTest(
@@ -10021,111 +7442,6 @@ LayerTestResult<float, 2> FullyConnectedLargeTest(
     return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
 }
 
-LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    // Create Initial Tensor
-    // 1, 2, 3
-    // 4, 5, 6
-    // 7, 8, 9
-
-    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
-    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);
-
-    boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
-                                                            {1, 2, 3,
-                                                             4, 5, 6,
-                                                             7, 8, 9
-                                                            });
-
-    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
-            workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
-            workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);
-
-    // Apply MaxPool poolSize = 1x1, stride=2x2
-    // Result =
-    // 1, 3
-    // 7, 9
-    armnn::Pooling2dDescriptor descriptor;
-    descriptor.m_PoolHeight = 1;
-    descriptor.m_PoolWidth = 1;
-    descriptor.m_StrideX = 2;
-    descriptor.m_StrideY = 2;
-    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
-
-    armnn::Pooling2dQueueDescriptor queueDescriptor;
-    queueDescriptor.m_Parameters = descriptor;
-    armnn::WorkloadInfo workloadInfo;
-    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
-    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
-
-    // Create the MaxPool
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
-
-    //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
-    auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
-    boost::multi_array<float, 4> resultMaxPool;
-    resultMaxPool.resize(shape);
-
-
-    // Create addition with another tensor the same size
-    // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
-    // with the initial tensor.
-    // 12, 16
-    // 24, 28
-
-    armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
-    armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
-
-    boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
-                                                                    {12, 16,
-                                                                     24, 28,
-                                                                    });
-
-    // Expected output tensor after MaxPool and Addition.
-    LayerTestResult<float,4> addRet(addOutputTensorInfo);
-    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
-            {
-                    13, 19,
-                    31, 37
-            }));
-
-    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);
-
-    armnn::AdditionQueueDescriptor data;
-    armnn::WorkloadInfo info;
-
-    // Add the output of the MaxPool and the new tensor
-    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
-    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
-    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);
-
-    poolingInputHandle->Allocate();
-    poolingOutputHandle->Allocate();
-    addInputHandle->Allocate();
-    addOutputHandle->Allocate();
-
-    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
-    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());
-
-    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
-    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-    addWorkload->PostAllocationConfigure();
-    addWorkload->Execute();
-
-    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());
-
-    return addRet;
-}
-
 LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
index 235c5dc..bb21202 100644 (file)
@@ -7,6 +7,17 @@
 #include <armnn/ArmNN.hpp>
 #include <armnn/Tensor.hpp>
 
+#include <backendsCommon/test/layerTests/LayerTestResult.hpp>
+
+#include <backendsCommon/test/layerTests/AdditionTestImpl.hpp>
+#include <backendsCommon/test/layerTests/DivisionTestImpl.hpp>
+#include <backendsCommon/test/layerTests/EqualTestImpl.hpp>
+#include <backendsCommon/test/layerTests/GreaterTestImpl.hpp>
+#include <backendsCommon/test/layerTests/MaximumTestImpl.hpp>
+#include <backendsCommon/test/layerTests/MinimumTestImpl.hpp>
+#include <backendsCommon/test/layerTests/MultiplicationTestImpl.hpp>
+#include <backendsCommon/test/layerTests/SubtractionTestImpl.hpp>
+
 #include <Half.hpp>
 #include "TensorCopyUtils.hpp"
 #include "WorkloadTestUtils.hpp"
@@ -32,38 +43,6 @@ namespace armnn
 class IWorkloadFactory;
 }
 
-template <std::size_t n>
-boost::array<unsigned int, n> GetTensorShapeAsArray(const armnn::TensorInfo& tensorInfo)
-{
-    BOOST_ASSERT_MSG(n == tensorInfo.GetNumDimensions(),
-        "Attempting to construct a shape array of mismatching size");
-
-    boost::array<unsigned int, n> shape;
-    for (unsigned int i = 0; i < n; i++)
-    {
-        shape[i] = tensorInfo.GetShape()[i];
-    }
-    return shape;
-}
-
-template <typename T, std::size_t n>
-struct LayerTestResult
-{
-    LayerTestResult(const armnn::TensorInfo& outputInfo)
-    {
-        auto shape( GetTensorShapeAsArray<n>(outputInfo) );
-        output.resize(shape);
-        outputExpected.resize(shape);
-        supported = true;
-        compareBoolean = false;
-    }
-
-    boost::multi_array<T, n> output;
-    boost::multi_array<T, n> outputExpected;
-    bool supported;
-    bool compareBoolean;
-};
-
 LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -727,39 +706,6 @@ LayerTestResult<float, 3> ConcatTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
-LayerTestResult<float, 4> AdditionTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<float, 5> Addition5dTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<float, 4> AdditionBroadcastTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<float, 4> CompareAdditionTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::IWorkloadFactory& refWorkloadFactory);
-
-LayerTestResult<float, 4> SubtractionTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<float, 4> SubtractionBroadcastTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
 LayerTestResult<float, 4> CompareActivationTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -767,43 +713,6 @@ LayerTestResult<float, 4> CompareActivationTest(
     armnn::ActivationFunction f,
     unsigned int batchSize);
 
-LayerTestResult<float, 4> DivisionTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<float, 4> DivisionByZeroTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<float, 4> MultiplicationTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<float, 5> Multiplication5dTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<float, 4> CompareMultiplicationTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    armnn::IWorkloadFactory& refWorkloadFactory);
-
 LayerTestResult<float, 4> BatchNormTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -1177,54 +1086,6 @@ LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
-LayerTestResult<uint8_t, 4> AdditionUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<int16_t, 4> AdditionInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> SubtractionUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<int16_t, 4> SubtractionInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
 LayerTestResult<uint8_t, 4> CompareActivationUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -1243,54 +1104,6 @@ LayerTestResult<uint8_t, 2> CompareSoftmaxUint8Test(
     armnn::IWorkloadFactory& refWorkloadFactory,
     float beta);
 
-LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<int16_t, 4> MultiplicationInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> DivisionUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<int16_t, 4> DivisionInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
 LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -1417,54 +1230,6 @@ LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor);
 
-LayerTestResult<uint8_t, 4> EqualSimpleTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> EqualUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> GreaterSimpleTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> GreaterUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
 LayerTestResult<float, 2> FullyConnectedLargeTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -1647,34 +1412,6 @@ LayerTestResult<T, 3> MeanVts3Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
-LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
-    armnn::IWorkloadFactory & workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager);
-
-LayerTestResult<int16_t , 4> MinimumInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
 LayerTestResult<float, 4> SpaceToBatchNdSimpleFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
new file mode 100644 (file)
index 0000000..c6d3982
--- /dev/null
@@ -0,0 +1,617 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "AdditionTestImpl.hpp"
+
+#include "ElementwiseTestImpl.hpp"
+
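+// ElementwiseTestHelper is parameterised on the queue descriptor type; this
+// specialisation lets the shared helper create the matching Addition workload.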
+template<>
+std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::AdditionQueueDescriptor>(
+    const armnn::IWorkloadFactory& workloadFactory,
+    const armnn::WorkloadInfo& info,
+    const armnn::AdditionQueueDescriptor& descriptor)
+{
+    return workloadFactory.CreateAddition(descriptor, info);
+}
+
+LayerTestResult<float, 4> AdditionTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int batchSize = 2u;
+    unsigned int channels  = 2u;
+    unsigned int height    = 2u;
+    unsigned int width     = 3u;
+
+    unsigned int shape[] = { batchSize, channels, height, width };
+
+    std::vector<float> input1 =
+    {
+        0.0f, 2.0f, 1.0f,
+        0.2f, 1.0f, 2.0f,
+
+        1.0f, 2.0f, 1.0f,
+        0.2f, 1.0f, 2.0f,
+
+        0.0f, 2.0f, 1.0f,
+        4.2f, 1.0f, 2.0f,
+
+        0.0f, 0.0f, 1.0f,
+        0.2f, 1.0f, 2.0f,
+    };
+
+    std::vector<float> input2 =
+    {
+        1.0f, 2.0f,  1.0f,
+        0.0f, 1.0f,  2.0f,
+
+        1.0f, 2.0f, -2.0f,
+        0.2f, 1.0f,  2.0f,
+
+        0.0f, 2.0f,  1.0f,
+        4.2f, 0.0f, -3.0f,
+
+        0.0f, 0.0f,  1.0f,
+        0.7f, 1.0f,  5.0f,
+    };
+
+    std::vector<float> output =
+    {
+        1.0f, 4.0f,  2.0f,
+        0.2f, 2.0f,  4.0f,
+
+        2.0f, 4.0f, -1.0f,
+        0.4f, 2.0f,  4.0f,
+
+        0.0f, 4.0f,  2.0f,
+        8.4f, 1.0f, -1.0f,
+
+        0.0f, 0.0f,  2.0f,
+        0.9f, 2.0f,  7.0f,
+    };
+
+    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        shape,
+        input1,
+        shape,
+        input2,
+        shape,
+        output);
+}
+
+LayerTestResult<float, 5> Addition5dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int depth     = 2u;
+    unsigned int batchSize = 2u;
+    unsigned int channels  = 2u;
+    unsigned int height    = 2u;
+    unsigned int width     = 3u;
+
+    unsigned int shape[] = { depth, batchSize, channels, height, width };
+
+    std::vector<float> input1 =
+    {
+        2.6f, 4.0f, 4.4f,  2.7f, 4.6f, 2.8f,
+        2.3f, 1.9f, 3.4f,  2.9f, 2.2f, 4.5f,
+
+        2.8f, 1.9f, 2.3f,  2.6f, 4.7f, 3.5f,
+        0.4f, 1.5f, 2.1f,  0.7f, 5.0f, 1.1f,
+
+
+        1.0f, 2.7f, 0.0f,  0.6f, 0.8f, 0.9f,
+        1.0f, 2.6f, 0.4f,  3.8f, 0.4f, 0.8f,
+
+        0.5f, 4.3f, 3.1f,  4.4f, 0.7f, 1.4f,
+        0.4f, 4.4f, 0.7f,  0.6f, 4.7f, 1.2f,
+    };
+
+    std::vector<float> input2 =
+    {
+        4.4f, 3.0f, 1.0f,  0.0f, 3.9f, 3.1f,
+        1.7f, 2.9f, 1.3f,  0.4f, 0.4f, 4.3f,
+
+        4.5f, 0.2f, 2.2f,  4.1f, 3.9f, 3.0f,
+        0.1f, 2.5f, 4.1f,  4.6f, 1.5f, 0.0f,
+
+
+        0.5f, 4.9f, 2.5f,  1.5f, 3.4f, 4.5f,
+        2.0f, 3.0f, 4.9f,  1.6f, 2.4f, 3.4f,
+
+        3.6f, 1.8f, 1.3f,  2.6f, 2.1f, 4.8f,
+        2.0f, 4.3f, 4.0f,  0.2f, 0.6f, 4.4f,
+    };
+
+    std::vector<float> output =
+    {
+        7.0f, 7.0f, 5.4f,  2.7f, 8.5f, 5.9f,
+        4.0f, 4.8f, 4.7f,  3.3f, 2.6f, 8.8f,
+
+        7.3f, 2.1f, 4.5f,  6.7f, 8.6f, 6.5f,
+        0.5f, 4.0f, 6.2f,  5.3f, 6.5f, 1.1f,
+
+
+        1.5f, 7.6f, 2.5f,  2.1f, 4.2f, 5.4f,
+        3.0f, 5.6f, 5.3f,  5.4f, 2.8f, 4.2f,
+
+        4.1f, 6.1f, 4.4f,  7.0f, 2.8f, 6.2f,
+        2.4f, 8.7f, 4.7f,  0.8f, 5.3f, 5.6f,
+    };
+
+    return ElementwiseTestHelper<5, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        shape,
+        input1,
+        shape,
+        input2,
+        shape,
+        output);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> AdditionBroadcastTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset)
+{
+    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
+    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
+    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
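+    // The input shapes broadcast against each other: every size-1 dimension is
+    // stretched to match the corresponding dimension of the other input, giving
+    // an output of shape {1, 3, 2, 3}.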
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo1.SetQuantizationScale(qScale);
+        inputTensorInfo1.SetQuantizationOffset(qOffset);
+        inputTensorInfo2.SetQuantizationScale(qScale);
+        inputTensorInfo2.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
+    {
+        0.0f,
+        1.0f,
+
+        2.0f,
+        3.0f,
+
+        4.0f,
+        5.0f,
+    }));
+
+    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
+    {
+        0.5f, 1.5f, 2.5f,
+        3.5f, 4.5f, 5.5f,
+    }));
+
+    LayerTestResult<T,4> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
+    {
+        0.5f, 1.5f, 2.5f,
+        4.5f, 5.5f, 6.5f,
+
+        2.5f, 3.5f, 4.5f,
+        6.5f, 7.5f, 8.5f,
+
+        4.5f, 5.5f, 6.5f,
+        8.5f, 9.5f, 10.5f,
+    }));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::AdditionQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
+    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
+
+    inputHandle1->Allocate();
+    inputHandle2->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset)
+{
+    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
+    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
+    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
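+    // The single-element {1, 1, 1, 1} tensor broadcasts against every element of
+    // the {1, 3, 2, 3} input, effectively adding a scalar.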
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo1.SetQuantizationScale(qScale);
+        inputTensorInfo1.SetQuantizationOffset(qOffset);
+        inputTensorInfo2.SetQuantizationScale(qScale);
+        inputTensorInfo2.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
+    {
+         0.0f,  1.0f,  2.0f,
+         3.0f,  4.0f,  5.0f,
+         6.0f,  7.0f,  8.0f,
+         9.0f, 10.0f, 11.0f,
+        12.0f, 13.0f, 14.0f,
+        15.0f, 16.0f, 17.0f,
+    }));
+
+    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
+    {
+        0.5f,
+    }));
+
+    LayerTestResult<T,4> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
+    {
+         0.5f,  1.5f,  2.5f,
+         3.5f,  4.5f,  5.5f,
+         9.5f, 10.5f, 11.5f,
+         6.5f,  7.5f,  8.5f,
+        12.5f, 13.5f, 14.5f,
+        15.5f, 16.5f, 17.5f,
+    }));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::AdditionQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
+    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
+
+    inputHandle1->Allocate();
+    inputHandle2->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}
+
+LayerTestResult<float, 4> AdditionBroadcastTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
+        workloadFactory, memoryManager, 0.0f, 0);
+}
+
+LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
+        workloadFactory, memoryManager, 2.f, 0);
+}
+
+LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return AdditionBroadcastTestImpl<armnn::DataType::QuantisedSymm16>(
+        workloadFactory, memoryManager, 2.f, 0);
+}
+
+LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
+        workloadFactory, memoryManager, 0.0f, 0);
+}
+
+LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
+        workloadFactory, memoryManager, 0.1333333f, 128);
+}
+
+LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedSymm16>(
+        workloadFactory, memoryManager, 0.1333333f, 0);
+}
+
+LayerTestResult<uint8_t, 4> AdditionUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 2, 2, 3 };
+
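+    // The comments give the dequantized values, real = 7.0f * (quantized - 3). The
+    // output can represent at most 7.0f * (255 - 3) = 1764.0f, so larger sums are
+    // clamped to 255.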
+    std::vector<uint8_t> input0(
+    {
+        63,  35,  77,  70,  56, 112, //  420, 224,  518,  469,  371, 763
+        203,  28, 252, 168, 245,  91  // 1400, 175, 1743, 1155, 1694, 616
+    });
+
+    std::vector<uint8_t> input1(
+    {
+        21,   7, 175, 231, 175, 210, // 126,   28, 1204, 1596, 1204, 1449
+        126, 161,  63,  21, 105, 126  // 861, 1106,  420,  126,  714,  861
+    });
+
+    std::vector<uint8_t> output(
+    {
+        81,  39, 249, 255, 228, 255, //  546,  252, 1722, 2065(clamped), 1575, 2212(clamped)
+        255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
+    });
+
+    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        7.0f,
+        3,
+        shape1,
+        input1,
+        7.0f,
+        3,
+        shape0,
+        output,
+        7.0f,
+        3);
+}
+
+LayerTestResult<int16_t, 4> AdditionInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 2, 2, 3 };
+
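+    // The comments give the dequantized values, real = 7.0f * quantized. All tensors
+    // share the same scale and a zero offset, so the quantized output is simply the
+    // element-wise sum of the quantized inputs; nothing here reaches the int16 limits.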
+    std::vector<int16_t> input0 =
+    {
+        63,  35,  77,  70,  56, 112, //  441, 245,  539,  490,  392, 784
+        203,  28, 252, 168, 245,  91  // 1421, 196, 1764, 1176, 1715, 637
+    };
+
+    std::vector<int16_t> input1 =
+    {
+        21,   7, 175, 231, 175, 210, //  147,   49, 1225, 1617, 1225, 1470
+        126, 161,  63,  21, 105, 126  //  882, 1127,  441,  147,  735,  882
+    };
+
+    std::vector<int16_t> output =
+    {
+        84,  42, 252, 301, 231, 322, //  588,  294, 1764, 2107, 1617, 2254
+        329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519
+    };
+
+    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        7.0f,
+        0,
+        shape1,
+        input1,
+        7.0f,
+        0,
+        shape0,
+        output,
+        7.0f,
+        0);
+}
+
+LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    // Create Initial Tensor
+    // 1, 2, 3
+    // 4, 5, 6
+    // 7, 8, 9
+
+    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
+    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);
+
+    boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
+                                                            {1, 2, 3,
+                                                             4, 5, 6,
+                                                             7, 8, 9
+                                                            });
+
+    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
+            workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
+            workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);
+
+    // Apply MaxPool poolSize = 1x1, stride=2x2
+    // Result =
+    // 1, 3
+    // 7, 9
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolHeight = 1;
+    descriptor.m_PoolWidth = 1;
+    descriptor.m_StrideX = 2;
+    descriptor.m_StrideY = 2;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
+
+    armnn::Pooling2dQueueDescriptor queueDescriptor;
+    queueDescriptor.m_Parameters = descriptor;
+    armnn::WorkloadInfo workloadInfo;
+    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
+    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
+
+    // Create the MaxPool
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
+
+    auto shape(GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
+    boost::multi_array<float, 4> resultMaxPool;
+    resultMaxPool.resize(shape);
+
+    // Create an addition with another tensor of the same size. This would be the result
+    // of applying a Conv2d with a 2x2 kernel of ones and stride 1x1 to the initial
+    // tensor:
+    // 12, 16
+    // 24, 28
+
+    armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
+    armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
+
+    boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
+                                                                    {12, 16,
+                                                                     24, 28,
+                                                                    });
+
+    // Expected output tensor after MaxPool and Addition.
+    LayerTestResult<float,4> addRet(addOutputTensorInfo);
+    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
+    {
+        13, 19,
+        31, 37
+    }));
+
+    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);
+
+    armnn::AdditionQueueDescriptor data;
+    armnn::WorkloadInfo info;
+
+    // Add the output of the MaxPool and the new tensor
+    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
+    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
+    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);
+
+    poolingInputHandle->Allocate();
+    poolingOutputHandle->Allocate();
+    addInputHandle->Allocate();
+    addOutputHandle->Allocate();
+
+    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
+    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());
+
+    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
+    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);
+
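+    // Run the pooling first: the addition reads poolingOutputHandle directly, so the
+    // pooling result feeds straight into the second workload.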
+    workload->PostAllocationConfigure();
+    workload->Execute();
+    addWorkload->PostAllocationConfigure();
+    addWorkload->Execute();
+
+    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());
+
+    return addRet;
+}
+
+LayerTestResult<float, 4> CompareAdditionTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::IWorkloadFactory& refWorkloadFactory)
+{
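+    // Runs the same addition on the backend under test and on the reference backend;
+    // the reference output is stored as outputExpected for the comparison.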
+    unsigned int batchSize = 4;
+    unsigned int channels  = 1;
+    unsigned int height    = 2;
+    unsigned int width     = 3;
+
+    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int shape[] = {batchSize, channels, height, width};
+
+    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+
+    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
+    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
+
+    LayerTestResult<float,4> ret(outputTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
+    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::AdditionQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
+    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    armnn::AdditionQueueDescriptor refData = data;
+    armnn::WorkloadInfo refInfo = info;
+    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
+    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
+    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
+    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
+
+    inputHandle1->Allocate();
+    inputHandle2->Allocate();
+    outputHandle->Allocate();
+    inputHandle1Ref->Allocate();
+    inputHandle2Ref->Allocate();
+    outputHandleRef->Allocate();
+
+    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+    workloadRef->PostAllocationConfigure();
+    workloadRef->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
+
+    return ret;
+}
diff --git a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.hpp
new file mode 100644 (file)
index 0000000..60ef975
--- /dev/null
@@ -0,0 +1,60 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+LayerTestResult<float, 4> AdditionTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 5> Addition5dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> AdditionBroadcastTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> AdditionUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> AdditionInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> CompareAdditionTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::IWorkloadFactory& refWorkloadFactory);
diff --git a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp
new file mode 100644 (file)
index 0000000..0316ea1
--- /dev/null
@@ -0,0 +1,350 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "DivisionTestImpl.hpp"
+
+#include "ElementwiseTestImpl.hpp"
+
+template<>
+std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::DivisionQueueDescriptor>(
+    const armnn::IWorkloadFactory& workloadFactory,
+    const armnn::WorkloadInfo& info,
+    const armnn::DivisionQueueDescriptor& descriptor)
+{
+    return workloadFactory.CreateDivision(descriptor, info);
+}
+
+LayerTestResult<float, 4> DivisionByZeroTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int width        = 2u;
+    const unsigned int height       = 2u;
+    const unsigned int channelCount = 2u;
+    const unsigned int batchSize    = 2u;
+
+    unsigned int shape[] = { batchSize, channelCount, height, width };
+
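+    // IEEE 754 float semantics: dividing a non-zero value by (+/-)0.0f yields
+    // (+/-)infinity, with the sign following the operands' signs, while 0.0f / 0.0f
+    // yields NaN.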
+    std::vector<float> input0 =
+    {
+         1.f,  1.f,  1.f,  1.f,  0.f, 0.f, 0.f, 0.f,
+        -1.f, -1.f, -1.f, -1.f,  5.f, 5.f, 5.f, 5.f
+    };
+
+    std::vector<float> input1 =
+    {
+        0.f, 0.f, -0.f, -0.f,  0.f, 0.f, -0.f, -0.f,
+        0.f, 0.f, -0.f, -0.f,  5.f, 5.f,  5.f,  5.f
+    };
+
+    std::vector<float> output =
+    {
+         INFINITY,  INFINITY, -INFINITY, -INFINITY,  NAN,  NAN, -NAN, -NAN,
+        -INFINITY, -INFINITY,  INFINITY,  INFINITY,    1,    1,    1,    1
+    };
+
+    return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        shape,
+        input0,
+        shape,
+        input1,
+        shape,
+        output);
+}
+
+LayerTestResult<float, 4> DivisionTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int width        = 2u;
+    const unsigned int height       = 2u;
+    const unsigned int channelCount = 2u;
+    const unsigned int batchSize    = 2u;
+
+    unsigned int shape[] = { batchSize, channelCount, height, width };
+
+    std::vector<float> input0 =
+    {
+        2.f, 2.f, 2.f, 2.f, 3.f, 3.f, 3.f, 3.f,
+        4.f, 4.f, 4.f, 4.f, 5.f, 5.f, 5.f, 5.f
+    };
+
+    std::vector<float> input1 =
+    {
+        1.f, 1.f, 1.f, 1.f, 2.f, 2.f, 2.f, 2.f,
+        4.f, 4.f, 4.f, 4.f, 4.f, 4.f, 4.f, 4.f
+    };
+
+    std::vector<float> output =
+    {
+        2.f, 2.f, 2.f, 2.f, 1.50f, 1.50f, 1.50f, 1.50f,
+        1.f, 1.f, 1.f, 1.f, 1.25f, 1.25f, 1.25f, 1.25f
+    };
+
+    return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        shape,
+        input0,
+        shape,
+        input1,
+        shape,
+        output);
+}
+
+LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape0[] = { 1, 2, 2, 2 };
+    unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
+
+    std::vector<float> input1({ 2 });
+
+    std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
+
+    return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape0[] = { 1, 3, 3, 2 };
+    unsigned int shape1[] = { 1, 1, 1, 2 };
+
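+    // shape1 broadcasts along the innermost dimension: each pair of input0 values
+    // is divided element-wise by { 1.f, 2.f }.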
+    std::vector<float> input0 =
+    {
+         1.f,  4.f,  3.f,  8.f,  5.f, 12.f,
+         7.f, 16.f,  9.f, 20.f, 11.f, 24.f,
+        13.f, 28.f, 15.f, 32.f, 17.f, 36.f
+    };
+
+    std::vector<float> input1 = { 1.f, 2.f };
+
+    std::vector<float> output =
+    {
+         1.f,  2.f,  3.f,  4.f,  5.f,  6.f,
+         7.f,  8.f,  9.f, 10.f, 11.f, 12.f,
+        13.f, 14.f, 15.f, 16.f, 17.f, 18.f
+    };
+
+    return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<uint8_t, 4> DivisionUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int width        = 2u;
+    const unsigned int height       = 2u;
+    const unsigned int channelCount = 2u;
+    const unsigned int batchSize    = 2u;
+
+    unsigned int shape[] = { batchSize, channelCount, height, width };
+
+    std::vector<uint8_t> input0 =
+    {
+        2, 2, 2, 2,  3, 3, 3, 3,
+        4, 4, 4, 4,  5, 5, 5, 5
+    };
+
+    std::vector<uint8_t> input1 =
+    {
+        1, 1, 1, 1,  2, 2, 2, 2,
+        4, 4, 4, 4,  4, 4, 4, 4
+    };
+
+    std::vector<uint8_t> output =
+    {
+        8, 8, 8, 8,  6, 6, 6, 6,
+        4, 4, 4, 4,  5, 5, 5, 5
+    };
+
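+    // Worked example for the first element, assuming the usual affine
+    // dequantization real = scale * (quantized - offset): with scale 0.25f
+    // and offset 0 on all tensors, 2 / 1 dequantizes to 0.5f / 0.25f = 2.0f,
+    // which re-quantizes to 2.0f / 0.25f = 8 (= output[0]).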
+    return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+        workloadFactory,
+        memoryManager,
+        shape,
+        input0,
+        shape,
+        input1,
+        shape,
+        output,
+        0.25f,
+        0);
+}
+
+LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape0[] = { 1, 2, 2, 2 };
+    unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<uint8_t> input0 = { 2, 4, 6, 8, 10, 12, 14, 16};
+
+    std::vector<uint8_t> input1 = { 2 };
+
+    std::vector<uint8_t> output = { 1, 2, 3, 4, 5, 6, 7, 8};
+
+    return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape0[] = { 1, 3, 3, 2 };
+    unsigned int shape1[] = { 1, 1, 1, 2 };
+
+    std::vector<uint8_t> input0 =
+    {
+         1,  4,    3,  8,    5, 12,
+         7, 16,    9, 20,   11, 24,
+        13, 28,   15, 32,   17, 36
+    };
+
+    std::vector<uint8_t> input1 = { 1, 2 };
+
+    std::vector<uint8_t> output =
+    {
+         1,  2,    3,  4,    5,  6,
+         7,  8,    9, 10,   11, 12,
+        13, 14,   15, 16,   17, 18
+    };
+
+    return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<int16_t, 4> DivisionInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape[] = { 2, 2, 2, 2 };
+
+    std::vector<int16_t> input0 =
+    {
+        2, 2, 2, 2,  3, 3, 3, 3,
+        4, 4, 4, 4,  5, 5, 5, 5
+    };
+
+    std::vector<int16_t> input1 =
+    {
+        1, 1, 1, 1,  2, 2, 2, 2,
+        4, 4, 4, 4,  4, 4, 4, 4
+    };
+
+    std::vector<int16_t> output =
+    {
+        8, 8, 8, 8,  6, 6, 6, 6,
+        4, 4, 4, 4,  5, 5, 5, 5
+    };
+
+    return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+        workloadFactory,
+        memoryManager,
+        shape,
+        input0,
+        shape,
+        input1,
+        shape,
+        output,
+        0.25f,
+        0);
+}
+
+LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape0[] = { 1, 2, 2, 2 };
+    unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<int16_t> input0 = { 2, 4, 6, 8, 10, 12, 14, 16};
+
+    std::vector<int16_t> input1 = { 2 };
+
+    std::vector<int16_t> output = { 1, 2, 3, 4, 5, 6, 7, 8};
+
+    return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape0[] = { 1, 3, 3, 2 };
+    unsigned int shape1[] = { 1, 1, 1, 2 };
+
+    std::vector<int16_t> input0 =
+    {
+         1,  4,    3,  8,    5, 12,
+         7, 16,    9, 20,   11, 24,
+        13, 28,   15, 32,   17, 36
+    };
+
+    std::vector<int16_t> input1 = { 1, 2 };
+
+    std::vector<int16_t> output =
+    {
+         1,  2,    3,  4,    5,  6,
+         7,  8,    9, 10,   11, 12,
+        13, 14,   15, 16,   17, 18
+    };
+
+    return ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
diff --git a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.hpp
new file mode 100644 (file)
index 0000000..e06b494
--- /dev/null
@@ -0,0 +1,51 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+LayerTestResult<float, 4> DivisionByZeroTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> DivisionTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> DivisionBroadcast1ElementTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> DivisionUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> DivisionInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> DivisionBroadcast1ElementInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> DivisionBroadcast1DVectorInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp
new file mode 100644 (file)
index 0000000..1bb1348
--- /dev/null
@@ -0,0 +1,206 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+#include <ResolveType.hpp>
+
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+#include <memory>
+
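+// Dispatch point for creating the operation-specific workload. Each
+// operation's *TestImpl.cpp provides an explicit specialization for its
+// queue descriptor (e.g. CreateWorkload<armnn::EqualQueueDescriptor>
+// forwards to workloadFactory.CreateEqual); the primary template below only
+// exists so that ElementwiseTestHelper can name CreateWorkload<Descriptor>,
+// and its body is never meant to be selected.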
+template<typename DescriptorType>
+std::unique_ptr<armnn::IWorkload> CreateWorkload(
+    const armnn::IWorkloadFactory& workloadFactory,
+    const armnn::WorkloadInfo& info,
+    const DescriptorType& descriptor)
+{
+    return CreateWorkload(workloadFactory, info, descriptor);
+}
+
+template <std::size_t NumDims,
+          typename Descriptor,
+          armnn::DataType ArmnnTypeInput,
+          armnn::DataType ArmnnTypeOutput,
+          typename TInput  = armnn::ResolveType<ArmnnTypeInput>,
+          typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
+LayerTestResult<TOutput, NumDims> ElementwiseTestHelper(
+    armnn::IWorkloadFactory & workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
+    const unsigned int shape0[NumDims],
+    std::vector<TInput> values0,
+    float quantScale0,
+    int quantOffset0,
+    const unsigned int shape1[NumDims],
+    std::vector<TInput> values1,
+    float quantScale1,
+    int quantOffset1,
+    const unsigned int outShape[NumDims],
+    std::vector<TOutput> outValues,
+    float outQuantScale,
+    int outQuantOffset)
+{
+    armnn::TensorInfo inputTensorInfo0{NumDims, shape0, ArmnnTypeInput};
+    armnn::TensorInfo inputTensorInfo1{NumDims, shape1, ArmnnTypeInput};
+    armnn::TensorInfo outputTensorInfo{NumDims, outShape, ArmnnTypeOutput};
+
+    auto input0 = MakeTensor<TInput, NumDims>(inputTensorInfo0, values0);
+    auto input1 = MakeTensor<TInput, NumDims>(inputTensorInfo1, values1);
+
+    inputTensorInfo0.SetQuantizationScale(quantScale0);
+    inputTensorInfo0.SetQuantizationOffset(quantOffset0);
+
+    inputTensorInfo1.SetQuantizationScale(quantScale1);
+    inputTensorInfo1.SetQuantizationOffset(quantOffset1);
+
+    outputTensorInfo.SetQuantizationScale(outQuantScale);
+    outputTensorInfo.SetQuantizationOffset(outQuantOffset);
+
+    LayerTestResult<TOutput, NumDims> ret(outputTensorInfo);
+
+    if (ArmnnTypeOutput == armnn::DataType::Boolean)
+    {
+        ret.compareBoolean = true;
+    }
+
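+    // Create tensor handles for both inputs and the output.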
+    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
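+    // Set up the queue descriptor and create the operation's workload.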
+    Descriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
+    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+    auto workload = CreateWorkload<Descriptor>(workloadFactory, info, data);
+
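+    // Allocate the handles' backing memory and copy the input data in.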
+    inputHandle0->Allocate();
+    inputHandle1->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle0.get(), input0.origin());
+    CopyDataToITensorHandle(inputHandle1.get(), input1.origin());
+
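+    // Execute the workload and read back the actual output.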
+    workload->PostAllocationConfigure();
+    ExecuteWorkload(*workload, memoryManager);
+
+    CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
+
+    ret.outputExpected = MakeTensor<TOutput, NumDims>(outputTensorInfo, outValues);
+    return ret;
+}
+
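+// Convenience overload for tests whose inputs and output share a single
+// data type.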
+template <std::size_t NumDims,
+          typename Descriptor,
+          armnn::DataType ArmnnType,
+          typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, NumDims> ElementwiseTestHelper(
+    armnn::IWorkloadFactory & workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
+    const unsigned int shape0[NumDims],
+    std::vector<T> values0,
+    float quantScale0,
+    int quantOffset0,
+    const unsigned int shape1[NumDims],
+    std::vector<T> values1,
+    float quantScale1,
+    int quantOffset1,
+    const unsigned int outShape[NumDims],
+    std::vector<T> outValues,
+    float outQuantScale,
+    int outQuantOffset)
+{
+    return ElementwiseTestHelper<NumDims, Descriptor, ArmnnType, ArmnnType>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        values0,
+        quantScale0,
+        quantOffset0,
+        shape1,
+        values1,
+        quantScale1,
+        quantOffset1,
+        outShape,
+        outValues,
+        outQuantScale,
+        outQuantOffset);
+}
+
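+// Convenience overload applying one set of quantization parameters
+// (defaulting to scale 1.0f, offset 0) to both inputs and the output.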
+template <std::size_t NumDims,
+          typename Descriptor,
+          armnn::DataType ArmnnTypeInput,
+          armnn::DataType ArmnnTypeOutput,
+          typename TInput  = armnn::ResolveType<ArmnnTypeInput>,
+          typename TOutput = armnn::ResolveType<ArmnnTypeOutput>>
+LayerTestResult<TOutput, NumDims> ElementwiseTestHelper(
+    armnn::IWorkloadFactory & workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
+    const unsigned int shape0[NumDims],
+    std::vector<TInput> values0,
+    const unsigned int shape1[NumDims],
+    std::vector<TInput> values1,
+    const unsigned int outShape[NumDims],
+    std::vector<TOutput> outValues,
+    float quantScale = 1.0f,
+    int quantOffset = 0)
+{
+    return ElementwiseTestHelper<NumDims, Descriptor, ArmnnTypeInput, ArmnnTypeOutput>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        values0,
+        quantScale,
+        quantOffset,
+        shape1,
+        values1,
+        quantScale,
+        quantOffset,
+        outShape,
+        outValues,
+        quantScale,
+        quantOffset);
+}
+
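+// Convenience overload combining the two above: a single data type and a
+// single set of quantization parameters throughout.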
+template <std::size_t NumDims,
+          typename Descriptor,
+          armnn::DataType ArmnnType,
+          typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, NumDims> ElementwiseTestHelper(
+    armnn::IWorkloadFactory & workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
+    const unsigned int shape0[NumDims],
+    std::vector<T> values0,
+    const unsigned int shape1[NumDims],
+    std::vector<T> values1,
+    const unsigned int outShape[NumDims],
+    std::vector<T> outValues,
+    float quantScale = 1.0f,
+    int quantOffset = 0)
+{
+    return ElementwiseTestHelper<NumDims, Descriptor, ArmnnType, ArmnnType>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        values0,
+        shape1,
+        values1,
+        outShape,
+        outValues,
+        quantScale,
+        quantOffset);
+}
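+
+// Usage sketch, mirroring DivisionTest in DivisionTestImpl.cpp (the shapes
+// and values here are illustrative only):
+//
+//     unsigned int shape[] = { 1, 1, 2, 2 };
+//     std::vector<float> input0 = { 4.f, 6.f, 8.f, 10.f };
+//     std::vector<float> input1 = { 2.f, 2.f, 2.f, 2.f };
+//     std::vector<float> output = { 2.f, 3.f, 4.f, 5.f };
+//
+//     ElementwiseTestHelper<4, armnn::DivisionQueueDescriptor, armnn::DataType::Float32>(
+//         workloadFactory, memoryManager, shape, input0, shape, input1, shape, output);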
diff --git a/src/backends/backendsCommon/test/layerTests/EqualTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/EqualTestImpl.cpp
new file mode 100644 (file)
index 0000000..fa72136
--- /dev/null
@@ -0,0 +1,187 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "EqualTestImpl.hpp"
+
+#include "ElementwiseTestImpl.hpp"
+
+template<>
+std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
+    const armnn::IWorkloadFactory& workloadFactory,
+    const armnn::WorkloadInfo& info,
+    const armnn::EqualQueueDescriptor& descriptor)
+{
+    return workloadFactory.CreateEqual(descriptor, info);
+}
+
+LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
+                                            const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int width        = 2u;
+    const unsigned int height       = 2u;
+    const unsigned int channelCount = 2u;
+    const unsigned int batchSize    = 2u;
+
+    unsigned int shape[] = { batchSize, channelCount, height, width };
+
+    std::vector<float> input0 =
+    {
+        1.f, 1.f, 1.f, 1.f,  5.f, 5.f, 5.f, 5.f,
+        3.f, 3.f, 3.f, 3.f,  4.f, 4.f, 4.f, 4.f
+    };
+
+    std::vector<float> input1 =
+    {
+        1.f, 1.f, 1.f, 1.f,  3.f, 3.f, 3.f, 3.f,
+        5.f, 5.f, 5.f, 5.f,  4.f, 4.f, 4.f, 4.f
+    };
+
+    std::vector<uint8_t> output =
+    {
+        1, 1, 1, 1,  0, 0, 0, 0,
+        0, 0, 0, 0,  1, 1, 1, 1
+    };
+
+    return ElementwiseTestHelper<4, armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
+        workloadFactory,
+        memoryManager,
+        shape,
+        input0,
+        shape,
+        input1,
+        shape,
+        output);
+}
+
+LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape0[] = { 1, 2, 2, 2 };
+    std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
+
+    unsigned int shape1[] = { 1, 1, 1, 1 };
+    std::vector<float> input1({ 1 });
+
+    std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0, 0, 0});
+
+    return ElementwiseTestHelper<4, armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+    std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
+                                7, 8, 9, 10, 11, 12 });
+
+    std::vector<float> input1({ 1, 2, 3});
+
+    std::vector<uint8_t> output({ 1, 1, 1, 0, 0, 0,
+                                  0, 0, 0, 0, 0, 0 });
+
+    return ElementwiseTestHelper<4, armnn::EqualQueueDescriptor, armnn::DataType::Float32, armnn::DataType::Boolean>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<uint8_t, 4> EqualUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape[] = { 2, 2, 2, 2 };
+
+    // The tensors use the default quantization parameters (scale 1.0f,
+    // offset 0), so the quantized values below are also the dequantized values.
+    std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
+                                  3, 3, 3, 3, 7, 7, 7, 7 });
+
+    std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
+                                  3, 3, 3, 3, 5, 5, 5, 5 });
+
+    std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
+                                  1, 1, 1, 1, 0, 0, 0, 0 });
+
+    return ElementwiseTestHelper<4,
+                                 armnn::EqualQueueDescriptor,
+                                 armnn::DataType::QuantisedAsymm8,
+                                 armnn::DataType::Boolean>(
+        workloadFactory,
+        memoryManager,
+        shape,
+        input0,
+        shape,
+        input1,
+        shape,
+        output);
+}
+
+LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
+                                  7, 8, 9, 10, 11, 12 });
+
+    std::vector<uint8_t> input1({ 1 });
+
+    std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
+                                  0, 0, 0, 0, 0, 0 });
+
+    return ElementwiseTestHelper<4,
+                                 armnn::EqualQueueDescriptor,
+                                 armnn::DataType::QuantisedAsymm8,
+                                 armnn::DataType::Boolean>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+    std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
+                                  7, 8, 9, 10, 11, 12 });
+
+    std::vector<uint8_t> input1({ 1, 1, 3});
+
+    std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
+                                  0, 0, 0, 0, 0, 0 });
+
+    return ElementwiseTestHelper<4,
+                                 armnn::EqualQueueDescriptor,
+                                 armnn::DataType::QuantisedAsymm8,
+                                 armnn::DataType::Boolean>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
diff --git a/src/backends/backendsCommon/test/layerTests/EqualTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/EqualTestImpl.hpp
new file mode 100644 (file)
index 0000000..e9560b3
--- /dev/null
@@ -0,0 +1,34 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+LayerTestResult<uint8_t, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
+                                            const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> EqualBroadcast1ElementTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> EqualUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/backendsCommon/test/layerTests/GreaterTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/GreaterTestImpl.cpp
new file mode 100644 (file)
index 0000000..b5bf560
--- /dev/null
@@ -0,0 +1,229 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GreaterTestImpl.hpp"
+
+#include "ElementwiseTestImpl.hpp"
+
+template<>
+std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
+    const armnn::IWorkloadFactory& workloadFactory,
+    const armnn::WorkloadInfo& info,
+    const armnn::GreaterQueueDescriptor& descriptor)
+{
+    return workloadFactory.CreateGreater(descriptor, info);
+}
+
+LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
+                                              const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int width        = 2u;
+    const unsigned int height       = 2u;
+    const unsigned int channelCount = 2u;
+    const unsigned int batchSize    = 2u;
+
+    unsigned int shape[] = { batchSize, channelCount, height, width };
+
+    std::vector<float> input0 =
+    {
+        1.f, 1.f, 1.f, 1.f,  5.f, 5.f, 5.f, 5.f,
+        3.f, 3.f, 3.f, 3.f,  4.f, 4.f, 4.f, 4.f
+    };
+
+    std::vector<float> input1 =
+    {
+        1.f, 1.f, 1.f, 1.f,  3.f, 3.f, 3.f, 3.f,
+        5.f, 5.f, 5.f, 5.f,  4.f, 4.f, 4.f, 4.f
+    };
+
+    std::vector<uint8_t> output =
+    {
+        0, 0, 0, 0,  1, 1, 1, 1,
+        0, 0, 0, 0,  0, 0, 0, 0
+    };
+
+    return ElementwiseTestHelper<4,
+                                 armnn::GreaterQueueDescriptor,
+                                 armnn::DataType::Float32,
+                                 armnn::DataType::Boolean>(
+        workloadFactory,
+        memoryManager,
+        shape,
+        input0,
+        shape,
+        input1,
+        shape,
+        output);
+}
+
+LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape0[] = { 1, 2, 2, 2 };
+    unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<float> input0 = { 1, 2, 3, 4, 5, 6, 7, 8};
+    std::vector<float> input1 = { 1 };
+
+    std::vector<uint8_t> output = { 0, 1, 1, 1, 1, 1, 1, 1};
+
+    return ElementwiseTestHelper<4,
+                                 armnn::GreaterQueueDescriptor,
+                                 armnn::DataType::Float32,
+                                 armnn::DataType::Boolean>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+    std::vector<float> input0 =
+    {
+        1.0f, 2.9f, 2.1f,  4.0f,  5.0f,  6.0f,
+        7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f
+    };
+
+    std::vector<float> input1 = { 1.f, 3.f, 2.f };
+
+    std::vector<uint8_t> output =
+    {
+        0, 0, 1, 1, 1, 1,
+        1, 1, 1, 1, 1, 1
+    };
+
+    return ElementwiseTestHelper<4,
+                                 armnn::GreaterQueueDescriptor,
+                                 armnn::DataType::Float32,
+                                 armnn::DataType::Boolean>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<uint8_t, 4> GreaterUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape[] = { 2, 2, 2, 2 };
+
+    // The tensors use the default quantization parameters (scale 1.0f,
+    // offset 0), so the quantized values below are also the dequantized values.
+    std::vector<uint8_t> input0 =
+    {
+        1, 1, 1, 1, 6, 6, 6, 6,
+        3, 3, 3, 3, 5, 5, 5, 5
+    };
+
+    std::vector<uint8_t> input1 =
+    {
+        2, 2, 2, 2, 6, 6, 6, 6,
+        2, 2, 2, 2, 5, 5, 5, 5
+    };
+
+    std::vector<uint8_t> output =
+    {
+        0, 0, 0, 0, 0, 0, 0, 0,
+        1, 1, 1, 1, 0, 0, 0, 0
+    };
+
+    return ElementwiseTestHelper<4,
+                                 armnn::GreaterQueueDescriptor,
+                                 armnn::DataType::QuantisedAsymm8,
+                                 armnn::DataType::Boolean>(
+        workloadFactory,
+        memoryManager,
+        shape,
+        input0,
+        shape,
+        input1,
+        shape,
+        output);
+}
+
+LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<uint8_t> input0 =
+    {
+        1, 2, 3,  4,  5,  6,
+        7, 8, 9, 10, 11, 12
+    };
+
+    std::vector<uint8_t> input1 = { 1 };
+
+    std::vector<uint8_t> output =
+    {
+        0, 1, 1, 1, 1, 1,
+        1, 1, 1, 1, 1, 1
+    };
+
+    return ElementwiseTestHelper<4,
+                                 armnn::GreaterQueueDescriptor,
+                                 armnn::DataType::QuantisedAsymm8,
+                                 armnn::DataType::Boolean>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+    std::vector<uint8_t> input0 =
+    {
+        1, 2, 3,  4,  5,  6,
+        7, 8, 9, 10, 11, 12
+    };
+
+    std::vector<uint8_t> input1 = { 1, 1, 3 };
+
+    std::vector<uint8_t> output =
+    {
+        0, 1, 0, 1, 1, 1,
+        1, 1, 1, 1, 1, 1
+    };
+
+    return ElementwiseTestHelper<4,
+                                 armnn::GreaterQueueDescriptor,
+                                 armnn::DataType::QuantisedAsymm8,
+                                 armnn::DataType::Boolean>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
diff --git a/src/backends/backendsCommon/test/layerTests/GreaterTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/GreaterTestImpl.hpp
new file mode 100644 (file)
index 0000000..39f3a39
--- /dev/null
@@ -0,0 +1,34 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+LayerTestResult<uint8_t, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
+                                              const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> GreaterUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp b/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp
new file mode 100644 (file)
index 0000000..c277d2d
--- /dev/null
@@ -0,0 +1,44 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Tensor.hpp>
+
+#include <boost/multi_array.hpp>
+
+#include <cstddef>
+
+template <std::size_t n>
+boost::array<unsigned int, n> GetTensorShapeAsArray(const armnn::TensorInfo& tensorInfo)
+{
+    BOOST_ASSERT_MSG(n == tensorInfo.GetNumDimensions(),
+        "Attempting to construct a shape array of mismatching size");
+
+    boost::array<unsigned int, n> shape;
+    for (unsigned int i = 0; i < n; i++)
+    {
+        shape[i] = tensorInfo.GetShape()[i];
+    }
+    return shape;
+}
+
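+// Holds the actual and expected outputs of a layer test, along with flags
+// recording whether the layer is supported on the backend under test and
+// whether the outputs should be compared as booleans (set by
+// ElementwiseTestHelper for comparison operations such as Equal and Greater).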
+template <typename T, std::size_t n>
+struct LayerTestResult
+{
+    LayerTestResult(const armnn::TensorInfo& outputInfo)
+    {
+        auto shape( GetTensorShapeAsArray<n>(outputInfo) );
+        output.resize(shape);
+        outputExpected.resize(shape);
+        supported = true;
+        compareBoolean = false;
+    }
+
+    boost::multi_array<T, n> output;
+    boost::multi_array<T, n> outputExpected;
+    bool supported;
+    bool compareBoolean;
+};
diff --git a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
new file mode 100644 (file)
index 0000000..d0e624d
--- /dev/null
@@ -0,0 +1,301 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "MaximumTestImpl.hpp"
+
+#include "ElementwiseTestImpl.hpp"
+
+template<>
+std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
+    const armnn::IWorkloadFactory& workloadFactory,
+    const armnn::WorkloadInfo& info,
+    const armnn::MaximumQueueDescriptor& descriptor)
+{
+    return workloadFactory.CreateMaximum(descriptor, info);
+}
+
+LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
+                                           const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int width        = 2u;
+    const unsigned int height       = 2u;
+    const unsigned int channelCount = 2u;
+    const unsigned int batchSize    = 2u;
+
+    unsigned int shape[] = { batchSize, channelCount, height, width };
+
+    std::vector<float> input0 =
+    {
+        1.f, 1.f, 1.f, 1.f,  5.f, 5.f, 5.f, 5.f,
+        3.f, 3.f, 3.f, 3.f,  4.f, 4.f, 4.f, 4.f
+    };
+
+    std::vector<float> input1 =
+    {
+        2.f, 2.f, 2.f, 2.f,  3.f, 3.f, 3.f, 3.f,
+        4.f, 4.f, 4.f, 4.f,  5.f, 5.f, 5.f, 5.f
+    };
+
+    std::vector<float> output =
+    {
+        2.f, 2.f, 2.f, 2.f,  5.f, 5.f, 5.f, 5.f,
+        4.f, 4.f, 4.f, 4.f,  5.f, 5.f, 5.f, 5.f
+    };
+
+    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        shape,
+        input0,
+        shape,
+        input1,
+        shape,
+        output);
+}
+
+LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape0[] = { 1, 2, 2, 2 };
+    unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<float> input0 = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f };
+
+    std::vector<float> input1 = { 2.f };
+
+    std::vector<float> output = { 2.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f };
+
+    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+    std::vector<float> input0 =
+    {
+        1.f, 2.f, 3.f,  4.f,  5.f,  6.f,
+        7.f, 8.f, 9.f, 10.f, 11.f, 12.f
+    };
+
+    std::vector<float> input1 = { 1.f, 2.f, 3.f };
+
+    std::vector<float> output =
+    {
+        1.f, 2.f, 3.f,  4.f,  5.f,  6.f,
+        7.f, 8.f, 9.f, 10.f, 11.f, 12.f
+    };
+
+    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<uint8_t, 4> MaximumUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape[] = { 2, 2, 2, 2 };
+
+    // The tensors use the default quantization parameters (scale 1.0f,
+    // offset 0), so the quantized values below are also the dequantized values.
+    std::vector<uint8_t> input0 =
+    {
+        1, 1, 1, 1, 6, 6, 6, 6,
+        3, 3, 3, 3, 4, 4, 4, 4
+    };
+
+    std::vector<uint8_t> input1 =
+    {
+        2, 2, 2, 2, 3, 3, 3, 3,
+        4, 4, 4, 4, 5, 5, 5, 5
+    };
+
+    std::vector<uint8_t> output =
+    {
+        2, 2, 2, 2, 6, 6, 6, 6,
+        4, 4, 4, 4, 5, 5, 5, 5
+    };
+
+    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+        workloadFactory,
+        memoryManager,
+        shape,
+        input0,
+        shape,
+        input1,
+        shape,
+        output);
+}
+
+LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<uint8_t> input0 =
+    {
+        1, 2, 3,  4,  5,  6,
+        7, 8, 9, 10, 11, 12
+    };
+
+    std::vector<uint8_t> input1 = { 2 };
+
+    std::vector<uint8_t> output =
+    {
+        2, 2, 3,  4,  5,  6,
+        7, 8, 9, 10, 11, 12
+    };
+
+    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+    std::vector<uint8_t> input0 =
+    {
+        1, 2, 3,  4,  5,  6,
+        7, 8, 9, 10, 11, 12
+    };
+
+    std::vector<uint8_t> input1 = { 1, 10, 3 };
+
+    std::vector<uint8_t> output =
+    {
+        1, 10, 3,  4, 10,  6,
+        7, 10, 9, 10, 11, 12
+    };
+
+    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<int16_t, 4> MaximumInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape[] = { 2, 2, 2, 2 };
+
+    std::vector<int16_t> input0 = { 1, 1, 1, 1, 6, 6, 6, 6,
+                                    3, 3, 3, 3, 4, 4, 4, 4 };
+
+    std::vector<int16_t> input1 = { 2, 2, 2, 2, 3, 3, 3, 3,
+                                    4, 4, 4, 4, 5, 5, 5, 5 };
+
+    std::vector<int16_t> output = { 2, 2, 2, 2, 6, 6, 6, 6,
+                                    4, 4, 4, 4, 5, 5, 5, 5 };
+
+    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+        workloadFactory,
+        memoryManager,
+        shape,
+        input0,
+        shape,
+        input1,
+        shape,
+        output);
+}
+
+LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<int16_t> input0 =
+    {
+        1, 2, 3,  4,  5,  6,
+        7, 8, 9, 10, 11, 12
+    };
+
+    std::vector<int16_t> input1 = { 2 };
+
+    std::vector<int16_t> output =
+    {
+        2, 2, 3,  4,  5,  6,
+        7, 8, 9, 10, 11, 12
+    };
+
+    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+    std::vector<int16_t> input0 =
+    {
+        1, 2, 3,  4,  5,  6,
+        7, 8, 9, 10, 11, 12
+    };
+
+    std::vector<int16_t> input1 = { 1, 10, 3 };
+
+    std::vector<int16_t> output =
+    {
+        1, 10, 3,  4, 10,  6,
+        7, 10, 9, 10, 11, 12
+    };
+
+    return ElementwiseTestHelper<4, armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
diff --git a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.hpp
new file mode 100644 (file)
index 0000000..b672431
--- /dev/null
@@ -0,0 +1,46 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
+                                           const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> MaximumUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> MaximumInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> MaximumBroadcast1ElementInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> MaximumBroadcast1DVectorInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp
new file mode 100644 (file)
index 0000000..eabad8f
--- /dev/null
@@ -0,0 +1,196 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "MinimumTestImpl.hpp"
+
+#include "ElementwiseTestImpl.hpp"
+
+template<>
+std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
+    const armnn::IWorkloadFactory& workloadFactory,
+    const armnn::WorkloadInfo& info,
+    const armnn::MinimumQueueDescriptor& descriptor)
+{
+    return workloadFactory.CreateMinimum(descriptor, info);
+}
+
+LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape0[] = { 1, 2, 2, 2 };
+    unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<float> input0 = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f };
+
+    std::vector<float> input1 = { 2.f };
+
+    std::vector<float> output = { 1.f, 2.f, 2.f, 2.f, 2.f, 2.f, 2.f, 2.f };
+
+    return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape0[] = { 1, 2, 2, 2 };
+    unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<float> input0 = { 1.f, 6.f, 3.f, 2.f, 8.f, 9.f, 1.f, 10.f };
+
+    std::vector<float> input1 = { 5.f };
+
+    std::vector<float> output = { 1.f, 5.f, 3.f, 2.f, 5.f, 5.f, 1.f, 5.f };
+
+    return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
+    armnn::IWorkloadFactory & workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+    std::vector<uint8_t> input0 =
+    {
+        1, 2, 3, 3, 2, 1,
+        7, 1, 2, 3, 4, 5
+    };
+
+    std::vector<uint8_t> input1 = { 1, 2, 3 };
+
+    std::vector<uint8_t> output =
+    {
+        1, 2, 3, 1, 2, 1,
+        1, 1, 2, 1, 2, 3
+    };
+
+    return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<int16_t, 4> MinimumInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape[] = { 2, 2, 2, 2 };
+
+    std::vector<int16_t> input0 =
+    {
+        1, 1, 1, 1, 6, 6, 6, 6,
+        3, 3, 3, 3, 4, 4, 4, 4
+    };
+
+    std::vector<int16_t> input1 =
+    {
+        2, 2, 2, 2, 3, 3, 3, 3,
+        4, 4, 4, 4, 5, 5, 5, 5
+    };
+
+    std::vector<int16_t> output =
+    {
+        1, 1, 1, 1, 3, 3, 3, 3,
+        3, 3, 3, 3, 4, 4, 4, 4
+    };
+
+    return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+        workloadFactory,
+        memoryManager,
+        shape,
+        input0,
+        shape,
+        input1,
+        shape,
+        output);
+}
+
+LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<int16_t> input0 =
+    {
+        1, 2, 3,  4,  5,  6,
+        7, 8, 9, 10, 11, 12
+    };
+
+    std::vector<int16_t> input1 = { 2 };
+
+    std::vector<int16_t> output =
+    {
+        1, 2, 2, 2, 2, 2,
+        2, 2, 2, 2, 2, 2
+    };
+
+    return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+    std::vector<int16_t> input0 =
+    {
+        1, 2, 3,  4,  5,  6,
+        7, 8, 9, 10, 11, 12
+    };
+
+    std::vector<int16_t> input1 = { 1, 10, 3 };
+
+    std::vector<int16_t> output =
+    {
+        1, 2, 3, 1,  5, 3,
+        1, 8, 3, 1, 10, 3
+    };
+
+    return ElementwiseTestHelper<4, armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
diff --git a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.hpp
new file mode 100644 (file)
index 0000000..bb84bc0
--- /dev/null
@@ -0,0 +1,35 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
+    armnn::IWorkloadFactory & workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager);
+
+LayerTestResult<int16_t, 4> MinimumInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> MinimumBroadcast1ElementInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> MinimumBroadcast1DVectorInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
new file mode 100644 (file)
index 0000000..99b1b18
--- /dev/null
@@ -0,0 +1,467 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "MultiplicationTestImpl.hpp"
+
+#include "ElementwiseTestImpl.hpp"
+
+template<>
+std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MultiplicationQueueDescriptor>(
+    const armnn::IWorkloadFactory& workloadFactory,
+    const armnn::WorkloadInfo& info,
+    const armnn::MultiplicationQueueDescriptor& descriptor)
+{
+    return workloadFactory.CreateMultiplication(descriptor, info);
+}
+
+LayerTestResult<float, 4> MultiplicationTest(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int width        = 2u;
+    const unsigned int height       = 2u;
+    const unsigned int channelCount = 2u;
+    const unsigned int batchSize    = 2u;
+
+    unsigned int shape[] = { batchSize, channelCount, height, width };
+
+    std::vector<float> input0 =
+    {
+        1, 1, 1, 1,  2, 2, 2, 2,
+        3, 3, 3, 3,  4, 4, 4, 4
+    };
+
+    std::vector<float> input1 =
+    {
+        2, 2, 2, 2,  3, 3, 3, 3,
+        4, 4, 4, 4,  5, 5, 5, 5
+    };
+
+    std::vector<float> output =
+    {
+         2,  2,  2,  2,   6,  6,  6,  6,
+        12, 12, 12, 12,  20, 20, 20, 20
+    };
+
+    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        shape,
+        input0,
+        shape,
+        input1,
+        shape,
+        output);
+}
+
+LayerTestResult<float, 5> Multiplication5dTest(armnn::IWorkloadFactory& workloadFactory,
+                                               const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int width        = 3u;
+    const unsigned int height       = 2u;
+    const unsigned int channelCount = 2u;
+    const unsigned int batchSize    = 2u;
+    const unsigned int depth        = 2u;
+
+    unsigned int shape[] = { depth, batchSize, channelCount, height, width };
+
+    std::vector<float> input0 =
+    {
+        1.80f, 0.20f, 2.30f,  1.30f, 2.10f, 1.00f,
+        2.60f, 0.60f, 2.10f,  2.30f, 2.30f, 2.00f,
+
+        2.50f, 1.00f, 2.90f,  3.10f, 1.50f, 2.40f,
+        2.80f, 1.10f, 1.00f,  3.20f, 1.00f, 2.30f,
+
+
+        0.30f, 2.20f, 1.00f,  0.20f, 1.60f, 1.40f,
+        0.80f, 3.20f, 0.10f,  0.10f, 3.10f, 2.10f,
+
+        1.50f, 2.40f, 1.40f,  0.70f, 2.40f, 1.40f,
+        1.60f, 1.20f, 1.90f,  0.80f, 0.00f, 0.10f,
+    };
+
+    std::vector<float> input1 =
+    {
+        0.70f, 1.00f, 2.90f,  2.20f, 3.10f, 2.80f,
+        1.80f, 2.00f, 0.50f,  2.30f, 1.20f, 2.70f,
+
+        2.40f, 0.20f, 3.20f,  1.60f, 0.20f, 2.50f,
+        2.30f, 0.70f, 2.70f,  1.80f, 2.90f, 2.70f,
+
+
+        3.20f, 3.20f, 0.70f,  1.90f, 2.70f, 2.50f,
+        2.40f, 0.90f, 2.30f,  1.80f, 2.50f, 2.00f,
+
+        1.60f, 2.20f, 1.60f,  2.00f, 0.30f, 3.20f,
+        0.40f, 3.00f, 2.60f,  0.30f, 0.00f, 2.50f,
+    };
+
+    std::vector<float> output =
+    {
+        1.26f, 0.20f, 6.67f,  2.86f, 6.51f, 2.80f,
+        4.68f, 1.20f, 1.05f,  5.29f, 2.76f, 5.40f,
+
+        6.00f, 0.20f, 9.28f,  4.96f, 0.30f, 6.00f,
+        6.44f, 0.77f, 2.70f,  5.76f, 2.90f, 6.21f,
+
+
+        0.96f, 7.04f, 0.70f,  0.38f, 4.32f, 3.50f,
+        1.92f, 2.88f, 0.23f,  0.18f, 7.75f, 4.20f,
+
+        2.40f, 5.28f, 2.24f,  1.40f, 0.72f, 4.48f,
+        0.64f, 3.60f, 4.94f,  0.24f, 0.00f, 0.25f,
+    };
+
+    return ElementwiseTestHelper<5, armnn::MultiplicationQueueDescriptor, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        shape,
+        input0,
+        shape,
+        input1,
+        shape,
+        output);
+}
+
+LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape0[] = { 1, 2, 2, 2 };
+    unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<float> input0 = { 1, 2, 3, 4, 5, 6, 7, 8};
+
+    std::vector<float> input1 = { 2 };
+
+    std::vector<float> output = { 2, 4, 6, 8, 10, 12, 14, 16};
+
+    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape0[] = { 1, 3, 3, 2 };
+    unsigned int shape1[] = { 1, 1, 1, 2 };
+
+    std::vector<float> input0 =
+    {
+        1,   2,    3,  4,    5,  6,
+        7,   8,    9, 10,   11, 12,
+        13, 14,   15, 16,   17, 18
+    };
+
+    std::vector<float> input1 = { 1, 2 };
+
+    std::vector<float> output =
+    {
+         1,  4,    3,  8,    5, 12,
+         7, 16,    9, 20,   11, 24,
+        13, 28,   15, 32,   17, 36
+    };
+
+    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    constexpr unsigned int batchSize = 1u;
+    constexpr unsigned int channels  = 2u;
+    constexpr unsigned int height    = 2u;
+    constexpr unsigned int width     = 3u;
+
+    const unsigned int shape[] = { batchSize, channels, height, width };
+
+    // See dequantized values to the right
+    std::vector<uint8_t> input0 =
+    {
+         62,  37,   3, 172,  13, 111, // 244, 144,   8, 684,  48, 440,
+        188,  20,  73,  31,  23,  31  // 748,  76, 288, 120,  88, 120
+    };
+
+    // See dequantized values to the right
+    std::vector<uint8_t> input1 =
+    {
+        126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
+         48, 115, 151,  79,  78,  97  // 150, 351, 459, 243, 240, 297
+    };
+
+    // See dequantized values to the right
+    std::vector<uint8_t> output =
+    {
+         64,  72,   0, 255,   8, 236, //  93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
+         77,  15,  92,  16,  10,  21, // 112200,  26676,        132192,           29160, 21120,  35640
+    };
+
+    // Scale/offset chosen to have output values out of range
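+    // Worked example for the first element, assuming the usual affine
+    // dequantization real = scale * (quantized - offset):
+    //   dequantized inputs: 4.0f * (62 - 1) = 244 and 3.0f * (126 - (-2)) = 384
+    //   real product:       244 * 384 = 93696
+    //   quantized output:   93696 / 1366.255f + (-5) = 63.58..., stored as 64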
+    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+        workloadFactory,
+        memoryManager,
+        shape,
+        input0,
+        4.0f,
+        1,
+        shape,
+        input1,
+        3.0f,
+        -2,
+        shape,
+        output,
+        1366.255f,
+        -5);
+}
+
+LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<uint8_t> input0 =
+    {
+        1, 2, 3,    4,  5,  6,
+        7, 8, 9,   10, 11, 12
+    };
+
+    std::vector<uint8_t> input1 = { 2 };
+
+    std::vector<uint8_t> output =
+    {
+        2,  4,   6,    8, 10, 12,
+        14, 16, 18,   20, 22, 24
+    };
+
+    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+    std::vector<uint8_t> input0 =
+    {
+        1, 2, 3,    4,  5,  6,
+        7, 8, 9,   10, 11, 12
+    };
+
+    std::vector<uint8_t> input1 = { 1, 2, 3 };
+
+    std::vector<uint8_t> output =
+    {
+        1,  4,   9,     4, 10, 18,
+        7, 16,  27,    10, 22, 36
+    };
+
+    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<int16_t, 4> MultiplicationInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape[] = { 1, 2, 2, 3 };
+
+    std::vector<int16_t> input0 =
+    {
+        6,   7,  8,  9, 10, 11,
+        12, 13, 14, 15, 16, 17
+    };
+
+    std::vector<int16_t> input1 =
+    {
+        1, 2, 3,  4,  5,  6,
+        7, 8, 9, 10, 11, 12
+    };
+
+    std::vector<int16_t> output =
+    {
+        6,   14,  24,  36,  50,  66,
+        84, 104, 126, 150, 176, 204
+    };
+
+    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+        workloadFactory,
+        memoryManager,
+        shape,
+        input0,
+        shape,
+        input1,
+        shape,
+        output);
+}
+
+LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<int16_t> input0 =
+    {
+        1, 2, 3,  4,  5,  6,
+        7, 8, 9, 10, 11, 12
+    };
+
+    std::vector<int16_t> input1 = { 2 };
+
+    std::vector<int16_t> output =
+    {
+        2,   4,  6,  8, 10, 12,
+        14, 16, 18, 20, 22, 24
+    };
+
+    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+    std::vector<int16_t> input0 =
+    {
+        1, 2, 3,  4,  5,  6,
+        7, 8, 9, 10, 11, 12
+    };
+
+    std::vector<int16_t> input1 = { 1, 2, 3 };
+
+    std::vector<int16_t> output =
+    {
+        1,  4,  9,  4, 10, 18,
+        7, 16, 27, 10, 22, 36
+    };
+
+    return ElementwiseTestHelper<4, armnn::MultiplicationQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<float, 4> CompareMultiplicationTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::IWorkloadFactory& refWorkloadFactory)
+{
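+    // Run identical random inputs through the factory under test and the reference
+    // factory, and return both outputs so the caller can compare them.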
+    const unsigned int width = 16;
+    const unsigned int height = 32;
+    const unsigned int channelCount = 2;
+    const unsigned int batchSize = 5;
+
+    armnn::TensorInfo inputTensorInfo0;
+    armnn::TensorInfo inputTensorInfo1;
+    armnn::TensorInfo outputTensorInfo;
+
+    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };
+
+    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+
+    LayerTestResult<float, 4> comparisonResult(outputTensorInfo);
+
+    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
+    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::MultiplicationQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
+    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    armnn::MultiplicationQueueDescriptor refData = data;
+    armnn::WorkloadInfo refInfo = info;
+    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
+    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
+    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
+
+    std::unique_ptr<armnn::IWorkload> workload    = workloadFactory.CreateMultiplication(data, info);
+    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);
+
+    inputHandle0->Allocate();
+    inputHandle1->Allocate();
+    outputHandle->Allocate();
+    inputHandle0Ref->Allocate();
+    inputHandle1Ref->Allocate();
+    outputHandleRef->Allocate();
+
+    CopyDataToITensorHandle(inputHandle0.get(), input0.origin());
+    CopyDataToITensorHandle(inputHandle1.get(), input1.origin());
+    CopyDataToITensorHandle(inputHandle0Ref.get(), input0.origin());
+    CopyDataToITensorHandle(inputHandle1Ref.get(), input1.origin());
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+    workloadRef->PostAllocationConfigure();
+    workloadRef->Execute();
+    CopyDataFromITensorHandle(comparisonResult.output.origin(), outputHandle.get());
+    CopyDataFromITensorHandle(comparisonResult.outputExpected.origin(), outputHandleRef.get());
+
+    return comparisonResult;
+}
diff --git a/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.hpp
new file mode 100644 (file)
index 0000000..c566751
--- /dev/null
@@ -0,0 +1,56 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+LayerTestResult<float, 4> MultiplicationTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 5> Multiplication5dTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> MultiplicationUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> MultiplicationInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> MultiplicationBroadcast1ElementInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> MultiplicationBroadcast1DVectorInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> CompareMultiplicationTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::IWorkloadFactory& refWorkloadFactory);
diff --git a/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.cpp
new file mode 100644 (file)
index 0000000..61225d4
--- /dev/null
@@ -0,0 +1,250 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SubtractionTestImpl.hpp"
+
+#include "ElementwiseTestImpl.hpp"
+
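+// ElementwiseTestHelper instantiates the workload through the CreateWorkload template,
+// so each operation's test file specialises it for its own queue descriptor.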
+template<>
+std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::SubtractionQueueDescriptor>(
+    const armnn::IWorkloadFactory& workloadFactory,
+    const armnn::WorkloadInfo& info,
+    const armnn::SubtractionQueueDescriptor& descriptor)
+{
+    return workloadFactory.CreateSubtraction(descriptor, info);
+}
+
+LayerTestResult<uint8_t, 4> SubtractionUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 1, 2, 2 };
+    const unsigned int shape1[] = { 1, 1, 2, 2 };
+
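+    // input0 dequantises (scale 0.5, offset 2) to { 4, 5, 6, 7 }; subtracting input1 yields { 3, 3, 5, 5 }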
+    std::vector<uint8_t> input0 = { 10, 12, 14, 16 };
+    std::vector<uint8_t> input1 = {  1,  2,  1,  2 };
+    std::vector<uint8_t> output = {  3,  3,  5,  5 };
+
+    return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        0.5f,
+        2,
+        shape1,
+        input1,
+        1.0f,
+        0,
+        shape0,
+        output,
+        1.0f,
+        0);
+}
+
+LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 1, 2, 2 };
+    const unsigned int shape1[] = { 1, 1, 1, 1 };
+
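+    // input0 dequantises to { 4, 5, 6, 7 }; the real result { 2, 3, 4, 5 } requantises with offset 3 to { 5, 6, 7, 8 }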
+    std::vector<uint8_t> input0 = { 10, 12, 14, 16 };
+
+    std::vector<uint8_t> input1 = { 2 };
+
+    std::vector<uint8_t> output = { 5, 6, 7, 8 };
+
+    return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        0.5f,
+        2,
+        shape1,
+        input1,
+        1.0f,
+        0,
+        shape0,
+        output,
+        1.0f,
+        3);
+}
+
+LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 1, 2, 2 };
+    const unsigned int shape1[] = { 1, 1, 1, 2 };
+
+    std::vector<uint8_t> input0 = { 10, 12, 14, 16 };
+
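+    // input1 is broadcast along the innermost (width) dimension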
+    std::vector<uint8_t> input1 = { 2, 1 };
+
+    std::vector<uint8_t> output = { 8, 11, 12, 15 };
+
+    return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<float, 4> SubtractionTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 1, 2, 2 };
+    const unsigned int shape1[] = { 1, 1, 2, 2 };
+
+    std::vector<float> input0 = { 1,  2, 3, 4 };
+    std::vector<float> input1 = { 1, -1, 0, 2 };
+    std::vector<float> output = { 0,  3, 3, 2 };
+
+    return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 1, 2, 2 };
+    const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<float> input0 = { 1,  2, 3, 4 };
+
+    std::vector<float> input1 = { 10 };
+
+    std::vector<float> output = { -9,  -8, -7, -6 };
+
+    return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<float, 4> SubtractionBroadcastTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 1, 2, 2 };
+    const unsigned int shape1[] = { 1, 1, 1, 2 };
+
+    std::vector<float> input0 = { 1,  2, 3, 4 };
+
+    std::vector<float> input1 = { 10, -5 };
+
+    std::vector<float> output = { -9,  7, -7, 9 };
+
+    return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::Float32>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<int16_t, 4> SubtractionInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape[] = { 1, 1, 2, 2 };
+
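+    // input0 dequantises (scale 0.5, offset 0) to { 5, 6, 7, 8 }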
+    std::vector<int16_t> input0 = { 10, 12, 14, 16 };
+    std::vector<int16_t> input1 = {  1,  2,  1,  2 };
+    std::vector<int16_t> output = {  4,  4,  6,  6 };
+
+    return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+        workloadFactory,
+        memoryManager,
+        shape,
+        input0,
+        0.5f,
+        0,
+        shape,
+        input1,
+        1.0f,
+        0,
+        shape,
+        output,
+        1.0f,
+        0);
+}
+
+LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 1, 2, 2 };
+    const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<int16_t> input0 = { 10, 12, 14, 16 };
+
+    std::vector<int16_t> input1 = { 2 };
+
+    std::vector<int16_t> output = { 3, 4, 5, 6 };
+
+    return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        0.5f,
+        0,
+        shape1,
+        input1,
+        1.0f,
+        0,
+        shape0,
+        output,
+        1.0f,
+        0);
+}
+
+LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 1, 2, 2 };
+    const unsigned int shape1[] = { 1, 1, 1, 2 };
+
+    std::vector<int16_t> input0 = { 10, 12, 14, 16 };
+
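+    // as above, input1 broadcasts across the width dimension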
+    std::vector<int16_t> input1 = { 2, 1 };
+
+    std::vector<int16_t> output = { 8, 11, 12, 15 };
+
+    return ElementwiseTestHelper<4, armnn::SubtractionQueueDescriptor, armnn::DataType::QuantisedSymm16>(
+        workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
diff --git a/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SubtractionTestImpl.hpp
new file mode 100644 (file)
index 0000000..ca1742b
--- /dev/null
@@ -0,0 +1,47 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+LayerTestResult<float, 4> SubtractionTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> SubtractionBroadcastTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> SubtractionUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> SubtractionInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> SubtractionBroadcast1ElementInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 4> SubtractionBroadcastInt16Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);