IVGCVSW-4018 Move QuantizeHelper.hpp to armnnUtils
author    Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
          Tue, 22 Oct 2019 09:00:28 +0000 (10:00 +0100)
committer Matteo Martincigh <matteo.martincigh@arm.com>
          Fri, 25 Oct 2019 15:01:24 +0000 (15:01 +0000)
* Moved QuantizeHelper.hpp to armnnUtils
* Reordered the parameters of QuantizedVector and added default
  values for qScale and qOffset so the function is easier to use
  with non-quantized types such as Float16 (see the sketch below)
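
  An illustrative sketch of the reordered signature (an editorial
  example, not part of this change; the scale/offset values are made up):

      #include <QuantizeHelper.hpp>

      #include <vector>

      // Before: QuantizedVector<T>(qScale, qOffset, data)
      // After:  the data comes first; qScale/qOffset default to 1.f/0
      std::vector<uint8_t> q =
          armnnUtils::QuantizedVector<uint8_t>({ 1.f, 2.f, 3.f }, 0.5f, 10);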

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I28c263dfa425f1316feccb4116839a84f5d568e5

44 files changed:
CMakeLists.txt
src/armnn/test/EndToEndTest.cpp
src/armnn/test/QuantizerTest.cpp
src/armnn/test/TensorHelpers.hpp
src/armnnDeserializer/test/DeserializeComparison.cpp
src/armnnTfLiteParser/test/DetectionPostProcess.cpp
src/armnnUtils/QuantizeHelper.hpp [moved from src/backends/backendsCommon/test/QuantizeHelper.hpp with 81% similarity]
src/armnnUtils/test/QuantizeHelperTest.cpp [new file with mode: 0644]
src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp
src/backends/backendsCommon/test/CMakeLists.txt
src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp
src/backends/backendsCommon/test/EndToEndTestImpl.hpp
src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp
src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp
src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.hpp
src/backends/cl/test/Fp16SupportTest.cpp
src/backends/cl/test/OpenClTimerTest.cpp
tests/DeepSpeechV1Database.hpp
tests/MobileNetSsdDatabase.hpp

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 6edc57f..38605ca 100644 (file)
@@ -63,6 +63,7 @@ list(APPEND armnnUtils_sources
     src/armnnUtils/ParserPrototxtFixture.hpp
     src/armnnUtils/PrototxtConversions.hpp
     src/armnnUtils/PrototxtConversions.cpp
+    src/armnnUtils/QuantizeHelper.hpp
     src/armnnUtils/TensorIOUtils.hpp
     src/armnnUtils/TensorUtils.hpp
     src/armnnUtils/TensorUtils.cpp
@@ -612,6 +613,7 @@ if(BUILD_UNIT_TESTS)
         src/armnn/test/UnitTests.cpp
         src/armnn/test/UnitTests.hpp
         src/armnn/test/UtilsTests.cpp
+        src/armnnUtils/test/QuantizeHelperTest.cpp
         src/armnnUtils/test/PrototxtConversionsTest.cpp
         src/armnnUtils/test/ParserHelperTest.cpp
         src/armnnUtils/test/TensorUtilsTest.cpp
diff --git a/src/armnn/test/EndToEndTest.cpp b/src/armnn/test/EndToEndTest.cpp
index d25e197..df84be4 100644 (file)
@@ -7,8 +7,6 @@
 #include <armnn/IRuntime.hpp>
 #include <armnn/INetwork.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
-
 #include <boost/core/ignore_unused.hpp>
 #include <boost/test/unit_test.hpp>
 
 
 BOOST_AUTO_TEST_SUITE(EndToEnd)
 
-namespace
-{
-
-template<typename T>
-bool IsFloatIterFunc(T iter)
-{
-    boost::ignore_unused(iter);
-    return IsFloatingPointIterator<T>::value;
-}
-
-} //namespace
-
-BOOST_AUTO_TEST_CASE(QuantizedHelper)
-{
-    std::vector<float> fArray;
-    BOOST_TEST(IsFloatIterFunc(fArray.begin()) == true);
-    BOOST_TEST(IsFloatIterFunc(fArray.cbegin()) == true);
-
-    std::vector<double> dArray;
-    BOOST_TEST(IsFloatIterFunc(dArray.begin()) == true);
-
-    std::vector<int> iArray;
-    BOOST_TEST(IsFloatIterFunc(iArray.begin()) == false);
-
-    float floats[5];
-    BOOST_TEST(IsFloatIterFunc(&floats[0]) == true);
-
-    int ints[5];
-    BOOST_TEST(IsFloatIterFunc(&ints[0]) == false);
-}
-
 BOOST_AUTO_TEST_CASE(ErrorOnLoadNetwork)
 {
     using namespace armnn;
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 90fd5e9..3f57ce8 100644 (file)
@@ -4,17 +4,19 @@
 //
 
 #include <armnn/INetwork.hpp>
+#include <armnn/LayerVisitorBase.hpp>
 #include <armnn/Tensor.hpp>
-#include <armnnQuantizer/INetworkQuantizer.hpp>
 #include <armnn/Types.hpp>
 
-#include "armnn/LayerVisitorBase.hpp"
+#include <armnnQuantizer/INetworkQuantizer.hpp>
+
+#include <QuantizeHelper.hpp>
+
 #include "../Graph.hpp"
 #include "../Network.hpp"
 #include "../NetworkQuantizerUtils.hpp"
 #include "../OverrideInputRangeVisitor.hpp"
 #include "../RangeTracker.hpp"
-#include "../backends/backendsCommon/test/QuantizeHelper.hpp"
 #include "../../armnnQuantizer/CommandLineProcessor.hpp"
 
 #include <boost/test/unit_test.hpp>
@@ -2294,9 +2296,9 @@ std::vector<uint8_t> SetupQuantize(float value)
     std::vector<float> input({ value, 0.0f, 0.0f, 1.0f });
     const std::vector<float> &inputRef = input;
 
-    auto output = QuantizedVector<uint8_t>(inputInfo.GetQuantizationScale(),
-                                           inputInfo.GetQuantizationOffset(),
-                                           inputRef);
+    auto output = armnnUtils::QuantizedVector<uint8_t>(inputRef,
+                                                       inputInfo.GetQuantizationScale(),
+                                                       inputInfo.GetQuantizationOffset());
 
     return output;
 }
diff --git a/src/armnn/test/TensorHelpers.hpp b/src/armnn/test/TensorHelpers.hpp
index 35e471e..3f85893 100644 (file)
@@ -4,23 +4,21 @@
 //
 #pragma once
 
-#include <armnn/TensorFwd.hpp>
-#include <boost/test/unit_test.hpp>
-#include <boost/multi_array.hpp>
-#include <vector>
-#include <array>
+#include <armnn/Tensor.hpp>
+
+#include <QuantizeHelper.hpp>
 
 #include <boost/assert.hpp>
-#include <boost/test/tools/floating_point_comparison.hpp>
+#include <boost/multi_array.hpp>
+#include <boost/numeric/conversion/cast.hpp>
 #include <boost/random/uniform_real_distribution.hpp>
 #include <boost/random/mersenne_twister.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
-#include <armnn/Tensor.hpp>
-
-#include <backendsCommon/test/QuantizeHelper.hpp>
+#include <boost/test/tools/floating_point_comparison.hpp>
+#include <boost/test/unit_test.hpp>
 
+#include <array>
 #include <cmath>
+#include <vector>
 
 constexpr float g_FloatCloseToZeroTolerance = 1.0e-6f;
 
@@ -235,7 +233,9 @@ boost::multi_array<T, n> MakeRandomTensor(const armnn::TensorInfo& tensorInfo,
     {
         init[i] = dist(gen);
     }
-    float qScale = tensorInfo.GetQuantizationScale();
-    int32_t qOffset = tensorInfo.GetQuantizationOffset();
-    return MakeTensor<T, n>(tensorInfo, QuantizedVector<T>(qScale, qOffset, init));
+
+    const float   qScale  = tensorInfo.GetQuantizationScale();
+    const int32_t qOffset = tensorInfo.GetQuantizationOffset();
+
+    return MakeTensor<T, n>(tensorInfo, armnnUtils::QuantizedVector<T>(init, qScale, qOffset));
 }
diff --git a/src/armnnDeserializer/test/DeserializeComparison.cpp b/src/armnnDeserializer/test/DeserializeComparison.cpp
index 9a2fabf..6616398 100644 (file)
@@ -6,10 +6,9 @@
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include "../Deserializer.hpp"
 
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
-
 #include <boost/test/unit_test.hpp>
 
 #include <string>
@@ -32,8 +31,8 @@ BOOST_FIXTURE_TEST_CASE(operation##dataType, Simple##operation##dataType##Fixtur
     constexpr int32_t qOffset = 0; \
     RunTest<4, armnn::DataType::dataType, armnn::DataType::Boolean>( \
         0, \
-        {{ "InputLayer0", QuantizedVector<T>(qScale, qOffset, s_TestData.m_InputData0)  }, \
-         { "InputLayer1", QuantizedVector<T>(qScale, qOffset, s_TestData.m_InputData1)  }}, \
+        {{ "InputLayer0", armnnUtils::QuantizedVector<T>(s_TestData.m_InputData0, qScale, qOffset)  }, \
+         { "InputLayer1", armnnUtils::QuantizedVector<T>(s_TestData.m_InputData1, qScale, qOffset)  }}, \
         {{ "OutputLayer", s_TestData.m_Output##operation }}); \
 }
 
diff --git a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
index 638238d..1ec87f9 100644 (file)
@@ -12,6 +12,8 @@
 #include "ParserPrototxtFixture.hpp"
 #include "ParserHelper.hpp"
 
+#include <QuantizeHelper.hpp>
+
 BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
 
 struct DetectionPostProcessFixture : ParserFlatbuffersFixture
@@ -200,8 +202,9 @@ BOOST_FIXTURE_TEST_CASE( ParseDetectionPostProcess, ParseDetectionPostProcessCus
 
     // Quantize inputs and outputs
     using QuantizedContainer = std::vector<uint8_t>;
-    QuantizedContainer quantBoxEncodings = QuantizedVector<uint8_t>(1.0f, 1, boxEncodings);
-    QuantizedContainer quantScores = QuantizedVector<uint8_t>(0.01f, 0, scores);
+
+    QuantizedContainer quantBoxEncodings = armnnUtils::QuantizedVector<uint8_t>(boxEncodings, 1.00f, 1);
+    QuantizedContainer quantScores       = armnnUtils::QuantizedVector<uint8_t>(scores,       0.01f, 0);
 
     std::map<std::string, QuantizedContainer> input =
     {
diff --git a/src/backends/backendsCommon/test/QuantizeHelper.hpp b/src/armnnUtils/QuantizeHelper.hpp
similarity index 81%
rename from src/backends/backendsCommon/test/QuantizeHelper.hpp
rename to src/armnnUtils/QuantizeHelper.hpp
index b7ca3b3..a7f68c5 100644 (file)
@@ -17,6 +17,9 @@
 #include <boost/core/ignore_unused.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
+namespace armnnUtils
+{
+
 template<typename T, bool DoQuantize=true>
 struct SelectiveQuantizer
 {
@@ -84,7 +87,7 @@ struct IsFloatingPointIterator
 template <typename T, typename FloatIt,
 typename std::enable_if<IsFloatingPointIterator<FloatIt>::value, int>::type=0 // Makes sure fp iterator is valid.
 >
-std::vector<T> QuantizedVector(float qScale, int32_t qOffset, FloatIt first, FloatIt last)
+std::vector<T> QuantizedVector(FloatIt first, FloatIt last, float qScale, int32_t qOffset)
 {
     std::vector<T> quantized;
     quantized.reserve(boost::numeric_cast<size_t>(std::distance(first, last)));
@@ -92,7 +95,7 @@ std::vector<T> QuantizedVector(float qScale, int32_t qOffset, FloatIt first, Flo
     for (auto it = first; it != last; ++it)
     {
         auto f = *it;
-        T q =SelectiveQuantize<T>(f, qScale, qOffset);
+        T q = SelectiveQuantize<T>(f, qScale, qOffset);
         quantized.push_back(q);
     }
 
@@ -100,13 +103,15 @@ std::vector<T> QuantizedVector(float qScale, int32_t qOffset, FloatIt first, Flo
 }
 
 template<typename T>
-std::vector<T> QuantizedVector(float qScale, int32_t qOffset, const std::vector<float>& array)
+std::vector<T> QuantizedVector(const std::vector<float>& array, float qScale = 1.f, int32_t qOffset = 0)
 {
-    return QuantizedVector<T>(qScale, qOffset, array.begin(), array.end());
+    return QuantizedVector<T>(array.begin(), array.end(), qScale, qOffset);
 }
 
 template<typename T>
-std::vector<T> QuantizedVector(float qScale, int32_t qOffset, std::initializer_list<float> array)
+std::vector<T> QuantizedVector(std::initializer_list<float> array, float qScale = 1.f, int32_t qOffset = 0)
 {
-    return QuantizedVector<T>(qScale, qOffset, array.begin(), array.end());
+    return QuantizedVector<T>(array.begin(), array.end(), qScale, qOffset);
 }
+
+} // namespace armnnUtils
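
With qScale and qOffset now defaulted to 1.f and 0, the helper also
reads naturally for non-quantized element types. A minimal sketch,
assuming armnn::Half from <Half.hpp> and that Float16 takes the
non-quantizing path through SelectiveQuantizer:

    #include <Half.hpp>
    #include <QuantizeHelper.hpp>

    #include <vector>

    // No scale/offset arguments needed: with the defaults the values
    // are simply converted to half precision, not quantized.
    std::vector<armnn::Half> halves =
        armnnUtils::QuantizedVector<armnn::Half>({ 1.0f, 2.0f, 3.0f });
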
diff --git a/src/armnnUtils/test/QuantizeHelperTest.cpp b/src/armnnUtils/test/QuantizeHelperTest.cpp
new file mode 100644 (file)
index 0000000..7e781d0
--- /dev/null
+++ b/src/armnnUtils/test/QuantizeHelperTest.cpp
@@ -0,0 +1,46 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <QuantizeHelper.hpp>
+
+#include <boost/core/ignore_unused.hpp>
+#include <boost/test/unit_test.hpp>
+
+#include <vector>
+
+BOOST_AUTO_TEST_SUITE(QuantizeHelper)
+
+namespace
+{
+
+template<typename T>
+bool IsFloatIterFunc(T iter)
+{
+    boost::ignore_unused(iter);
+    return armnnUtils::IsFloatingPointIterator<T>::value;
+}
+
+} // anonymous namespace
+
+BOOST_AUTO_TEST_CASE(IsFloatIterFuncTest)
+{
+    std::vector<float> fArray;
+    BOOST_TEST(IsFloatIterFunc(fArray.begin()) == true);
+    BOOST_TEST(IsFloatIterFunc(fArray.cbegin()) == true);
+
+    std::vector<double> dArray;
+    BOOST_TEST(IsFloatIterFunc(dArray.begin()) == true);
+
+    std::vector<int> iArray;
+    BOOST_TEST(IsFloatIterFunc(iArray.begin()) == false);
+
+    float floats[5];
+    BOOST_TEST(IsFloatIterFunc(&floats[0]) == true);
+
+    int ints[5];
+    BOOST_TEST(IsFloatIterFunc(&ints[0]) == false);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
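
For reference, the IsFloatingPointIterator trait exercised by this test
is, as far as the moved header shows, a thin wrapper over
std::iterator_traits; a sketch of an equivalent definition:

    #include <iterator>
    #include <type_traits>

    // True when the iterator's value_type is float, double, or long double.
    template <typename It>
    struct IsFloatingPointIterator
    {
        static constexpr bool value = std::is_floating_point<
            typename std::iterator_traits<It>::value_type>::value;
    };
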
diff --git a/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp
index c46376b..dd851e3 100644 (file)
@@ -7,6 +7,7 @@
 
 #include "CommonTestUtils.hpp"
 
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -53,8 +54,8 @@ void AbsEndToEnd(const std::vector<armnn::BackendId>& backends)
     };
 
     // quantize data
-    std::vector<T> qInputData          = QuantizedVector<T>(qScale, qOffset, inputData);
-    std::vector<T> qExpectedOutputData = QuantizedVector<T>(qScale, qOffset, expectedOutputData);
+    std::vector<T> qInputData          = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+    std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
 
     INetworkPtr network = CreateAbsNetwork(tensorInfo);
 
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 8a96318..f310ef7 100644 (file)
@@ -36,7 +36,6 @@ list(APPEND armnnBackendsCommonUnitTests_sources
     OptimizeSubgraphViewTests.cpp
     OptimizationViewsTests.cpp
     PreluEndToEndTestImpl.hpp
-    QuantizeHelper.hpp
     QuantizedLstmEndToEndTestImpl.cpp
     QuantizedLstmEndToEndTestImpl.hpp
     ResizeEndToEndTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp
index cf4db1d..fd0b12f 100644 (file)
@@ -9,8 +9,9 @@
 
 #include <armnn/ArmNN.hpp>
 
+#include <QuantizeHelper.hpp>
+
 #include <backendsCommon/test/DataLayoutUtils.hpp>
-#include <backendsCommon/test/QuantizeHelper.hpp>
 
 namespace
 {
@@ -58,8 +59,8 @@ void DepthToSpaceEndToEndImpl(const std::vector<armnn::BackendId>& backends,
         outputInfo.SetQuantizationOffset(qOffset);
     }
 
-    std::vector<T> inputData          = QuantizedVector<T>(qScale, qOffset, floatInputData);
-    std::vector<T> expectedOutputData = QuantizedVector<T>(qScale, qOffset, floatExpectedOutputData);
+    std::vector<T> inputData          = armnnUtils::QuantizedVector<T>(floatInputData, qScale, qOffset);
+    std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(floatExpectedOutputData, qScale, qOffset);
 
     // Permute tensors from NHWC to NCHW (if needed)
     if (descriptor.m_DataLayout == DataLayout::NCHW)
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index ee9d2bc..d6f589f 100644 (file)
@@ -4,13 +4,12 @@
 //
 #pragma once
 
-#include <ResolveType.hpp>
-
 #include <armnn/ArmNN.hpp>
 #include <armnn/INetwork.hpp>
-#include <Profiling.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
+#include <Profiling.hpp>
+#include <QuantizeHelper.hpp>
+#include <ResolveType.hpp>
 
 #include <boost/test/unit_test.hpp>
 
@@ -99,9 +98,9 @@ inline bool ConstantUsageUint8Test(const std::vector<BackendId>& backends)
 
     return ConstantUsageTest(backends,
         commonTensorInfo,
-        QuantizedVector<uint8_t>(scale, offset, { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f }), // Input.
-        QuantizedVector<uint8_t>(scale, offset, { 6.f, 5.f, 4.f, 3.f, 2.f, 1.f }), // Const input.
-        QuantizedVector<uint8_t>(scale, offset, { 7.f, 7.f, 7.f, 7.f, 7.f, 7.f })  // Expected output.
+        armnnUtils::QuantizedVector<uint8_t>({ 1.f, 2.f, 3.f, 4.f, 5.f, 6.f }, scale, offset), // Input.
+        armnnUtils::QuantizedVector<uint8_t>({ 6.f, 5.f, 4.f, 3.f, 2.f, 1.f }, scale, offset), // Const input.
+        armnnUtils::QuantizedVector<uint8_t>({ 7.f, 7.f, 7.f, 7.f, 7.f, 7.f }, scale, offset)  // Expected output.
     );
 }
 
diff --git a/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp
index 4bf9d51..1eeb944 100644 (file)
@@ -9,6 +9,7 @@
 #include <armnn/ArmNN.hpp>
 
 #include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <backendsCommon/test/CommonTestUtils.hpp>
@@ -119,8 +120,8 @@ void ResizeEndToEnd(const std::vector<armnn::BackendId>& backends,
     }
 
     // quantize data
-    std::vector<T> qInputData          = QuantizedVector<T>(qScale, qOffset, inputData);
-    std::vector<T> qExpectedOutputData = QuantizedVector<T>(qScale, qOffset, expectedOutputData);
+    std::vector<T> qInputData          = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+    std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
 
     INetworkPtr network = CreateResizeNetwork(descriptor, inputInfo, outputInfo);
 
@@ -144,4 +145,4 @@ void ResizeNearestNeighborEndToEnd(const std::vector<armnn::BackendId>& backends
                                    armnn::DataLayout dataLayout)
 {
     ResizeEndToEnd<ArmnnType>(backends, dataLayout, armnn::ResizeMethod::NearestNeighbor);
-}
\ No newline at end of file
+}
diff --git a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
index 9d6312e..4935a18 100644 (file)
@@ -9,6 +9,7 @@
 #include <armnn/ArmNN.hpp>
 
 #include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <backendsCommon/test/CommonTestUtils.hpp>
@@ -129,12 +130,12 @@ void TransposeConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backend
     }
 
     // quantize data
-    std::vector<T> qInputData          = QuantizedVector<T>(qScale, qOffset, inputData);
-    std::vector<T> qWeightsData        = QuantizedVector<T>(qScale, qOffset, weightsData);
-    std::vector<T> qExpectedOutputData = QuantizedVector<T>(qScale, qOffset, expectedOutputData);
+    std::vector<T> qInputData          = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+    std::vector<T> qWeightsData        = armnnUtils::QuantizedVector<T>(weightsData, qScale, qOffset);
+    std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
 
     using BT = ResolveType<ArmnnBType>;
-    std::vector<BT> qBiasesData  = QuantizedVector<BT>(qScale * qScale, 0, biasesData);
+    std::vector<BT> qBiasesData = armnnUtils::QuantizedVector<BT>(biasesData, qScale * qScale, 0);
 
     ConstTensor weights(weightsInfo, qWeightsData);
     ConstTensor biases(biasesInfo, qBiasesData);
@@ -150,4 +151,4 @@ void TransposeConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backend
                                                 { { 0, qInputData } },
                                                 { { 0, qExpectedOutputData } },
                                                 backends);
-}
\ No newline at end of file
+}
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
index 075c29d..a45c6d5 100644 (file)
@@ -5,12 +5,12 @@
 
 #include "ActivationTestImpl.hpp"
 
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
 
 #include <backendsCommon/test/ActivationFixture.hpp>
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -424,7 +424,7 @@ LayerTestResult<T, 4> SimpleActivationTest(
 
     LayerTestResult<T, 4> result(inputTensorInfo);
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(scale, offset, inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, scale, offset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -451,8 +451,8 @@ LayerTestResult<T, 4> SimpleActivationTest(
     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
 
     // Calculated manually.
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(outScale, outOffset,
-                                                                                  outputExpectedData));
+    result.outputExpected =
+        MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(outputExpectedData, outScale, outOffset));
 
     return result;
 }
@@ -812,7 +812,7 @@ LayerTestResult<float, 5> SqrtNNTest(
 
     LayerTestResult<float, 5> result(inputTensorInfo);
 
-    auto input = MakeTensor<float, 5>(inputTensorInfo, QuantizedVector<float>(0.f, 0.f, inputData));
+    auto input = MakeTensor<float, 5>(inputTensorInfo, inputData);
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -836,8 +836,7 @@ LayerTestResult<float, 5> SqrtNNTest(
     CopyDataFromITensorHandle(&result.output[0][0][0][0][0], outputHandle.get());
 
     // Calculated manually.
-    result.outputExpected = MakeTensor<float, 5>(outputTensorInfo, QuantizedVector<float>(0.f, 0.f,
-                                                                                  outputExpectedData));
+    result.outputExpected = MakeTensor<float, 5>(outputTensorInfo, outputExpectedData);
 
     return result;
 };
diff --git a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
index c6d3982..247821b 100644 (file)
@@ -7,6 +7,8 @@
 
 #include "ElementwiseTestImpl.hpp"
 
+#include <QuantizeHelper.hpp>
+
 template<>
 std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::AdditionQueueDescriptor>(
     const armnn::IWorkloadFactory& workloadFactory,
@@ -177,7 +179,7 @@ LayerTestResult<T, 4> AdditionBroadcastTestImpl(
         outputTensorInfo.SetQuantizationOffset(qOffset);
     }
 
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(
     {
         0.0f,
         1.0f,
@@ -187,16 +189,18 @@ LayerTestResult<T, 4> AdditionBroadcastTestImpl(
 
         4.0f,
         5.0f,
-    }));
+    },
+    qScale, qOffset));
 
-    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
+    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(
     {
         0.5f, 1.5f, 2.5f,
         3.5f, 4.5f, 5.5f,
-    }));
+    },
+    qScale, qOffset));
 
     LayerTestResult<T,4> ret(outputTensorInfo);
-    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(
     {
         0.5f, 1.5f, 2.5f,
         4.5f, 5.5f, 6.5f,
@@ -206,7 +210,8 @@ LayerTestResult<T, 4> AdditionBroadcastTestImpl(
 
         4.5f, 5.5f, 6.5f,
         8.5f, 9.5f, 10.5f,
-    }));
+    },
+    qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
     std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
@@ -256,31 +261,34 @@ LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
         outputTensorInfo.SetQuantizationOffset(qOffset);
     }
 
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(
     {
-            0.0f,  1.0f,  2.0f,
-            3.0f,  4.0f,  5.0f,
-            6.0f,  7.0f,  8.0f,
-            9.0f, 10.0f, 11.0f,
+         0.0f,  1.0f,  2.0f,
+         3.0f,  4.0f,  5.0f,
+         6.0f,  7.0f,  8.0f,
+         9.0f, 10.0f, 11.0f,
         12.0f, 13.0f, 14.0f,
         15.0f, 16.0f, 17.0f,
-    }));
+    },
+    qScale, qOffset));
 
-    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
+    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(
     {
         0.5f,
-    }));
+    },
+    qScale, qOffset));
 
     LayerTestResult<T,4> ret(outputTensorInfo);
-    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(
     {
-            0.5f,  1.5f,  2.5f,
-            3.5f,  4.5f,  5.5f,
-            6.5f,  7.5f,  8.5f,
-            9.5f, 10.5f, 11.5f,
+         0.5f,  1.5f,  2.5f,
+         3.5f,  4.5f,  5.5f,
+         6.5f,  7.5f,  8.5f,
+         9.5f, 10.5f, 11.5f,
         12.5f, 13.5f, 14.5f,
         15.5f, 16.5f, 17.5f,
-    }));
+    },
+    qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
     std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
diff --git a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
index ef43088..68cda7c 100644 (file)
@@ -6,6 +6,7 @@
 #include "BatchNormalizationTestImpl.hpp"
 
 #include <DataLayoutIndexed.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -14,7 +15,6 @@
 #include <backendsCommon/IBackendInternal.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -23,6 +23,8 @@
 namespace
 {
 
+using namespace armnnUtils;
+
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<T, 4> BatchNormTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
@@ -53,19 +55,18 @@ LayerTestResult<T, 4> BatchNormTestImpl(
         tensorInfo.SetQuantizationOffset(qOffset);
     }
 
-    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
-                                        QuantizedVector<T>(qScale, qOffset, inputValues));
+    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputValues, qScale, qOffset));
 
     // These values are per-channel of the input.
-    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
-    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4,  9}));
-    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3,  2}));
-    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2,  1}));
+    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, -2 }, qScale, qOffset));
+    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 4,  9 }, qScale, qOffset));
+    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3,  2 }, qScale, qOffset));
+    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2,  1 }, qScale, qOffset));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
 
     result.outputExpected = MakeTensor<T, 4>(inputTensorInfo,
-                                             QuantizedVector<T>(qScale, qOffset, expectedOutputValues));
+                                             QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -134,17 +135,18 @@ LayerTestResult<T,4> BatchNormTestNhwcImpl(
     }
 
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset,
+        QuantizedVector<T>(
         {
             1.f, 1.f, 4.f, 1.f,
             4.f, 4.f, 2.f, 1.f,
             1.f, -2.f, 6.f, 4.f
-        }));
+        },
+        qScale, qOffset));
     // These values are per-channel of the input.
-    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
-    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4, 9}));
-    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, 2}));
-    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2, 1}));
+    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, -2 }, qScale, qOffset));
+    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 4,  9 }, qScale, qOffset));
+    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3,  2 }, qScale, qOffset));
+    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2,  1 }, qScale, qOffset));
     LayerTestResult<T,4> ret(outputTensorInfo);
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
@@ -175,12 +177,13 @@ LayerTestResult<T,4> BatchNormTestNhwcImpl(
     // substract mean, divide by standard deviation (with an epsilon to avoid div by 0),
     // multiply by gamma and add beta
     ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset,
+        QuantizedVector<T>(
         {
             1.f, 3.f, 4.f, 3.f,
             4.f, 4.f, 2.f, 3.f,
             1.f, 2.f, 6.f, 4.f
-        }));
+        },
+        qScale, qOffset));
 
     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
 
diff --git a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
index 9da1d42..1c54b85 100644 (file)
@@ -8,12 +8,12 @@
 #include <armnn/ArmNN.hpp>
 
 #include <Half.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <backendsCommon/Workload.hpp>
 #include <backendsCommon/WorkloadData.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -155,8 +155,8 @@ LayerTestResult<uint8_t, 4> ComparisonTestImpl(armnn::IWorkloadFactory& workload
 {
     using T = armnn::ResolveType<ArmnnInType>;
 
-    std::vector<T> inputData0 = QuantizedVector<T>(quantScale, quantOffset, testData.m_InputData0);
-    std::vector<T> inputData1 = QuantizedVector<T>(quantScale, quantOffset, testData.m_InputData1);
+    std::vector<T> inputData0 = armnnUtils::QuantizedVector<T>(testData.m_InputData0, quantScale, quantOffset);
+    std::vector<T> inputData1 = armnnUtils::QuantizedVector<T>(testData.m_InputData1, quantScale, quantOffset);
 
     return ComparisonTestImpl<4, ArmnnInType>(
         workloadFactory,
diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
index 29476e5..e9932c8 100644 (file)
@@ -6,6 +6,7 @@
 #include "ConcatTestImpl.hpp"
 
 #include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
 
 #include <test/TensorHelpers.hpp>
 
+using namespace armnn;
+using namespace armnnUtils;
+
 //
 // Helper functions and templates
 //
 
-armnn::OriginsDescriptor CreateDescriptorForConcat(
-    const std::vector<armnn::TensorInfo> & inputTensorInfos,
+OriginsDescriptor CreateDescriptorForConcat(
+    const std::vector<TensorInfo> & inputTensorInfos,
     unsigned int concatDim)
 {
-    std::vector<armnn::TensorShape> shapes;
+    std::vector<TensorShape> shapes;
     shapes.reserve(inputTensorInfos.size());
-    for (const armnn::TensorInfo& it: inputTensorInfos)
+    for (const TensorInfo& it: inputTensorInfos)
     {
         shapes.push_back(it.GetShape());
     }
 
-    return armnn::CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), concatDim);
+    return CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), concatDim);
 }
 
 //
@@ -40,7 +44,7 @@ armnn::OriginsDescriptor CreateDescriptorForConcat(
 //
 
 bool NeedPermuteForConcat(
-    const std::vector<armnn::TensorInfo> & inputTensorInfos,
+    const std::vector<TensorInfo> & inputTensorInfos,
     unsigned int concatDim)
 {
     // See note above. Additionally we expect the input shapes to have the
@@ -65,7 +69,7 @@ bool NeedPermuteForConcat(
     return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
 }
 
-armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
+TensorShape ExpandTensorShapeTo3dForPermute(const TensorShape & inputShape)
 {
     unsigned int numDims = inputShape.GetNumDimensions();
     if (numDims >= 3)
@@ -80,13 +84,13 @@ armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & in
     {
         newDims[expandedBy+i] = inputShape[i];
     }
-    return armnn::TensorShape(3u, &newDims[0]);
+    return TensorShape(3u, &newDims[0]);
 }
 
 void Generate3dPermuteVectorForConcat(
     unsigned int numDimensions,
     unsigned int & concatDim,
-    std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
+    std::pair<PermutationVector, PermutationVector> & permutations)
 {
     BOOST_ASSERT_MSG(numDimensions <= 3,
        "Only dimensions 1,2 and 3 are supported by this helper");
@@ -96,15 +100,15 @@ void Generate3dPermuteVectorForConcat(
     if (expandedConcatAxis == 2)
     {
         concatDim = 0;
-        armnn::PermutationVector forwardPermutation({1, 2, 0});
-        armnn::PermutationVector reversePermutation({2, 0, 1});
+        PermutationVector forwardPermutation({1, 2, 0});
+        PermutationVector reversePermutation({2, 0, 1});
         permutations = std::make_pair(forwardPermutation, reversePermutation);
     }
     else if (expandedConcatAxis == 1)
     {
         concatDim = 0;
-        armnn::PermutationVector forwardPermutation({2, 0, 1});
-        armnn::PermutationVector reversePermutation({1, 2, 0});
+        PermutationVector forwardPermutation({2, 0, 1});
+        PermutationVector reversePermutation({1, 2, 0});
         permutations = std::make_pair(forwardPermutation, reversePermutation);
     }
     else
@@ -115,10 +119,10 @@ void Generate3dPermuteVectorForConcat(
 }
 
 template<typename T> void PermuteTensorData(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::PermutationVector& mappings,
-    armnn::TensorInfo & inputTensorInfo,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const PermutationVector& mappings,
+    TensorInfo & inputTensorInfo,
     const T * inputData,
     std::vector<T>& outputData)
 {
@@ -131,18 +135,18 @@ template<typename T> void PermuteTensorData(
         return;
     }
 
-    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
+    TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
-    armnn::PermuteQueueDescriptor queueDescriptor;
-    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
-    armnn::WorkloadInfo workloadInfo;
+    PermuteQueueDescriptor queueDescriptor;
+    queueDescriptor.m_Parameters = PermuteDescriptor{mappings};
+    WorkloadInfo workloadInfo;
     AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -164,23 +168,23 @@ template<typename T> void PermuteTensorData(
 // of the permuted concatenated tensor is going to be.
 //
 template<typename T> void PermuteInputsForConcat(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    std::vector<armnn::TensorInfo> & inputTensorInfos,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    std::vector<TensorInfo> & inputTensorInfos,
     std::vector<T *> & inputData,
     std::vector<std::vector<T>> & inputDataStorage,
-    armnn::PermutationVector & permuteVector,
+    PermutationVector & permuteVector,
     unsigned int & concatDim,
-    armnn::TensorInfo & outputTensorInfo)
+    TensorInfo & outputTensorInfo)
 {
     BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
         "Expecting more than one tensor to be concatenated here");
 
     unsigned int numDims = 0;
     unsigned int nthInput = 0;
-    const armnn::PermutationVector identity({0, 1, 2});
+    const PermutationVector identity({0, 1, 2});
 
-    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
+    std::pair<PermutationVector, PermutationVector> permutations =
         std::make_pair(identity, identity);
 
     inputDataStorage.resize(inputData.size());
@@ -203,7 +207,7 @@ template<typename T> void PermuteInputsForConcat(
                 "All inputs must have the same number of dimensions");
         }
 
-        armnn::TensorInfo newTensorInfo = tensorInfo;
+        TensorInfo newTensorInfo = tensorInfo;
         newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));
 
         PermuteTensorData<T>(workloadFactory,
@@ -231,11 +235,11 @@ template<typename T> void PermuteInputsForConcat(
 // output.
 //
 template <typename T> void PermuteOutputForConcat(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::TensorInfo & tensorInfo,
-    const armnn::PermutationVector & permuteVector,
-    std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const TensorInfo & tensorInfo,
+    const PermutationVector & permuteVector,
+    std::unique_ptr<ITensorHandle> && inputDataHandle,
     T * data)
 {
     BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
@@ -247,7 +251,7 @@ template <typename T> void PermuteOutputForConcat(
         return;
     }
 
-    armnn::TensorInfo resultTensorInfo = tensorInfo;
+    TensorInfo resultTensorInfo = tensorInfo;
     std::vector<T> inputData(tensorInfo.GetNumElements());
     std::vector<T> outputData;
 
@@ -264,11 +268,11 @@ template <typename T> void PermuteOutputForConcat(
 }
 
 template<typename T> void Concatenate(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    std::initializer_list<const TensorInfo> inputTensorInfosOrig,
     std::initializer_list<T *> inputsOrig,
-    const armnn::TensorInfo& outputTensorInfoOrig,
+    const TensorInfo& outputTensorInfoOrig,
     T * output,
     unsigned int concatDim,
     bool useSubtensor)
@@ -283,11 +287,11 @@ template<typename T> void Concatenate(
     }
 
     // Saves a copy of the parameters which we might need to change.
-    std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
+    std::vector<TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
     std::vector<T *> inputs            = inputsOrig;
-    armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
+    TensorInfo outputTensorInfo = outputTensorInfoOrig;
 
-    armnn::PermutationVector permuteVector{0, 1, 2};
+    PermutationVector permuteVector{0, 1, 2};
 
     // Holds and automatically releases memory for the reshaped input data.
     std::vector<std::vector<T>> tmpInputDataStorage;
@@ -312,15 +316,15 @@ template<typename T> void Concatenate(
                                   outputTensorInfo);
     }
 
-    armnn::WorkloadInfo workloadInfo;
+    WorkloadInfo workloadInfo;
 
-    std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
+    std::vector<std::unique_ptr<ITensorHandle>> inputHandles;
     inputHandles.reserve(inputCount);
 
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
-    armnn::ConcatQueueDescriptor queueDescriptor;
-    armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcat(inputTensorInfos, concatDim);
+    ConcatQueueDescriptor queueDescriptor;
+    OriginsDescriptor viewsDescriptor = CreateDescriptorForConcat(inputTensorInfos, concatDim);
     queueDescriptor.m_Parameters = viewsDescriptor;
 
     if (useSubtensor)
@@ -337,8 +341,8 @@ template<typename T> void Concatenate(
         const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
         for (unsigned int i = 0; i < inputCount; ++i)
         {
-            const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
-            std::unique_ptr<armnn::ITensorHandle> inputHandle =
+            const TensorInfo& inputTensorInfo = inputTensorInfos[i];
+            std::unique_ptr<ITensorHandle> inputHandle =
                 subTensorsSupported ?
                     workloadFactory.CreateSubTensorHandle(*outputHandle,
                                                           inputTensorInfo.GetShape(),
@@ -353,7 +357,7 @@ template<typename T> void Concatenate(
     {
         for (unsigned int i = 0; i < inputCount; ++i)
         {
-            std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
+            std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
             inputHandles.emplace_back(std::move(inputHandle));
         }
     }
@@ -365,7 +369,7 @@ template<typename T> void Concatenate(
 
     AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
 
     for (auto& inputHandle : inputHandles)
     {
@@ -403,20 +407,20 @@ template<typename T> void Concatenate(
 // Implementation templates
 //
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 1> Concat1dTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
+    TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
 
-    auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
-    auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
-    auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
+    auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 1.0f, 2.0f, 3.0f }, qScale, qOffset));
+    auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 4.0f, 5.0f, 6.0f }, qScale, qOffset));
+    auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 7.0f, 8.0f, 9.0f }, qScale, qOffset));
 
-    armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 1> result(outputTensorInfo);
 
@@ -430,48 +434,56 @@ LayerTestResult<T, 1> Concat1dTestImpl(
                    0,
                    true);
 
-    result.output = MakeTensor<T, 1>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
-    }));
+    result.output         = MakeTensor<T, 1>(outputTensorInfo, output);
+    result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(
+        {
+            1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 2> Concat2dTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::TensorInfo& outputTensorInfo,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const TensorInfo& outputTensorInfo,
     unsigned int dimension,
     const float qScale,
     const int32_t qOffset)
 {
-    armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
+    TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
 
-    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        1.0f, 2.0f, 3.0f,
+    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0
+            1.0f, 2.0f, 3.0f,
 
-        // Batch 1
-        10.0f, 11.0f, 12.0f,
-    }));
+            // Batch 1
+            10.0f, 11.0f, 12.0f,
+        },
+        qScale, qOffset));
 
-    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        4.0f, 5.0f, 6.0f,
+    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
+         {
+            // Batch 0
+            4.0f, 5.0f, 6.0f,
 
-        // Batch 1
-        13.0f, 14.0f, 15.0f,
-    }));
+            // Batch 1
+            13.0f, 14.0f, 15.0f,
+        },
+        qScale, qOffset));
 
-    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        7.0f, 8.0f, 9.0f,
+    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0
+            7.0f, 8.0f, 9.0f,
 
-        // Batch 1
-        16.0f, 17.0f, 18.0f,
-    }));
+            // Batch 1
+            16.0f, 17.0f, 18.0f,
+        },
+        qScale, qOffset));
 
     LayerTestResult<T, 2> result(outputTensorInfo);
 
@@ -489,99 +501,109 @@ LayerTestResult<T, 2> Concat2dTestImpl(
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 2> Concat2dDim0TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 2> result = Concat2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
 
-    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        1.0f, 2.0f, 3.0f,
+    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0
+            1.0f, 2.0f, 3.0f,
 
-        // Batch 1
-        10.0f, 11.0f, 12.0f,
+            // Batch 1
+            10.0f, 11.0f, 12.0f,
 
-        // Batch 2
-        4.0f, 5.0f, 6.0f,
+            // Batch 2
+            4.0f, 5.0f, 6.0f,
 
-        // Batch 3
-        13.0f, 14.0f, 15.0f,
+            // Batch 3
+            13.0f, 14.0f, 15.0f,
 
-        // Batch 4
-        7.0f, 8.0f, 9.0f,
+            // Batch 4
+            7.0f, 8.0f, 9.0f,
 
-        // Batch 5
-        16.0f, 17.0f, 18.0f,
-    }));
+            // Batch 5
+            16.0f, 17.0f, 18.0f,
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 2> Concat2dDim1TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 2> result = Concat2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
 
-    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
+    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0
+            1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
 
-        // Batch 1
-        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
-    }));
+            // Batch 1
+            10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 2> Concat2dDim0DiffInputDimsTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
-    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        1.0f, 2.0f, 3.0f,
+    TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
+    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0
+            1.0f, 2.0f, 3.0f,
 
-        // Batch 1
-        10.0f, 11.0f, 12.0f,
-    }));
+            // Batch 1
+            10.0f, 11.0f, 12.0f,
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
-    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        4.0f, 5.0f, 6.0f,
+    TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
+    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0
+            4.0f, 5.0f, 6.0f,
 
-        // Batch 1
-        13.0f, 14.0f, 15.0f,
+            // Batch 1
+            13.0f, 14.0f, 15.0f,
 
-        // Batch 0
-        7.0f, 8.0f, 9.0f,
-    }));
+            // Batch 0
+            7.0f, 8.0f, 9.0f,
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
-    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 1
-        16.0f, 17.0f, 18.0f,
-    }));
+    TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
+    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 1
+            16.0f, 17.0f, 18.0f,
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
     LayerTestResult<T, 2> result(outputTensorInfo);
 
     std::vector<T> output;
@@ -595,64 +617,72 @@ LayerTestResult<T, 2> Concat2dDim0DiffInputDimsTestImpl(
                    true);
 
     result.output = MakeTensor<T, 2>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        1.0f, 2.0f, 3.0f,
+    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0
+            1.0f, 2.0f, 3.0f,
 
-        // Batch 1
-        10.0f, 11.0f, 12.0f,
+            // Batch 1
+            10.0f, 11.0f, 12.0f,
 
-        // Batch 2
-        4.0f, 5.0f, 6.0f,
+            // Batch 2
+            4.0f, 5.0f, 6.0f,
 
-        // Batch 3
-        13.0f, 14.0f, 15.0f,
+            // Batch 3
+            13.0f, 14.0f, 15.0f,
 
-        // Batch 4
-        7.0f, 8.0f, 9.0f,
+            // Batch 4
+            7.0f, 8.0f, 9.0f,
 
-        // Batch 5
-        16.0f, 17.0f, 18.0f,
-    }));
+            // Batch 5
+            16.0f, 17.0f, 18.0f,
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 2> Concat2dDim1DiffInputDimsTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
-    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        1.0f, 2.0f, 3.0f,
+    TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
+    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0
+            1.0f, 2.0f, 3.0f,
 
-        // Batch 1
-        10.0f, 11.0f, 12.0f,
-    }));
+            // Batch 1
+            10.0f, 11.0f, 12.0f,
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
-    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
+    TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
+    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0
+            4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
 
-        // Batch 1
-        13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
-    }));
+            // Batch 1
+            13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
-    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        9.0f,
+    TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
+    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0
+            9.0f,
 
-        // Batch 1
-        18.0f
-    }));
+            // Batch 1
+            18.0f
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
     LayerTestResult<T, 2> result(outputTensorInfo);
 
     std::vector<T> output;
@@ -666,88 +696,96 @@ LayerTestResult<T, 2> Concat2dDim1DiffInputDimsTestImpl(
                    true);
 
     result.output = MakeTensor<T, 2>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
+    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0
+            1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
 
-        // Batch 1
-        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
-    }));
+            // Batch 1
+            10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
+        },
+        qScale, qOffset));
 
     return result;
 }
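
The expected data in Concat2dDim1DiffInputDimsTestImpl is each batch row of the three inputs laid end to end in input order. A minimal reference computation, assuming row-major 2-D tensors (hypothetical helper, not part of the test suite):

    // Hypothetical reference for a dim-1 concat of row-major 2-D tensors
    // with equal batch count but differing widths.
    #include <cstddef>
    #include <vector>

    std::vector<float> ConcatDim1(const std::vector<std::vector<float>>& inputs,
                                  const std::vector<std::size_t>& widths,
                                  std::size_t numBatches)
    {
        std::vector<float> output;
        for (std::size_t b = 0; b < numBatches; ++b)
        {
            for (std::size_t i = 0; i < inputs.size(); ++i)
            {
                // Append row b of input i.
                const std::size_t w = widths[i];
                output.insert(output.end(),
                              inputs[i].begin() + b * w,
                              inputs[i].begin() + (b + 1) * w);
            }
        }
        return output;
    }

With widths { 3, 5, 1 } and two batches this reproduces the { 2, 9 } expected tensor above: 1..9 for batch 0 and 10..18 for batch 1.
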
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 3> Concat3dTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::TensorInfo& outputTensorInfo,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const TensorInfo& outputTensorInfo,
     unsigned int dimension,
     bool useSubtensor,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
+    TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
 
-    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f,
+    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            1.0f, 2.0f,
 
-        // Batch 0, Channel 1
-        3.0f, 4.0f,
+            // Batch 0, Channel 1
+            3.0f, 4.0f,
 
-        // Batch 0, Channel 2
-        5.0f, 6.0f,
+            // Batch 0, Channel 2
+            5.0f, 6.0f,
 
-        // Batch 1, Channel 0
-        19.0f, 20.0f,
+            // Batch 1, Channel 0
+            19.0f, 20.0f,
 
-        // Batch 1, Channel 1
-        21.0f, 22.0f,
+            // Batch 1, Channel 1
+            21.0f, 22.0f,
 
-        // Batch 1, Channel 2
-        23.0f, 24.0f
-    }));
+            // Batch 1, Channel 2
+            23.0f, 24.0f
+        },
+        qScale, qOffset));
 
-    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        7.0f, 8.0f,
+    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            7.0f, 8.0f,
 
-        // Batch 0, Channel 1
-        9.0f, 10.0f,
+            // Batch 0, Channel 1
+            9.0f, 10.0f,
 
-        // Batch 0, Channel 2
-        11.0f, 12.0f,
+            // Batch 0, Channel 2
+            11.0f, 12.0f,
 
-        // Batch 1, Channel 0
-        25.0f, 26.0f,
+            // Batch 1, Channel 0
+            25.0f, 26.0f,
 
-        // Batch 1, Channel 1
-        27.0f, 28.0f,
+            // Batch 1, Channel 1
+            27.0f, 28.0f,
 
-        // Batch 1, Channel 2
-        29.0f, 30.0f
-    }));
+            // Batch 1, Channel 2
+            29.0f, 30.0f
+        },
+        qScale, qOffset));
 
-    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        13.0f, 14.0f,
+    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            13.0f, 14.0f,
 
-        // Batch 0, Channel 1
-        15.0f, 16.0f,
+            // Batch 0, Channel 1
+            15.0f, 16.0f,
 
-        // Batch 0, Channel 2
-        17.0f, 18.0f,
+            // Batch 0, Channel 2
+            17.0f, 18.0f,
 
-        // Batch 1, Channel 0
-        31.0f, 32.0f,
+            // Batch 1, Channel 0
+            31.0f, 32.0f,
 
-        // Batch 1, Channel 1
-        33.0f, 34.0f,
+            // Batch 1, Channel 1
+            33.0f, 34.0f,
 
-        // Batch 1, Channel 2
-        35.0f, 36.0f
-    }));
+            // Batch 1, Channel 2
+            35.0f, 36.0f
+        },
+        qScale, qOffset));
 
     LayerTestResult<T, 3> result(outputTensorInfo);
 
@@ -765,193 +803,200 @@ LayerTestResult<T, 3> Concat3dTestImpl(
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 3> Concat3dDim0TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
 
-    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f,
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            1.0f, 2.0f,
 
-        // Batch 0, Channel 1
-        3.0f, 4.0f,
+            // Batch 0, Channel 1
+            3.0f, 4.0f,
 
-        // Batch 0, Channel 2
-        5.0f, 6.0f,
+            // Batch 0, Channel 2
+            5.0f, 6.0f,
 
-        // Batch 1, Channel 0
-        19.0f, 20.0f,
+            // Batch 1, Channel 0
+            19.0f, 20.0f,
 
-        // Batch 1, Channel 1
-        21.0f, 22.0f,
+            // Batch 1, Channel 1
+            21.0f, 22.0f,
 
-        // Batch 1, Channel 2
-        23.0f, 24.0f,
+            // Batch 1, Channel 2
+            23.0f, 24.0f,
 
-        // Batch 2, Channel 0
-        7.0f, 8.0f,
+            // Batch 2, Channel 0
+            7.0f, 8.0f,
 
-        // Batch 2, Channel 1
-        9.0f, 10.0f,
+            // Batch 2, Channel 1
+            9.0f, 10.0f,
 
-        // Batch 2, Channel 2
-        11.0f, 12.0f,
+            // Batch 2, Channel 2
+            11.0f, 12.0f,
 
-        // Batch 3, Channel 0
-        25.0f, 26.0f,
+            // Batch 3, Channel 0
+            25.0f, 26.0f,
 
-        // Batch 3, Channel 1
-        27.0f, 28.0f,
+            // Batch 3, Channel 1
+            27.0f, 28.0f,
 
-        // Batch 3, Channel 2
-        29.0f, 30.0f,
+            // Batch 3, Channel 2
+            29.0f, 30.0f,
 
-        // Batch 4, Channel 0
-        13.0f, 14.0f,
+            // Batch 4, Channel 0
+            13.0f, 14.0f,
 
-        // Batch 4, Channel 1
-        15.0f, 16.0f,
+            // Batch 4, Channel 1
+            15.0f, 16.0f,
 
-        // Batch 4, Channel 2
-        17.0f, 18.0f,
+            // Batch 4, Channel 2
+            17.0f, 18.0f,
 
-        // Batch 5, Channel 0
-        31.0f, 32.0f,
+            // Batch 5, Channel 0
+            31.0f, 32.0f,
 
-        // Batch 5, Channel 1
-        33.0f, 34.0f,
+            // Batch 5, Channel 1
+            33.0f, 34.0f,
 
-        // Batch 5, Channel 2
-        35.0f, 36.0f
-    }));
+            // Batch 5, Channel 2
+            35.0f, 36.0f
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 3> Concat3dDim1TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
 
-    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f,
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            1.0f, 2.0f,
 
-        // Batch 0, Channel 1
-        3.0f, 4.0f,
+            // Batch 0, Channel 1
+            3.0f, 4.0f,
 
-        // Batch 0, Channel 2
-        5.0f, 6.0f,
+            // Batch 0, Channel 2
+            5.0f, 6.0f,
 
-        // Batch 0, Channel 3
-        7.0f, 8.0f,
+            // Batch 0, Channel 3
+            7.0f, 8.0f,
 
-        // Batch 0, Channel 4
-        9.0f, 10.0f,
+            // Batch 0, Channel 4
+            9.0f, 10.0f,
 
-        // Batch 0, Channel 5
-        11.0f, 12.0f,
+            // Batch 0, Channel 5
+            11.0f, 12.0f,
 
-        // Batch 0, Channel 6
-        13.0f, 14.0f,
+            // Batch 0, Channel 6
+            13.0f, 14.0f,
 
-        // Batch 0, Channel 7
-        15.0f, 16.0f,
+            // Batch 0, Channel 7
+            15.0f, 16.0f,
 
-        // Batch 0, Channel 8
-        17.0f, 18.0f,
+            // Batch 0, Channel 8
+            17.0f, 18.0f,
 
-        // Batch 1, Channel 0
-        19.0f, 20.0f,
+            // Batch 1, Channel 0
+            19.0f, 20.0f,
 
-        // Batch 1, Channel 1
-        21.0f, 22.0f,
+            // Batch 1, Channel 1
+            21.0f, 22.0f,
 
-        // Batch 1, Channel 2
-        23.0f, 24.0f,
+            // Batch 1, Channel 2
+            23.0f, 24.0f,
 
-        // Batch 1, Channel 3
-        25.0f, 26.0f,
+            // Batch 1, Channel 3
+            25.0f, 26.0f,
 
-        // Batch 1, Channel 4
-        27.0f, 28.0f,
+            // Batch 1, Channel 4
+            27.0f, 28.0f,
 
-        // Batch 1, Channel 5
-        29.0f, 30.0f,
+            // Batch 1, Channel 5
+            29.0f, 30.0f,
 
-        // Batch 1, Channel 6
-        31.0f, 32.0f,
+            // Batch 1, Channel 6
+            31.0f, 32.0f,
 
-        // Batch 1, Channel 7
-        33.0f, 34.0f,
+            // Batch 1, Channel 7
+            33.0f, 34.0f,
 
-        // Batch 1, Channel 8
-        35.0f, 36.0f
-    }));
+            // Batch 1, Channel 8
+            35.0f, 36.0f
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 3> Concat3dDim2TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
 
-    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
 
-        // Batch 0, Channel 1
-        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
+            // Batch 0, Channel 1
+            3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
 
-        // Batch 0, Channel 2
-        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
+            // Batch 0, Channel 2
+            5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
 
-        // Batch 1, Channel 0
-        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
+            // Batch 1, Channel 0
+            19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
 
-        // Batch 1, Channel 1
-        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
+            // Batch 1, Channel 1
+            21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
 
-        // Batch 1, Channel 2
-        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
-    }));
+            // Batch 1, Channel 2
+            23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
-    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+    TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
+    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
+        {
             // Batch 0, Channel 0
             1.0f, 2.0f,
 
@@ -969,10 +1014,12 @@ LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl(
 
             // Batch 1, Channel 2
             23.0f, 24.0f
-    }));
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
-    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+    TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
+    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
+        {
             // Batch 0, Channel 0
             7.0f, 8.0f,
 
@@ -981,10 +1028,12 @@ LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl(
 
             // Batch 0, Channel 2
             11.0f, 12.0f,
-    }));
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
-    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+    TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
+    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
+        {
             // Batch 0, Channel 0
             25.0f, 26.0f,
 
@@ -1011,9 +1060,10 @@ LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl(
 
             // Batch 2, Channel 2
             35.0f, 36.0f
-    }));
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
+    TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
     LayerTestResult<T, 3> result(outputTensorInfo);
 
     std::vector<T> output;
@@ -1027,130 +1077,138 @@ LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl(
                    true);
 
     result.output = MakeTensor<T, 3>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f,
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            1.0f, 2.0f,
 
-        // Batch 0, Channel 1
-        3.0f, 4.0f,
+            // Batch 0, Channel 1
+            3.0f, 4.0f,
 
-        // Batch 0, Channel 2
-        5.0f, 6.0f,
+            // Batch 0, Channel 2
+            5.0f, 6.0f,
 
-        // Batch 1, Channel 0
-        19.0f, 20.0f,
+            // Batch 1, Channel 0
+            19.0f, 20.0f,
 
-        // Batch 1, Channel 1
-        21.0f, 22.0f,
+            // Batch 1, Channel 1
+            21.0f, 22.0f,
 
-        // Batch 1, Channel 2
-        23.0f, 24.0f,
+            // Batch 1, Channel 2
+            23.0f, 24.0f,
 
-        // Batch 2, Channel 0
-        7.0f, 8.0f,
+            // Batch 2, Channel 0
+            7.0f, 8.0f,
 
-        // Batch 2, Channel 1
-        9.0f, 10.0f,
+            // Batch 2, Channel 1
+            9.0f, 10.0f,
 
-        // Batch 2, Channel 2
-        11.0f, 12.0f,
+            // Batch 2, Channel 2
+            11.0f, 12.0f,
 
-        // Batch 3, Channel 0
-        25.0f, 26.0f,
+            // Batch 3, Channel 0
+            25.0f, 26.0f,
 
-        // Batch 3, Channel 1
-        27.0f, 28.0f,
+            // Batch 3, Channel 1
+            27.0f, 28.0f,
 
-        // Batch 3, Channel 2
-        29.0f, 30.0f,
+            // Batch 3, Channel 2
+            29.0f, 30.0f,
 
-        // Batch 4, Channel 0
-        13.0f, 14.0f,
+            // Batch 4, Channel 0
+            13.0f, 14.0f,
 
-        // Batch 4, Channel 1
-        15.0f, 16.0f,
+            // Batch 4, Channel 1
+            15.0f, 16.0f,
 
-        // Batch 4, Channel 2
-        17.0f, 18.0f,
+            // Batch 4, Channel 2
+            17.0f, 18.0f,
 
-        // Batch 5, Channel 0
-        31.0f, 32.0f,
+            // Batch 5, Channel 0
+            31.0f, 32.0f,
 
-        // Batch 5, Channel 1
-        33.0f, 34.0f,
+            // Batch 5, Channel 1
+            33.0f, 34.0f,
 
-        // Batch 5, Channel 2
-        35.0f, 36.0f
-    }));
+            // Batch 5, Channel 2
+            35.0f, 36.0f
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 3> Concat3dDim1DiffInputDimsTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
-    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f,
+    TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
+    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            1.0f, 2.0f,
 
-        // Batch 0, Channel 1
-        3.0f, 4.0f,
+            // Batch 0, Channel 1
+            3.0f, 4.0f,
 
-        // Batch 0, Channel 2
-        5.0f, 6.0f,
+            // Batch 0, Channel 2
+            5.0f, 6.0f,
 
-        // Batch 1, Channel 0
-        19.0f, 20.0f,
+            // Batch 1, Channel 0
+            19.0f, 20.0f,
 
-        // Batch 1, Channel 1
-        21.0f, 22.0f,
+            // Batch 1, Channel 1
+            21.0f, 22.0f,
 
-        // Batch 1, Channel 2
-        23.0f, 24.0f
-    }));
+            // Batch 1, Channel 2
+            23.0f, 24.0f
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
-    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        7.0f, 8.0f,
+    TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
+    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            7.0f, 8.0f,
 
-        // Batch 0, Channel 1
-        9.0f, 10.0f,
+            // Batch 0, Channel 1
+            9.0f, 10.0f,
 
-        // Batch 0, Channel 2
-        11.0f, 12.0f,
+            // Batch 0, Channel 2
+            11.0f, 12.0f,
 
-        // Batch 0, Channel 3
-        25.0f, 26.0f,
+            // Batch 0, Channel 3
+            25.0f, 26.0f,
 
-        // Batch 1, Channel 0
-        27.0f, 28.0f,
+            // Batch 1, Channel 0
+            27.0f, 28.0f,
 
-        // Batch 1, Channel 1
-        29.0f, 30.0f,
+            // Batch 1, Channel 1
+            29.0f, 30.0f,
 
-        // Batch 1, Channel 2
-        13.0f, 14.0f,
+            // Batch 1, Channel 2
+            13.0f, 14.0f,
 
-        // Batch 1, Channel 3
-        15.0f, 16.0f,
-    }));
+            // Batch 1, Channel 3
+            15.0f, 16.0f,
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
-    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        17.0f, 18.0f,
+    TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
+    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            17.0f, 18.0f,
 
-        // Batch 1, Channel 0
-        31.0f, 32.0f,
-    }));
+            // Batch 1, Channel 0
+            31.0f, 32.0f,
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
     LayerTestResult<T, 3> result(outputTensorInfo);
 
     std::vector<T> output;
@@ -1164,131 +1222,139 @@ LayerTestResult<T, 3> Concat3dDim1DiffInputDimsTestImpl(
                    true);
 
     result.output = MakeTensor<T, 3>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f,
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            1.0f, 2.0f,
 
-        // Batch 0, Channel 1
-        3.0f, 4.0f,
+            // Batch 0, Channel 1
+            3.0f, 4.0f,
 
-        // Batch 0, Channel 2
-        5.0f, 6.0f,
+            // Batch 0, Channel 2
+            5.0f, 6.0f,
 
-        // Batch 0, Channel 3
-        7.0f, 8.0f,
+            // Batch 0, Channel 3
+            7.0f, 8.0f,
 
-        // Batch 0, Channel 4
-        9.0f, 10.0f,
+            // Batch 0, Channel 4
+            9.0f, 10.0f,
 
-        // Batch 0, Channel 5
-        11.0f, 12.0f,
+            // Batch 0, Channel 5
+            11.0f, 12.0f,
 
-        // Batch 0, Channel 6
-        25.0f, 26.0f,
+            // Batch 0, Channel 6
+            25.0f, 26.0f,
 
-        // Batch 0, Channel 7
-        17.0f, 18.0f,
+            // Batch 0, Channel 7
+            17.0f, 18.0f,
 
-        // Batch 1, Channel 0
-        19.0f, 20.0f,
+            // Batch 1, Channel 0
+            19.0f, 20.0f,
 
-        // Batch 1, Channel 1
-        21.0f, 22.0f,
+            // Batch 1, Channel 1
+            21.0f, 22.0f,
 
-        // Batch 1, Channel 2
-        23.0f, 24.0f,
+            // Batch 1, Channel 2
+            23.0f, 24.0f,
 
-        // Batch 1, Channel 3
-        27.0f, 28.0f,
+            // Batch 1, Channel 3
+            27.0f, 28.0f,
 
-        // Batch 1, Channel 4
-        29.0f, 30.0f,
+            // Batch 1, Channel 4
+            29.0f, 30.0f,
 
-        // Batch 1, Channel 5
-        13.0f, 14.0f,
+            // Batch 1, Channel 5
+            13.0f, 14.0f,
 
-        // Batch 1, Channel 6
-        15.0f, 16.0f,
+            // Batch 1, Channel 6
+            15.0f, 16.0f,
 
-        // Batch 1, Channel 7
-        31.0f, 32.0f,
-    }));
+            // Batch 1, Channel 7
+            31.0f, 32.0f,
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 3> Concat3dDim2DiffInputDimsTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
-    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f,
+    TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
+    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            1.0f, 2.0f,
 
-        // Batch 0, Channel 1
-        3.0f, 4.0f,
+            // Batch 0, Channel 1
+            3.0f, 4.0f,
 
-        // Batch 0, Channel 2
-        5.0f, 6.0f,
+            // Batch 0, Channel 2
+            5.0f, 6.0f,
 
-        // Batch 1, Channel 0
-        19.0f, 20.0f,
+            // Batch 1, Channel 0
+            19.0f, 20.0f,
 
-        // Batch 1, Channel 1
-        21.0f, 22.0f,
+            // Batch 1, Channel 1
+            21.0f, 22.0f,
 
-        // Batch 1, Channel 2
-        23.0f, 24.0f
-    }));
+            // Batch 1, Channel 2
+            23.0f, 24.0f
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
-    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        7.0f,
+    TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
+    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            7.0f,
 
-        // Batch 0, Channel 1
-        9.0f,
+            // Batch 0, Channel 1
+            9.0f,
 
-        // Batch 0, Channel 2
-        11.0f,
+            // Batch 0, Channel 2
+            11.0f,
 
-        // Batch 1, Channel 0
-        25.0f,
+            // Batch 1, Channel 0
+            25.0f,
 
-        // Batch 1, Channel 1
-        27.0f,
+            // Batch 1, Channel 1
+            27.0f,
 
-        // Batch 1, Channel 2
-        29.0f
-    }));
+            // Batch 1, Channel 2
+            29.0f
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
-    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        13.0f, 14.0f, 50.0f,
+    TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
+    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            13.0f, 14.0f, 50.0f,
 
-        // Batch 0, Channel 1
-        15.0f, 16.0f, 51.0f,
+            // Batch 0, Channel 1
+            15.0f, 16.0f, 51.0f,
 
-        // Batch 0, Channel 2
-        17.0f, 18.0f, 52.0f,
+            // Batch 0, Channel 2
+            17.0f, 18.0f, 52.0f,
 
-        // Batch 1, Channel 0
-        31.0f, 32.0f, 53.0f,
+            // Batch 1, Channel 0
+            31.0f, 32.0f, 53.0f,
 
-        // Batch 1, Channel 1
-        33.0f, 34.0f, 54.0f,
+            // Batch 1, Channel 1
+            33.0f, 34.0f, 54.0f,
 
-        // Batch 1, Channel 2
-        35.0f, 36.0f, 55.0f,
-    }));
+            // Batch 1, Channel 2
+            35.0f, 36.0f, 55.0f,
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
     LayerTestResult<T, 3> result(outputTensorInfo);
 
     std::vector<T> output;
@@ -1302,67 +1368,75 @@ LayerTestResult<T, 3> Concat3dDim2DiffInputDimsTestImpl(
                    useSubtensor);
 
     result.output = MakeTensor<T, 3>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
 
-        // Batch 0, Channel 1
-        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
+            // Batch 0, Channel 1
+            3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
 
-        // Batch 0, Channel 2
-        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
+            // Batch 0, Channel 2
+            5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
 
-        // Batch 1, Channel 0
-        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
+            // Batch 1, Channel 0
+            19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
 
-        // Batch 1, Channel 1
-        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
+            // Batch 1, Channel 1
+            21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
 
-        // Batch 1, Channel 2
-        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
-    }));
+            // Batch 1, Channel 2
+            23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 4> Concat4dTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::TensorInfo& outputTensorInfo,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const TensorInfo& outputTensorInfo,
     unsigned int dimension,
     bool useSubtensor,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
+    TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
 
-    auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f
-    }));
-
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f
-    }));
+    auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+             3.0f,  4.0f,
+             5.0f,  6.0f,
+             7.0f,  8.0f,
+             9.0f, 10.0f,
+            11.0f, 12.0f
+        },
+        qScale, qOffset));
+
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
+        {
+            11.0f, 12.0f,
+            13.0f, 14.0f,
+            15.0f, 16.0f,
+            17.0f, 18.0f,
+            19.0f, 20.0f,
+            21.0f, 22.0f
+        },
+        qScale, qOffset));
 
-    auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        21.0f, 22.0f,
-        23.0f, 24.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f,
-        29.0f, 30.0f,
-        31.0f, 32.0f
-    }));
+    auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
+        {
+            21.0f, 22.0f,
+            23.0f, 24.0f,
+            25.0f, 26.0f,
+            27.0f, 28.0f,
+            29.0f, 30.0f,
+            31.0f, 32.0f
+        },
+        qScale, qOffset));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
 
@@ -1382,197 +1456,209 @@ LayerTestResult<T, 4> Concat4dTestImpl(
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 4> Concat4dDim0TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
 
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f,
-
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f,
-
-        21.0f, 22.0f,
-        23.0f, 24.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f,
-        29.0f, 30.0f,
-        31.0f, 32.0f
-    }));
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+             3.0f,  4.0f,
+             5.0f,  6.0f,
+             7.0f,  8.0f,
+             9.0f, 10.0f,
+            11.0f, 12.0f,
+
+            11.0f, 12.0f,
+            13.0f, 14.0f,
+            15.0f, 16.0f,
+            17.0f, 18.0f,
+            19.0f, 20.0f,
+            21.0f, 22.0f,
+
+            21.0f, 22.0f,
+            23.0f, 24.0f,
+            25.0f, 26.0f,
+            27.0f, 28.0f,
+            29.0f, 30.0f,
+            31.0f, 32.0f
+        },
+        qScale, qOffset));
+
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 4> Concat4dDim1TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
 
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f,
-
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f,
-
-        21.0f, 22.0f,
-        23.0f, 24.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f,
-        29.0f, 30.0f,
-        31.0f, 32.0f
-    }));
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+             3.0f,  4.0f,
+             5.0f,  6.0f,
+             7.0f,  8.0f,
+             9.0f, 10.0f,
+            11.0f, 12.0f,
+
+            11.0f, 12.0f,
+            13.0f, 14.0f,
+            15.0f, 16.0f,
+            17.0f, 18.0f,
+            19.0f, 20.0f,
+            21.0f, 22.0f,
+
+            21.0f, 22.0f,
+            23.0f, 24.0f,
+            25.0f, 26.0f,
+            27.0f, 28.0f,
+            29.0f, 30.0f,
+            31.0f, 32.0f
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 4> Concat4dDim2TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
 
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        21.0f, 22.0f,
-        23.0f, 24.0f,
-
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f,
-
-        9.0f, 10.0f,
-        11.0f, 12.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f,
-        29.0f, 30.0f,
-        31.0f, 32.0f
-    }));
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+             3.0f,  4.0f,
+            11.0f, 12.0f,
+            13.0f, 14.0f,
+            21.0f, 22.0f,
+            23.0f, 24.0f,
+
+             5.0f,  6.0f,
+             7.0f,  8.0f,
+            15.0f, 16.0f,
+            17.0f, 18.0f,
+            25.0f, 26.0f,
+            27.0f, 28.0f,
+
+             9.0f, 10.0f,
+            11.0f, 12.0f,
+            19.0f, 20.0f,
+            21.0f, 22.0f,
+            29.0f, 30.0f,
+            31.0f, 32.0f
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 4> Concat4dDim3TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset,
     bool useSubtensor)
 {
-    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
 
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        11.0f, 12.0f,
-        21.0f, 22.0f,
-        3.0f, 4.0f,
-        13.0f, 14.0f,
-        23.0f, 24.0f,
-
-        5.0f, 6.0f,
-        15.0f, 16.0f,
-        25.0f, 26.0f,
-        7.0f, 8.0f,
-        17.0f, 18.0f,
-        27.0f, 28.0f,
-
-        9.0f, 10.0f,
-        19.0f, 20.0f,
-        29.0f, 30.0f,
-        11.0f, 12.0f,
-        21.0f, 22.0f,
-        31.0f, 32.0f
-    }));
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+            11.0f, 12.0f,
+            21.0f, 22.0f,
+             3.0f,  4.0f,
+            13.0f, 14.0f,
+            23.0f, 24.0f,
+
+             5.0f,  6.0f,
+            15.0f, 16.0f,
+            25.0f, 26.0f,
+             7.0f,  8.0f,
+            17.0f, 18.0f,
+            27.0f, 28.0f,
+
+             9.0f, 10.0f,
+            19.0f, 20.0f,
+            29.0f, 30.0f,
+            11.0f, 12.0f,
+            21.0f, 22.0f,
+            31.0f, 32.0f
+        },
+        qScale, qOffset));
 
     return result;
 }
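
All of the Concat3d*/Concat4d* expectations above follow one rule: concatenation along axis d copies, for every index over the dimensions before d, one contiguous run per input of extent(d) * (product of dims after d) elements. A generic sketch of that reference layout (illustrative only, not the armnn kernel):

    // Illustrative reference only: concat along axis d via outer/inner runs.
    #include <cstddef>
    #include <vector>

    std::vector<float> ConcatAlongAxis(
        const std::vector<std::vector<float>>& inputs,
        const std::vector<std::vector<std::size_t>>& shapes, // equal except axis d
        std::size_t d)
    {
        std::size_t outer = 1;     // product of dimensions before d
        for (std::size_t k = 0; k < d; ++k) { outer *= shapes[0][k]; }

        std::size_t innerTail = 1; // product of dimensions after d
        for (std::size_t k = d + 1; k < shapes[0].size(); ++k) { innerTail *= shapes[0][k]; }

        std::vector<float> output;
        for (std::size_t o = 0; o < outer; ++o)
        {
            for (std::size_t i = 0; i < inputs.size(); ++i)
            {
                // Input i contributes one run of extent(d) * innerTail elements.
                const std::size_t run = shapes[i][d] * innerTail;
                output.insert(output.end(),
                              inputs[i].begin() + o * run,
                              inputs[i].begin() + (o + 1) * run);
            }
        }
        return output;
    }

For Concat4dDim3TestImpl, d = 3 gives outer = 6 and runs of 2 elements, which is exactly the pairwise interleaving in the expected data above.
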
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 4> Concat4dDiffShapeDim0TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    unsigned int dimension = 0;
-    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
+    constexpr unsigned int dimension = 0u;
 
-    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f
-    }));
-
-    armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);
-
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f,
-
-        21.0f, 22.0f,
-        23.0f, 24.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f,
-        29.0f, 30.0f,
-        31.0f, 32.0f
+    TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
+    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+             3.0f,  4.0f,
+             5.0f,  6.0f,
+             7.0f,  8.0f,
+             9.0f, 10.0f,
+            11.0f, 12.0f
+        },
+        qScale, qOffset));
+
+    TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);
+
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
+        {
+            11.0f, 12.0f,
+            13.0f, 14.0f,
+            15.0f, 16.0f,
+            17.0f, 18.0f,
+            19.0f, 20.0f,
+            21.0f, 22.0f,
 
-    }));
+            21.0f, 22.0f,
+            23.0f, 24.0f,
+            25.0f, 26.0f,
+            27.0f, 28.0f,
+            29.0f, 30.0f,
+            31.0f, 32.0f
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 4> result(outputTensorInfo);
 
@@ -1588,62 +1674,67 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim0TestImpl(
                    true);
 
     result.output = MakeTensor<T, 4>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f,
-
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f,
-
-        21.0f, 22.0f,
-        23.0f, 24.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f,
-        29.0f, 30.0f,
-        31.0f, 32.0f
-    }));
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+             3.0f,  4.0f,
+             5.0f,  6.0f,
+             7.0f,  8.0f,
+             9.0f, 10.0f,
+            11.0f, 12.0f,
+
+            11.0f, 12.0f,
+            13.0f, 14.0f,
+            15.0f, 16.0f,
+            17.0f, 18.0f,
+            19.0f, 20.0f,
+            21.0f, 22.0f,
+
+            21.0f, 22.0f,
+            23.0f, 24.0f,
+            25.0f, 26.0f,
+            27.0f, 28.0f,
+            29.0f, 30.0f,
+            31.0f, 32.0f
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 4> Concat4dDiffShapeDim1TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    unsigned int dimension = 1;
-    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
-
-    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f
-    }));
-
-    armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);
+    constexpr unsigned int dimension = 1u;
 
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
-
-    }));
+    TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
+    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+             3.0f,  4.0f,
+             5.0f,  6.0f,
+             7.0f,  8.0f,
+             9.0f, 10.0f,
+            11.0f, 12.0f
+        },
+        qScale, qOffset));
+
+    TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);
+
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
+        {
+            11.0f, 12.0f,
+            13.0f, 14.0f,
+            15.0f, 16.0f,
+            17.0f, 18.0f,
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 4> result(outputTensorInfo);
 
@@ -1659,57 +1750,61 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim1TestImpl(
                    true);
 
     result.output = MakeTensor<T, 4>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f,
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f
-    }));
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+             3.0f,  4.0f,
+             5.0f,  6.0f,
+             7.0f,  8.0f,
+             9.0f, 10.0f,
+            11.0f, 12.0f,
+            11.0f, 12.0f,
+            13.0f, 14.0f,
+            15.0f, 16.0f,
+            17.0f, 18.0f
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 4> Concat4dDiffShapeDim2TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    unsigned int dimension = 2;
-    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
+    constexpr unsigned int dimension = 2u;
 
-    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f
-    }));
-
-    armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);
-
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f,
-        23.0f, 24.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f
-    }));
+    TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
+    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+             3.0f,  4.0f,
+             5.0f,  6.0f,
+             7.0f,  8.0f,
+             9.0f, 10.0f,
+            11.0f, 12.0f
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);
+    TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
+        {
+            11.0f, 12.0f,
+            13.0f, 14.0f,
+            15.0f, 16.0f,
+            17.0f, 18.0f,
+            19.0f, 20.0f,
+            21.0f, 22.0f,
+            23.0f, 24.0f,
+            25.0f, 26.0f,
+            27.0f, 28.0f
+        },
+        qScale, qOffset));
 
+    TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);
     LayerTestResult<T, 4> result(outputTensorInfo);
 
     std::vector<T> output;
@@ -1723,64 +1818,69 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim2TestImpl(
                    dimension,
                    true);
 
-    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        17.0f, 18.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f,
-
-        9.0f, 10.0f,
-        11.0f, 12.0f,
-        23.0f, 24.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f
-    }));
+    result.output         = MakeTensor<T, 4>(outputTensorInfo, output);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+             3.0f,  4.0f,
+            11.0f, 12.0f,
+            13.0f, 14.0f,
+            15.0f, 16.0f,
+
+             5.0f,  6.0f,
+             7.0f,  8.0f,
+            17.0f, 18.0f,
+            19.0f, 20.0f,
+            21.0f, 22.0f,
+
+             9.0f, 10.0f,
+            11.0f, 12.0f,
+            23.0f, 24.0f,
+            25.0f, 26.0f,
+            27.0f, 28.0f
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 4> Concat4dDiffShapeDim3TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset,
     bool useSubtensor)
 {
-    unsigned int dimension = 3;
-    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
-
-    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f
-    }));
+    constexpr unsigned int dimension = 3u;
 
-    armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);
-
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
-        11.0f, 12.0f, 13.0f,
-        14.0f, 15.0f, 16.0f,
+    TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
+    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+             3.0f,  4.0f,
+             5.0f,  6.0f,
+             7.0f,  8.0f,
+             9.0f, 10.0f,
+            11.0f, 12.0f
+        },
+        qScale, qOffset));
+
+    TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
+        {
+            11.0f, 12.0f, 13.0f,
+            14.0f, 15.0f, 16.0f,
 
-        17.0f, 18.0f, 19.0f,
-        20.0f, 21.0f, 22.0f,
+            17.0f, 18.0f, 19.0f,
+            20.0f, 21.0f, 22.0f,
 
-        23.0f, 24.0f, 25.0f,
-        26.0f, 27.0f, 28.0f
-    }));
+            23.0f, 24.0f, 25.0f,
+            26.0f, 27.0f, 28.0f
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 4> result(outputTensorInfo);
 
@@ -1796,30 +1896,32 @@ LayerTestResult<T, 4> Concat4dDiffShapeDim3TestImpl(
                    useSubtensor);
 
     result.output = MakeTensor<T, 4>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
-        3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
-        5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
-        7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
-        9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
-        11.0f, 12.0f, 26.0f, 27.0f, 28.0f
-    }));
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+        {
+            1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
+            3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
+            5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
+            7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
+            9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
+            11.0f, 12.0f, 26.0f, 27.0f, 28.0f
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T>
+template<DataType ArmnnType, typename T>
 LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
     // Defines the tensor descriptors.
-    armnn::TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
-    armnn::TensorInfo inputTensorInfo1({ 3, 6, 2 }, ArmnnType);
-    armnn::TensorInfo inputTensorInfo2({ 3, 6, 1 }, ArmnnType);
+    TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
+    TensorInfo inputTensorInfo1({ 3, 6, 2 }, ArmnnType);
+    TensorInfo inputTensorInfo2({ 3, 6, 1 }, ArmnnType);
 
-    std::vector<armnn::TensorShape> inputTensorShapes({inputTensorInfo1.GetShape(), inputTensorInfo2.GetShape()});
+    std::vector<TensorShape> inputTensorShapes({inputTensorInfo1.GetShape(), inputTensorInfo2.GetShape()});
 
     // Quantized input1 tensor.
     const float inputScale1 = 0.5f;
@@ -1894,31 +1996,31 @@ LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
     inputTensorInfo2.SetQuantizationOffset(inputOffset2);
 
     std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
-    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
+    ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
 
     std::vector<unsigned int> wOrigin2 = { 0, 0, 2 }; //Extent of the window is defined by size of input[1].
-    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
+    ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
 
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
     bool subTensorsSupported = useSubtensor && workloadFactory.SupportsSubTensors();
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
+    std::unique_ptr<ITensorHandle> inputHandle1 =
             subTensorsSupported ?
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo1);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
+    std::unique_ptr<ITensorHandle> inputHandle2 =
             subTensorsSupported ?
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo2);
 
-    armnn::ConcatQueueDescriptor data;
-    armnn::OriginsDescriptor desc = armnn::CreateDescriptorForConcatenation(
+    ConcatQueueDescriptor data;
+    OriginsDescriptor desc = CreateDescriptorForConcatenation(
             inputTensorShapes.begin(),inputTensorShapes.end(), 2);
     data.m_Parameters = desc;
 
-    armnn::WorkloadInfo info;
+    WorkloadInfo info;
     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
@@ -1926,7 +2028,7 @@ LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
     data.m_ViewOrigins.push_back(window1);
     data.m_ViewOrigins.push_back(window2);
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
 
     inputHandle1->Allocate();
     inputHandle2->Allocate();
@@ -1947,16 +2049,16 @@ LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
 // Explicit template specializations
 //
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 3>
-ConcatDifferentInputOutputQParamTest<armnn::DataType::QuantisedAsymm8>(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+template LayerTestResult<ResolveType<DataType::QuantisedAsymm8>, 3>
+ConcatDifferentInputOutputQParamTest<DataType::QuantisedAsymm8>(
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
-ConcatDifferentInputOutputQParamTest<armnn::DataType::QuantisedSymm16>(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+template LayerTestResult<ResolveType<DataType::QuantisedSymm16>, 3>
+ConcatDifferentInputOutputQParamTest<DataType::QuantisedSymm16>(
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor);
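
The two declarations above are explicit instantiation definitions: they make the compiler emit ConcatDifferentInputOutputQParamTest for both quantized data types in this translation unit, which is what lets the template's body live in this .cpp file rather than a header. A minimal, self-contained illustration of the mechanism (the names below are invented for the example, not Arm NN APIs):

#include <cstdint>
#include <vector>

template <typename T>
std::vector<T> MakeSequence(int n)
{
    std::vector<T> sequence;
    for (int i = 0; i < n; ++i) { sequence.push_back(static_cast<T>(i)); }
    return sequence;
}

// Explicit instantiation definitions, mirroring the pattern above:
// these force the two specializations to be compiled into this file.
template std::vector<float>   MakeSequence<float>(int);
template std::vector<uint8_t> MakeSequence<uint8_t>(int);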
 
 //
@@ -1964,8 +2066,8 @@ ConcatDifferentInputOutputQParamTest<armnn::DataType::QuantisedSymm16>(
 //
 
 LayerTestResult<float,3> ConcatTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
     unsigned int outputWidth = 3;
     unsigned int outputHeight = 6;
@@ -1980,9 +2082,9 @@ LayerTestResult<float,3> ConcatTest(
     unsigned int inputChannels2 = 1;
 
     // Define the tensor descriptors.
-    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
-    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
-    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);
+    TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::Float32);
+    TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::Float32);
+    TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::Float32);
 
     LayerTestResult<float,3> ret(outputTensorInfo);
 
@@ -2041,27 +2143,27 @@ LayerTestResult<float,3> ConcatTest(
     );
 
     std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // Extent of the window is defined by the size of input[0].
-    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
+    ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
 
     std::vector<unsigned int> wOrigin2 = {2, 0, 0}; // Extent of the window is defined by the size of input[1].
-    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
+    ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
 
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
     bool subTensorsSupported = workloadFactory.SupportsSubTensors();
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
+    std::unique_ptr<ITensorHandle> inputHandle1 =
         subTensorsSupported ?
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo1);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle2  =
+    std::unique_ptr<ITensorHandle> inputHandle2  =
         subTensorsSupported ?
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo2);
 
-    armnn::ConcatQueueDescriptor data;
-    armnn::WorkloadInfo info;
+    ConcatQueueDescriptor data;
+    WorkloadInfo info;
     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
@@ -2069,7 +2171,7 @@ LayerTestResult<float,3> ConcatTest(
     data.m_ViewOrigins.push_back(window1);
     data.m_ViewOrigins.push_back(window2);
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
 
     inputHandle1->Allocate();
     inputHandle2->Allocate();
@@ -2087,156 +2189,156 @@ LayerTestResult<float,3> ConcatTest(
 }
 
 LayerTestResult<float, 1> Concat1dTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat1dTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 2> Concat2dDim0Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat2dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 2> Concat2dDim1Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat2dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 2> Concat2dDim0DiffInputDimsTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat2dDim0DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 2> Concat2dDim1DiffInputDimsTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat2dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 3> Concat3dDim0Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat3dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 3> Concat3dDim1Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat3dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 3> Concat3dDim2Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
-    return Concat3dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
+    return Concat3dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
 }
 
 LayerTestResult<float, 3> Concat3dDim0DiffInputDimsTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
+    return Concat3dDim0DiffInputDimsTestImpl<DataType::Float32>(
         workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 3> Concat3dDim1DiffInputDimsTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat3dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 3> Concat3dDim2DiffInputDimsTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
-    return Concat3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
+    return Concat3dDim2DiffInputDimsTestImpl<DataType::Float32>(
         workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
 }
 
 LayerTestResult<float, 4> Concat4dDim0Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat4dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 4> Concat4dDim1Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat4dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 4> Concat4dDim2Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat4dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 4> Concat4dDim3Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
-    return Concat4dDim3TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
+    return Concat4dDim3TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
 }
 
 LayerTestResult<float, 4> Concat4dDiffShapeDim0Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat4dDiffShapeDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 4> Concat4dDiffShapeDim1Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
+    return Concat4dDiffShapeDim1TestImpl<DataType::Float32>(
         workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 4> Concat4dDiffShapeDim2Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat4dDiffShapeDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 4> Concat4dDiffShapeDim3Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
-    return Concat4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
+    return Concat4dDiffShapeDim3TestImpl<DataType::Float32>(
         workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
 }
 
-LayerTestResult<armnn::Half, 3> ConcatFloat16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+LayerTestResult<Half, 3> ConcatFloat16Test(
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim1TestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat3dDim1TestImpl<DataType::Float16>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
     unsigned int outputWidth = 3;
     unsigned int outputHeight = 6;
@@ -2251,9 +2353,9 @@ LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
     unsigned int inputChannels2 = 1;
 
     // Defines the tensor descriptors.
-    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
-    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
-    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
+    TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedAsymm8);
+    TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedAsymm8);
+    TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedAsymm8);
 
     // Quantized input1 tensor. Range [-3, 1]
     const float inputScale1 = 0.015686f;
@@ -2332,27 +2434,27 @@ LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
     inputTensorInfo2.SetQuantizationOffset(inputOffset2);
 
     std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; // Extent of the window is defined by the size of input[0].
-    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
+    ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
 
     std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; // Extent of the window is defined by the size of input[1].
-    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
+    ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
 
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
     bool subTensorsSupported = workloadFactory.SupportsSubTensors();
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
+    std::unique_ptr<ITensorHandle> inputHandle1 =
             subTensorsSupported ?
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo1);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
+    std::unique_ptr<ITensorHandle> inputHandle2 =
             subTensorsSupported ?
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo2);
 
-    armnn::ConcatQueueDescriptor data;
-    armnn::WorkloadInfo info;
+    ConcatQueueDescriptor data;
+    WorkloadInfo info;
     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
@@ -2360,7 +2462,7 @@ LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
     data.m_ViewOrigins.push_back(window1);
     data.m_ViewOrigins.push_back(window2);
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
 
     inputHandle1->Allocate();
     inputHandle2->Allocate();
@@ -2378,8 +2480,8 @@ LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
 }
 
 LayerTestResult<uint8_t, 3> ConcatUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
     unsigned int outputWidth = 3;
     unsigned int outputHeight = 6;
@@ -2394,9 +2496,9 @@ LayerTestResult<uint8_t, 3> ConcatUint8Test(
     unsigned int inputChannels2 = 1;
 
     // Defines the tensor descriptors.
-    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
-    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
-    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
+    TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedAsymm8);
+    TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedAsymm8);
+    TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedAsymm8);
 
     // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
     const float scale = 0.13497836f;
@@ -2466,29 +2568,29 @@ LayerTestResult<uint8_t, 3> ConcatUint8Test(
     );
 
     std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; // Extent of the window is defined by the size of input[0].
-    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
+    ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
 
     std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; // Extent of the window is defined by the size of input[1].
-    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
+    ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
 
 
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
     bool subTensorsSupported = workloadFactory.SupportsSubTensors();
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
+    std::unique_ptr<ITensorHandle> inputHandle1 =
         subTensorsSupported ?
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo1);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
+    std::unique_ptr<ITensorHandle> inputHandle2 =
         subTensorsSupported ?
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo2);
 
 
-    armnn::ConcatQueueDescriptor data;
-    armnn::WorkloadInfo info;
+    ConcatQueueDescriptor data;
+    WorkloadInfo info;
     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
@@ -2496,7 +2598,7 @@ LayerTestResult<uint8_t, 3> ConcatUint8Test(
     data.m_ViewOrigins.push_back(window1);
     data.m_ViewOrigins.push_back(window2);
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
 
     inputHandle1->Allocate();
     inputHandle2->Allocate();
@@ -2514,8 +2616,8 @@ LayerTestResult<uint8_t, 3> ConcatUint8Test(
 }
 
 LayerTestResult<uint16_t, 3> ConcatUint16Test(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+        IWorkloadFactory& workloadFactory,
+        const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
     unsigned int outputWidth = 3;
     unsigned int outputHeight = 6;
@@ -2530,9 +2632,9 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
     unsigned int inputChannels2 = 1;
 
     // Defines the tensor descriptors.
-    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16);
-    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16);
-    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16);
+    TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedSymm16);
+    TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedSymm16);
+    TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedSymm16);
 
     // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
     const float scale = 0.13497836f;
@@ -2599,29 +2701,29 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
     }));
 
     std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; // Extent of the window is defined by the size of input[0].
-    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
+    ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
 
     std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; // Extent of the window is defined by the size of input[1].
-    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
+    ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
 
 
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
     bool subTensorsSupported = workloadFactory.SupportsSubTensors();
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
+    std::unique_ptr<ITensorHandle> inputHandle1 =
             subTensorsSupported ?
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo1);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
+    std::unique_ptr<ITensorHandle> inputHandle2 =
             subTensorsSupported ?
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo2);
 
 
-    armnn::ConcatQueueDescriptor data;
-    armnn::WorkloadInfo info;
+    ConcatQueueDescriptor data;
+    WorkloadInfo info;
     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
@@ -2629,7 +2731,7 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
     data.m_ViewOrigins.push_back(window1);
     data.m_ViewOrigins.push_back(window2);
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
 
     inputHandle1->Allocate();
     inputHandle2->Allocate();
@@ -2647,147 +2749,147 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
 }
 
 LayerTestResult<uint8_t, 1> Concat1dUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat1dTestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 2> Concat2dDim0Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat2dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 2> Concat2dDim1Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat2dDim1TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 2> Concat2dDim0DiffInputDimsUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return Concat2dDim0DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
         workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 2> Concat2dDim1DiffInputDimsUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return Concat2dDim1DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
         workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 3> Concat3dDim0Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat3dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 3> Concat3dDim1Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat3dDim1TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
-    return Concat3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
+    return Concat3dDim2TestImpl<DataType::QuantisedAsymm8>(
         workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 3> Concat3dDim0DiffInputDimsUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat3dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 3> Concat3dDim1DiffInputDimsUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return Concat3dDim1DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
         workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 3> Concat3dDim2DiffInputDimsUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
-    return Concat3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return Concat3dDim2DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
         workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 4> Concat4dDim0Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat4dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 4> Concat4dDim1Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat4dDim1TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 4> Concat4dDim2Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat4dDim2TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 4> Concat4dDim3Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
 {
-    return Concat4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
+    return Concat4dDim3TestImpl<DataType::QuantisedAsymm8>(
         workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
 }
 
 LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim0Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
+    return Concat4dDiffShapeDim0TestImpl<DataType::QuantisedAsymm8>(
         workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim1Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
+    return Concat4dDiffShapeDim1TestImpl<DataType::QuantisedAsymm8>(
         workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim2Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
+    return Concat4dDiffShapeDim2TestImpl<DataType::QuantisedAsymm8>(
         workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim3Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
-    return Concat4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
+    return Concat4dDiffShapeDim3TestImpl<DataType::QuantisedAsymm8>(
         workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
 }
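
Every wrapper above funnels into the same sub-tensor arrangement used throughout this file: each input is placed inside the output tensor at a view origin, and the view's extent is simply that input's shape. A minimal sketch of just the descriptor setup, using the armnn types already visible in this diff (the shapes and the concatenation axis are illustrative):

#include <armnn/Descriptors.hpp>
#include <backendsCommon/WorkloadData.hpp>

#include <vector>

// Concatenates shapes {2,6,3} and {1,6,3} along dimension 0 into {3,6,3}.
void ConcatDescriptorSketch()
{
    std::vector<armnn::TensorShape> shapes = { armnn::TensorShape({ 2, 6, 3 }),
                                               armnn::TensorShape({ 1, 6, 3 }) };

    // input[1] starts where input[0] ends on the concatenation axis.
    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 };
    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 };

    armnn::ConcatQueueDescriptor data;
    data.m_Parameters = armnn::CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), 0);
    data.m_ViewOrigins.push_back(armnn::ConcatQueueDescriptor::ViewOrigin(wOrigin1));
    data.m_ViewOrigins.push_back(armnn::ConcatQueueDescriptor::ViewOrigin(wOrigin2));
}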
index c3cacd5..3f22c31 100644 (file)
@@ -6,6 +6,7 @@
 #include "ConstantTestImpl.hpp"
 
 #include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -53,43 +54,45 @@ LayerTestResult<T, 4> ConstantTestImpl(
     }
 
     auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        235.0f,  46.0f, 178.0f,
-        100.0f, 123.0f,  19.0f,
-        172.0f,  74.0f, 250.0f,
-          6.0f, 195.0f,  80.0f,
-
-        // Batch 0, Channel 1
-        113.0f,  95.0f, 202.0f,
-         77.0f, 114.0f,  71.0f,
-        122.0f, 246.0f, 166.0f,
-         82.0f,  28.0f,  37.0f,
-
-        // Batch 0, Channel 2
-         56.0f, 170.0f, 162.0f,
-        194.0f,  89.0f, 254.0f,
-         12.0f, 209.0f, 200.0f,
-          1.0f,  64.0f,  54.0f,
-
-        // Batch 1, Channel 0
-         67.0f,  90.0f,  49.0f,
-          7.0f, 163.0f,  18.0f,
-         25.0f, 117.0f, 103.0f,
-        247.0f,  59.0f, 189.0f,
-
-        // Batch 1, Channel 1
-        239.0f, 104.0f, 199.0f,
-         17.0f, 124.0f, 153.0f,
-        222.0f, 217.0f, 75.0f,
-         32.0f, 126.0f, 21.0f,
-
-        // Batch 1, Channel 2
-         97.0f, 145.0f, 215.0f,
-        115.0f, 116.0f, 238.0f,
-        226.0f,  16.0f, 132.0f,
-         92.0f, 125.0f,  88.0f,
-    })));
+        armnnUtils::QuantizedVector<T>(
+            {
+                // Batch 0, Channel 0
+                235.0f,  46.0f, 178.0f,
+                100.0f, 123.0f,  19.0f,
+                172.0f,  74.0f, 250.0f,
+                  6.0f, 195.0f,  80.0f,
+
+                // Batch 0, Channel 1
+                113.0f,  95.0f, 202.0f,
+                 77.0f, 114.0f,  71.0f,
+                122.0f, 246.0f, 166.0f,
+                 82.0f,  28.0f,  37.0f,
+
+                // Batch 0, Channel 2
+                 56.0f, 170.0f, 162.0f,
+                194.0f,  89.0f, 254.0f,
+                 12.0f, 209.0f, 200.0f,
+                  1.0f,  64.0f,  54.0f,
+
+                // Batch 1, Channel 0
+                 67.0f,  90.0f,  49.0f,
+                  7.0f, 163.0f,  18.0f,
+                 25.0f, 117.0f, 103.0f,
+                247.0f,  59.0f, 189.0f,
+
+                // Batch 1, Channel 1
+                239.0f, 104.0f, 199.0f,
+                 17.0f, 124.0f, 153.0f,
+                222.0f, 217.0f,  75.0f,
+                 32.0f, 126.0f,  21.0f,
+
+                // Batch 1, Channel 2
+                 97.0f, 145.0f, 215.0f,
+                115.0f, 116.0f, 238.0f,
+                226.0f,  16.0f, 132.0f,
+                 92.0f, 125.0f,  88.0f,
+            },
+            qScale, qOffset)));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
     result.outputExpected = input;
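
All call sites touched in this file and the next follow the new argument order visible above: the float values come first, qScale and qOffset last. The moved header itself is not part of this hunk, so the following is only a rough sketch of the behaviour those calls rely on — round-to-nearest quantization, with the trailing parameters defaulted (an assumption here) so non-quantized data can omit them:

#include <cmath>
#include <cstdint>
#include <vector>

// Simplified stand-in for armnnUtils::QuantizedVector, matching the call
// sites in this diff. The defaults and the rounding behaviour are
// assumptions for this sketch, not the moved header's exact contents.
template <typename T>
std::vector<T> QuantizedVectorSketch(const std::vector<float>& values,
                                     float qScale = 1.0f,
                                     int32_t qOffset = 0)
{
    std::vector<T> quantized;
    quantized.reserve(values.size());
    for (float value : values)
    {
        quantized.push_back(static_cast<T>(std::round(value / qScale) + qOffset));
    }
    return quantized;
}

With the qScale 0.5f and qOffset -1 used by the Uint8 wrappers earlier in this diff, an input of 2.5f maps to round(2.5 / 0.5) - 1 = 4.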
index 01c1b18..198904e 100644 (file)
@@ -7,13 +7,13 @@
 
 #include <DataLayoutIndexed.hpp>
 #include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 #include <TensorUtils.hpp>
 
 #include <armnn/ArmNN.hpp>
 
 #include <backendsCommon/CpuTensorHandle.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -62,6 +62,8 @@ static std::vector<float> ConvInput3x8x16({
     -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
 });
 
+using namespace armnnUtils;
+
 //
 // Helper templates
 //
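
With the helper now living in the armnnUtils namespace, the using-directive added just above is what lets the many unqualified QuantizedVector calls below keep compiling without edits. A minimal illustration of that name resolution (the stub body is invented for the example and does no real quantization):

#include <vector>

namespace armnnUtils
{
// Stub with the rough shape of the moved helper, for illustration only.
template <typename T>
std::vector<T> QuantizedVector(const std::vector<float>& values, float /*qScale*/ = 1.0f, int /*qOffset*/ = 0)
{
    return std::vector<T>(values.begin(), values.end()); // stub: just copies
}
}

using namespace armnnUtils;

void ResolutionExample()
{
    // Unqualified call resolves to armnnUtils::QuantizedVector.
    auto data = QuantizedVector<float>({ 1.0f, 2.0f }, 0.5f, 0);
    (void)data;
}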
@@ -73,7 +75,7 @@ boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale)
     if(biasEnabled)
     {
         armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
-        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias2));
+        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(Bias2, qScale, 0.0f));
         return bias;
     }
     else
@@ -89,7 +91,7 @@ boost::multi_array<T, 1> GetBias4(bool biasEnabled, float qScale)
     if(biasEnabled)
     {
         armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias4.size())}, ArmnnType);
-        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias4));
+        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(Bias4, qScale, 0.0f));
         return bias;
     }
     else
@@ -105,7 +107,7 @@ boost::multi_array<T, 1> GetBias8(bool biasEnabled, float qScale)
     if(biasEnabled)
     {
         armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias8.size())}, ArmnnType);
-        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias8));
+        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(Bias8, qScale, 0.0f));
         return bias;
     }
     else
@@ -492,35 +494,39 @@ LayerTestResult<T,4> Convolution1dTestImpl(
         biasInfo.SetQuantizationOffset(0);
     }
 
-    std::vector<T> inputData(
-        QuantizedVector<T>(inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(), {
-            5.0f, -2.0f, 2.5f, 0.0f, 1.0f,
-            -3.0f, 3.2f, 5.0f, 2.0f, 3.0f,
-        }));
+    std::vector<T> inputData = QuantizedVector<T>(
+        {
+             5.0f, -2.0f, 2.5f, 0.0f, 1.0f,
+            -3.0f,  3.2f, 5.0f, 2.0f, 3.0f,
+        },
+        inputInfo.GetQuantizationScale(),
+        inputInfo.GetQuantizationOffset());
 
-    std::vector<T> kernelData(
-        QuantizedVector<T>(kernelInfo.GetQuantizationScale(), kernelInfo.GetQuantizationOffset(), {
-            1.0f, 0.0f, 0.0f,
-            0.0f, 2.0f, -1.5f,
+    std::vector<T> kernelData = QuantizedVector<T>(
+        {
+            1.0f,  0.0f,  0.0f,
+            0.0f,  2.0f, -1.5f,
 
-            0.0f, 0.0f, 0.0f,
-            0.2f, 0.2f, 0.2f,
+            0.0f,  0.0f,  0.0f,
+            0.2f,  0.2f,  0.2f,
 
-            0.5f, 0.0f, 0.5f,
-            0.0f, -1.0f, 0.0f
-        }));
+            0.5f,  0.0f,  0.5f,
+            0.0f, -1.0f,  0.0f
+        },
+        kernelInfo.GetQuantizationScale(),
+        kernelInfo.GetQuantizationOffset());
 
-    std::vector<B> biasData(
-        QuantizedVector<B>(biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset(), {
-            1.0f, 0.0f, 0.0f
-        }));
+    std::vector<B> biasData =
+        QuantizedVector<B>({ 1.0f, 0.0f, 0.0f }, biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset());
 
-    std::vector<T> outputData(
-        QuantizedVector<T>(outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), {
-            4.5f, -10.8f, 5.0f + 6.4f - 7.5f, -2.0f + 10.0f -3.0f, 2.5f + 4.0f - 4.5f, 6.0f, 1.0f,
+    std::vector<T> outputData = QuantizedVector<T>(
+        {
+             4.5f, -10.8f, 5.0f + 6.4f - 7.5f, -2.0f + 10.0f - 3.0f, 2.5f + 4.0f - 4.5f, 6.0f, 1.0f,
             -0.6f, -0.6f + 0.64f, -0.6f + 0.64f + 1.0f, 0.64f + 1.0f + 0.4f, 1.0f + 0.4f + 0.6f, 0.4f + 0.6f, 0.6f,
-            2.5f, -1.0f + 3.0f, 1.25f - 3.2f + 2.5f, -1.0f - 5.0f, 1.25f + 0.5f - 2.0f, -3.0f, 0.5f
-        }));
+             2.5f, -1.0f + 3.0f, 1.25f - 3.2f + 2.5f, -1.0f - 5.0f, 1.25f + 0.5f - 2.0f, -3.0f, 0.5f
+        },
+        outputInfo.GetQuantizationScale(),
+        outputInfo.GetQuantizationOffset());
 
     // Optionally apply bias to output image.
     if(biasEnabled)
@@ -698,54 +704,55 @@ LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
 {
     // Use common single-batch 3-channel 16x8 image.
     armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
-    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
+    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(ConvInput3x8x16, qScale, qOffset));
 
     // Use a 2-element batch with 3-channel 3x5 kernels.
     armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
     boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
-            1, 1, 1,
+        QuantizedVector<T>({
+            1,  1, 1,
             1, -1, 1,
-            1, 1, 1,
-            1, 1, 1,
-            1, 1, 1,
-
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0,
-
-            2, 2, 2,
-            2, 2, 2,
-            2, 2, 2,
-            2, 2, 2,
-            2, 2, 2,
-
-
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0,
-
-            1, 1, 1,
-            1, 1, 1,
-            1, 1, 1,
-            1, 1, 1,
-            1, 1, 1,
-
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0
-        })));
+            1,  1, 1,
+            1,  1, 1,
+            1,  1, 1,
+
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0,
+
+            2,  2, 2,
+            2,  2, 2,
+            2,  2, 2,
+            2,  2, 2,
+            2,  2, 2,
+
+
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0,
+
+            1,  1, 1,
+            1,  1, 1,
+            1,  1, 1,
+            1,  1, 1,
+            1,  1, 1,
+
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0
+        },
+        qScale, qOffset)));
 
     // Expected output is 2 batch elements of a 1-channel 14x4 image.
     armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
     boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
             -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
             -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
@@ -757,7 +764,8 @@ LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
             5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-        })));
+        },
+        qScale, qOffset)));
 
     return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
         workloadFactory,
@@ -785,42 +793,43 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
 
     // Use common single-batch 3-channel 16x8 image.
     armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
-    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
+    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(ConvInput3x8x16, qScale, qOffset));
 
     // Use a 2-element batch of 3-channel 3x3 kernels.
     armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
     boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
-            1, 1, 1,
+        QuantizedVector<T>({
+            1,  1, 1,
             1, -1, 1,
-            1, 1, 1,
+            1,  1, 1,
 
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0,
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0,
 
-            2, 2, 2,
-            2, 2, 2,
-            2, 2, 2,
+            2,  2, 2,
+            2,  2, 2,
+            2,  2, 2,
 
 
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0,
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0,
 
-            1, 1, 1,
-            1, 1, 1,
-            1, 1, 1,
+            1,  1, 1,
+            1,  1, 1,
+            1,  1, 1,
 
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0
-        })));
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0
+        },
+        qScale, qOffset)));
 
     // Expected output is 1 batch of a 2-channel 14x6 image.
     armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
     boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
             -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
             -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
@@ -834,7 +843,8 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
             3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-        })));
+        },
+        qScale, qOffset)));
 
     return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
         workloadFactory,
@@ -860,19 +870,21 @@ LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest
     // Use a single-batch 1-channel 3x3 image as input.
     armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
     boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             11,21,31,
             12,22,32,
             13,23,33
-        })));
+        },
+        qScale, qOffset)));
 
     // Use 1 batch of a 1-channel 2x2 kernel.
     armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
     boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             -11,-21,
             -12,-22,
-        })));
+        },
+        qScale, qOffset)));
 
 // Expected output is 1 batch of a 1-channel 6x8 image.
 // Manually calculated like this:
@@ -885,7 +897,7 @@ LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest
 //[..... .....  ..... .....  ; .....  .....  .....  .....  ; .....  .....  .....  .....  ; .....  ..... .....  ..... ..]
     armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
     boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
                0,    0,      0,    0,    0,    0,
             -242,  -594,  -934, -372,    0,    0,
             -495, -1190, -1850, -725,    0,    0,
@@ -894,7 +906,8 @@ LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest
                0,    0,     0,     0,    0,    0,
                0,    0,     0,     0,    0,    0,
                0,    0,     0,     0,    0,    0
-        })));
+        },
+        qScale, qOffset)));
 
     return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
         workloadFactory,
@@ -924,35 +937,37 @@ LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
     // Use a single-batch 1-channel 5x5 image as input.
     armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
     boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             11,21,31,41,51,
             12,22,32,42,52,
             13,23,33,43,53,
             14,24,34,44,54,
             15,25,35,45,55,
-        })));
+        }, qScale, qOffset)));
 
     // Use 1 batch of a 1-channel 4x4 kernel.
     armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
     boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             -11,-21,-31,-41,
             -12,-22,-32,-42,
             -13,-23,-33,-43,
             -14,-24,-34,-44,
-        })));
+        },
+        qScale, qOffset)));
 
     // Expected output is 1 batch of a 1-channel 5x5 image.
     armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
     std::vector<T> myVec(outputDesc.GetNumElements(), 0);
     boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             -7140, -10580, -13940,  -9300, -5230,
             -9590, -14120, -18520, -12290, -6860,
             -9980, -14560, -18960, -12560, -7000,
             -7518, -10904, -14144,  -9318, -5152,
             -5032,  -7256,  -9376,  -6142, -3368,
-        })));
+        },
+        qScale, qOffset)));
 
     return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
         workloadFactory,
@@ -1025,17 +1040,18 @@ LayerTestResult<T, 4> Convolution2d3x3DilationTestCommon(
     outputTensorInfo.SetQuantizationOffset(qOffset);
 
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-                                  std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                    inputTensorInfo.GetQuantizationOffset(),
-                                                                    inputNoQuantizedValues)));
+                                  std::vector<T>(QuantizedVector<T>(inputNoQuantizedValues,
+                                                                    inputTensorInfo.GetQuantizationScale(),
+                                                                    inputTensorInfo.GetQuantizationOffset())));
     auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
-                                  std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
-                                                                    kernelTensorInfo.GetQuantizationOffset(),
-                                                                    kernelNoQuantizedValues)));
-    auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
-                                           std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                                                                             outputTensorInfo.GetQuantizationOffset(),
-                                                                             outputExpectedNoQuantizedValues)));
+                                  std::vector<T>(QuantizedVector<T>(kernelNoQuantizedValues,
+                                                                    kernelTensorInfo.GetQuantizationScale(),
+                                                                    kernelTensorInfo.GetQuantizationOffset())));
+    auto expectedOutput =
+        MakeTensor<T, 4>(outputTensorInfo,
+                         std::vector<T>(QuantizedVector<T>(outputExpectedNoQuantizedValues,
+                                                           outputTensorInfo.GetQuantizationScale(),
+                                                           outputTensorInfo.GetQuantizationOffset())));
 
     return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
             workloadFactory,
@@ -1539,15 +1555,18 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
         biasDesc.SetQuantizationOffset(0);
     }
     std::vector<T> inputData = std::vector<T>(
-            QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
-                    1.f, 2.f, 1.f,
-                    2.f, 1.f, 2.f,
-                    1.f, 2.f, 1.f,
-
-                    1.f, 2.f, 1.f,
-                    2.f, 1.f, 2.f,
-                    1.f, 2.f, 1.f,
-            }));
+            QuantizedVector<T>({
+                1.f, 2.f, 1.f,
+                2.f, 1.f, 2.f,
+                1.f, 2.f, 1.f,
+
+                1.f, 2.f, 1.f,
+                2.f, 1.f, 2.f,
+                1.f, 2.f, 1.f,
+            },
+            inputTensorInfo.GetQuantizationScale(),
+            inputTensorInfo.GetQuantizationOffset()));
+
    // At this point, permute the input data if the NHWC data layout is required.
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
     if (layout == armnn::DataLayout::NHWC)
@@ -1558,27 +1577,32 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
     }
     auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
 
-    std::vector<B> biasV(QuantizedVector<B>(biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
-                                            {0, 2}));
+    std::vector<B> biasV(QuantizedVector<B>({ 0, 2 },
+                                            biasDesc.GetQuantizationScale(),
+                                            biasDesc.GetQuantizationOffset()));
+
     auto bias = MakeTensor<B, 1>(biasDesc, biasV);
 
     std::vector<T> kernelData = std::vector<T>(
-            QuantizedVector<T>(kernelDesc.GetQuantizationScale(), kernelDesc.GetQuantizationOffset(), {
-                    1.f, 0.f,  1.f,
-                    0.f, 0.f,  0.f,
-                    -1.f, 0.f, -1.f,
-
-                    1.f, 0.f,  1.f,
-                    0.f, 0.f,  0.f,
-                    -1.f, 0.f, -1.f,
-            }));
+            QuantizedVector<T>({
+                 1.f, 0.f,  1.f,
+                 0.f, 0.f,  0.f,
+                -1.f, 0.f, -1.f,
+
+                 1.f, 0.f,  1.f,
+                 0.f, 0.f,  0.f,
+                -1.f, 0.f, -1.f,
+            },
+            kernelDesc.GetQuantizationScale(),
+            kernelDesc.GetQuantizationOffset()));
+
     auto kernel = MakeTensor<T, 4>(kernelDesc, kernelData);
 
     // Manually calculated.
     std::vector<T> outputImage(
-        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                           outputTensorInfo.GetQuantizationOffset(),
-                           {0.f, 0.f})
+        QuantizedVector<T>({ 0.f, 0.f },
+                           outputTensorInfo.GetQuantizationScale(),
+                           outputTensorInfo.GetQuantizationOffset())
     );
 
     // Optionally apply bias to output image.
@@ -1686,24 +1710,27 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
 
     // NOTE: originalInputData is in NCHW format
     std::vector<T> originalInputData = std::vector<T>(
-            QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
-                    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
-                    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
-                    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
-                    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
-                    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
-                    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
-                    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
-                    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
-                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-            }));
+            QuantizedVector<T>({
+                0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+                0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+                0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+                0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+                0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+                0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+                0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+                0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+                0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+                0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+                0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+                0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+                0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+                0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+                0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+                0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
+            },
+            inputTensorInfo.GetQuantizationScale(),
+            inputTensorInfo.GetQuantizationOffset()));
+
     std::vector<T> inputData = originalInputData;
     // at this point if we require it permute the input data
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
@@ -1714,70 +1741,76 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
     }
     auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
 
-    std::vector<B> biasV(QuantizedVector<B>(biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
-        {0, 2, 1, -1}));
+    std::vector<B> biasV = QuantizedVector<B>({ 0, 2, 1, -1 },
+                                              biasDesc.GetQuantizationScale(),
+                                              biasDesc.GetQuantizationOffset());
+
     auto bias = MakeTensor<B, 1>(biasDesc, biasV);
 
     std::vector<T> kernelData = std::vector<T>(
-            QuantizedVector<T>(kernelDesc.GetQuantizationScale(), kernelDesc.GetQuantizationOffset(), {
-                    1, 1, 1,
-                    1, -1, 1,
-                    1, 1, 1,
-                    1, 1, 1,
-                    1, 1, 1,
-
-                    2, 2, 2,
-                    2, 2, 2,
-                    2, 2, 2,
-                    2, 2, 2,
-                    2, 2, 2,
-
-                    0, 0, 0,
-                    0, -1, 0,
-                    0, 0, 0,
-                    0, 0, 0,
-                    0, 0, 0,
-
-                    0, 0, 0,
-                    0, 0, 0,
-                    0, 1, 0,
-                    0, 0, 0,
-                    0, 0, 0
-
-            }));
+            QuantizedVector<T>({
+                1,  1, 1,
+                1, -1, 1,
+                1,  1, 1,
+                1,  1, 1,
+                1,  1, 1,
+
+                2,  2, 2,
+                2,  2, 2,
+                2,  2, 2,
+                2,  2, 2,
+                2,  2, 2,
+
+                0,  0, 0,
+                0, -1, 0,
+                0,  0, 0,
+                0,  0, 0,
+                0,  0, 0,
+
+                0,  0, 0,
+                0,  0, 0,
+                0,  1, 0,
+                0,  0, 0,
+                0,  0, 0
+            },
+            kernelDesc.GetQuantizationScale(),
+            kernelDesc.GetQuantizationOffset()));
+
     auto kernel = MakeTensor<T, 4>(kernelDesc, kernelData);
 
     // Manually calculated.
     std::vector<T> originalOutputImage = std::vector<T>(
-        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
-            3.5f,  3.5f,  3.5f,  3.5f,  3.5f,  3.5f,  3.5f,
-            6.0f,  6.0f,  6.0f,  6.0f,  6.0f,  6.0f,  6.0f,
-            5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,
-            6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,
-            6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,
-            5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,
+        QuantizedVector<T>({
+             3.5f,  3.5f,  3.5f,  3.5f,  3.5f,  3.5f,  3.5f,
+             6.0f,  6.0f,  6.0f,  6.0f,  6.0f,  6.0f,  6.0f,
+             5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,
+             6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,
+             6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,
+             5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,
 
             -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
-            0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+             0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
             -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
             -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
             -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
             -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
 
-            8.0f,  8.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+             8.0f,  8.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
             10.0f, 10.0f, 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
             10.0f, 10.0f, 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
             10.0f, 10.0f, 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
             10.0f, 10.0f, 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
-            8.0f,  8.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
-
-            0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
-            0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
-            0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
-            0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
-            0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
-            0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f
-        }));
+             8.0f,  8.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+
+             0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+             0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+             0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+             0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+             0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+             0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f
+        },
+        outputTensorInfo.GetQuantizationScale(),
+        outputTensorInfo.GetQuantizationOffset()));
 
     // Optionally apply bias to output image.
     if(biasEnabled)
@@ -2016,8 +2049,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
     // Use a single-batch 2-channel 5x5 image as input.
     armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
     auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
-        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
-        {
+        QuantizedVector<T>({
              0,  1,  2,  3,  4,
              5,  6,  7,  8,  9,
             10, 11, 12, 13, 14,
@@ -2029,13 +2061,14 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
             35, 36, 37, 38, 39,
             40, 41, 42, 43, 44,
             45, 46, 47, 48, 49
-        })));
+        },
+        inputTensorInfo.GetQuantizationScale(),
+        inputTensorInfo.GetQuantizationOffset())));
 
     // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
     armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
     auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
-        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
-        {
+        QuantizedVector<T>({
             32, 31, 30, 29,
             28, 27, 26, 25,
             24, 23, 22, 21,
@@ -2045,14 +2078,15 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
             12, 11, 10,  9,
              8,  7,  6,  5,
              4,  3,  2,  1
-        })));
+        },
+        kernelTensorInfo.GetQuantizationScale(),
+        kernelTensorInfo.GetQuantizationOffset())));
 
     // Expected output is 1 batch of a 2-channel 5x5 image.
     // Calculated using the python tensorflow library with strideX=1, strideY=1.
     armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
     boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
-        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
-        {
+        QuantizedVector<T>({
             1062, 1580, 1850, 1530, 1117,
             2140, 3108, 3500, 2842, 2042,
             3580, 5068, 5460, 4342, 3062,
@@ -2064,7 +2098,9 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
             3390, 4886, 5022, 4068, 2916,
             3566, 5056, 5182, 4133, 2922,
             3100, 4352, 4452, 3517, 2465
-        })));
+        },
+        outputTensorInfo.GetQuantizationScale(),
+        outputTensorInfo.GetQuantizationOffset())));
 
     return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
         workloadFactory,
@@ -2097,8 +2133,7 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
 
     armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
     auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
-        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
-        {
+        QuantizedVector<T>({
              0,  1,  2,  3,  4,
              5,  6,  7,  8,  9,
             10, 11, 12, 13, 14,
@@ -2110,12 +2145,13 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
             35, 36, 37, 38, 39,
             40, 41, 42, 43, 44,
             45, 46, 47, 48, 49
-        })));
+        },
+        inputTensorInfo.GetQuantizationScale(),
+        inputTensorInfo.GetQuantizationOffset())));
 
     armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
     auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
-        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
-        {
+        QuantizedVector<T>({
              32, 31, 30, 29,
              28, 27, 26, 25,
              24, 23, 22, 21,
@@ -2125,12 +2161,13 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
              12, 11, 10,  9,
               8,  7,  6,  5,
               4,  3,  2,  1
-        })));
+        },
+        kernelTensorInfo.GetQuantizationScale(),
+        kernelTensorInfo.GetQuantizationOffset())));
 
     armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
     boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
-        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
-        {
+        QuantizedVector<T>({
             1062, 1580, 1850, 1530, 1117,
             2140, 3108, 3500, 2842, 2042,
             3580, 5068, 5460, 4342, 3062,
@@ -2142,7 +2179,9 @@ LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
             3390, 4886, 5022, 4068, 2916,
             3566, 5056, 5182, 4133, 2922,
             3100, 4352, 4452, 3517, 2465
-        })));
+        },
+        outputTensorInfo.GetQuantizationScale(),
+        outputTensorInfo.GetQuantizationOffset())));
 
     return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
         workloadFactory,
@@ -2175,27 +2214,29 @@ LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
 
     armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9}, ArmnnType);
     auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
-        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
-        {
-             0, 0, 0, 0, 0, 0, 0, 0, 0,
-             0, 0, 0, 0, 0, 0, 0, 0, 0,
-             0, 0, 0, 0, 0, 0, 0, 0, 0,
-             0, 0, 0, 1, 1, 1, 0, 0, 0,
-             0, 0, 0, 1, 1, 1, 0, 0, 0,
-             0, 0, 0, 1, 1, 1, 0, 0, 0,
-             0, 0, 0, 0, 0, 0, 0, 0, 0,
-             0, 0, 0, 0, 0, 0, 0, 0, 0,
-             0, 0, 0, 0, 0, 0, 0, 0, 0
-        })));
+        QuantizedVector<T>({
+            0, 0, 0, 0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0, 0, 0, 0, 0, 0,
+            0, 0, 0, 1, 1, 1, 0, 0, 0,
+            0, 0, 0, 1, 1, 1, 0, 0, 0,
+            0, 0, 0, 1, 1, 1, 0, 0, 0,
+            0, 0, 0, 0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0, 0, 0, 0, 0, 0
+        },
+        inputTensorInfo.GetQuantizationScale(),
+        inputTensorInfo.GetQuantizationOffset())));
 
     armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
     auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
-        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
-        {
-             1, 2, 3,
-             4, 5, 6,
-             7, 8, 9
-        })));
+        QuantizedVector<T>({
+            1, 2, 3,
+            4, 5, 6,
+            7, 8, 9
+        },
+        kernelTensorInfo.GetQuantizationScale(),
+        kernelTensorInfo.GetQuantizationOffset())));
 
     uint32_t padLeft = 0;
     uint32_t padTop = 0;
@@ -2209,12 +2250,13 @@ LayerTestResult<T, 4> SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon(
     // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
     armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3}, ArmnnType);
     boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
-        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
-        {
-             5, 5, 5,
-             5, 5, 5,
-             5, 5, 5
-        })));
+        QuantizedVector<T>({
+            5, 5, 5,
+            5, 5, 5,
+            5, 5, 5
+        },
+        outputTensorInfo.GetQuantizationScale(),
+        outputTensorInfo.GetQuantizationOffset())));
 
     return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
         workloadFactory,
@@ -2284,17 +2326,18 @@ LayerTestResult<T, 4> DepthwiseConvolution2d3x3DilationTestCommon(
     outputTensorInfo.SetQuantizationOffset(qOffset);
 
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-                                  std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                    inputTensorInfo.GetQuantizationOffset(),
-                                                                    inputNoQuantizedValues)));
+                                  std::vector<T>(QuantizedVector<T>(inputNoQuantizedValues,
+                                                                    inputTensorInfo.GetQuantizationScale(),
+                                                                    inputTensorInfo.GetQuantizationOffset())));
     auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
-                                   std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
-                                                                     kernelTensorInfo.GetQuantizationOffset(),
-                                                                     kernelNoQuantizedValues)));
-    auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
-                                           std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                                                                             outputTensorInfo.GetQuantizationOffset(),
-                                                                             outputExpectedNoQuantizedValues)));
+                                   std::vector<T>(QuantizedVector<T>(kernelNoQuantizedValues,
+                                                                     kernelTensorInfo.GetQuantizationScale(),
+                                                                     kernelTensorInfo.GetQuantizationOffset())));
+    auto expectedOutput =
+        MakeTensor<T, 4>(outputTensorInfo,
+                         std::vector<T>(QuantizedVector<T>(outputExpectedNoQuantizedValues,
+                                                           outputTensorInfo.GetQuantizationScale(),
+                                                           outputTensorInfo.GetQuantizationOffset())));
 
     uint32_t padLeft = 0;
     uint32_t padTop = 0;
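Every call site converted above follows the same pattern: the raw floating-point values come first, followed by the quantization scale and offset read from the corresponding TensorInfo. As a rough illustration of what such a helper computes, the standalone sketch below assumes the conventional affine mapping q = round(v / scale) + offset, clamped to the target type's range, and treats a scale of zero as a pass-through (some non-quantized test paths above pass a scale of 0.0f). The function name and the default arguments are this sketch's own choices, not the armnnUtils implementation.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <limits>
    #include <vector>

    // Illustrative stand-in for the reordered helper: values first, then qScale/qOffset.
    template <typename T>
    std::vector<T> QuantizedVectorSketch(const std::vector<float>& values,
                                         float qScale = 1.0f, int32_t qOffset = 0)
    {
        std::vector<T> quantized;
        quantized.reserve(values.size());
        for (float value : values)
        {
            if (qScale == 0.0f)
            {
                // Non-quantized paths: leave the value unchanged.
                quantized.push_back(static_cast<T>(value));
                continue;
            }
            float q = std::round(value / qScale) + static_cast<float>(qOffset);
            q = std::min(q, static_cast<float>(std::numeric_limits<T>::max()));
            q = std::max(q, static_cast<float>(std::numeric_limits<T>::lowest()));
            quantized.push_back(static_cast<T>(q));
        }
        return quantized;
    }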
src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
index 1997c4b..023bbae 100644 (file)
@@ -5,6 +5,7 @@
 
 #include "DebugTestImpl.hpp"
 
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -40,11 +41,11 @@ LayerTestResult<T, Dim> DebugTestImpl(
     }
 
     boost::multi_array<T, Dim> input =
-        MakeTensor<T, Dim>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+        MakeTensor<T, Dim>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
 
     LayerTestResult<T, Dim> ret(outputTensorInfo);
     ret.outputExpected =
-        MakeTensor<T, Dim>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, outputExpectedData));
+        MakeTensor<T, Dim>(outputTensorInfo, armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle =
         workloadFactory.CreateTensorHandle(inputTensorInfo);
src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
index e21a4b6..4e8c938 100644 (file)
@@ -5,7 +5,7 @@
 
 #include "DepthToSpaceTestImpl.hpp"
 
-#include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 
 #include <armnn/ArmNN.hpp>
 
@@ -44,10 +44,12 @@ LayerTestResult<T, 4> DepthToSpaceTestImpl(
         outputInfo.SetQuantizationOffset(qOffset);
     }
 
-    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+    boost::multi_array<T, 4> input =
+        MakeTensor<T, 4>(inputInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
 
     LayerTestResult<T, 4> result(outputInfo);
-    result.outputExpected = MakeTensor<T, 4>(outputInfo, QuantizedVector<T>(qScale, qOffset, expectedOutputData));
+    result.outputExpected =
+        MakeTensor<T, 4>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
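For reference, a minimal call in the style of the updated sites; the header and namespace match the includes added in this patch, while the concrete scale and offset are purely illustrative and assume the usual round(v / scale) + offset mapping.

    #include <QuantizeHelper.hpp>

    #include <cstdint>
    #include <vector>

    int main()
    {
        // Values first, then qScale and qOffset, as in the reordered call sites.
        const std::vector<uint8_t> q =
            armnnUtils::QuantizedVector<uint8_t>({ 0.0f, 0.5f, 1.0f, 1.5f }, 0.5f, 0);
        // With scale 0.5 and offset 0 this should yield { 0, 1, 2, 3 }.
        return q.size() == 4 ? 0 : 1;
    }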
src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
index c84b941..cf101ee 100644 (file)
@@ -7,6 +7,8 @@
 
 #include <armnn/ArmNN.hpp>
 
+#include <QuantizeHelper.hpp>
+
 #include <backendsCommon/CpuTensorHandle.hpp>
 
 #include <backendsCommon/test/DataTypeUtils.hpp>
@@ -191,15 +193,17 @@ LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
     LayerTestResult<T, 2> result(outputTensorInfo);
 
     boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        armnnUtils::QuantizedVector<T>({
             1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
-        })
+        },
+        qScale, qOffset)
     );
 
     boost::multi_array<T, 2> weights = MakeTensor<T, 2>(weightsDesc,
-        QuantizedVector<T>(qScale, qOffset, {
+        armnnUtils::QuantizedVector<T>({
             2.0f, 3.0f, 4.0f, 5.0f, 6.0f
-        })
+        },
+        qScale, qOffset)
     );
 
     std::vector<T> biasValues({900000.f});
@@ -215,10 +219,7 @@ LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
     );
 
     result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
-            965432.0f,
-        })
-    );
+                                             armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset));
 
     return result;
 }
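The single expected value above can be checked by hand: it is the dot product of the input row and the weights plus the bias, 1*2 + 10*3 + 100*4 + 1000*5 + 10000*6 + 900000 = 965432. A quick standalone verification:

    #include <cassert>
    #include <numeric>
    #include <vector>

    int main()
    {
        const std::vector<float> input   = { 1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f };
        const std::vector<float> weights = { 2.0f, 3.0f, 4.0f, 5.0f, 6.0f };
        // inner_product accumulates bias + sum(input[i] * weights[i]).
        const float output = std::inner_product(input.begin(), input.end(),
                                                weights.begin(), 900000.0f);
        assert(output == 965432.0f);
        return 0;
    }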
src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
index 4e9cbbf..d25fcea 100644 (file)
@@ -5,6 +5,7 @@
 
 #include "InstanceNormalizationTestImpl.hpp"
 
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -14,7 +15,6 @@
 #include <backendsCommon/WorkloadFactory.hpp>
 
 #include <backendsCommon/test/DataLayoutUtils.hpp>
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -35,12 +35,12 @@ LayerTestResult<T, 4> InstanceNormTestImpl(
     float qScale = 0.0f,
     int32_t qOffset = 0)
 {
-    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputValues));
+    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
+                                        armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
-
-    result.outputExpected =
-        MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, expectedOutputValues));
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+                                             armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
index 5c75b6f..569f5af 100644 (file)
@@ -6,6 +6,7 @@
 #include "L2NormalizationTestImpl.hpp"
 
 #include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 #include <TensorUtils.hpp>
 
@@ -44,10 +45,10 @@ LayerTestResult<T, 4> L2NormalizationTestImpl(
         inputData = tmp;
     }
 
-    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
-                                                         inputTensorInfo.GetQuantizationScale(),
-                                                         inputTensorInfo.GetQuantizationOffset(),
-                                                         inputData));
+    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
+                                        armnnUtils::QuantizedVector<T>(inputData,
+                                                                       inputTensorInfo.GetQuantizationScale(),
+                                                                       inputTensorInfo.GetQuantizationOffset()));
 
     std::vector<float> expectedOutputData = expectedOutputValues;
     if (layout == armnn::DataLayout::NHWC)
@@ -59,10 +60,11 @@ LayerTestResult<T, 4> L2NormalizationTestImpl(
     }
 
     LayerTestResult<T, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
-                                                               outputTensorInfo.GetQuantizationScale(),
-                                                               outputTensorInfo.GetQuantizationOffset(),
-                                                               expectedOutputData));
+    result.outputExpected =
+        MakeTensor<T, 4>(outputTensorInfo,
+                         armnnUtils::QuantizedVector<T>(expectedOutputData,
+                                                        outputTensorInfo.GetQuantizationScale(),
+                                                        outputTensorInfo.GetQuantizationOffset()));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -693,16 +695,10 @@ LayerTestResult<float, 2> L2Normalization2dShapeTest(
     const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
     const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
 
-    auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, QuantizedVector<float>(
-                                                             inputTensorInfo.GetQuantizationScale(),
-                                                             inputTensorInfo.GetQuantizationOffset(),
-                                                             inputData));
+    auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, inputData);
 
     LayerTestResult<float, 2> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, QuantizedVector<float>(
-                                                                   outputTensorInfo.GetQuantizationScale(),
-                                                                   outputTensorInfo.GetQuantizationOffset(),
-                                                                   expectedOutputData));
+    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, expectedOutputData);
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
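For context, the reference outputs in these tests follow the standard L2 normalisation along the channel dimension, out[c] = in[c] / sqrt(sum_k in[k]^2). A per-pixel sketch under that assumption (ignoring any epsilon term a backend might add, and assuming at least one non-zero channel):

    #include <cmath>
    #include <vector>

    // Scale one pixel's channel values to unit L2 norm.
    std::vector<float> L2NormalizePixel(const std::vector<float>& channels)
    {
        float sumOfSquares = 0.0f;
        for (float v : channels)
        {
            sumOfSquares += v * v;
        }
        const float invNorm = 1.0f / std::sqrt(sumOfSquares);

        std::vector<float> normalised;
        normalised.reserve(channels.size());
        for (float v : channels)
        {
            normalised.push_back(v * invNorm);
        }
        return normalised;
    }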
src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
index 0b73d37..4c340c8 100644 (file)
@@ -6,6 +6,7 @@
 #include "LogSoftmaxTestImpl.hpp"
 
 #include <Half.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -14,7 +15,6 @@
 #include <backendsCommon/IBackendInternal.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -39,7 +39,7 @@ LayerTestResult<T, NumDims> LogSoftmaxTestImpl(
 {
     LayerTestResult<T, NumDims> result(outputInfo);
     result.outputExpected =
-        MakeTensor<T, NumDims>(outputInfo, QuantizedVector<T>(qScale, qOffset, expectedOutputValues));
+        MakeTensor<T, NumDims>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
@@ -54,7 +54,7 @@ LayerTestResult<T, NumDims> LogSoftmaxTestImpl(
     inputHandle->Allocate();
     outputHandle->Allocate();
 
-    auto inputTensor = MakeTensor<T, NumDims>(inputInfo, QuantizedVector<T>(qScale, qOffset, inputValues));
+    auto inputTensor = MakeTensor<T, NumDims>(inputInfo, armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));
     CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin());
 
     workload->Execute();
src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
index c07f623..6cea777 100644 (file)
@@ -5,11 +5,12 @@
 
 #include "LstmTestImpl.hpp"
 
+#include <QuantizeHelper.hpp>
+
 #include <armnn/ArmNN.hpp>
 
 #include <backendsCommon/CpuTensorHandle.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -1963,13 +1964,19 @@ LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
     const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
 
     armnn::TensorInfo inputDesc({2, 2}, datatype);
-    boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
-            std::vector<float>{2., 3., 3., 4.}));
+    boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(
+        inputDesc,
+        armnnUtils::QuantizedVector<int16_t>({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset));
 
     armnn::TensorInfo outputDesc({2, 4}, datatype);
-    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
-            qOffset, std::vector<float>({{-0.02973187f, 0.1229473f,   0.20885126f, -0.15358765f,
-                                          -0.0185422f,  0.11281417f,  0.24466537f, -0.1826292f}})));
+    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(
+        outputDesc,
+        armnnUtils::QuantizedVector<int16_t>(
+            {
+                -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f,
+                -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f
+            },
+            qScale, qOffset));
 
     return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
@@ -1987,14 +1994,21 @@ LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
     const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
 
     armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
-    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
-            std::vector<float>({ 2., 3., 3., 4. })));
+    boost::multi_array<int16_t, 2> input =
+        MakeTensor<int16_t, 2>(
+            inputDesc,
+            armnnUtils::QuantizedVector<int16_t>({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset));
 
     armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
-    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
-            qOffset, std::vector<float>(
-            {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
-             -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})));
+    boost::multi_array<int16_t, 2> expectedOutput =
+        MakeTensor<int16_t, 2>(
+            outputDesc,
+            armnnUtils::QuantizedVector<int16_t>(
+                {
+                    -0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
+                    -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f
+                },
+                qScale, qOffset));
 
     return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
@@ -2011,20 +2025,32 @@ LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
     const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
 
     armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
-    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
-            qOffset, std::vector<float>(
-            {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
-             0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})));
+    boost::multi_array<int16_t, 2> input =
+        MakeTensor<int16_t, 2>(
+            inputDesc,
+            armnnUtils::QuantizedVector<int16_t>(
+                {
+                    0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
+                    0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f
+                },
+                qScale, qOffset));
 
     armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
-    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
-            qOffset, std::vector<float>(
-            {-0.00396806f,  0.029352f,   -0.00279226f, 0.0159977f,  -0.00835576f,
-             -0.0211779f,   0.0283512f,  -0.0114597f,  0.00907307f, -0.0244004f,
-             -0.0152191f,  -0.0259063f,   0.00914318f, 0.00415118f,  0.017147f,
-              0.0134203f,  -0.013869f,    0.0287268f, -0.00334693f,  0.00733398f, -0.0287926f,
-             -0.0186926f,   0.0193662f,  -0.0115437f,  0.00422612f, -0.0345232f,
-              0.00223253f, -0.00957321f,  0.0210624f,  0.013331f,    0.0150954f,   0.02168f})));
+    boost::multi_array<int16_t, 2> expectedOutput =
+        MakeTensor<int16_t, 2>(
+            outputDesc,
+            armnnUtils::QuantizedVector<int16_t>(
+                {
+                    -0.00396806f,  0.02935200f, -0.00279226f,  0.01599770f,
+                    -0.00835576f, -0.02117790f,  0.02835120f, -0.01145970f,
+                     0.00907307f, -0.02440040f, -0.01521910f, -0.02590630f,
+                     0.00914318f,  0.00415118f,  0.01714700f,  0.01342030f,
+                    -0.01386900f,  0.02872680f, -0.00334693f,  0.00733398f,
+                    -0.02879260f, -0.01869260f,  0.01936620f, -0.01154370f,
+                     0.00422612f, -0.03452320f,  0.00223253f, -0.00957321f,
+                     0.02106240f,  0.01333100f,  0.01509540f,  0.02168000f
+                },
+                qScale, qOffset));
 
     return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
@@ -2040,13 +2066,20 @@ LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16Const
     const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16
 
     armnn::TensorInfo inputDesc({2, 2}, datatype);
-    boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale,
-            qOffset, std::vector<float>{2., 3., 3., 4.}));
+    boost::multi_array<int16_t , 2> input =
+        MakeTensor<int16_t , 2>(inputDesc,
+                                armnnUtils::QuantizedVector<int16_t>({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset));
 
     armnn::TensorInfo outputDesc({2, 4}, datatype);
-    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
-            qOffset, std::vector<float>({{-0.02973187f, 0.1229473f,   0.20885126f, -0.15358765f,
-                                          -0.0185422f,  0.11281417f,  0.24466537f, -0.1826292f}})));
+    boost::multi_array<int16_t, 2> expectedOutput =
+        MakeTensor<int16_t, 2>(
+            outputDesc,
+            armnnUtils::QuantizedVector<int16_t>(
+                {
+                    -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f,
+                    -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f
+                },
+                qScale, qOffset));
 
     return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
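The int16 expected outputs above are produced by quantizing the reference floats with the same scale and offset as the input; the inverse mapping recovers the floats. A sketch of that inverse, assuming the conventional value = (q - qOffset) * qScale convention (the helper name is hypothetical):

    #include <cstdint>
    #include <vector>

    // Inverse of the quantization applied above: value = (q - qOffset) * qScale.
    std::vector<float> DequantizedVectorSketch(const std::vector<int16_t>& quantized,
                                               float qScale, int32_t qOffset)
    {
        std::vector<float> values;
        values.reserve(quantized.size());
        for (int16_t q : quantized)
        {
            values.push_back(static_cast<float>(static_cast<int32_t>(q) - qOffset) * qScale);
        }
        return values;
    }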
src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
index 82b772e..0f9a30e 100644 (file)
@@ -5,6 +5,8 @@
 
 #include "PadTestImpl.hpp"
 
+#include <QuantizeHelper.hpp>
+
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -28,28 +30,27 @@ LayerTestResult<T, 2> Pad2dTestCommon(
     const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
     const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
 
-    std::vector<T> inputValues(
-    QuantizedVector<T>(qScale, qOffset,
-    {
-      // Height (3) x Width (3)
-      4, 8, 6,
-      7, 4, 4,
-      3, 2, 4
-    }));
+    std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
+        {
+            // Height (3) x Width (3)
+            4, 8, 6,
+            7, 4, 4,
+            3, 2, 4
+        },
+        qScale, qOffset);
 
     auto p = customPaddingValue;
-    std::vector<T> expectedOutputValues;
-    expectedOutputValues = (
-    QuantizedVector<T>(qScale, qOffset,
-    {
-      p, p, p, p, p, p, p,
-      p, p, p, p, p, p, p,
-      p, p, 4, 8, 6, p, p,
-      p, p, 7, 4, 4, p, p,
-      p, p, 3, 2, 4, p, p,
-      p, p, p, p, p, p, p,
-      p, p, p, p, p, p, p
-    }));
+    std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
+        {
+            p, p, p, p, p, p, p,
+            p, p, p, p, p, p, p,
+            p, p, 4, 8, 6, p, p,
+            p, p, 7, 4, 4, p, p,
+            p, p, 3, 2, 4, p, p,
+            p, p, p, p, p, p, p,
+            p, p, p, p, p, p, p
+        },
+        qScale, qOffset);
 
     auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
 
@@ -100,41 +101,39 @@ LayerTestResult<T, 3> Pad3dTestCommon(
     const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
     const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
 
-    std::vector<T> inputValues(
-      QuantizedVector<T>(qScale,qOffset,
-    {
-        // Channel 0, Height (2) x Width (2)
-        0, 4,
-        2, 5,
-
-        // Channel 1, Height (2) x Width (2)
-        6, 1,
-        5, 2
-    }));
-
-    std::vector<T> expectedOutputValues(
-      QuantizedVector<T>(qScale,qOffset,
-    {
-
-        0, 0, 0, 0, 0, 0,
-        0, 0, 0, 0, 0, 0,
-        0, 0, 0, 4, 0, 0,
-        0, 0, 2, 5, 0, 0,
-        0, 0, 0, 0, 0, 0,
-
-        0, 0, 0, 0, 0, 0,
-        0, 0, 0, 0, 0, 0,
-        0, 0, 6, 1, 0, 0,
-        0, 0, 5, 2, 0, 0,
-        0, 0, 0, 0, 0, 0,
-
-        0, 0, 0, 0, 0, 0,
-        0, 0, 0, 0, 0, 0,
-        0, 0, 0, 0, 0, 0,
-        0, 0, 0, 0, 0, 0,
-        0, 0, 0, 0, 0, 0
-
-    }));
+    std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
+        {
+            // Channel 0, Height (2) x Width (2)
+            0, 4,
+            2, 5,
+
+            // Channel 1, Height (2) x Width (2)
+            6, 1,
+            5, 2
+        },
+        qScale, qOffset);
+
+    std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
+        {
+            0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0, 0, 0,
+            0, 0, 0, 4, 0, 0,
+            0, 0, 2, 5, 0, 0,
+            0, 0, 0, 0, 0, 0,
+
+            0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0, 0, 0,
+            0, 0, 6, 1, 0, 0,
+            0, 0, 5, 2, 0, 0,
+            0, 0, 0, 0, 0, 0,
+
+            0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0, 0, 0
+        },
+        qScale, qOffset);
 
     auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
 
@@ -185,193 +184,193 @@ LayerTestResult<T, 4> Pad4dTestCommon(
     const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
     const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
 
-    std::vector<T> inputValues(
-      QuantizedVector<T>(qScale,qOffset,
-    {
-        // Batch 0, Channel 0, Height (3) x Width (2)
-        0, 1,
-        2, 3,
-        4, 5,
-
-        // Batch 0, Channel 1, Height (3) x Width (2)
-        6, 7,
-        8, 9,
-        10, 11,
-
-        // Batch 1, Channel 0, Height (3) x Width (2)
-        12, 13,
-        14, 15,
-        16, 17,
-
-        // Batch 1, Channel 1, Height (3) x Width (2)
-        18, 19,
-        20, 21,
-        22, 23
-    }));
-
-    std::vector<T> expectedOutputValues(
-      QuantizedVector<T>(qScale,qOffset,
-    {
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 1, 0,
-        0, 2, 3, 0,
-        0, 4, 5, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 6, 7, 0,
-        0, 8, 9, 0,
-        0, 10, 11, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 12, 13, 0,
-        0, 14, 15, 0,
-        0, 16, 17, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 18, 19, 0,
-        0, 20, 21, 0,
-        0, 22, 23, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0
-    }));
+    std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0, Height (3) x Width (2)
+             0,  1,
+             2,  3,
+             4,  5,
+
+            // Batch 0, Channel 1, Height (3) x Width (2)
+             6,  7,
+             8,  9,
+            10, 11,
+
+            // Batch 1, Channel 0, Height (3) x Width (2)
+            12, 13,
+            14, 15,
+            16, 17,
+
+            // Batch 1, Channel 1, Height (3) x Width (2)
+            18, 19,
+            20, 21,
+            22, 23
+        },
+        qScale, qOffset);
+
+    std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
+        {
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 1, 0,
+            0, 2, 3, 0,
+            0, 4, 5, 0,
+            0, 0, 0, 0,
+
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 6, 7, 0,
+            0, 8, 9, 0,
+            0, 10, 11, 0,
+            0, 0, 0, 0,
+
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 12, 13, 0,
+            0, 14, 15, 0,
+            0, 16, 17, 0,
+            0, 0, 0, 0,
+
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 18, 19, 0,
+            0, 20, 21, 0,
+            0, 22, 23, 0,
+            0, 0, 0, 0,
+
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0
+        },
+        qScale, qOffset);
 
     auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
 
src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
index ef48c97..fe0d076 100644 (file)
@@ -12,7 +12,6 @@
 #include <backendsCommon/IBackendInternal.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
 #include <test/TensorHelpers.hpp>
src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
index f250fa5..fcc8980 100644 (file)
@@ -9,12 +9,12 @@
 
 #include <DataLayoutIndexed.hpp>
 #include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 #include <TensorUtils.hpp>
 
 #include <backendsCommon/WorkloadInfo.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -25,6 +25,8 @@
 namespace
 {
 
+using namespace armnnUtils;
+
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<T, 4> SimplePooling2dTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
@@ -187,7 +189,7 @@ LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
     inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
     std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
 
     // These were calculated manually.
     auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
@@ -195,7 +197,7 @@ LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
     if (forceNoPadding)
     {
         outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-            QuantizedVector<T>(qScale, qOffset, {
+            QuantizedVector<T>({
                  8.0f,  8.0f,  8.0f,
                  9.0f,  7.0f,  9.0f,
                  9.0f,  9.0f,  9.0f,
@@ -211,12 +213,13 @@ LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
                  0.0f,  0.0f, -3.0f,
                 -1.0f,  0.0f,  0.0f,
                 -1.0f, -1.0f, -1.0f
-        }));
+            },
+            qScale, qOffset));
     }
     else
     {
         outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-            QuantizedVector<T>(qScale, qOffset, {
+            QuantizedVector<T>({
                 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
                 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
                 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
@@ -232,7 +235,8 @@ LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
                 0.0f, 0.0f, 0.0f, 0.0f,-3.0f, 0.0f,
                 0.0f,-1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
                 0.0f,-1.0f,-1.0f,-1.0f,-1.0f, 0.0f
-        }));
+            },
+            qScale, qOffset));
     }
 
     return SimplePooling2dTestImpl<ArmnnType>(
@@ -267,7 +271,7 @@ LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
     }
 
     std::vector<T> inputData(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
              1.0f,  2.0f,  5.0f,  6.0f,
              3.0f,  4.0f,  7.0f,  8.0f,
              9.0f, 10.0f, 13.0f, 14.0f,
@@ -277,16 +281,18 @@ LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
             19.0f, 20.0f, 23.0f, 24.0f,
             25.0f, 26.0f, 29.0f, 30.0f,
             27.0f, 28.0f, 31.0f, 32.0f,
-        }));
+        },
+        qScale, qOffset));
 
     std::vector<T> outputData(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
              4.0f,  8.0f,
             12.0f, 16.0f,
 
             20.0f, 24.0f,
             28.0f, 32.0f,
-        }));
+        },
+        qScale, qOffset));
 
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
     if (dataLayout == armnn::DataLayout::NHWC)
@@ -336,7 +342,7 @@ LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
     }
 
     std::vector<T> inputData(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
              2.0f,  2.0f,  6.0f,  6.0f,
              4.0f,  4.0f,  8.0f,  8.0f,
             10.0f, 12.0f, 14.0f, 16.0f,
@@ -346,16 +352,18 @@ LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
             20.0f, 18.0f, 22.0f, 24.0f,
             26.0f, 28.0f,  0.0f,  0.0f,
             26.0f, 28.0f,  0.0f,  0.0f,
-        }));
+        },
+        qScale, qOffset));
 
     std::vector<T> outputData(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
              3.0f,  7.0f,
             11.0f, 15.0f,
 
             19.0f, 23.0f,
             27.0f,  0.0f,
-        }));
+        },
+        qScale, qOffset));
 
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
     if (dataLayout == armnn::DataLayout::NHWC)
@@ -447,7 +455,7 @@ LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
     armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
 
     std::vector<T> inputData(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             1.0f, 7.0f, 5.0f, 5.0f,
             1.0f, 7.0f, 5.0f, 5.0f,
             3.0f, 3.0f, 1.0f, 1.0f,
@@ -457,16 +465,18 @@ LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
             1.0f, 7.0f, 2.0f, 0.0f,
             0.0f, 2.0f, 1.0f, 1.0f,
             0.0f, 0.0f, 1.0f, 1.0f,
-        }));
+        },
+        qScale, qOffset));
 
     std::vector<T> outputData(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             5.0f, 5.0f,
             3.0f, 1.0f,
 
             5.0f, 1.0f,
             1.0f, 1.0f,
-        }));
+        },
+        qScale, qOffset));
 
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
     if (dataLayout == armnn::DataLayout::NHWC)
@@ -503,19 +513,21 @@ LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(
 
     armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             2.0f, 1.0f, 5.0f, 2.0f,
             1.0f, 2.0f, 2.0f, 1.0f,
             5.0f, 4.0f, 1.0f, 5.0f,
             2.0f, 1.0f, 5.0f, 2.0f,
-        }));
+        },
+        qScale, qOffset));
 
     armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             3.0f, 3.0f,
             3.0f, 3.0f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -536,7 +548,7 @@ LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
 
     armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
             1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
             5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
@@ -546,15 +558,17 @@ LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
             2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
             1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
             5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
-        }));
+        },
+        qScale, qOffset));
 
     armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             3.0f, 3.0f, 3.0f,
             3.0f, 3.0f, 3.0f,
             3.0f, 3.0f, 3.0f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
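The 3.0f values expected by these L2 pooling fixtures are consistent with L2 pooling taking the square root of the mean of squares over each window: for the 3x3 windows above, (4 + 1 + 25 + 1 + 4 + 4 + 25 + 16 + 1) / 9 = 9 and sqrt(9) = 3. A single-window sketch under that assumption (the function name is illustrative):

    #include <cmath>
    #include <vector>

    // L2 pooling over one window: sqrt of the mean of the squared values.
    float L2PoolWindow(const std::vector<float>& window)
    {
        float sumOfSquares = 0.0f;
        for (float v : window)
        {
            sumOfSquares += v * v;
        }
        return std::sqrt(sumOfSquares / static_cast<float>(window.size()));
    }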
@@ -575,7 +589,7 @@ LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
 
     armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
             1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
             5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
@@ -583,14 +597,16 @@ LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
             2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
             1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
             5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
-        }));
+        },
+        qScale, qOffset));
 
     armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             3.0f, 3.0f,
             3.0f, 3.0f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -611,7 +627,7 @@ LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
 
     armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             1.0f, 0.0f, 2.0f, 0.0f,  3.0f, 0.0f, 4.0f,
             0.0f, 0.0f, 0.0f, 0.0f,  0.0f, 0.0f, 0.0f,
             0.0f, 5.0f, 0.0f, 6.0f,  0.0f, 7.0f, 0.0f,
@@ -619,13 +635,15 @@ LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
             0.0f, 5.0f, 0.0f, 2.0f,  0.0f, 1.0f, 1.0f,
             0.0f, 0.0f, 0.0f, 0.0f,  0.0f, 0.0f, 0.0f,
             0.0f, 0.0f, 0.0f, 0.0f,  0.0f, 0.0f, 0.0f,
-        }));
+        },
+        qScale, qOffset));
 
     armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             3.0f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -646,7 +664,7 @@ LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
 
     armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
             1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
             5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
@@ -656,13 +674,15 @@ LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
             2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
             1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
             5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
-        }));
+        },
+        qScale, qOffset));
 
     armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             3.0f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -693,15 +713,17 @@ LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(
 
     // Construct input data.
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             1.0f, 3.0f, 4.0f,
-        }));
+        },
+        qScale, qOffset));
 
     // These were calculated manually.
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             0.0f, 3.0f, 0.0f, 3.0f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -883,11 +905,11 @@ LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(
         outputTensorInfo.SetQuantizationOffset(qOffset);
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
 
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
-                         QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));
+        forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
+                         QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -961,11 +983,11 @@ LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
         outputTensorInfo.SetQuantizationOffset(qOffset);
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
 
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
-                         QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));
+        forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
+                         QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -1002,19 +1024,21 @@ LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(
     }
 
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             -1.0f, -2.0f,  3.0f,  4.0f,
             -1.0f, -2.0f,  3.0f,  4.0f,
              1.0f,  2.0f, -3.0f, -4.0f,
              1.0f,  2.0f, -3.0f, -4.0f,
-        }));
+        },
+        qScale, qOffset));
 
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             -1.0f,  3.0f,  4.0f,
              1.0f,  3.0f,  4.0f,
              1.0f,  2.0f, -4.0f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -1050,20 +1074,22 @@ LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(
     }
 
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             -1.0f, -2.0f,  3.0f,  4.0f,
             -1.0f, -2.0f,  3.0f,  4.0f,
              1.0f,  2.0f, -3.0f, -4.0f,
              1.0f,  2.0f, -3.0f, -4.0f,
-        }));
+        },
+        qScale, qOffset));
 
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             -1.0f,  3.0f,  4.0f,  4.0f,
              2.0f,  3.0f,  4.0f,  4.0f,
              2.0f,  3.0f,  4.0f,  4.0f,
              2.0f,  2.0f,  2.0f, -3.0f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -1099,19 +1125,21 @@ LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(
     }
 
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             12.0f, 20.0f, 32.0f, 40.0f,
             12.0f, 20.0f, 32.0f, 40.0f,
             12.0f, 20.0f, 32.0f, 40.0f,
             12.0f, 20.0f, 32.0f, 40.0f,
-        }));
+        },
+        qScale, qOffset));
 
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             3.0f,  13.0f,  10.0f,
             6.0f,  26.0f,  20.0f,
             3.0f,  13.0f,  10.0f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -1148,18 +1176,20 @@ LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(
     }
 
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             1.0f, 2.0f, 3.0f, 4.0f,
             1.0f, 2.0f, 3.0f, 4.0f,
             1.0f, 2.0f, 3.0f, 4.0f,
             1.0f, 2.0f, 3.0f, 4.0f,
-        }));
+        },
+        qScale, qOffset));
 
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             2.0f, 3.5f,
             2.0f, 3.5f
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -1195,20 +1225,22 @@ LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(
     }
 
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             9.0f,   27.0f,  18.0f,  36.0f,
             18.0f,   9.0f,  18.0f,   9.0f,
             27.0f,  18.0f,   9.0f,  27.0f,
             9.0f,   27.0f,   9.0f,  18.0f,
-        }));
+        },
+        qScale, qOffset));
 
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
              7.0f,  11.0f,  13.0f, 9.0f,
             12.0f,  17.0f,  19.0f, 13.0f,
             12.0f,  16.0f,  16.0f, 10.0f,
              9.0f,  11.0f,  12.0f, 7.0f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -1244,19 +1276,21 @@ LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(
     }
 
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             2.0f,  4.0f, 8.0f, 16.0f,
             4.0f,  2.0f, 2.0f, 4.0f,
             8.0f,  2.0f, 4.0f, 2.0f,
             16.0f, 2.0f, 2.0f, 8.0f,
-        }));
+        },
+        qScale, qOffset));
 
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
                1.0f,     4.4721f,   8.0f,
             4.4721f,     2.6457f,   2.236f,
                8.0f,     1.4142f,   4.0f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -1292,20 +1326,22 @@ LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(
     }
 
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             1.0f, 2.0f, 3.0f, 4.0f,
             1.0f, 2.0f, 3.0f, 4.0f,
             1.0f, 2.0f, 3.0f, 4.0f,
             1.0f, 2.0f, 3.0f, 4.0f,
-        }));
+        },
+        qScale, qOffset));
 
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             1.0540f, 1.7638f, 2.5385f, 2.3570f,
             1.2909f, 2.1602f, 3.1091f, 2.8867f,
             1.2909f, 2.1602f, 3.1091f, 2.8867f,
             1.0540f, 1.7638f, 2.5385f, 2.3570f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
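Every hunk above makes the same mechanical change: the data vector becomes the first argument to QuantizedVector, and qScale/qOffset move behind it where they can take defaults. Below is a minimal, self-contained sketch of that reordered signature; it is illustrative only, not the actual armnnUtils header, which dispatches on the ArmNN data type rather than on std::is_integral.

    #include <cmath>
    #include <cstdint>
    #include <type_traits>
    #include <vector>

    template <typename T>
    std::vector<T> QuantizedVector(const std::vector<float>& values,
                                   float qScale = 1.0f, int32_t qOffset = 0)
    {
        std::vector<T> result;
        result.reserve(values.size());
        for (float v : values)
        {
            if constexpr (std::is_integral<T>::value)
            {
                // Quantized path: round-to-nearest affine quantization.
                result.push_back(static_cast<T>(std::lround(v / qScale) + qOffset));
            }
            else
            {
                // Non-quantized types (float, Float16) pass straight through,
                // which is why qScale and qOffset can safely default.
                result.push_back(static_cast<T>(v));
            }
        }
        return result;
    }

With the parameters in this order, non-quantized call sites can drop the trailing arguments entirely.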
diff --git a/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
index 18a5bd0..dc9b908 100644
@@ -7,6 +7,7 @@
 
 #include "LayerTestResult.hpp"
 
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -57,18 +58,22 @@ LayerTestResult<T, 4> PreluTest(
        0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, -1.0f, -2.0f, 0.0f, -2.0f, -4.0f
     };
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                      inputTensorInfo.GetQuantizationOffset(),
-                                                                      inputData));
-    auto alpha = MakeTensor<T, 4>(alphaTensorInfo, QuantizedVector<T>(alphaTensorInfo.GetQuantizationScale(),
-                                                                      alphaTensorInfo.GetQuantizationOffset(),
-                                                                      alphaData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(inputData,
+                                                                 inputTensorInfo.GetQuantizationScale(),
+                                                                 inputTensorInfo.GetQuantizationOffset()));
+
+    auto alpha = MakeTensor<T, 4>(alphaTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(alphaData,
+                                                                 alphaTensorInfo.GetQuantizationScale(),
+                                                                 alphaTensorInfo.GetQuantizationOffset()));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                                                                outputTensorInfo.GetQuantizationOffset(),
-                                                                outputExpectedData));
+    result.outputExpected =
+        MakeTensor<T, 4>(outputTensorInfo,
+                         armnnUtils::QuantizedVector<T>(outputExpectedData,
+                                                        outputTensorInfo.GetQuantizationScale(),
+                                                        outputTensorInfo.GetQuantizationOffset()));
 
     std::unique_ptr <armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr <armnn::ITensorHandle> alphaHandle  = workloadFactory.CreateTensorHandle(alphaTensorInfo);
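The PreluTest hunk above (and the ResizeTestImpl hunks that follow) show the second recurring shape of the change: scale and offset are read back off the relevant TensorInfo rather than passed loose. A hedged before/after of a typical call site, reusing the QuantizedVector sketch above; the values are made up for illustration.

    std::vector<float> data = { -1.0f, 0.0f, 1.0f };

    // Old argument order, as removed by the '-' lines:
    //     QuantizedVector<uint8_t>(0.5f, 10, data);
    // New argument order, data first:
    auto quantized = QuantizedVector<uint8_t>(data, 0.5f, 10);

    // Non-quantized element types rely on the new defaults:
    auto passthrough = QuantizedVector<float>(data);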
diff --git a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp
index bb2392f..56ce51a 100644
@@ -8,6 +8,7 @@
 #include "LayerTestResult.hpp"
 
 #include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 #include <TensorUtils.hpp>
 
@@ -76,9 +77,10 @@ LayerTestResult<T, 4> ResizeBilinearNopTest(
         inputData = tmp;
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                      inputTensorInfo.GetQuantizationOffset(),
-                                                                      inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(inputData,
+                                                                 inputTensorInfo.GetQuantizationScale(),
+                                                                 inputTensorInfo.GetQuantizationOffset()));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
     result.outputExpected = input;
@@ -174,15 +176,16 @@ LayerTestResult<T, 4> SimpleResizeBilinearTest(
         outputData = tmp1;
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                      inputTensorInfo.GetQuantizationOffset(),
-                                                                      inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(inputData,
+                                                                 inputTensorInfo.GetQuantizationScale(),
+                                                                 inputTensorInfo.GetQuantizationOffset()));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                                                                outputTensorInfo.GetQuantizationOffset(),
-                                                                outputData));
+                                             armnnUtils::QuantizedVector<T>(outputData,
+                                                                            outputTensorInfo.GetQuantizationScale(),
+                                                                            outputTensorInfo.GetQuantizationOffset()));
 
     std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -278,15 +281,16 @@ LayerTestResult<T, 4> ResizeBilinearSqMinTest(
         outputData = tmp1;
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                      inputTensorInfo.GetQuantizationOffset(),
-                                                                      inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(inputData,
+                                                                 inputTensorInfo.GetQuantizationScale(),
+                                                                 inputTensorInfo.GetQuantizationOffset()));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                                                                outputTensorInfo.GetQuantizationOffset(),
-                                                                outputData));
+                                             armnnUtils::QuantizedVector<T>(outputData,
+                                                                            outputTensorInfo.GetQuantizationScale(),
+                                                                            outputTensorInfo.GetQuantizationOffset()));
 
     std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -377,15 +381,16 @@ LayerTestResult<T, 4> ResizeBilinearMinTest(
         outputData = tmp1;
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                      inputTensorInfo.GetQuantizationOffset(),
-                                                                      inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(inputData,
+                                                                 inputTensorInfo.GetQuantizationScale(),
+                                                                 inputTensorInfo.GetQuantizationOffset()));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                                                                outputTensorInfo.GetQuantizationOffset(),
-                                                                outputData));
+                                             armnnUtils::QuantizedVector<T>(outputData,
+                                                                            outputTensorInfo.GetQuantizationScale(),
+                                                                            outputTensorInfo.GetQuantizationOffset()));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -484,15 +489,16 @@ LayerTestResult<T, 4> ResizeBilinearMagTest(
         outputData = tmp1;
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                      inputTensorInfo.GetQuantizationOffset(),
-                                                                      inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(inputData,
+                                                                 inputTensorInfo.GetQuantizationScale(),
+                                                                 inputTensorInfo.GetQuantizationOffset()));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                                                                outputTensorInfo.GetQuantizationOffset(),
-                                                                outputData));
+                                             armnnUtils::QuantizedVector<T>(outputData,
+                                                                            outputTensorInfo.GetQuantizationScale(),
+                                                                            outputTensorInfo.GetQuantizationOffset()));
 
     std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -573,9 +579,10 @@ LayerTestResult<T, 4> ResizeNearestNeighborNopTest(
         inputData = tmp;
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                      inputTensorInfo.GetQuantizationOffset(),
-                                                                      inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(inputData,
+                                                                 inputTensorInfo.GetQuantizationScale(),
+                                                                 inputTensorInfo.GetQuantizationOffset()));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
     result.outputExpected = input;
@@ -670,15 +677,16 @@ LayerTestResult<T, 4> SimpleResizeNearestNeighborTest(
         outputData = tmp1;
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                      inputTensorInfo.GetQuantizationOffset(),
-                                                                      inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(inputData,
+                                                                 inputTensorInfo.GetQuantizationScale(),
+                                                                 inputTensorInfo.GetQuantizationOffset()));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                                                                outputTensorInfo.GetQuantizationOffset(),
-                                                                outputData));
+                                             armnnUtils::QuantizedVector<T>(outputData,
+                                                                            outputTensorInfo.GetQuantizationScale(),
+                                                                            outputTensorInfo.GetQuantizationOffset()));
 
     std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -773,15 +781,16 @@ LayerTestResult<T, 4> ResizeNearestNeighborSqMinTest(
         outputData = tmp1;
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                      inputTensorInfo.GetQuantizationOffset(),
-                                                                      inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(inputData,
+                                                                 inputTensorInfo.GetQuantizationScale(),
+                                                                 inputTensorInfo.GetQuantizationOffset()));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                                                                outputTensorInfo.GetQuantizationOffset(),
-                                                                outputData));
+                                             armnnUtils::QuantizedVector<T>(outputData,
+                                                                            outputTensorInfo.GetQuantizationScale(),
+                                                                            outputTensorInfo.GetQuantizationOffset()));
 
     std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -871,15 +880,16 @@ LayerTestResult<T, 4> ResizeNearestNeighborMinTest(
         outputData = tmp1;
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                      inputTensorInfo.GetQuantizationOffset(),
-                                                                      inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(inputData,
+                                                                 inputTensorInfo.GetQuantizationScale(),
+                                                                 inputTensorInfo.GetQuantizationOffset()));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                                                                outputTensorInfo.GetQuantizationOffset(),
-                                                                outputData));
+                                             armnnUtils::QuantizedVector<T>(outputData,
+                                                                            outputTensorInfo.GetQuantizationScale(),
+                                                                            outputTensorInfo.GetQuantizationOffset()));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -978,15 +988,16 @@ LayerTestResult<T, 4> ResizeNearestNeighborMagTest(
         outputData = tmp1;
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                      inputTensorInfo.GetQuantizationOffset(),
-                                                                      inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(inputData,
+                                                                 inputTensorInfo.GetQuantizationScale(),
+                                                                 inputTensorInfo.GetQuantizationOffset()));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                                                                outputTensorInfo.GetQuantizationOffset(),
-                                                                outputData));
+                                             armnnUtils::QuantizedVector<T>(outputData,
+                                                                            outputTensorInfo.GetQuantizationScale(),
+                                                                            outputTensorInfo.GetQuantizationOffset()));
 
     std::unique_ptr <armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
index f0479c8..a60b189 100644
@@ -5,6 +5,7 @@
 
 #include "SliceTestImpl.hpp"
 
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -39,11 +40,11 @@ LayerTestResult<T, NumDims> SliceTestImpl(
     }
 
     boost::multi_array<T, NumDims> input =
-        MakeTensor<T, NumDims>(inputInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+        MakeTensor<T, NumDims>(inputInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
 
     LayerTestResult<T, NumDims> result(outputInfo);
     result.outputExpected =
-        MakeTensor<T, NumDims>(outputInfo, QuantizedVector<T>(qScale, qOffset, expectedOutputData));
+        MakeTensor<T, NumDims>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
index c0b62aa..a5f6477 100644
@@ -5,13 +5,13 @@
 
 #include "SoftmaxTestImpl.hpp"
 
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
 
 #include <backendsCommon/CpuTensorHandle.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -85,8 +85,7 @@ LayerTestResult<T, n> SimpleSoftmaxBaseTestImpl(
     LayerTestResult<T, n> ret(outputTensorInfo);
 
     // Each row is independently softmax'd.
-    auto input = MakeTensor<T, n>(inputTensorInfo, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, inputData)));
+    auto input = MakeTensor<T, n>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -111,8 +110,7 @@ LayerTestResult<T, n> SimpleSoftmaxBaseTestImpl(
 
     CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
 
-    std::vector<T> expectedOutput = std::vector<T>(
-            QuantizedVector<T>(qScale, qOffset, outputData));
+    std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(outputData, qScale, qOffset);
     ret.outputExpected = MakeTensor<T, n>(outputTensorInfo, expectedOutput);
 
     return ret;
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
index 094ed23..f815604 100644
@@ -6,6 +6,7 @@
 #include "SpaceToBatchNdTestImpl.hpp"
 
 #include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -55,10 +56,12 @@ LayerTestResult<T, 4> SpaceToBatchNdTestImpl(
         outputTensorInfo.SetQuantizationOffset(qOffset);
     }
 
-    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
+                                                      armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
 
     LayerTestResult<T, 4> ret(outputTensorInfo);
-    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, outputExpectedData));
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+                                          armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
index 48e157d..0541323 100644
@@ -6,6 +6,7 @@
 #include "SpaceToDepthTestImpl.hpp"
 
 #include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -56,10 +57,12 @@ LayerTestResult<T, 4> SpaceToDepthTestImpl(
         outputTensorInfo.SetQuantizationOffset(qOffset);
     }
 
-    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
+                                                      armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
 
     LayerTestResult<T, 4> ret(outputTensorInfo);
-    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, outputExpectedData));
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+                                          armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
index 1716091..7aebdd0 100644
@@ -5,11 +5,11 @@
 
 #include "SplitterTestImpl.hpp"
 
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -80,7 +80,7 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
     LayerTestResult<T,3> ret4(outputTensorInfo4);
 
     auto input = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        armnnUtils::QuantizedVector<T>({
             1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
             6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
             11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
@@ -101,24 +101,26 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
             76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
             81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
             86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
-        })
+        },
+        qScale, qOffset)
     ));
 
     // Channel 0 of the original input.
     ret1.outputExpected = MakeTensor<T, 3>(outputTensorInfo1, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        armnnUtils::QuantizedVector<T>({
             1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
             6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
             11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
             16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
             21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
             26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
-        })
+        },
+        qScale, qOffset)
     ));
 
     // Channel 1 & 2 of the original input.
     ret2.outputExpected = MakeTensor<T, 3>(outputTensorInfo2, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        armnnUtils::QuantizedVector<T>({
             31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
             36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
             41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
@@ -132,31 +134,34 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
             76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
             81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
             86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
-        })
+        },
+        qScale, qOffset)
     ));
 
     // Channel 0 of return 2 (i.e. channels 1 and 2 of the original input).
     ret3.outputExpected = MakeTensor<T, 3>(outputTensorInfo3, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        armnnUtils::QuantizedVector<T>({
             31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
             36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
             41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
             46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
             51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
             56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
-        })
+        },
+        qScale, qOffset)
     ));
 
     // Channel 1 of return 2.
     ret4.outputExpected = MakeTensor<T, 3>(outputTensorInfo4, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        armnnUtils::QuantizedVector<T>({
             61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
             66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
             71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
             76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
             81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
             86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
-        })
+        },
+        qScale, qOffset)
     ));
 
     // NOTE: as a corollary of the splitting of x and y restriction the x and y values of the view origins
@@ -253,29 +258,31 @@ LayerTestResult<T, 3> CopyViaSplitterTestImpl(
     float qScale, int32_t qOffset)
 {
     const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, ArmnnType, qScale, qOffset);
-    auto input = MakeTensor<T, 3>(tensorInfo, QuantizedVector<T>(qScale, qOffset,
-                                                                 {
-                                                                     1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
-                                                                     6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
-                                                                     11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
-                                                                     16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
-                                                                     21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
-                                                                     26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
-
-                                                                     31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
-                                                                     36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
-                                                                     41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
-                                                                     46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
-                                                                     51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
-                                                                     56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
-
-                                                                     61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
-                                                                     66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
-                                                                     71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
-                                                                     76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
-                                                                     81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
-                                                                     86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
-                                                                 }));
+    auto input = MakeTensor<T, 3>(
+        tensorInfo,
+        armnnUtils::QuantizedVector<T>({
+             1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
+             6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
+            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
+            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
+            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
+            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
+
+            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
+            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
+            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
+            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
+            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
+            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
+
+            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
+            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
+            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
+            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
+            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
+            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
+        },
+        qScale, qOffset));
 
     std::vector<unsigned int> origin = { 0, 0, 0 };
     armnn::SplitterQueueDescriptor::ViewOrigin window(origin);
diff --git a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
index b32e622..515b5a0 100644
@@ -5,6 +5,7 @@
 
 #include "StridedSliceTestImpl.hpp"
 
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -39,11 +40,11 @@ LayerTestResult<T, OutDim> StridedSliceTestImpl(
     }
 
     boost::multi_array<T, InDim> input =
-        MakeTensor<T, InDim>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+        MakeTensor<T, InDim>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
 
     LayerTestResult<T, OutDim> ret(outputTensorInfo);
     ret.outputExpected =
-        MakeTensor<T, OutDim>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, outputExpectedData));
+        MakeTensor<T, OutDim>(outputTensorInfo, armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle =
         workloadFactory.CreateTensorHandle(inputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.hpp
index 7391f9c..a2b477c 100644
@@ -8,12 +8,12 @@
 #include <armnn/ArmNN.hpp>
 
 #include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <backendsCommon/CpuTensorHandle.hpp>
 
 #include <backendsCommon/test/DataLayoutUtils.hpp>
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -146,14 +146,16 @@ LayerTestResult<T, 4> TransposeConvolution2dTest(
     TensorData<T> input =
     {
         inputInfo,
-        QuantizedVector<T>(inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(), inputData)
+        armnnUtils::QuantizedVector<T>(inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset())
     };
 
     // set up weights
     TensorData<T> weights =
     {
         weightsInfo,
-        QuantizedVector<T>(weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(), weightsData)
+        armnnUtils::QuantizedVector<T>(weightsData,
+                                       weightsInfo.GetQuantizationScale(),
+                                       weightsInfo.GetQuantizationOffset())
     };
 
     // set up biases
@@ -164,7 +166,9 @@ LayerTestResult<T, 4> TransposeConvolution2dTest(
         TensorData<BT> biases =
         {
             biasesInfo,
-            QuantizedVector<BT>(biasesInfo.GetQuantizationScale(), biasesInfo.GetQuantizationOffset(), biasesData)
+            armnnUtils::QuantizedVector<BT>(biasesData,
+                                            biasesInfo.GetQuantizationScale(),
+                                            biasesInfo.GetQuantizationOffset())
         };
 
         optionalBiases = Optional<TensorData<BT>>(biases);
@@ -186,9 +190,9 @@ LayerTestResult<T, 4> TransposeConvolution2dTest(
     LayerTestResult<T, 4> testResult(outputInfo);
     testResult.output         = MakeTensor<T, 4>(outputInfo, output.second);
     testResult.outputExpected = MakeTensor<T, 4>(outputInfo,
-                                                 QuantizedVector<T>(outputInfo.GetQuantizationScale(),
-                                                                    outputInfo.GetQuantizationOffset(),
-                                                                    expectedOutputData));
+                                                 armnnUtils::QuantizedVector<T>(expectedOutputData,
+                                                                                outputInfo.GetQuantizationScale(),
+                                                                                outputInfo.GetQuantizationOffset()));
 
     return testResult;
 }
diff --git a/src/backends/cl/test/Fp16SupportTest.cpp b/src/backends/cl/test/Fp16SupportTest.cpp
index dac1ebc..f117c92 100644
@@ -12,7 +12,6 @@
 #include <Graph.hpp>
 #include <Optimizer.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
-#include <backendsCommon/test/QuantizeHelper.hpp>
 
 #include <boost/core/ignore_unused.hpp>
 #include <boost/test/unit_test.hpp>
diff --git a/src/backends/cl/test/OpenClTimerTest.cpp b/src/backends/cl/test/OpenClTimerTest.cpp
index 1eeb9ed..13620c4 100644
@@ -51,40 +51,27 @@ BOOST_AUTO_TEST_CASE(OpenClTimerBatchNorm)
     const unsigned int height   = 3;
     const unsigned int channels = 2;
     const unsigned int num      = 1;
-    int32_t qOffset = 0;
-    float qScale = 0.f;
 
-    TensorInfo inputTensorInfo({num, channels, height, width}, DataType::Float32);
+    TensorInfo inputTensorInfo( {num, channels, height, width}, DataType::Float32);
     TensorInfo outputTensorInfo({num, channels, height, width}, DataType::Float32);
     TensorInfo tensorInfo({channels}, DataType::Float32);
 
-    // Set quantization parameters if the requested type is a quantized type.
-    if(IsQuantizedType<float>())
-    {
-         inputTensorInfo.SetQuantizationScale(qScale);
-         inputTensorInfo.SetQuantizationOffset(qOffset);
-         outputTensorInfo.SetQuantizationScale(qScale);
-         outputTensorInfo.SetQuantizationOffset(qOffset);
-         tensorInfo.SetQuantizationScale(qScale);
-         tensorInfo.SetQuantizationOffset(qOffset);
-    }
-
     auto input = MakeTensor<float, 4>(inputTensorInfo,
-    QuantizedVector<float>(qScale, qOffset,
-    {
-        1.f, 4.f,
-        4.f, 2.f,
-        1.f, 6.f,
-
-        1.f, 1.f,
-        4.f, 1.f,
-        -2.f, 4.f
-    }));
+        {
+             1.f, 4.f,
+             4.f, 2.f,
+             1.f, 6.f,
+
+             1.f, 1.f,
+             4.f, 1.f,
+            -2.f, 4.f
+        });
+
     // these values are per-channel of the input
-    auto mean     = MakeTensor<float, 1>(tensorInfo, QuantizedVector<float>(qScale, qOffset, {3, -2}));
-    auto variance = MakeTensor<float, 1>(tensorInfo, QuantizedVector<float>(qScale, qOffset, {4, 9}));
-    auto beta     = MakeTensor<float, 1>(tensorInfo, QuantizedVector<float>(qScale, qOffset, {3, 2}));
-    auto gamma    = MakeTensor<float, 1>(tensorInfo, QuantizedVector<float>(qScale, qOffset, {2, 1}));
+    auto mean     = MakeTensor<float, 1>(tensorInfo, { 3.f, -2.f });
+    auto variance = MakeTensor<float, 1>(tensorInfo, { 4.f,  9.f });
+    auto beta     = MakeTensor<float, 1>(tensorInfo, { 3.f,  2.f });
+    auto gamma    = MakeTensor<float, 1>(tensorInfo, { 2.f,  1.f });
 
     std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
diff --git a/tests/DeepSpeechV1Database.hpp b/tests/DeepSpeechV1Database.hpp
index 037c810..182f373 100644
@@ -11,7 +11,6 @@
 #include <vector>
 
 #include <armnn/TypesUtils.hpp>
-#include <backendsCommon/test/QuantizeHelper.hpp>
 
 #include <boost/log/trivial.hpp>
 #include <boost/numeric/conversion/cast.hpp>
@@ -202,4 +201,3 @@ std::unique_ptr<DeepSpeechV1TestCaseData> DeepSpeechV1Database::GetTestCaseData(
 }
 
 } // anonymous namespace
-
diff --git a/tests/MobileNetSsdDatabase.hpp b/tests/MobileNetSsdDatabase.hpp
index 349d1ad..1a99ed7 100644
@@ -2,24 +2,23 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
 #pragma once
 
+#include "InferenceTestImage.hpp"
 #include "ObjectDetectionCommon.hpp"
 
-#include <memory>
-#include <string>
-#include <vector>
+#include <QuantizeHelper.hpp>
 
 #include <armnn/TypesUtils.hpp>
-#include <backendsCommon/test/QuantizeHelper.hpp>
 
 #include <boost/log/trivial.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
 #include <array>
+#include <memory>
 #include <string>
-
-#include "InferenceTestImage.hpp"
+#include <vector>
 
 namespace
 {
@@ -97,7 +96,7 @@ std::unique_ptr<MobileNetSsdTestCaseData> MobileNetSsdDatabase::GetTestCaseData(
 
         // Get image data as a vector of floats
         std::vector<float> floatImageData = GetImageDataAsNormalizedFloats(ImageChannelLayout::Rgb, image);
-        imageData = QuantizedVector<uint8_t>(m_Scale, m_Offset, floatImageData);
+        imageData = armnnUtils::QuantizedVector<uint8_t>(floatImageData, m_Scale, m_Offset);
     }
     catch (const InferenceTestImageException& e)
     {