From 48623a0f6f4681ce0d9525b1587b7f96bfd58519 Mon Sep 17 00:00:00 2001 From: Aron Virginas-Tar Date: Tue, 22 Oct 2019 10:00:28 +0100 Subject: [PATCH] IVGCVSW-4018 Move QuantizeHelper.hpp to armnnUtils * Moved QuantizeHelper.hpp to armnnUtils * Reordered parameters for QuantizedVector and added default values for qScale and qOffset to make life easier when using the function for non-quantized types such as Float16 Signed-off-by: Aron Virginas-Tar Change-Id: I28c263dfa425f1316feccb4116839a84f5d568e5 --- CMakeLists.txt | 2 + src/armnn/test/EndToEndTest.cpp | 33 - src/armnn/test/QuantizerTest.cpp | 14 +- src/armnn/test/TensorHelpers.hpp | 28 +- .../test/DeserializeComparison.cpp | 7 +- .../test/DetectionPostProcess.cpp | 7 +- .../test => armnnUtils}/QuantizeHelper.hpp | 17 +- src/armnnUtils/test/QuantizeHelperTest.cpp | 46 + .../backendsCommon/test/AbsEndToEndTestImpl.hpp | 5 +- src/backends/backendsCommon/test/CMakeLists.txt | 1 - .../test/DepthToSpaceEndToEndTestImpl.hpp | 7 +- .../backendsCommon/test/EndToEndTestImpl.hpp | 13 +- .../backendsCommon/test/ResizeEndToEndTestImpl.hpp | 7 +- .../TransposeConvolution2dEndToEndTestImpl.hpp | 11 +- .../test/layerTests/ActivationTestImpl.cpp | 13 +- .../test/layerTests/AdditionTestImpl.cpp | 48 +- .../test/layerTests/BatchNormalizationTestImpl.cpp | 35 +- .../test/layerTests/ComparisonTestImpl.cpp | 6 +- .../test/layerTests/ConcatTestImpl.cpp | 2158 ++++++++++---------- .../test/layerTests/ConstantTestImpl.cpp | 77 +- .../test/layerTests/Conv2dTestImpl.cpp | 543 ++--- .../test/layerTests/DebugTestImpl.cpp | 5 +- .../test/layerTests/DepthToSpaceTestImpl.cpp | 8 +- .../test/layerTests/FullyConnectedTestImpl.cpp | 17 +- .../layerTests/InstanceNormalizationTestImpl.cpp | 10 +- .../test/layerTests/L2NormalizationTestImpl.cpp | 28 +- .../test/layerTests/LogSoftmaxTestImpl.cpp | 6 +- .../test/layerTests/LstmTestImpl.cpp | 91 +- .../backendsCommon/test/layerTests/PadTestImpl.cpp | 483 +++-- .../test/layerTests/PermuteTestImpl.hpp | 1 - .../test/layerTests/Pooling2dTestImpl.cpp | 188 +- .../test/layerTests/PreluTestImpl.hpp | 25 +- .../test/layerTests/ResizeTestImpl.hpp | 119 +- .../test/layerTests/SliceTestImpl.cpp | 5 +- .../test/layerTests/SoftmaxTestImpl.cpp | 8 +- .../test/layerTests/SpaceToBatchNdTestImpl.cpp | 7 +- .../test/layerTests/SpaceToDepthTestImpl.cpp | 7 +- .../test/layerTests/SplitterTestImpl.cpp | 75 +- .../test/layerTests/StridedSliceTestImpl.cpp | 5 +- .../layerTests/TransposeConvolution2dTestImpl.hpp | 18 +- src/backends/cl/test/Fp16SupportTest.cpp | 1 - src/backends/cl/test/OpenClTimerTest.cpp | 43 +- tests/DeepSpeechV1Database.hpp | 2 - tests/MobileNetSsdDatabase.hpp | 13 +- 44 files changed, 2255 insertions(+), 1988 deletions(-) rename src/{backends/backendsCommon/test => armnnUtils}/QuantizeHelper.hpp (81%) create mode 100644 src/armnnUtils/test/QuantizeHelperTest.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 6edc57f..38605ca 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -63,6 +63,7 @@ list(APPEND armnnUtils_sources src/armnnUtils/ParserPrototxtFixture.hpp src/armnnUtils/PrototxtConversions.hpp src/armnnUtils/PrototxtConversions.cpp + src/armnnUtils/QuantizeHelper.hpp src/armnnUtils/TensorIOUtils.hpp src/armnnUtils/TensorUtils.hpp src/armnnUtils/TensorUtils.cpp @@ -612,6 +613,7 @@ if(BUILD_UNIT_TESTS) src/armnn/test/UnitTests.cpp src/armnn/test/UnitTests.hpp src/armnn/test/UtilsTests.cpp + src/armnnUtils/test/QuantizeHelperTest.cpp src/armnnUtils/test/PrototxtConversionsTest.cpp src/armnnUtils/test/ParserHelperTest.cpp src/armnnUtils/test/TensorUtilsTest.cpp
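To make the reordering described in the commit message concrete, here is a minimal before/after call-site sketch (not part of the patch; someFloatData is a placeholder, and the assumption is that armnn::Half comes from Half.hpp in armnnUtils):

    #include <QuantizeHelper.hpp>
    #include <Half.hpp>

    #include <cstdint>
    #include <vector>

    void CallSiteSketch()
    {
        const std::vector<float> someFloatData { 0.5f, 1.5f };

        // After this patch: the data comes first, and qScale/qOffset default to
        // 1.f and 0, so Float16 callers can drop them entirely. Before the patch
        // the same call had to read QuantizedVector<armnn::Half>(1.f, 0, someFloatData).
        std::vector<armnn::Half> fp16 =
            armnnUtils::QuantizedVector<armnn::Half>(someFloatData);

        // Quantized types still pass an explicit scale and offset, now after the data.
        std::vector<uint8_t> qData =
            armnnUtils::QuantizedVector<uint8_t>(someFloatData, 0.5f, 128);
    }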
diff --git a/src/armnn/test/EndToEndTest.cpp b/src/armnn/test/EndToEndTest.cpp index d25e197..df84be4 100644 --- a/src/armnn/test/EndToEndTest.cpp +++ b/src/armnn/test/EndToEndTest.cpp @@ -7,8 +7,6 @@ #include #include -#include - #include #include @@ -16,37 +14,6 @@ BOOST_AUTO_TEST_SUITE(EndToEnd) -namespace -{ - -template<typename T> -bool IsFloatIterFunc(T iter) -{ - boost::ignore_unused(iter); - return IsFloatingPointIterator<T>::value; -} - -} //namespace - -BOOST_AUTO_TEST_CASE(QuantizedHelper) -{ - std::vector<float> fArray; - BOOST_TEST(IsFloatIterFunc(fArray.begin()) == true); - BOOST_TEST(IsFloatIterFunc(fArray.cbegin()) == true); - - std::vector<double> dArray; - BOOST_TEST(IsFloatIterFunc(dArray.begin()) == true); - - std::vector<int> iArray; - BOOST_TEST(IsFloatIterFunc(iArray.begin()) == false); - - float floats[5]; - BOOST_TEST(IsFloatIterFunc(&floats[0]) == true); - - int ints[5]; - BOOST_TEST(IsFloatIterFunc(&ints[0]) == false); -} - BOOST_AUTO_TEST_CASE(ErrorOnLoadNetwork) { using namespace armnn; diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp index 90fd5e9..3f57ce8 100644 --- a/src/armnn/test/QuantizerTest.cpp +++ b/src/armnn/test/QuantizerTest.cpp @@ -4,17 +4,19 @@ // #include +#include #include -#include #include -#include "armnn/LayerVisitorBase.hpp" +#include + +#include + #include "../Graph.hpp" #include "../Network.hpp" #include "../NetworkQuantizerUtils.hpp" #include "../OverrideInputRangeVisitor.hpp" #include "../RangeTracker.hpp" -#include "../backends/backendsCommon/test/QuantizeHelper.hpp" #include "../../armnnQuantizer/CommandLineProcessor.hpp" #include @@ -2294,9 +2296,9 @@ std::vector<uint8_t> SetupQuantize(float value) std::vector<float> input({ value, 0.0f, 0.0f, 1.0f }); const std::vector<float> &inputRef = input; - auto output = QuantizedVector<uint8_t>(inputInfo.GetQuantizationScale(), - inputInfo.GetQuantizationOffset(), - inputRef); + auto output = armnnUtils::QuantizedVector<uint8_t>(inputRef, + inputInfo.GetQuantizationScale(), + inputInfo.GetQuantizationOffset()); return output; } diff --git a/src/armnn/test/TensorHelpers.hpp b/src/armnn/test/TensorHelpers.hpp index 35e471e..3f85893 100644 --- a/src/armnn/test/TensorHelpers.hpp +++ b/src/armnn/test/TensorHelpers.hpp @@ -4,23 +4,21 @@ // #pragma once -#include -#include -#include -#include -#include +#include + +#include #include -#include +#include +#include #include #include -#include - -#include - -#include +#include +#include +#include #include +#include constexpr float g_FloatCloseToZeroTolerance = 1.0e-6f; @@ -235,7 +233,9 @@ boost::multi_array<T, n> MakeRandomTensor(const armnn::TensorInfo& tensorInfo, { init[i] = dist(gen); } - float qScale = tensorInfo.GetQuantizationScale(); - int32_t qOffset = tensorInfo.GetQuantizationOffset(); - return MakeTensor<T, n>(tensorInfo, QuantizedVector<T>(qScale, qOffset, init)); + + const float qScale = tensorInfo.GetQuantizationScale(); + const int32_t qOffset = tensorInfo.GetQuantizationOffset(); + + return MakeTensor<T, n>(tensorInfo, armnnUtils::QuantizedVector<T>(init, qScale, qOffset)); } diff --git a/src/armnnDeserializer/test/DeserializeComparison.cpp b/src/armnnDeserializer/test/DeserializeComparison.cpp index 9a2fabf..6616398 100644 --- a/src/armnnDeserializer/test/DeserializeComparison.cpp +++ b/src/armnnDeserializer/test/DeserializeComparison.cpp @@ -6,10 +6,9 @@ #include "ParserFlatbuffersSerializeFixture.hpp" #include "../Deserializer.hpp" +#include <QuantizeHelper.hpp> #include -#include - #include #include @@ -32,8 +31,8 @@ BOOST_FIXTURE_TEST_CASE(operation##dataType,
Simple##operation##dataType##Fixtur constexpr int32_t qOffset = 0; \ RunTest<4, armnn::DataType::dataType, armnn::DataType::Boolean>( \ 0, \ - {{ "InputLayer0", QuantizedVector(qScale, qOffset, s_TestData.m_InputData0) }, \ - { "InputLayer1", QuantizedVector(qScale, qOffset, s_TestData.m_InputData1) }}, \ + {{ "InputLayer0", armnnUtils::QuantizedVector(s_TestData.m_InputData0, qScale, qOffset) }, \ + { "InputLayer1", armnnUtils::QuantizedVector(s_TestData.m_InputData1, qScale, qOffset) }}, \ {{ "OutputLayer", s_TestData.m_Output##operation }}); \ } diff --git a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp index 638238d..1ec87f9 100644 --- a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp +++ b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp @@ -12,6 +12,8 @@ #include "ParserPrototxtFixture.hpp" #include "ParserHelper.hpp" +#include <QuantizeHelper.hpp> + BOOST_AUTO_TEST_SUITE(TensorflowLiteParser) struct DetectionPostProcessFixture : ParserFlatbuffersFixture @@ -200,8 +202,9 @@ BOOST_FIXTURE_TEST_CASE( ParseDetectionPostProcess, ParseDetectionPostProcessCus // Quantize inputs and outputs using QuantizedContainer = std::vector<uint8_t>; - QuantizedContainer quantBoxEncodings = QuantizedVector<uint8_t>(1.0f, 1, boxEncodings); - QuantizedContainer quantScores = QuantizedVector<uint8_t>(0.01f, 0, scores); + + QuantizedContainer quantBoxEncodings = armnnUtils::QuantizedVector<uint8_t>(boxEncodings, 1.00f, 1); + QuantizedContainer quantScores = armnnUtils::QuantizedVector<uint8_t>(scores, 0.01f, 0); std::map<std::string, QuantizedContainer> input = {
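As a quick sanity check on those scales (armnn quantization follows the usual affine mapping, roughly q = round(f / qScale) + qOffset, clamped to the target range; the values below are illustrative, not taken from the test data):

    #include <QuantizeHelper.hpp>
    #include <cstdint>
    #include <vector>

    void ScaleSanityCheck()
    {
        // A score of 0.95f with scale 0.01f and offset 0 maps to 95;
        // a box encoding of 1.0f with scale 1.0f and offset 1 maps to 2.
        std::vector<uint8_t> qScore = armnnUtils::QuantizedVector<uint8_t>({ 0.95f }, 0.01f, 0); // { 95 }
        std::vector<uint8_t> qBox   = armnnUtils::QuantizedVector<uint8_t>({ 1.0f },  1.0f,  1); // { 2 }
    }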
diff --git a/src/backends/backendsCommon/test/QuantizeHelper.hpp b/src/armnnUtils/QuantizeHelper.hpp similarity index 81% rename from src/backends/backendsCommon/test/QuantizeHelper.hpp rename to src/armnnUtils/QuantizeHelper.hpp index b7ca3b3..a7f68c5 100644 --- a/src/backends/backendsCommon/test/QuantizeHelper.hpp +++ b/src/armnnUtils/QuantizeHelper.hpp @@ -17,6 +17,9 @@ #include #include +namespace armnnUtils +{ + template<typename T, bool DoQuantize=true> struct SelectiveQuantizer { @@ -84,7 +87,7 @@ struct IsFloatingPointIterator template<typename T, typename FloatIt, typename std::enable_if<IsFloatingPointIterator<FloatIt>::value, int>::type=0 // Makes sure fp iterator is valid. > -std::vector<T> QuantizedVector(float qScale, int32_t qOffset, FloatIt first, FloatIt last) +std::vector<T> QuantizedVector(FloatIt first, FloatIt last, float qScale, int32_t qOffset) { std::vector<T> quantized; quantized.reserve(boost::numeric_cast<size_t>(std::distance(first, last))); @@ -92,7 +95,7 @@ std::vector<T> QuantizedVector(float qScale, int32_t qOffset, FloatIt first, Flo for (auto it = first; it != last; ++it) { auto f = *it; - T q =SelectiveQuantize<T>(f, qScale, qOffset); + T q = SelectiveQuantize<T>(f, qScale, qOffset); quantized.push_back(q); } @@ -100,13 +103,15 @@ std::vector<T> QuantizedVector(float qScale, int32_t qOffset, FloatIt first, Flo } template<typename T> -std::vector<T> QuantizedVector(float qScale, int32_t qOffset, const std::vector<float>& array) +std::vector<T> QuantizedVector(const std::vector<float>& array, float qScale = 1.f, int32_t qOffset = 0) { - return QuantizedVector<T>(qScale, qOffset, array.begin(), array.end()); + return QuantizedVector<T>(array.begin(), array.end(), qScale, qOffset); } template<typename T> -std::vector<T> QuantizedVector(float qScale, int32_t qOffset, std::initializer_list<float> array) +std::vector<T> QuantizedVector(std::initializer_list<float> array, float qScale = 1.f, int32_t qOffset = 0) { - return QuantizedVector<T>(qScale, qOffset, array.begin(), array.end()); + return QuantizedVector<T>(array.begin(), array.end(), qScale, qOffset); } + +} // namespace armnnUtils diff --git a/src/armnnUtils/test/QuantizeHelperTest.cpp b/src/armnnUtils/test/QuantizeHelperTest.cpp new file mode 100644 index 0000000..7e781d0 --- /dev/null +++ b/src/armnnUtils/test/QuantizeHelperTest.cpp @@ -0,0 +1,46 @@ +// +// Copyright © 2019 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include + +#include +#include + +#include + BOOST_AUTO_TEST_SUITE(QuantizeHelper) namespace { template<typename T> bool IsFloatIterFunc(T iter) { boost::ignore_unused(iter); return armnnUtils::IsFloatingPointIterator<T>::value; } } // anonymous namespace BOOST_AUTO_TEST_CASE(IsFloatIterFuncTest) { std::vector<float> fArray; BOOST_TEST(IsFloatIterFunc(fArray.begin()) == true); BOOST_TEST(IsFloatIterFunc(fArray.cbegin()) == true); std::vector<double> dArray; BOOST_TEST(IsFloatIterFunc(dArray.begin()) == true); std::vector<int> iArray; BOOST_TEST(IsFloatIterFunc(iArray.begin()) == false); float floats[5]; BOOST_TEST(IsFloatIterFunc(&floats[0]) == true); int ints[5]; BOOST_TEST(IsFloatIterFunc(&ints[0]) == false); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp index c46376b..dd851e3 100644 --- a/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp @@ -7,6 +7,7 @@ #include "CommonTestUtils.hpp" +#include <QuantizeHelper.hpp> #include #include @@ -53,8 +54,8 @@ void AbsEndToEnd(const std::vector<armnn::BackendId>& backends) }; // quantize data - std::vector<T> qInputData = QuantizedVector<T>(qScale, qOffset, inputData); - std::vector<T> qExpectedOutputData = QuantizedVector<T>(qScale, qOffset, expectedOutputData); + std::vector<T> qInputData = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset); + std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset); INetworkPtr network = CreateAbsNetwork(tensorInfo);
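The runtime checks in the new test above could equally be expressed at compile time; a small sketch, assuming only that the trait lives in namespace armnnUtils as the moved header shows:

    #include <QuantizeHelper.hpp>
    #include <vector>

    // The trait admits float/double iterators and rejects integral ones.
    static_assert(armnnUtils::IsFloatingPointIterator<std::vector<float>::iterator>::value,
                  "float iterators qualify");
    static_assert(!armnnUtils::IsFloatingPointIterator<std::vector<int>::iterator>::value,
                  "int iterators do not");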
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt index 8a96318..f310ef7 100644 --- a/src/backends/backendsCommon/test/CMakeLists.txt +++ b/src/backends/backendsCommon/test/CMakeLists.txt @@ -36,7 +36,6 @@ list(APPEND armnnBackendsCommonUnitTests_sources OptimizeSubgraphViewTests.cpp OptimizationViewsTests.cpp PreluEndToEndTestImpl.hpp - QuantizeHelper.hpp QuantizedLstmEndToEndTestImpl.cpp QuantizedLstmEndToEndTestImpl.hpp ResizeEndToEndTestImpl.hpp diff --git a/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp index cf4db1d..fd0b12f 100644 --- a/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp @@ -9,8 +9,9 @@ #include +#include <QuantizeHelper.hpp> + #include -#include namespace { @@ -58,8 +59,8 @@ void DepthToSpaceEndToEndImpl(const std::vector<armnn::BackendId>& backends, outputInfo.SetQuantizationOffset(qOffset); } - std::vector<T> inputData = QuantizedVector<T>(qScale, qOffset, floatInputData); - std::vector<T> expectedOutputData = QuantizedVector<T>(qScale, qOffset, floatExpectedOutputData); + std::vector<T> inputData = armnnUtils::QuantizedVector<T>(floatInputData, qScale, qOffset); + std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(floatExpectedOutputData, qScale, qOffset); // Permute tensors from NHWC to NCHW (if needed) if (descriptor.m_DataLayout == DataLayout::NCHW) diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp index ee9d2bc..d6f589f 100644 --- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp @@ -4,13 +4,12 @@ // #pragma once -#include - #include #include -#include -#include +#include +#include +#include #include @@ -99,9 +98,9 @@ inline bool ConstantUsageUint8Test(const std::vector<BackendId>& backends) return ConstantUsageTest(backends, commonTensorInfo, - QuantizedVector<uint8_t>(scale, offset, { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f }), // Input. - QuantizedVector<uint8_t>(scale, offset, { 6.f, 5.f, 4.f, 3.f, 2.f, 1.f }), // Const input. - QuantizedVector<uint8_t>(scale, offset, { 7.f, 7.f, 7.f, 7.f, 7.f, 7.f }) // Expected output. + armnnUtils::QuantizedVector<uint8_t>({ 1.f, 2.f, 3.f, 4.f, 5.f, 6.f }, scale, offset), // Input. + armnnUtils::QuantizedVector<uint8_t>({ 6.f, 5.f, 4.f, 3.f, 2.f, 1.f }, scale, offset), // Const input. + armnnUtils::QuantizedVector<uint8_t>({ 7.f, 7.f, 7.f, 7.f, 7.f, 7.f }, scale, offset) // Expected output. ); }
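One convention visible in the transpose convolution test that follows is worth calling out: bias data is quantized with qScale * qScale and a zero offset, matching the usual rule that a bias scale is the product of the input and weight scales (both equal to qScale in these tests). A sketch with assumed numbers:

    #include <QuantizeHelper.hpp>
    #include <cstdint>
    #include <vector>

    void BiasScaleSketch()
    {
        // Assumed scales, for illustration: input 0.5f, weights 0.5f -> bias scale 0.25f.
        const float inputScale  = 0.5f;
        const float weightScale = 0.5f;
        std::vector<int32_t> qBiases =
            armnnUtils::QuantizedVector<int32_t>({ 1.f, -1.f }, inputScale * weightScale, 0);
    }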
diff --git a/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp index 4bf9d51..1eeb944 100644 --- a/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp @@ -9,6 +9,7 @@ #include #include +#include <QuantizeHelper.hpp> #include #include @@ -119,8 +120,8 @@ void ResizeEndToEnd(const std::vector<armnn::BackendId>& backends, } // quantize data - std::vector<T> qInputData = QuantizedVector<T>(qScale, qOffset, inputData); - std::vector<T> qExpectedOutputData = QuantizedVector<T>(qScale, qOffset, expectedOutputData); + std::vector<T> qInputData = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset); + std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset); INetworkPtr network = CreateResizeNetwork(descriptor, inputInfo, outputInfo); @@ -144,4 +145,4 @@ void ResizeNearestNeighborEndToEnd(const std::vector<armnn::BackendId>& backends armnn::DataLayout dataLayout) { ResizeEndToEnd(backends, dataLayout, armnn::ResizeMethod::NearestNeighbor); -} \ No newline at end of file +} diff --git a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp index 9d6312e..4935a18 100644 --- a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp @@ -9,6 +9,7 @@ #include #include +#include <QuantizeHelper.hpp> #include #include @@ -129,12 +130,12 @@ void TransposeConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backend } // quantize data - std::vector<T> qInputData = QuantizedVector<T>(qScale, qOffset, inputData); - std::vector<T> qWeightsData = QuantizedVector<T>(qScale, qOffset, weightsData); - std::vector<T> qExpectedOutputData = QuantizedVector<T>(qScale, qOffset, expectedOutputData); + std::vector<T> qInputData = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset); + std::vector<T> qWeightsData = armnnUtils::QuantizedVector<T>(weightsData, qScale, qOffset); + std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset); using BT = ResolveType<ArmnnBType>; - std::vector<BT> qBiasesData = QuantizedVector<BT>(qScale * qScale, 0, biasesData); + std::vector<BT> qBiasesData = armnnUtils::QuantizedVector<BT>(biasesData, qScale * qScale, 0); ConstTensor weights(weightsInfo, qWeightsData); ConstTensor biases(biasesInfo, qBiasesData); @@ -150,4 +151,4 @@ void TransposeConvolution2dEndToEnd(const std::vector<armnn::BackendId>& backend { { 0, qInputData } }, { { 0, qExpectedOutputData } }, backends); -} \ No newline at end of file +} diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp index 075c29d..a45c6d5 100644 --- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp @@ -5,12 +5,12 @@ #include "ActivationTestImpl.hpp" +#include <QuantizeHelper.hpp> #include #include #include -#include #include #include @@ -424,7 +424,7 @@ LayerTestResult<T, 4> SimpleActivationTest( LayerTestResult<T, 4> result(inputTensorInfo); - auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(scale, offset, inputData)); + auto input = MakeTensor<T, 4>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, scale, offset)); std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -451,8 +451,8 @@ LayerTestResult<T, 4>
SimpleActivationTest( CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); // Calculated manually. - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(outScale, outOffset, - outputExpectedData)); + result.outputExpected = + MakeTensor(outputTensorInfo, armnnUtils::QuantizedVector(outputExpectedData, outScale, outOffset)); return result; } @@ -812,7 +812,7 @@ LayerTestResult SqrtNNTest( LayerTestResult result(inputTensorInfo); - auto input = MakeTensor(inputTensorInfo, QuantizedVector(0.f, 0.f, inputData)); + auto input = MakeTensor(inputTensorInfo, inputData); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -836,8 +836,7 @@ LayerTestResult SqrtNNTest( CopyDataFromITensorHandle(&result.output[0][0][0][0][0], outputHandle.get()); // Calculated manually. - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(0.f, 0.f, - outputExpectedData)); + result.outputExpected = MakeTensor(outputTensorInfo, outputExpectedData); return result; }; diff --git a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp index c6d3982..247821b 100644 --- a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp @@ -7,6 +7,8 @@ #include "ElementwiseTestImpl.hpp" +#include + template<> std::unique_ptr CreateWorkload( const armnn::IWorkloadFactory& workloadFactory, @@ -177,7 +179,7 @@ LayerTestResult AdditionBroadcastTestImpl( outputTensorInfo.SetQuantizationOffset(qOffset); } - auto input1 = MakeTensor(inputTensorInfo1, QuantizedVector(qScale, qOffset, + auto input1 = MakeTensor(inputTensorInfo1, armnnUtils::QuantizedVector( { 0.0f, 1.0f, @@ -187,16 +189,18 @@ LayerTestResult AdditionBroadcastTestImpl( 4.0f, 5.0f, - })); + }, + qScale, qOffset)); - auto input2 = MakeTensor(inputTensorInfo2, QuantizedVector(qScale, qOffset, + auto input2 = MakeTensor(inputTensorInfo2, armnnUtils::QuantizedVector( { 0.5f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5f, - })); + }, + qScale, qOffset)); LayerTestResult ret(outputTensorInfo); - ret.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, + ret.outputExpected = MakeTensor(outputTensorInfo, armnnUtils::QuantizedVector( { 0.5f, 1.5f, 2.5f, 4.5f, 5.5f, 6.5f, @@ -206,7 +210,8 @@ LayerTestResult AdditionBroadcastTestImpl( 4.5f, 5.5f, 6.5f, 8.5f, 9.5f, 10.5f, - })); + }, + qScale, qOffset)); std::unique_ptr inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1); std::unique_ptr inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2); @@ -256,31 +261,34 @@ LayerTestResult AdditionBroadcast1ElementTestImpl( outputTensorInfo.SetQuantizationOffset(qOffset); } - auto input1 = MakeTensor(inputTensorInfo1, QuantizedVector(qScale, qOffset, + auto input1 = MakeTensor(inputTensorInfo1, armnnUtils::QuantizedVector( { - 0.0f, 1.0f, 2.0f, - 3.0f, 4.0f, 5.0f, - 6.0f, 7.0f, 8.0f, - 9.0f, 10.0f, 11.0f, + 0.0f, 1.0f, 2.0f, + 3.0f, 4.0f, 5.0f, + 6.0f, 7.0f, 8.0f, + 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, - })); + }, + qScale, qOffset)); - auto input2 = MakeTensor(inputTensorInfo2, QuantizedVector(qScale, qOffset, + auto input2 = MakeTensor(inputTensorInfo2, armnnUtils::QuantizedVector( { 0.5f, - })); + }, + qScale, qOffset)); LayerTestResult ret(outputTensorInfo); - ret.outputExpected = 
MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, + ret.outputExpected = MakeTensor(outputTensorInfo, armnnUtils::QuantizedVector( { - 0.5f, 1.5f, 2.5f, - 3.5f, 4.5f, 5.5f, - 6.5f, 7.5f, 8.5f, - 9.5f, 10.5f, 11.5f, + 0.5f, 1.5f, 2.5f, + 3.5f, 4.5f, 5.5f, + 6.5f, 7.5f, 8.5f, + 9.5f, 10.5f, 11.5f, 12.5f, 13.5f, 14.5f, 15.5f, 16.5f, 17.5f, - })); + }, + qScale, qOffset)); std::unique_ptr inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1); std::unique_ptr inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2); diff --git a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp index ef43088..68cda7c 100644 --- a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp @@ -6,6 +6,7 @@ #include "BatchNormalizationTestImpl.hpp" #include +#include #include #include @@ -14,7 +15,6 @@ #include #include -#include #include #include @@ -23,6 +23,8 @@ namespace { +using namespace armnnUtils; + template> LayerTestResult BatchNormTestImpl( armnn::IWorkloadFactory& workloadFactory, @@ -53,19 +55,18 @@ LayerTestResult BatchNormTestImpl( tensorInfo.SetQuantizationOffset(qOffset); } - auto inputTensor = MakeTensor(inputTensorInfo, - QuantizedVector(qScale, qOffset, inputValues)); + auto inputTensor = MakeTensor(inputTensorInfo, QuantizedVector(inputValues, qScale, qOffset)); // These values are per-channel of the input. - auto mean = MakeTensor(tensorInfo, QuantizedVector(qScale, qOffset, {3, -2})); - auto variance = MakeTensor(tensorInfo, QuantizedVector(qScale, qOffset, {4, 9})); - auto beta = MakeTensor(tensorInfo, QuantizedVector(qScale, qOffset, {3, 2})); - auto gamma = MakeTensor(tensorInfo, QuantizedVector(qScale, qOffset, {2, 1})); + auto mean = MakeTensor(tensorInfo, QuantizedVector({ 3, -2 }, qScale, qOffset)); + auto variance = MakeTensor(tensorInfo, QuantizedVector({ 4, 9 }, qScale, qOffset)); + auto beta = MakeTensor(tensorInfo, QuantizedVector({ 3, 2 }, qScale, qOffset)); + auto gamma = MakeTensor(tensorInfo, QuantizedVector({ 2, 1 }, qScale, qOffset)); LayerTestResult result(outputTensorInfo); result.outputExpected = MakeTensor(inputTensorInfo, - QuantizedVector(qScale, qOffset, expectedOutputValues)); + QuantizedVector(expectedOutputValues, qScale, qOffset)); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -134,17 +135,18 @@ LayerTestResult BatchNormTestNhwcImpl( } auto input = MakeTensor(inputTensorInfo, - QuantizedVector(qScale, qOffset, + QuantizedVector( { 1.f, 1.f, 4.f, 1.f, 4.f, 4.f, 2.f, 1.f, 1.f, -2.f, 6.f, 4.f - })); + }, + qScale, qOffset)); // These values are per-channel of the input. 
- auto mean = MakeTensor(tensorInfo, QuantizedVector(qScale, qOffset, {3, -2})); - auto variance = MakeTensor(tensorInfo, QuantizedVector(qScale, qOffset, {4, 9})); - auto beta = MakeTensor(tensorInfo, QuantizedVector(qScale, qOffset, {3, 2})); - auto gamma = MakeTensor(tensorInfo, QuantizedVector(qScale, qOffset, {2, 1})); + auto mean = MakeTensor(tensorInfo, QuantizedVector({ 3, -2 }, qScale, qOffset)); + auto variance = MakeTensor(tensorInfo, QuantizedVector({ 4, 9 }, qScale, qOffset)); + auto beta = MakeTensor(tensorInfo, QuantizedVector({ 3, 2 }, qScale, qOffset)); + auto gamma = MakeTensor(tensorInfo, QuantizedVector({ 2, 1 }, qScale, qOffset)); LayerTestResult ret(outputTensorInfo); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); @@ -175,12 +177,13 @@ LayerTestResult BatchNormTestNhwcImpl( // substract mean, divide by standard deviation (with an epsilon to avoid div by 0), // multiply by gamma and add beta ret.outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(qScale, qOffset, + QuantizedVector( { 1.f, 3.f, 4.f, 3.f, 4.f, 4.f, 2.f, 3.f, 1.f, 2.f, 6.f, 4.f - })); + }, + qScale, qOffset)); std::unique_ptr workload = workloadFactory.CreateBatchNormalization(data, info); diff --git a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp index 9da1d42..1c54b85 100644 --- a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp @@ -8,12 +8,12 @@ #include #include +#include #include #include #include -#include #include #include @@ -155,8 +155,8 @@ LayerTestResult ComparisonTestImpl(armnn::IWorkloadFactory& workload { using T = armnn::ResolveType; - std::vector inputData0 = QuantizedVector(quantScale, quantOffset, testData.m_InputData0); - std::vector inputData1 = QuantizedVector(quantScale, quantOffset, testData.m_InputData1); + std::vector inputData0 = armnnUtils::QuantizedVector(testData.m_InputData0, quantScale, quantOffset); + std::vector inputData1 = armnnUtils::QuantizedVector(testData.m_InputData1, quantScale, quantOffset); return ComparisonTestImpl<4, ArmnnInType>( workloadFactory, diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp index 29476e5..e9932c8 100644 --- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp @@ -6,6 +6,7 @@ #include "ConcatTestImpl.hpp" #include +#include #include #include @@ -15,22 +16,25 @@ #include +using namespace armnn; +using namespace armnnUtils; + // // Helper functions and templates // -armnn::OriginsDescriptor CreateDescriptorForConcat( - const std::vector & inputTensorInfos, +OriginsDescriptor CreateDescriptorForConcat( + const std::vector & inputTensorInfos, unsigned int concatDim) { - std::vector shapes; + std::vector shapes; shapes.reserve(inputTensorInfos.size()); - for (const armnn::TensorInfo& it: inputTensorInfos) + for (const TensorInfo& it: inputTensorInfos) { shapes.push_back(it.GetShape()); } - return armnn::CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), concatDim); + return CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), concatDim); } // @@ -40,7 +44,7 @@ armnn::OriginsDescriptor CreateDescriptorForConcat( // bool NeedPermuteForConcat( - const std::vector & inputTensorInfos, + const std::vector & inputTensorInfos, unsigned 
int concatDim) { // See note above. Additionally we expect the input shapes to have the @@ -65,7 +69,7 @@ bool NeedPermuteForConcat( return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1)); } -armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape) +TensorShape ExpandTensorShapeTo3dForPermute(const TensorShape & inputShape) { unsigned int numDims = inputShape.GetNumDimensions(); if (numDims >= 3) @@ -80,13 +84,13 @@ armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & in { newDims[expandedBy+i] = inputShape[i]; } - return armnn::TensorShape(3u, &newDims[0]); + return TensorShape(3u, &newDims[0]); } void Generate3dPermuteVectorForConcat( unsigned int numDimensions, unsigned int & concatDim, - std::pair & permutations) + std::pair & permutations) { BOOST_ASSERT_MSG(numDimensions <= 3, "Only dimensions 1,2 and 3 are supported by this helper"); @@ -96,15 +100,15 @@ void Generate3dPermuteVectorForConcat( if (expandedConcatAxis == 2) { concatDim = 0; - armnn::PermutationVector forwardPermutation({1, 2, 0}); - armnn::PermutationVector reversePermutation({2, 0, 1}); + PermutationVector forwardPermutation({1, 2, 0}); + PermutationVector reversePermutation({2, 0, 1}); permutations = std::make_pair(forwardPermutation, reversePermutation); } else if (expandedConcatAxis == 1) { concatDim = 0; - armnn::PermutationVector forwardPermutation({2, 0, 1}); - armnn::PermutationVector reversePermutation({1, 2, 0}); + PermutationVector forwardPermutation({2, 0, 1}); + PermutationVector reversePermutation({1, 2, 0}); permutations = std::make_pair(forwardPermutation, reversePermutation); } else @@ -115,10 +119,10 @@ void Generate3dPermuteVectorForConcat( } template void PermuteTensorData( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::PermutationVector& mappings, - armnn::TensorInfo & inputTensorInfo, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const PermutationVector& mappings, + TensorInfo & inputTensorInfo, const T * inputData, std::vector& outputData) { @@ -131,18 +135,18 @@ template void PermuteTensorData( return; } - armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings); + TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings); - std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); - std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); - armnn::PermuteQueueDescriptor queueDescriptor; - queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings}; - armnn::WorkloadInfo workloadInfo; + PermuteQueueDescriptor queueDescriptor; + queueDescriptor.m_Parameters = PermuteDescriptor{mappings}; + WorkloadInfo workloadInfo; AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get()); AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get()); - std::unique_ptr workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo); + std::unique_ptr workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo); inputHandle->Allocate(); outputHandle->Allocate(); @@ -164,23 +168,23 @@ template void 
PermuteTensorData( // of the permuted concatenated tensor is going to be. // template void PermuteInputsForConcat( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - std::vector & inputTensorInfos, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + std::vector & inputTensorInfos, std::vector & inputData, std::vector> & inputDataStorage, - armnn::PermutationVector & permuteVector, + PermutationVector & permuteVector, unsigned int & concatDim, - armnn::TensorInfo & outputTensorInfo) + TensorInfo & outputTensorInfo) { BOOST_ASSERT_MSG(inputTensorInfos.size() > 1, "Expecting more than one tensor to be concatenated here"); unsigned int numDims = 0; unsigned int nthInput = 0; - const armnn::PermutationVector identity({0, 1, 2}); + const PermutationVector identity({0, 1, 2}); - std::pair permutations = + std::pair permutations = std::make_pair(identity, identity); inputDataStorage.resize(inputData.size()); @@ -203,7 +207,7 @@ template void PermuteInputsForConcat( "All inputs must have the same number of dimensions"); } - armnn::TensorInfo newTensorInfo = tensorInfo; + TensorInfo newTensorInfo = tensorInfo; newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape())); PermuteTensorData(workloadFactory, @@ -231,11 +235,11 @@ template void PermuteInputsForConcat( // output. // template void PermuteOutputForConcat( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::TensorInfo & tensorInfo, - const armnn::PermutationVector & permuteVector, - std::unique_ptr && inputDataHandle, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const TensorInfo & tensorInfo, + const PermutationVector & permuteVector, + std::unique_ptr && inputDataHandle, T * data) { BOOST_ASSERT_MSG(data != nullptr, "data must not be null"); @@ -247,7 +251,7 @@ template void PermuteOutputForConcat( return; } - armnn::TensorInfo resultTensorInfo = tensorInfo; + TensorInfo resultTensorInfo = tensorInfo; std::vector inputData(tensorInfo.GetNumElements()); std::vector outputData; @@ -264,11 +268,11 @@ template void PermuteOutputForConcat( } template void Concatenate( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - std::initializer_list inputTensorInfosOrig, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + std::initializer_list inputTensorInfosOrig, std::initializer_list inputsOrig, - const armnn::TensorInfo& outputTensorInfoOrig, + const TensorInfo& outputTensorInfoOrig, T * output, unsigned int concatDim, bool useSubtensor) @@ -283,11 +287,11 @@ template void Concatenate( } // Saves a copy of the parameters which we might need to change. - std::vector inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end()); + std::vector inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end()); std::vector inputs = inputsOrig; - armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig; + TensorInfo outputTensorInfo = outputTensorInfoOrig; - armnn::PermutationVector permuteVector{0, 1, 2}; + PermutationVector permuteVector{0, 1, 2}; // Holds and automatically releases memory for the reshaped input data. 
std::vector> tmpInputDataStorage; @@ -312,15 +316,15 @@ template void Concatenate( outputTensorInfo); } - armnn::WorkloadInfo workloadInfo; + WorkloadInfo workloadInfo; - std::vector> inputHandles; + std::vector> inputHandles; inputHandles.reserve(inputCount); - std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); - armnn::ConcatQueueDescriptor queueDescriptor; - armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcat(inputTensorInfos, concatDim); + ConcatQueueDescriptor queueDescriptor; + OriginsDescriptor viewsDescriptor = CreateDescriptorForConcat(inputTensorInfos, concatDim); queueDescriptor.m_Parameters = viewsDescriptor; if (useSubtensor) @@ -337,8 +341,8 @@ template void Concatenate( const bool subTensorsSupported = workloadFactory.SupportsSubTensors(); for (unsigned int i = 0; i < inputCount; ++i) { - const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i]; - std::unique_ptr inputHandle = + const TensorInfo& inputTensorInfo = inputTensorInfos[i]; + std::unique_ptr inputHandle = subTensorsSupported ? workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo.GetShape(), @@ -353,7 +357,7 @@ template void Concatenate( { for (unsigned int i = 0; i < inputCount; ++i) { - std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]); + std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]); inputHandles.emplace_back(std::move(inputHandle)); } } @@ -365,7 +369,7 @@ template void Concatenate( AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get()); - std::unique_ptr workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo); + std::unique_ptr workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo); for (auto& inputHandle : inputHandles) { @@ -403,20 +407,20 @@ template void Concatenate( // Implementation templates // -template> +template> LayerTestResult Concat1dTestImpl( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset); + TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset); - auto input0 = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, { 1.0f, 2.0f, 3.0f })); - auto input1 = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, { 4.0f, 5.0f, 6.0f })); - auto input2 = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, { 7.0f, 8.0f, 9.0f })); + auto input0 = MakeTensor(inputTensorInfo, QuantizedVector({ 1.0f, 2.0f, 3.0f }, qScale, qOffset)); + auto input1 = MakeTensor(inputTensorInfo, QuantizedVector({ 4.0f, 5.0f, 6.0f }, qScale, qOffset)); + auto input2 = MakeTensor(inputTensorInfo, QuantizedVector({ 7.0f, 8.0f, 9.0f }, qScale, qOffset)); - armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset); + TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset); LayerTestResult result(outputTensorInfo); @@ -430,48 +434,56 @@ LayerTestResult Concat1dTestImpl( 0, true); - result.output = MakeTensor(outputTensorInfo, output); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { - 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f - })); + result.output = 
MakeTensor(outputTensorInfo, output); + result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + { + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f + }, + qScale, qOffset)); return result; } -template> +template> LayerTestResult Concat2dTestImpl( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::TensorInfo& outputTensorInfo, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const TensorInfo& outputTensorInfo, unsigned int dimension, const float qScale, const int32_t qOffset) { - armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset); + TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset); - auto input0 = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0 - 1.0f, 2.0f, 3.0f, + auto input0 = MakeTensor(inputTensorInfo, QuantizedVector( + { + // Batch 0 + 1.0f, 2.0f, 3.0f, - // Batch 1 - 10.0f, 11.0f, 12.0f, - })); + // Batch 1 + 10.0f, 11.0f, 12.0f, + }, + qScale, qOffset)); - auto input1 = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0 - 4.0f, 5.0f, 6.0f, + auto input1 = MakeTensor(inputTensorInfo, QuantizedVector( + { + // Batch 0 + 4.0f, 5.0f, 6.0f, - // Batch 1 - 13.0f, 14.0f, 15.0f, - })); + // Batch 1 + 13.0f, 14.0f, 15.0f, + }, + qScale, qOffset)); - auto input2 = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0 - 7.0f, 8.0f, 9.0f, + auto input2 = MakeTensor(inputTensorInfo, QuantizedVector( + { + // Batch 0 + 7.0f, 8.0f, 9.0f, - // Batch 1 - 16.0f, 17.0f, 18.0f, - })); + // Batch 1 + 16.0f, 17.0f, 18.0f, + }, + qScale, qOffset)); LayerTestResult result(outputTensorInfo); @@ -489,99 +501,109 @@ LayerTestResult Concat2dTestImpl( return result; } -template> +template> LayerTestResult Concat2dDim0TestImpl( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset); + TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset); LayerTestResult result = Concat2dTestImpl( workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0 - 1.0f, 2.0f, 3.0f, + result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + { + // Batch 0 + 1.0f, 2.0f, 3.0f, - // Batch 1 - 10.0f, 11.0f, 12.0f, + // Batch 1 + 10.0f, 11.0f, 12.0f, - // Batch 2 - 4.0f, 5.0f, 6.0f, + // Batch 2 + 4.0f, 5.0f, 6.0f, - // Batch 3 - 13.0f, 14.0f, 15.0f, + // Batch 3 + 13.0f, 14.0f, 15.0f, - // Batch 4 - 7.0f, 8.0f, 9.0f, + // Batch 4 + 7.0f, 8.0f, 9.0f, - // Batch 5 - 16.0f, 17.0f, 18.0f, - })); + // Batch 5 + 16.0f, 17.0f, 18.0f, + }, + qScale, qOffset)); return result; } -template> +template> LayerTestResult Concat2dDim1TestImpl( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset); + TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset); LayerTestResult result = Concat2dTestImpl( 
workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0 - 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, + result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + { + // Batch 0 + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, - // Batch 1 - 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f - })); + // Batch 1 + 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f + }, + qScale, qOffset)); return result; } -template> +template> LayerTestResult Concat2dDim0DiffInputDimsTestImpl( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset); - auto input0 = MakeTensor(input0TensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0 - 1.0f, 2.0f, 3.0f, + TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset); + auto input0 = MakeTensor(input0TensorInfo, QuantizedVector( + { + // Batch 0 + 1.0f, 2.0f, 3.0f, - // Batch 1 - 10.0f, 11.0f, 12.0f, - })); + // Batch 1 + 10.0f, 11.0f, 12.0f, + }, + qScale, qOffset)); - armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset); - auto input1 = MakeTensor(input1TensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0 - 4.0f, 5.0f, 6.0f, + TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset); + auto input1 = MakeTensor(input1TensorInfo, QuantizedVector( + { + // Batch 0 + 4.0f, 5.0f, 6.0f, - // Batch 1 - 13.0f, 14.0f, 15.0f, + // Batch 1 + 13.0f, 14.0f, 15.0f, - // Batch 0 - 7.0f, 8.0f, 9.0f, - })); + // Batch 0 + 7.0f, 8.0f, 9.0f, + }, + qScale, qOffset)); - armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset); - auto input2 = MakeTensor(input2TensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 1 - 16.0f, 17.0f, 18.0f, - })); + TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset); + auto input2 = MakeTensor(input2TensorInfo, QuantizedVector( + { + // Batch 1 + 16.0f, 17.0f, 18.0f, + }, + qScale, qOffset)); - armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset); + TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset); LayerTestResult result(outputTensorInfo); std::vector output; @@ -595,64 +617,72 @@ LayerTestResult Concat2dDim0DiffInputDimsTestImpl( true); result.output = MakeTensor(outputTensorInfo, output); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0 - 1.0f, 2.0f, 3.0f, + result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + { + // Batch 0 + 1.0f, 2.0f, 3.0f, - // Batch 1 - 10.0f, 11.0f, 12.0f, + // Batch 1 + 10.0f, 11.0f, 12.0f, - // Batch 2 - 4.0f, 5.0f, 6.0f, + // Batch 2 + 4.0f, 5.0f, 6.0f, - // Batch 3 - 13.0f, 14.0f, 15.0f, + // Batch 3 + 13.0f, 14.0f, 15.0f, - // Batch 4 - 7.0f, 8.0f, 9.0f, + // Batch 4 + 7.0f, 8.0f, 9.0f, - // Batch 5 - 16.0f, 17.0f, 18.0f, - })); + // Batch 5 + 16.0f, 17.0f, 18.0f, + }, + qScale, qOffset)); return result; } -template> +template> LayerTestResult Concat2dDim1DiffInputDimsTestImpl( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, 
float qScale, int32_t qOffset) { - armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset); - auto input0 = MakeTensor(input0TensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0 - 1.0f, 2.0f, 3.0f, + TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset); + auto input0 = MakeTensor(input0TensorInfo, QuantizedVector( + { + // Batch 0 + 1.0f, 2.0f, 3.0f, - // Batch 1 - 10.0f, 11.0f, 12.0f, - })); + // Batch 1 + 10.0f, 11.0f, 12.0f, + }, + qScale, qOffset)); - armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset); - auto input1 = MakeTensor(input1TensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0 - 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, + TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset); + auto input1 = MakeTensor(input1TensorInfo, QuantizedVector( + { + // Batch 0 + 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, - // Batch 1 - 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, - })); + // Batch 1 + 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, + }, + qScale, qOffset)); - armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset); - auto input2 = MakeTensor(input2TensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0 - 9.0f, + TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset); + auto input2 = MakeTensor(input2TensorInfo, QuantizedVector( + { + // Batch 0 + 9.0f, - // Batch 1 - 18.0f - })); + // Batch 1 + 18.0f + }, + qScale, qOffset)); - armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset); + TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset); LayerTestResult result(outputTensorInfo); std::vector output; @@ -666,88 +696,96 @@ LayerTestResult Concat2dDim1DiffInputDimsTestImpl( true); result.output = MakeTensor(outputTensorInfo, output); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0 - 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, + result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + { + // Batch 0 + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, - // Batch 1 - 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, - })); + // Batch 1 + 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, + }, + qScale, qOffset)); return result; } -template> +template> LayerTestResult Concat3dTestImpl( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::TensorInfo& outputTensorInfo, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const TensorInfo& outputTensorInfo, unsigned int dimension, bool useSubtensor, float qScale, int32_t qOffset) { - armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset); + TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset); - auto input0 = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0, Channel 0 - 1.0f, 2.0f, + auto input0 = MakeTensor(inputTensorInfo, QuantizedVector( + { + // Batch 0, Channel 0 + 1.0f, 2.0f, - // Batch 0, Channel 1 - 3.0f, 4.0f, + // Batch 0, Channel 1 + 3.0f, 4.0f, - // Batch 0, Channel 2 - 5.0f, 6.0f, + // Batch 0, Channel 2 + 5.0f, 6.0f, - // Batch 1, Channel 0 - 19.0f, 20.0f, + // Batch 1, Channel 0 + 19.0f, 20.0f, - // Batch 1, Channel 1 - 21.0f, 22.0f, + // Batch 1, Channel 1 + 21.0f, 22.0f, - // Batch 1, Channel 2 - 23.0f, 24.0f - })); + // Batch 1, Channel 2 + 23.0f, 24.0f + }, + qScale, qOffset)); - auto input1 = MakeTensor(inputTensorInfo, 
QuantizedVector(qScale, qOffset, { - // Batch 0, Channel 0 - 7.0f, 8.0f, + auto input1 = MakeTensor(inputTensorInfo, QuantizedVector( + { + // Batch 0, Channel 0 + 7.0f, 8.0f, - // Batch 0, Channel 1 - 9.0f, 10.0f, + // Batch 0, Channel 1 + 9.0f, 10.0f, - // Batch 0, Channel 2 - 11.0f, 12.0f, + // Batch 0, Channel 2 + 11.0f, 12.0f, - // Batch 1, Channel 0 - 25.0f, 26.0f, + // Batch 1, Channel 0 + 25.0f, 26.0f, - // Batch 1, Channel 1 - 27.0f, 28.0f, + // Batch 1, Channel 1 + 27.0f, 28.0f, - // Batch 1, Channel 2 - 29.0f, 30.0f - })); + // Batch 1, Channel 2 + 29.0f, 30.0f + }, + qScale, qOffset)); - auto input2 = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0, Channel 0 - 13.0f, 14.0f, + auto input2 = MakeTensor(inputTensorInfo, QuantizedVector( + { + // Batch 0, Channel 0 + 13.0f, 14.0f, - // Batch 0, Channel 1 - 15.0f, 16.0f, + // Batch 0, Channel 1 + 15.0f, 16.0f, - // Batch 0, Channel 2 - 17.0f, 18.0f, + // Batch 0, Channel 2 + 17.0f, 18.0f, - // Batch 1, Channel 0 - 31.0f, 32.0f, + // Batch 1, Channel 0 + 31.0f, 32.0f, - // Batch 1, Channel 1 - 33.0f, 34.0f, + // Batch 1, Channel 1 + 33.0f, 34.0f, - // Batch 1, Channel 2 - 35.0f, 36.0f - })); + // Batch 1, Channel 2 + 35.0f, 36.0f + }, + qScale, qOffset)); LayerTestResult result(outputTensorInfo); @@ -765,193 +803,200 @@ LayerTestResult Concat3dTestImpl( return result; } -template> +template> LayerTestResult Concat3dDim0TestImpl( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset); + TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset); LayerTestResult result = Concat3dTestImpl( workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0, Channel 0 - 1.0f, 2.0f, + result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + { + // Batch 0, Channel 0 + 1.0f, 2.0f, - // Batch 0, Channel 1 - 3.0f, 4.0f, + // Batch 0, Channel 1 + 3.0f, 4.0f, - // Batch 0, Channel 2 - 5.0f, 6.0f, + // Batch 0, Channel 2 + 5.0f, 6.0f, - // Batch 1, Channel 0 - 19.0f, 20.0f, + // Batch 1, Channel 0 + 19.0f, 20.0f, - // Batch 1, Channel 1 - 21.0f, 22.0f, + // Batch 1, Channel 1 + 21.0f, 22.0f, - // Batch 1, Channel 2 - 23.0f, 24.0f, + // Batch 1, Channel 2 + 23.0f, 24.0f, - // Batch 2, Channel 0 - 7.0f, 8.0f, + // Batch 2, Channel 0 + 7.0f, 8.0f, - // Batch 2, Channel 1 - 9.0f, 10.0f, + // Batch 2, Channel 1 + 9.0f, 10.0f, - // Batch 2, Channel 2 - 11.0f, 12.0f, + // Batch 2, Channel 2 + 11.0f, 12.0f, - // Batch 3, Channel 0 - 25.0f, 26.0f, + // Batch 3, Channel 0 + 25.0f, 26.0f, - // Batch 3, Channel 1 - 27.0f, 28.0f, + // Batch 3, Channel 1 + 27.0f, 28.0f, - // Batch 3, Channel 2 - 29.0f, 30.0f, + // Batch 3, Channel 2 + 29.0f, 30.0f, - // Batch 4, Channel 0 - 13.0f, 14.0f, + // Batch 4, Channel 0 + 13.0f, 14.0f, - // Batch 4, Channel 1 - 15.0f, 16.0f, + // Batch 4, Channel 1 + 15.0f, 16.0f, - // Batch 4, Channel 2 - 17.0f, 18.0f, + // Batch 4, Channel 2 + 17.0f, 18.0f, - // Batch 5, Channel 0 - 31.0f, 32.0f, + // Batch 5, Channel 0 + 31.0f, 32.0f, - // Batch 5, Channel 1 - 33.0f, 34.0f, + // Batch 5, Channel 1 + 33.0f, 34.0f, - // Batch 5, Channel 2 - 35.0f, 36.0f - })); + // Batch 5, Channel 2 + 35.0f, 36.0f 
+ }, + qScale, qOffset)); return result; } -template> +template> LayerTestResult Concat3dDim1TestImpl( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset); + TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset); LayerTestResult result = Concat3dTestImpl( workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0, Channel 0 - 1.0f, 2.0f, + result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + { + // Batch 0, Channel 0 + 1.0f, 2.0f, - // Batch 0, Channel 1 - 3.0f, 4.0f, + // Batch 0, Channel 1 + 3.0f, 4.0f, - // Batch 0, Channel 2 - 5.0f, 6.0f, + // Batch 0, Channel 2 + 5.0f, 6.0f, - // Batch 0, Channel 3 - 7.0f, 8.0f, + // Batch 0, Channel 3 + 7.0f, 8.0f, - // Batch 0, Channel 4 - 9.0f, 10.0f, + // Batch 0, Channel 4 + 9.0f, 10.0f, - // Batch 0, Channel 5 - 11.0f, 12.0f, + // Batch 0, Channel 5 + 11.0f, 12.0f, - // Batch 0, Channel 6 - 13.0f, 14.0f, + // Batch 0, Channel 6 + 13.0f, 14.0f, - // Batch 0, Channel 7 - 15.0f, 16.0f, + // Batch 0, Channel 7 + 15.0f, 16.0f, - // Batch 0, Channel 8 - 17.0f, 18.0f, + // Batch 0, Channel 8 + 17.0f, 18.0f, - // Batch 1, Channel 0 - 19.0f, 20.0f, + // Batch 1, Channel 0 + 19.0f, 20.0f, - // Batch 1, Channel 1 - 21.0f, 22.0f, + // Batch 1, Channel 1 + 21.0f, 22.0f, - // Batch 1, Channel 2 - 23.0f, 24.0f, + // Batch 1, Channel 2 + 23.0f, 24.0f, - // Batch 1, Channel 3 - 25.0f, 26.0f, + // Batch 1, Channel 3 + 25.0f, 26.0f, - // Batch 1, Channel 4 - 27.0f, 28.0f, + // Batch 1, Channel 4 + 27.0f, 28.0f, - // Batch 1, Channel 5 - 29.0f, 30.0f, + // Batch 1, Channel 5 + 29.0f, 30.0f, - // Batch 1, Channel 6 - 31.0f, 32.0f, + // Batch 1, Channel 6 + 31.0f, 32.0f, - // Batch 1, Channel 7 - 33.0f, 34.0f, + // Batch 1, Channel 7 + 33.0f, 34.0f, - // Batch 1, Channel 8 - 35.0f, 36.0f - })); + // Batch 1, Channel 8 + 35.0f, 36.0f + }, + qScale, qOffset)); return result; } -template> +template> LayerTestResult Concat3dDim2TestImpl( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor, float qScale, int32_t qOffset) { - armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset); + TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset); LayerTestResult result = Concat3dTestImpl( workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0, Channel 0 - 1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f, + result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + { + // Batch 0, Channel 0 + 1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f, - // Batch 0, Channel 1 - 3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f, + // Batch 0, Channel 1 + 3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f, - // Batch 0, Channel 2 - 5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f, + // Batch 0, Channel 2 + 5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f, - // Batch 1, Channel 0 - 19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f, + // Batch 1, Channel 0 + 19.0f, 20.0f, 25.0f, 26.0f, 
31.0f, 32.0f, - // Batch 1, Channel 1 - 21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f, + // Batch 1, Channel 1 + 21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f, - // Batch 1, Channel 2 - 23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f, - })); + // Batch 1, Channel 2 + 23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f, + }, + qScale, qOffset)); return result; } -template> +template> LayerTestResult Concat3dDim0DiffInputDimsTestImpl( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType); - auto input0 = MakeTensor(input0TensorInfo, QuantizedVector(qScale, qOffset, { + TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType); + auto input0 = MakeTensor(input0TensorInfo, QuantizedVector( + { // Batch 0, Channel 0 1.0f, 2.0f, @@ -969,10 +1014,12 @@ LayerTestResult Concat3dDim0DiffInputDimsTestImpl( // Batch 1, Channel 2 23.0f, 24.0f - })); + }, + qScale, qOffset)); - armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType); - auto input1 = MakeTensor(input1TensorInfo, QuantizedVector(qScale, qOffset, { + TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType); + auto input1 = MakeTensor(input1TensorInfo, QuantizedVector( + { // Batch 0, Channel 0 7.0f, 8.0f, @@ -981,10 +1028,12 @@ LayerTestResult Concat3dDim0DiffInputDimsTestImpl( // Batch 0, Channel 2 11.0f, 12.0f, - })); + }, + qScale, qOffset)); - armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType); - auto input2 = MakeTensor(input2TensorInfo, QuantizedVector(qScale, qOffset, { + TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType); + auto input2 = MakeTensor(input2TensorInfo, QuantizedVector( + { // Batch 0, Channel 0 25.0f, 26.0f, @@ -1011,9 +1060,10 @@ LayerTestResult Concat3dDim0DiffInputDimsTestImpl( // Batch 2, Channel 2 35.0f, 36.0f - })); + }, + qScale, qOffset)); - armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType); + TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType); LayerTestResult result(outputTensorInfo); std::vector output; @@ -1027,130 +1077,138 @@ LayerTestResult Concat3dDim0DiffInputDimsTestImpl( true); result.output = MakeTensor(outputTensorInfo, output); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0, Channel 0 - 1.0f, 2.0f, + result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + { + // Batch 0, Channel 0 + 1.0f, 2.0f, - // Batch 0, Channel 1 - 3.0f, 4.0f, + // Batch 0, Channel 1 + 3.0f, 4.0f, - // Batch 0, Channel 2 - 5.0f, 6.0f, + // Batch 0, Channel 2 + 5.0f, 6.0f, - // Batch 1, Channel 0 - 19.0f, 20.0f, + // Batch 1, Channel 0 + 19.0f, 20.0f, - // Batch 1, Channel 1 - 21.0f, 22.0f, + // Batch 1, Channel 1 + 21.0f, 22.0f, - // Batch 1, Channel 2 - 23.0f, 24.0f, + // Batch 1, Channel 2 + 23.0f, 24.0f, - // Batch 2, Channel 0 - 7.0f, 8.0f, + // Batch 2, Channel 0 + 7.0f, 8.0f, - // Batch 2, Channel 1 - 9.0f, 10.0f, + // Batch 2, Channel 1 + 9.0f, 10.0f, - // Batch 2, Channel 2 - 11.0f, 12.0f, + // Batch 2, Channel 2 + 11.0f, 12.0f, - // Batch 3, Channel 0 - 25.0f, 26.0f, + // Batch 3, Channel 0 + 25.0f, 26.0f, - // Batch 3, Channel 1 - 27.0f, 28.0f, + // Batch 3, Channel 1 + 27.0f, 28.0f, - // Batch 3, Channel 2 - 29.0f, 30.0f, + // Batch 3, Channel 2 + 29.0f, 30.0f, - // Batch 4, Channel 0 - 13.0f, 14.0f, + // Batch 4, Channel 0 + 13.0f, 14.0f, - // Batch 4, Channel 1 - 15.0f, 16.0f, + // Batch 4, Channel 1 + 
15.0f, 16.0f, - // Batch 4, Channel 2 - 17.0f, 18.0f, + // Batch 4, Channel 2 + 17.0f, 18.0f, - // Batch 5, Channel 0 - 31.0f, 32.0f, + // Batch 5, Channel 0 + 31.0f, 32.0f, - // Batch 5, Channel 1 - 33.0f, 34.0f, + // Batch 5, Channel 1 + 33.0f, 34.0f, - // Batch 5, Channel 2 - 35.0f, 36.0f - })); + // Batch 5, Channel 2 + 35.0f, 36.0f + }, + qScale, qOffset)); return result; } -template> +template> LayerTestResult Concat3dDim1DiffInputDimsTestImpl( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset); - auto input0 = MakeTensor(input0TensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0, Channel 0 - 1.0f, 2.0f, + TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset); + auto input0 = MakeTensor(input0TensorInfo, QuantizedVector( + { + // Batch 0, Channel 0 + 1.0f, 2.0f, - // Batch 0, Channel 1 - 3.0f, 4.0f, + // Batch 0, Channel 1 + 3.0f, 4.0f, - // Batch 0, Channel 2 - 5.0f, 6.0f, + // Batch 0, Channel 2 + 5.0f, 6.0f, - // Batch 1, Channel 0 - 19.0f, 20.0f, + // Batch 1, Channel 0 + 19.0f, 20.0f, - // Batch 1, Channel 1 - 21.0f, 22.0f, + // Batch 1, Channel 1 + 21.0f, 22.0f, - // Batch 1, Channel 2 - 23.0f, 24.0f - })); + // Batch 1, Channel 2 + 23.0f, 24.0f + }, + qScale, qOffset)); - armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset); - auto input1 = MakeTensor(input1TensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0, Channel 0 - 7.0f, 8.0f, + TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset); + auto input1 = MakeTensor(input1TensorInfo, QuantizedVector( + { + // Batch 0, Channel 0 + 7.0f, 8.0f, - // Batch 0, Channel 1 - 9.0f, 10.0f, + // Batch 0, Channel 1 + 9.0f, 10.0f, - // Batch 0, Channel 2 - 11.0f, 12.0f, + // Batch 0, Channel 2 + 11.0f, 12.0f, - // Batch 0, Channel 3 - 25.0f, 26.0f, + // Batch 0, Channel 3 + 25.0f, 26.0f, - // Batch 1, Channel 0 - 27.0f, 28.0f, + // Batch 1, Channel 0 + 27.0f, 28.0f, - // Batch 1, Channel 1 - 29.0f, 30.0f, + // Batch 1, Channel 1 + 29.0f, 30.0f, - // Batch 1, Channel 2 - 13.0f, 14.0f, + // Batch 1, Channel 2 + 13.0f, 14.0f, - // Batch 1, Channel 3 - 15.0f, 16.0f, - })); + // Batch 1, Channel 3 + 15.0f, 16.0f, + }, + qScale, qOffset)); - armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset); - auto input2 = MakeTensor(input2TensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0, Channel 0 - 17.0f, 18.0f, + TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset); + auto input2 = MakeTensor(input2TensorInfo, QuantizedVector( + { + // Batch 0, Channel 0 + 17.0f, 18.0f, - // Batch 1, Channel 0 - 31.0f, 32.0f, - })); + // Batch 1, Channel 0 + 31.0f, 32.0f, + }, + qScale, qOffset)); - armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset); + TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset); LayerTestResult result(outputTensorInfo); std::vector output; @@ -1164,131 +1222,139 @@ LayerTestResult Concat3dDim1DiffInputDimsTestImpl( true); result.output = MakeTensor(outputTensorInfo, output); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0, Channel 0 - 1.0f, 2.0f, + result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + { + // Batch 0, Channel 0 + 1.0f, 2.0f, - 
// Batch 0, Channel 1 - 3.0f, 4.0f, + // Batch 0, Channel 1 + 3.0f, 4.0f, - // Batch 0, Channel 2 - 5.0f, 6.0f, + // Batch 0, Channel 2 + 5.0f, 6.0f, - // Batch 0, Channel 3 - 7.0f, 8.0f, + // Batch 0, Channel 3 + 7.0f, 8.0f, - // Batch 0, Channel 4 - 9.0f, 10.0f, + // Batch 0, Channel 4 + 9.0f, 10.0f, - // Batch 0, Channel 5 - 11.0f, 12.0f, + // Batch 0, Channel 5 + 11.0f, 12.0f, - // Batch 0, Channel 6 - 25.0f, 26.0f, + // Batch 0, Channel 6 + 25.0f, 26.0f, - // Batch 0, Channel 7 - 17.0f, 18.0f, + // Batch 0, Channel 7 + 17.0f, 18.0f, - // Batch 1, Channel 0 - 19.0f, 20.0f, + // Batch 1, Channel 0 + 19.0f, 20.0f, - // Batch 1, Channel 1 - 21.0f, 22.0f, + // Batch 1, Channel 1 + 21.0f, 22.0f, - // Batch 1, Channel 2 - 23.0f, 24.0f, + // Batch 1, Channel 2 + 23.0f, 24.0f, - // Batch 1, Channel 3 - 27.0f, 28.0f, + // Batch 1, Channel 3 + 27.0f, 28.0f, - // Batch 1, Channel 4 - 29.0f, 30.0f, + // Batch 1, Channel 4 + 29.0f, 30.0f, - // Batch 1, Channel 5 - 13.0f, 14.0f, + // Batch 1, Channel 5 + 13.0f, 14.0f, - // Batch 1, Channel 6 - 15.0f, 16.0f, + // Batch 1, Channel 6 + 15.0f, 16.0f, - // Batch 1, Channel 7 - 31.0f, 32.0f, - })); + // Batch 1, Channel 7 + 31.0f, 32.0f, + }, + qScale, qOffset)); return result; } -template> +template> LayerTestResult Concat3dDim2DiffInputDimsTestImpl( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor, float qScale, int32_t qOffset) { - armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset); - auto input0 = MakeTensor(input0TensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0, Channel 0 - 1.0f, 2.0f, + TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset); + auto input0 = MakeTensor(input0TensorInfo, QuantizedVector( + { + // Batch 0, Channel 0 + 1.0f, 2.0f, - // Batch 0, Channel 1 - 3.0f, 4.0f, + // Batch 0, Channel 1 + 3.0f, 4.0f, - // Batch 0, Channel 2 - 5.0f, 6.0f, + // Batch 0, Channel 2 + 5.0f, 6.0f, - // Batch 1, Channel 0 - 19.0f, 20.0f, + // Batch 1, Channel 0 + 19.0f, 20.0f, - // Batch 1, Channel 1 - 21.0f, 22.0f, + // Batch 1, Channel 1 + 21.0f, 22.0f, - // Batch 1, Channel 2 - 23.0f, 24.0f - })); + // Batch 1, Channel 2 + 23.0f, 24.0f + }, + qScale, qOffset)); - armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset); - auto input1 = MakeTensor(input1TensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0, Channel 0 - 7.0f, + TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset); + auto input1 = MakeTensor(input1TensorInfo, QuantizedVector( + { + // Batch 0, Channel 0 + 7.0f, - // Batch 0, Channel 1 - 9.0f, + // Batch 0, Channel 1 + 9.0f, - // Batch 0, Channel 2 - 11.0f, + // Batch 0, Channel 2 + 11.0f, - // Batch 1, Channel 0 - 25.0f, + // Batch 1, Channel 0 + 25.0f, - // Batch 1, Channel 1 - 27.0f, + // Batch 1, Channel 1 + 27.0f, - // Batch 1, Channel 2 - 29.0f - })); + // Batch 1, Channel 2 + 29.0f + }, + qScale, qOffset)); - armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset); - auto input2 = MakeTensor(input2TensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0, Channel 0 - 13.0f, 14.0f, 50.0f, + TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset); + auto input2 = MakeTensor(input2TensorInfo, QuantizedVector( + { + // Batch 0, Channel 0 + 13.0f, 14.0f, 50.0f, - // Batch 0, Channel 1 - 15.0f, 16.0f, 51.0f, + // Batch 0, Channel 
1 + 15.0f, 16.0f, 51.0f, - // Batch 0, Channel 2 - 17.0f, 18.0f, 52.0f, + // Batch 0, Channel 2 + 17.0f, 18.0f, 52.0f, - // Batch 1, Channel 0 - 31.0f, 32.0f, 53.0f, + // Batch 1, Channel 0 + 31.0f, 32.0f, 53.0f, - // Batch 1, Channel 1 - 33.0f, 34.0f, 54.0f, + // Batch 1, Channel 1 + 33.0f, 34.0f, 54.0f, - // Batch 1, Channel 2 - 35.0f, 36.0f, 55.0f, - })); + // Batch 1, Channel 2 + 35.0f, 36.0f, 55.0f, + }, + qScale, qOffset)); - armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset); + TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset); LayerTestResult result(outputTensorInfo); std::vector output; @@ -1302,67 +1368,75 @@ LayerTestResult Concat3dDim2DiffInputDimsTestImpl( useSubtensor); result.output = MakeTensor(outputTensorInfo, output); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { - // Batch 0, Channel 0 - 1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f, + result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + { + // Batch 0, Channel 0 + 1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f, - // Batch 0, Channel 1 - 3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f, + // Batch 0, Channel 1 + 3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f, - // Batch 0, Channel 2 - 5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f, + // Batch 0, Channel 2 + 5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f, - // Batch 1, Channel 0 - 19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f, + // Batch 1, Channel 0 + 19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f, - // Batch 1, Channel 1 - 21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f, + // Batch 1, Channel 1 + 21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f, - // Batch 1, Channel 2 - 23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f, - })); + // Batch 1, Channel 2 + 23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f, + }, + qScale, qOffset)); return result; } -template> +template> LayerTestResult Concat4dTestImpl( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, - const armnn::TensorInfo& outputTensorInfo, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const TensorInfo& outputTensorInfo, unsigned int dimension, bool useSubtensor, float qScale, int32_t qOffset) { - armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset); + TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset); - auto input0 = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, { - 1.0f, 2.0f, - 3.0f, 4.0f, - 5.0f, 6.0f, - 7.0f, 8.0f, - 9.0f, 10.0f, - 11.0f, 12.0f - })); - - auto input1 = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, { - 11.0f, 12.0f, - 13.0f, 14.0f, - 15.0f, 16.0f, - 17.0f, 18.0f, - 19.0f, 20.0f, - 21.0f, 22.0f - })); + auto input0 = MakeTensor(inputTensorInfo, QuantizedVector( + { + 1.0f, 2.0f, + 3.0f, 4.0f, + 5.0f, 6.0f, + 7.0f, 8.0f, + 9.0f, 10.0f, + 11.0f, 12.0f + }, + qScale, qOffset)); + + auto input1 = MakeTensor(inputTensorInfo, QuantizedVector( + { + 11.0f, 12.0f, + 13.0f, 14.0f, + 15.0f, 16.0f, + 17.0f, 18.0f, + 19.0f, 20.0f, + 21.0f, 22.0f + }, + qScale, qOffset)); - auto input2 = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, { - 21.0f, 22.0f, - 23.0f, 24.0f, - 25.0f, 26.0f, - 27.0f, 28.0f, - 29.0f, 30.0f, - 31.0f, 32.0f - })); + auto input2 = MakeTensor(inputTensorInfo, QuantizedVector( + { + 21.0f, 22.0f, + 23.0f, 24.0f, + 25.0f, 26.0f, + 27.0f, 28.0f, + 29.0f, 30.0f, + 31.0f, 32.0f + }, + qScale, qOffset)); LayerTestResult result(outputTensorInfo); @@ -1382,197 +1456,209 @@ 
LayerTestResult Concat4dTestImpl( return result; } -template> +template> LayerTestResult Concat4dDim0TestImpl( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset); + TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset); LayerTestResult result = Concat4dTestImpl( workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { - 1.0f, 2.0f, - 3.0f, 4.0f, - 5.0f, 6.0f, - 7.0f, 8.0f, - 9.0f, 10.0f, - 11.0f, 12.0f, - - 11.0f, 12.0f, - 13.0f, 14.0f, - 15.0f, 16.0f, - 17.0f, 18.0f, - 19.0f, 20.0f, - 21.0f, 22.0f, - - 21.0f, 22.0f, - 23.0f, 24.0f, - 25.0f, 26.0f, - 27.0f, 28.0f, - 29.0f, 30.0f, - 31.0f, 32.0f - })); + result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + { + 1.0f, 2.0f, + 3.0f, 4.0f, + 5.0f, 6.0f, + 7.0f, 8.0f, + 9.0f, 10.0f, + 11.0f, 12.0f, + + 11.0f, 12.0f, + 13.0f, 14.0f, + 15.0f, 16.0f, + 17.0f, 18.0f, + 19.0f, 20.0f, + 21.0f, 22.0f, + + 21.0f, 22.0f, + 23.0f, 24.0f, + 25.0f, 26.0f, + 27.0f, 28.0f, + 29.0f, 30.0f, + 31.0f, 32.0f + }, + qScale, qOffset)); + return result; } -template> +template> LayerTestResult Concat4dDim1TestImpl( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset); + TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset); LayerTestResult result = Concat4dTestImpl( workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { - 1.0f, 2.0f, - 3.0f, 4.0f, - 5.0f, 6.0f, - 7.0f, 8.0f, - 9.0f, 10.0f, - 11.0f, 12.0f, - - 11.0f, 12.0f, - 13.0f, 14.0f, - 15.0f, 16.0f, - 17.0f, 18.0f, - 19.0f, 20.0f, - 21.0f, 22.0f, - - 21.0f, 22.0f, - 23.0f, 24.0f, - 25.0f, 26.0f, - 27.0f, 28.0f, - 29.0f, 30.0f, - 31.0f, 32.0f - })); + result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + { + 1.0f, 2.0f, + 3.0f, 4.0f, + 5.0f, 6.0f, + 7.0f, 8.0f, + 9.0f, 10.0f, + 11.0f, 12.0f, + + 11.0f, 12.0f, + 13.0f, 14.0f, + 15.0f, 16.0f, + 17.0f, 18.0f, + 19.0f, 20.0f, + 21.0f, 22.0f, + + 21.0f, 22.0f, + 23.0f, 24.0f, + 25.0f, 26.0f, + 27.0f, 28.0f, + 29.0f, 30.0f, + 31.0f, 32.0f + }, + qScale, qOffset)); return result; } -template> +template> LayerTestResult Concat4dDim2TestImpl( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset); + TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset); LayerTestResult result = Concat4dTestImpl( workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { - 1.0f, 2.0f, - 3.0f, 4.0f, - 11.0f, 12.0f, - 13.0f, 14.0f, - 21.0f, 22.0f, - 
23.0f, 24.0f, - - 5.0f, 6.0f, - 7.0f, 8.0f, - 15.0f, 16.0f, - 17.0f, 18.0f, - 25.0f, 26.0f, - 27.0f, 28.0f, - - 9.0f, 10.0f, - 11.0f, 12.0f, - 19.0f, 20.0f, - 21.0f, 22.0f, - 29.0f, 30.0f, - 31.0f, 32.0f - })); + result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + { + 1.0f, 2.0f, + 3.0f, 4.0f, + 11.0f, 12.0f, + 13.0f, 14.0f, + 21.0f, 22.0f, + 23.0f, 24.0f, + + 5.0f, 6.0f, + 7.0f, 8.0f, + 15.0f, 16.0f, + 17.0f, 18.0f, + 25.0f, 26.0f, + 27.0f, 28.0f, + + 9.0f, 10.0f, + 11.0f, 12.0f, + 19.0f, 20.0f, + 21.0f, 22.0f, + 29.0f, 30.0f, + 31.0f, 32.0f + }, + qScale, qOffset)); return result; } -template> +template> LayerTestResult Concat4dDim3TestImpl( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset, bool useSubtensor) { - armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset); + TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset); LayerTestResult result = Concat4dTestImpl( workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { - 1.0f, 2.0f, - 11.0f, 12.0f, - 21.0f, 22.0f, - 3.0f, 4.0f, - 13.0f, 14.0f, - 23.0f, 24.0f, - - 5.0f, 6.0f, - 15.0f, 16.0f, - 25.0f, 26.0f, - 7.0f, 8.0f, - 17.0f, 18.0f, - 27.0f, 28.0f, - - 9.0f, 10.0f, - 19.0f, 20.0f, - 29.0f, 30.0f, - 11.0f, 12.0f, - 21.0f, 22.0f, - 31.0f, 32.0f - })); + result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + { + 1.0f, 2.0f, + 11.0f, 12.0f, + 21.0f, 22.0f, + 3.0f, 4.0f, + 13.0f, 14.0f, + 23.0f, 24.0f, + + 5.0f, 6.0f, + 15.0f, 16.0f, + 25.0f, 26.0f, + 7.0f, 8.0f, + 17.0f, 18.0f, + 27.0f, 28.0f, + + 9.0f, 10.0f, + 19.0f, 20.0f, + 29.0f, 30.0f, + 11.0f, 12.0f, + 21.0f, 22.0f, + 31.0f, 32.0f + }, + qScale, qOffset)); return result; } -template> +template> LayerTestResult Concat4dDiffShapeDim0TestImpl( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - unsigned int dimension = 0; - armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset); + constexpr unsigned int dimension = 0u; - auto input0 = MakeTensor(inputTensorInfo0, QuantizedVector(qScale, qOffset, { - 1.0f, 2.0f, - 3.0f, 4.0f, - 5.0f, 6.0f, - 7.0f, 8.0f, - 9.0f, 10.0f, - 11.0f, 12.0f - })); - - armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset); - - auto input1 = MakeTensor(inputTensorInfo1, QuantizedVector(qScale, qOffset, { - 11.0f, 12.0f, - 13.0f, 14.0f, - 15.0f, 16.0f, - 17.0f, 18.0f, - 19.0f, 20.0f, - 21.0f, 22.0f, - - 21.0f, 22.0f, - 23.0f, 24.0f, - 25.0f, 26.0f, - 27.0f, 28.0f, - 29.0f, 30.0f, - 31.0f, 32.0f + TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset); + auto input0 = MakeTensor(inputTensorInfo0, QuantizedVector( + { + 1.0f, 2.0f, + 3.0f, 4.0f, + 5.0f, 6.0f, + 7.0f, 8.0f, + 9.0f, 10.0f, + 11.0f, 12.0f + }, + qScale, qOffset)); + + TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset); + + auto input1 = MakeTensor(inputTensorInfo1, QuantizedVector( + { + 11.0f, 12.0f, + 13.0f, 14.0f, + 15.0f, 16.0f, + 17.0f, 18.0f, + 19.0f, 20.0f, + 21.0f, 22.0f, - })); + 21.0f, 22.0f, + 23.0f, 24.0f, + 
25.0f, 26.0f, + 27.0f, 28.0f, + 29.0f, 30.0f, + 31.0f, 32.0f + }, + qScale, qOffset)); - armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset); + TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset); LayerTestResult result(outputTensorInfo); @@ -1588,62 +1674,67 @@ LayerTestResult Concat4dDiffShapeDim0TestImpl( true); result.output = MakeTensor(outputTensorInfo, output); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { - 1.0f, 2.0f, - 3.0f, 4.0f, - 5.0f, 6.0f, - 7.0f, 8.0f, - 9.0f, 10.0f, - 11.0f, 12.0f, - - 11.0f, 12.0f, - 13.0f, 14.0f, - 15.0f, 16.0f, - 17.0f, 18.0f, - 19.0f, 20.0f, - 21.0f, 22.0f, - - 21.0f, 22.0f, - 23.0f, 24.0f, - 25.0f, 26.0f, - 27.0f, 28.0f, - 29.0f, 30.0f, - 31.0f, 32.0f - })); + result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + { + 1.0f, 2.0f, + 3.0f, 4.0f, + 5.0f, 6.0f, + 7.0f, 8.0f, + 9.0f, 10.0f, + 11.0f, 12.0f, + + 11.0f, 12.0f, + 13.0f, 14.0f, + 15.0f, 16.0f, + 17.0f, 18.0f, + 19.0f, 20.0f, + 21.0f, 22.0f, + + 21.0f, 22.0f, + 23.0f, 24.0f, + 25.0f, 26.0f, + 27.0f, 28.0f, + 29.0f, 30.0f, + 31.0f, 32.0f + }, + qScale, qOffset)); return result; } -template> +template> LayerTestResult Concat4dDiffShapeDim1TestImpl( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - unsigned int dimension = 1; - armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset); - - auto input0 = MakeTensor(inputTensorInfo0, QuantizedVector(qScale, qOffset, { - 1.0f, 2.0f, - 3.0f, 4.0f, - 5.0f, 6.0f, - 7.0f, 8.0f, - 9.0f, 10.0f, - 11.0f, 12.0f - })); - - armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset); + constexpr unsigned int dimension = 1u; - auto input1 = MakeTensor(inputTensorInfo1, QuantizedVector(qScale, qOffset, { - 11.0f, 12.0f, - 13.0f, 14.0f, - 15.0f, 16.0f, - 17.0f, 18.0f, - - })); + TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset); + auto input0 = MakeTensor(inputTensorInfo0, QuantizedVector( + { + 1.0f, 2.0f, + 3.0f, 4.0f, + 5.0f, 6.0f, + 7.0f, 8.0f, + 9.0f, 10.0f, + 11.0f, 12.0f + }, + qScale, qOffset)); + + TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset); + + auto input1 = MakeTensor(inputTensorInfo1, QuantizedVector( + { + 11.0f, 12.0f, + 13.0f, 14.0f, + 15.0f, 16.0f, + 17.0f, 18.0f, + }, + qScale, qOffset)); - armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset); + TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset); LayerTestResult result(outputTensorInfo); @@ -1659,57 +1750,61 @@ LayerTestResult Concat4dDiffShapeDim1TestImpl( true); result.output = MakeTensor(outputTensorInfo, output); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { - 1.0f, 2.0f, - 3.0f, 4.0f, - 5.0f, 6.0f, - 7.0f, 8.0f, - 9.0f, 10.0f, - 11.0f, 12.0f, - 11.0f, 12.0f, - 13.0f, 14.0f, - 15.0f, 16.0f, - 17.0f, 18.0f - })); + result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + { + 1.0f, 2.0f, + 3.0f, 4.0f, + 5.0f, 6.0f, + 7.0f, 8.0f, + 9.0f, 10.0f, + 11.0f, 12.0f, + 11.0f, 12.0f, + 13.0f, 14.0f, + 15.0f, 16.0f, + 17.0f, 18.0f + }, + qScale, qOffset)); return result; } -template> +template> LayerTestResult Concat4dDiffShapeDim2TestImpl( - armnn::IWorkloadFactory& workloadFactory, - const 
armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset) { - unsigned int dimension = 2; - armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset); + constexpr unsigned int dimension = 2u; - auto input0 = MakeTensor(inputTensorInfo0, QuantizedVector(qScale, qOffset, { - 1.0f, 2.0f, - 3.0f, 4.0f, - 5.0f, 6.0f, - 7.0f, 8.0f, - 9.0f, 10.0f, - 11.0f, 12.0f - })); - - armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset); - - auto input1 = MakeTensor(inputTensorInfo1, QuantizedVector(qScale, qOffset, { - 11.0f, 12.0f, - 13.0f, 14.0f, - 15.0f, 16.0f, - 17.0f, 18.0f, - 19.0f, 20.0f, - 21.0f, 22.0f, - 23.0f, 24.0f, - 25.0f, 26.0f, - 27.0f, 28.0f - })); + TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset); + auto input0 = MakeTensor(inputTensorInfo0, QuantizedVector( + { + 1.0f, 2.0f, + 3.0f, 4.0f, + 5.0f, 6.0f, + 7.0f, 8.0f, + 9.0f, 10.0f, + 11.0f, 12.0f + }, + qScale, qOffset)); - armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset); + TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset); + auto input1 = MakeTensor(inputTensorInfo1, QuantizedVector( + { + 11.0f, 12.0f, + 13.0f, 14.0f, + 15.0f, 16.0f, + 17.0f, 18.0f, + 19.0f, 20.0f, + 21.0f, 22.0f, + 23.0f, 24.0f, + 25.0f, 26.0f, + 27.0f, 28.0f + }, + qScale, qOffset)); + TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset); LayerTestResult result(outputTensorInfo); std::vector output; @@ -1723,64 +1818,69 @@ LayerTestResult Concat4dDiffShapeDim2TestImpl( dimension, true); - result.output = MakeTensor(outputTensorInfo, output); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { - 1.0f, 2.0f, - 3.0f, 4.0f, - 11.0f, 12.0f, - 13.0f, 14.0f, - 15.0f, 16.0f, - - 5.0f, 6.0f, - 7.0f, 8.0f, - 17.0f, 18.0f, - 19.0f, 20.0f, - 21.0f, 22.0f, - - 9.0f, 10.0f, - 11.0f, 12.0f, - 23.0f, 24.0f, - 25.0f, 26.0f, - 27.0f, 28.0f - })); + result.output = MakeTensor(outputTensorInfo, output); + result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + { + 1.0f, 2.0f, + 3.0f, 4.0f, + 11.0f, 12.0f, + 13.0f, 14.0f, + 15.0f, 16.0f, + + 5.0f, 6.0f, + 7.0f, 8.0f, + 17.0f, 18.0f, + 19.0f, 20.0f, + 21.0f, 22.0f, + + 9.0f, 10.0f, + 11.0f, 12.0f, + 23.0f, 24.0f, + 25.0f, 26.0f, + 27.0f, 28.0f + }, + qScale, qOffset)); return result; } -template> +template> LayerTestResult Concat4dDiffShapeDim3TestImpl( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, int32_t qOffset, bool useSubtensor) { - unsigned int dimension = 3; - armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset); - - auto input0 = MakeTensor(inputTensorInfo0, QuantizedVector(qScale, qOffset, { - 1.0f, 2.0f, - 3.0f, 4.0f, - 5.0f, 6.0f, - 7.0f, 8.0f, - 9.0f, 10.0f, - 11.0f, 12.0f - })); + constexpr unsigned int dimension = 3u; - armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset); - - auto input1 = MakeTensor(inputTensorInfo1, QuantizedVector(qScale, qOffset, { - 11.0f, 12.0f, 13.0f, - 14.0f, 15.0f, 16.0f, + TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset); + auto input0 = MakeTensor(inputTensorInfo0, QuantizedVector( + { + 1.0f, 2.0f, + 3.0f, 
4.0f, + 5.0f, 6.0f, + 7.0f, 8.0f, + 9.0f, 10.0f, + 11.0f, 12.0f + }, + qScale, qOffset)); + + TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset); + auto input1 = MakeTensor(inputTensorInfo1, QuantizedVector( + { + 11.0f, 12.0f, 13.0f, + 14.0f, 15.0f, 16.0f, - 17.0f, 18.0f, 19.0f, - 20.0f, 21.0f, 22.0f, + 17.0f, 18.0f, 19.0f, + 20.0f, 21.0f, 22.0f, - 23.0f, 24.0f, 25.0f, - 26.0f, 27.0f, 28.0f - })); + 23.0f, 24.0f, 25.0f, + 26.0f, 27.0f, 28.0f + }, + qScale, qOffset)); - armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset); + TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset); LayerTestResult result(outputTensorInfo); @@ -1796,30 +1896,32 @@ LayerTestResult Concat4dDiffShapeDim3TestImpl( useSubtensor); result.output = MakeTensor(outputTensorInfo, output); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, { - 1.0f, 2.0f, 11.0f, 12.0f, 13.0f, - 3.0f, 4.0f, 14.0f, 15.0f, 16.0f, - 5.0f, 6.0f, 17.0f, 18.0f, 19.0f, - 7.0f, 8.0f, 20.0f, 21.0f, 22.0f, - 9.0f, 10.0f, 23.0f, 24.0f, 25.0f, - 11.0f, 12.0f, 26.0f, 27.0f, 28.0f - })); + result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( + { + 1.0f, 2.0f, 11.0f, 12.0f, 13.0f, + 3.0f, 4.0f, 14.0f, 15.0f, 16.0f, + 5.0f, 6.0f, 17.0f, 18.0f, 19.0f, + 7.0f, 8.0f, 20.0f, 21.0f, 22.0f, + 9.0f, 10.0f, 23.0f, 24.0f, 25.0f, + 11.0f, 12.0f, 26.0f, 27.0f, 28.0f + }, + qScale, qOffset)); return result; } -template +template LayerTestResult ConcatDifferentInputOutputQParamTest( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor) { // Defines the tensor descriptors. - armnn::TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType); - armnn::TensorInfo inputTensorInfo1({ 3, 6, 2 }, ArmnnType); - armnn::TensorInfo inputTensorInfo2({ 3, 6, 1 }, ArmnnType); + TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType); + TensorInfo inputTensorInfo1({ 3, 6, 2 }, ArmnnType); + TensorInfo inputTensorInfo2({ 3, 6, 1 }, ArmnnType); - std::vector inputTensorShapes({inputTensorInfo1.GetShape(), inputTensorInfo2.GetShape()}); + std::vector inputTensorShapes({inputTensorInfo1.GetShape(), inputTensorInfo2.GetShape()}); // Quantized input1 tensor. const float inputScale1 = 0.5f; @@ -1894,31 +1996,31 @@ LayerTestResult ConcatDifferentInputOutputQParamTest( inputTensorInfo2.SetQuantizationOffset(inputOffset2); std::vector wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0]. - armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1); + ConcatQueueDescriptor::ViewOrigin window1(wOrigin1); std::vector wOrigin2 = { 0, 0, 2 }; //Extent of the window is defined by size of input[1]. - armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2); + ConcatQueueDescriptor::ViewOrigin window2(wOrigin2); - std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); bool subTensorsSupported = useSubtensor && workloadFactory.SupportsSubTensors(); - std::unique_ptr inputHandle1 = + std::unique_ptr inputHandle1 = subTensorsSupported ? workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) : workloadFactory.CreateTensorHandle(inputTensorInfo1); - std::unique_ptr inputHandle2 = + std::unique_ptr inputHandle2 = subTensorsSupported ? 
workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) : workloadFactory.CreateTensorHandle(inputTensorInfo2); - armnn::ConcatQueueDescriptor data; - armnn::OriginsDescriptor desc = armnn::CreateDescriptorForConcatenation( + ConcatQueueDescriptor data; + OriginsDescriptor desc = CreateDescriptorForConcatenation( inputTensorShapes.begin(),inputTensorShapes.end(), 2); data.m_Parameters = desc; - armnn::WorkloadInfo info; + WorkloadInfo info; AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get()); AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get()); AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); @@ -1926,7 +2028,7 @@ LayerTestResult ConcatDifferentInputOutputQParamTest( data.m_ViewOrigins.push_back(window1); data.m_ViewOrigins.push_back(window2); - std::unique_ptr workload = workloadFactory.CreateConcat(data, info); + std::unique_ptr workload = workloadFactory.CreateConcat(data, info); inputHandle1->Allocate(); inputHandle2->Allocate(); @@ -1947,16 +2049,16 @@ LayerTestResult ConcatDifferentInputOutputQParamTest( // Explicit template specializations // -template LayerTestResult, 3> -ConcatDifferentInputOutputQParamTest( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, +template LayerTestResult, 3> +ConcatDifferentInputOutputQParamTest( + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor); -template LayerTestResult, 3> -ConcatDifferentInputOutputQParamTest( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, +template LayerTestResult, 3> +ConcatDifferentInputOutputQParamTest( + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor); // @@ -1964,8 +2066,8 @@ ConcatDifferentInputOutputQParamTest( // LayerTestResult ConcatTest( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { unsigned int outputWidth = 3; unsigned int outputHeight = 6; @@ -1980,9 +2082,9 @@ LayerTestResult ConcatTest( unsigned int inputChannels2 = 1; // Define the tensor descriptors. - armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32); - armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32); - armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32); + TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::Float32); + TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::Float32); + TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::Float32); LayerTestResult ret(outputTensorInfo); @@ -2041,27 +2143,27 @@ LayerTestResult ConcatTest( ); std::vector wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of input[0]. - armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1); + ConcatQueueDescriptor::ViewOrigin window1(wOrigin1); std::vector wOrigin2 = {2, 0, 0}; //Extent of the window is defined by size of input[1]. 
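// wOrigin1/wOrigin2 above place each input window inside the concatenated
// output, and each window's extent is the input's own shape. A minimal sketch
// of the CHW indexing this implies -- the helper name is illustrative, not
// ArmNN API:
unsigned int ConcatOutputIndex(const std::vector<unsigned int>& origin, // window start {c, h, w}
                               unsigned int c, unsigned int h, unsigned int w,
                               unsigned int outHeight, unsigned int outWidth)
{
    // Element (c, h, w) of an input mapped through `origin` lands at
    // (c + origin[0], h + origin[1], w + origin[2]) in the flattened output.
    return ((c + origin[0]) * outHeight + (h + origin[1])) * outWidth + (w + origin[2]);
}
// With wOrigin2 = { 2, 0, 0 }, the single channel of input2 becomes channel 2
// of the three-channel output, immediately after input1's two channels.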
- armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2); + ConcatQueueDescriptor::ViewOrigin window2(wOrigin2); - std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); bool subTensorsSupported = workloadFactory.SupportsSubTensors(); - std::unique_ptr inputHandle1 = + std::unique_ptr inputHandle1 = subTensorsSupported ? workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) : workloadFactory.CreateTensorHandle(inputTensorInfo1); - std::unique_ptr inputHandle2 = + std::unique_ptr inputHandle2 = subTensorsSupported ? workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) : workloadFactory.CreateTensorHandle(inputTensorInfo2); - armnn::ConcatQueueDescriptor data; - armnn::WorkloadInfo info; + ConcatQueueDescriptor data; + WorkloadInfo info; AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get()); AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get()); AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); @@ -2069,7 +2171,7 @@ LayerTestResult ConcatTest( data.m_ViewOrigins.push_back(window1); data.m_ViewOrigins.push_back(window2); - std::unique_ptr workload = workloadFactory.CreateConcat(data, info); + std::unique_ptr workload = workloadFactory.CreateConcat(data, info); inputHandle1->Allocate(); inputHandle2->Allocate(); @@ -2087,156 +2189,156 @@ LayerTestResult ConcatTest( } LayerTestResult Concat1dTest( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat1dTestImpl(workloadFactory, memoryManager, 0.0f, 0); + return Concat1dTestImpl(workloadFactory, memoryManager, 0.0f, 0); } LayerTestResult Concat2dDim0Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat2dDim0TestImpl(workloadFactory, memoryManager, 0.0f, 0); + return Concat2dDim0TestImpl(workloadFactory, memoryManager, 0.0f, 0); } LayerTestResult Concat2dDim1Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat2dDim1TestImpl(workloadFactory, memoryManager, 0.0f, 0); + return Concat2dDim1TestImpl(workloadFactory, memoryManager, 0.0f, 0); } LayerTestResult Concat2dDim0DiffInputDimsTest( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat2dDim0DiffInputDimsTestImpl(workloadFactory, memoryManager, 0.0f, 0); + return Concat2dDim0DiffInputDimsTestImpl(workloadFactory, memoryManager, 0.0f, 0); } LayerTestResult Concat2dDim1DiffInputDimsTest( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat2dDim1DiffInputDimsTestImpl(workloadFactory, memoryManager, 0.0f, 0); + return 
Concat2dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 3> Concat3dDim0Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat3dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 3> Concat3dDim1Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat3dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 3> Concat3dDim2Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
-    return Concat3dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
+    return Concat3dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
 }
 
 LayerTestResult<float, 3> Concat3dDim0DiffInputDimsTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
+    return Concat3dDim0DiffInputDimsTestImpl<DataType::Float32>(
         workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 3> Concat3dDim1DiffInputDimsTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat3dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 3> Concat3dDim2DiffInputDimsTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
-    return Concat3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
+    return Concat3dDim2DiffInputDimsTestImpl<DataType::Float32>(
         workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
 }
 
 LayerTestResult<float, 4> Concat4dDim0Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat4dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 4> Concat4dDim1Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat4dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 4> Concat4dDim2Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr&
memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat4dDim2TestImpl(workloadFactory, memoryManager, 0.0f, 0); + return Concat4dDim2TestImpl(workloadFactory, memoryManager, 0.0f, 0); } LayerTestResult Concat4dDim3Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor) { - return Concat4dDim3TestImpl(workloadFactory, memoryManager, 0.0f, 0, useSubtensor); + return Concat4dDim3TestImpl(workloadFactory, memoryManager, 0.0f, 0, useSubtensor); } LayerTestResult Concat4dDiffShapeDim0Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat4dDiffShapeDim0TestImpl(workloadFactory, memoryManager, 0.0f, 0); + return Concat4dDiffShapeDim0TestImpl(workloadFactory, memoryManager, 0.0f, 0); } LayerTestResult Concat4dDiffShapeDim1Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat4dDiffShapeDim1TestImpl( + return Concat4dDiffShapeDim1TestImpl( workloadFactory, memoryManager, 0.0f, 0); } LayerTestResult Concat4dDiffShapeDim2Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat4dDiffShapeDim2TestImpl(workloadFactory, memoryManager, 0.0f, 0); + return Concat4dDiffShapeDim2TestImpl(workloadFactory, memoryManager, 0.0f, 0); } LayerTestResult Concat4dDiffShapeDim3Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor) { - return Concat4dDiffShapeDim3TestImpl( + return Concat4dDiffShapeDim3TestImpl( workloadFactory, memoryManager, 0.0f, 0, useSubtensor); } -LayerTestResult ConcatFloat16Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +LayerTestResult ConcatFloat16Test( + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat3dDim1TestImpl(workloadFactory, memoryManager, 0.0f, 0); + return Concat3dDim1TestImpl(workloadFactory, memoryManager, 0.0f, 0); } LayerTestResult ConcatUint8DifferentQParamsTest( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { unsigned int outputWidth = 3; unsigned int outputHeight = 6; @@ -2251,9 +2353,9 @@ LayerTestResult ConcatUint8DifferentQParamsTest( unsigned int inputChannels2 = 1; // Defines the tensor descriptors. 
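// The test below feeds inputs whose quantization parameters differ from the
// output's, so a correct backend has to requantize while concatenating. Per
// element that amounts to the usual affine round trip -- a sketch under that
// assumption (helper name illustrative, not an ArmNN function; requires
// <algorithm>, <cmath>, <cstdint>):
uint8_t Requantize(uint8_t in, float inScale, int32_t inOffset,
                   float outScale, int32_t outOffset)
{
    const float real  = (static_cast<int32_t>(in) - inOffset) * inScale;               // dequantize
    const int32_t q   = static_cast<int32_t>(std::round(real / outScale)) + outOffset; // requantize
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));                        // clamp to uint8 range
}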
- armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8); - armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8); - armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8); + TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedAsymm8); + TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedAsymm8); + TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedAsymm8); // Quantized input1 tensor. Range [-3, 1] const float inputScale1 = 0.015686f; @@ -2332,27 +2434,27 @@ LayerTestResult ConcatUint8DifferentQParamsTest( inputTensorInfo2.SetQuantizationOffset(inputOffset2); std::vector wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0]. - armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1); + ConcatQueueDescriptor::ViewOrigin window1(wOrigin1); std::vector wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1]. - armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2); + ConcatQueueDescriptor::ViewOrigin window2(wOrigin2); - std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); bool subTensorsSupported = workloadFactory.SupportsSubTensors(); - std::unique_ptr inputHandle1 = + std::unique_ptr inputHandle1 = subTensorsSupported ? workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) : workloadFactory.CreateTensorHandle(inputTensorInfo1); - std::unique_ptr inputHandle2 = + std::unique_ptr inputHandle2 = subTensorsSupported ? workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) : workloadFactory.CreateTensorHandle(inputTensorInfo2); - armnn::ConcatQueueDescriptor data; - armnn::WorkloadInfo info; + ConcatQueueDescriptor data; + WorkloadInfo info; AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get()); AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get()); AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); @@ -2360,7 +2462,7 @@ LayerTestResult ConcatUint8DifferentQParamsTest( data.m_ViewOrigins.push_back(window1); data.m_ViewOrigins.push_back(window2); - std::unique_ptr workload = workloadFactory.CreateConcat(data, info); + std::unique_ptr workload = workloadFactory.CreateConcat(data, info); inputHandle1->Allocate(); inputHandle2->Allocate(); @@ -2378,8 +2480,8 @@ LayerTestResult ConcatUint8DifferentQParamsTest( } LayerTestResult ConcatUint8Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { unsigned int outputWidth = 3; unsigned int outputHeight = 6; @@ -2394,9 +2496,9 @@ LayerTestResult ConcatUint8Test( unsigned int inputChannels2 = 1; // Defines the tensor descriptors. 
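// The Impl templates above all build their reference data through the
// relocated armnnUtils::QuantizedVector helper, now taking the values first
// and the quantization parameters last. Reconstructed from the call sites,
// the integer path of that helper presumably looks roughly like this
// (defaults assumed; the real helper also passes floating-point element types
// such as Half through unquantized, which is what makes the defaults
// convenient for Float16 tests):
template <typename T>
std::vector<T> QuantizedVectorSketch(const std::vector<float>& values,
                                     float qScale = 1.0f, int32_t qOffset = 0)
{
    std::vector<T> quantized;
    quantized.reserve(values.size());
    for (float value : values)
    {
        // Affine quantization: q = round(value / scale) + offset.
        quantized.push_back(static_cast<T>(std::lround(value / qScale) + qOffset));
    }
    return quantized;
}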
- armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8); - armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8); - armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8); + TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedAsymm8); + TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedAsymm8); + TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedAsymm8); // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them. const float scale = 0.13497836f; @@ -2466,29 +2568,29 @@ LayerTestResult ConcatUint8Test( ); std::vector wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0]. - armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1); + ConcatQueueDescriptor::ViewOrigin window1(wOrigin1); std::vector wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1]. - armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2); + ConcatQueueDescriptor::ViewOrigin window2(wOrigin2); - std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); bool subTensorsSupported = workloadFactory.SupportsSubTensors(); - std::unique_ptr inputHandle1 = + std::unique_ptr inputHandle1 = subTensorsSupported ? workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) : workloadFactory.CreateTensorHandle(inputTensorInfo1); - std::unique_ptr inputHandle2 = + std::unique_ptr inputHandle2 = subTensorsSupported ? workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) : workloadFactory.CreateTensorHandle(inputTensorInfo2); - armnn::ConcatQueueDescriptor data; - armnn::WorkloadInfo info; + ConcatQueueDescriptor data; + WorkloadInfo info; AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get()); AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get()); AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); @@ -2496,7 +2598,7 @@ LayerTestResult ConcatUint8Test( data.m_ViewOrigins.push_back(window1); data.m_ViewOrigins.push_back(window2); - std::unique_ptr workload = workloadFactory.CreateConcat(data, info); + std::unique_ptr workload = workloadFactory.CreateConcat(data, info); inputHandle1->Allocate(); inputHandle2->Allocate(); @@ -2514,8 +2616,8 @@ LayerTestResult ConcatUint8Test( } LayerTestResult ConcatUint16Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { unsigned int outputWidth = 3; unsigned int outputHeight = 6; @@ -2530,9 +2632,9 @@ LayerTestResult ConcatUint16Test( unsigned int inputChannels2 = 1; // Defines the tensor descriptors. 
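// When every tensor shares one (scale, offset) pair -- as in the
// QuantisedSymm16 test below -- concatenating the raw quantized values is
// equivalent to quantizing the concatenated real values, because quantization
// is element-wise. That is why the comment below can call the parameters
// arbitrary. A tiny illustration reusing the sketch above (requires <cassert>;
// the offset is chosen here for illustration, not taken from the test):
void ConcatQuantizationIdentityCheck()
{
    const float scale    = 0.13497836f; // same arbitrary scale as the test
    const int32_t offset = -7;          // illustrative value

    std::vector<float> a { -3.0f, 1.0f };
    std::vector<float> b { 0.5f };

    std::vector<int16_t> qa       = QuantizedVectorSketch<int16_t>(a, scale, offset);
    const std::vector<int16_t> qb = QuantizedVectorSketch<int16_t>(b, scale, offset);

    qa.insert(qa.end(), qb.begin(), qb.end()); // concatenate the quantized values...
    a.insert(a.end(), b.begin(), b.end());     // ...and the real values

    assert(qa == QuantizedVectorSketch<int16_t>(a, scale, offset)); // identical results
}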
- armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16); - armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16); - armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16); + TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedSymm16); + TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedSymm16); + TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedSymm16); // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them. const float scale = 0.13497836f; @@ -2599,29 +2701,29 @@ LayerTestResult ConcatUint16Test( })); std::vector wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0]. - armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1); + ConcatQueueDescriptor::ViewOrigin window1(wOrigin1); std::vector wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1]. - armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2); + ConcatQueueDescriptor::ViewOrigin window2(wOrigin2); - std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); bool subTensorsSupported = workloadFactory.SupportsSubTensors(); - std::unique_ptr inputHandle1 = + std::unique_ptr inputHandle1 = subTensorsSupported ? workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) : workloadFactory.CreateTensorHandle(inputTensorInfo1); - std::unique_ptr inputHandle2 = + std::unique_ptr inputHandle2 = subTensorsSupported ? 
workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) : workloadFactory.CreateTensorHandle(inputTensorInfo2); - armnn::ConcatQueueDescriptor data; - armnn::WorkloadInfo info; + ConcatQueueDescriptor data; + WorkloadInfo info; AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get()); AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get()); AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get()); @@ -2629,7 +2731,7 @@ LayerTestResult ConcatUint16Test( data.m_ViewOrigins.push_back(window1); data.m_ViewOrigins.push_back(window2); - std::unique_ptr workload = workloadFactory.CreateConcat(data, info); + std::unique_ptr workload = workloadFactory.CreateConcat(data, info); inputHandle1->Allocate(); inputHandle2->Allocate(); @@ -2647,147 +2749,147 @@ LayerTestResult ConcatUint16Test( } LayerTestResult Concat1dUint8Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat1dTestImpl(workloadFactory, memoryManager, 0.5f, -1); + return Concat1dTestImpl(workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult Concat2dDim0Uint8Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat2dDim0TestImpl(workloadFactory, memoryManager, 0.5f, -1); + return Concat2dDim0TestImpl(workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult Concat2dDim1Uint8Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat2dDim1TestImpl(workloadFactory, memoryManager, 0.5f, -1); + return Concat2dDim1TestImpl(workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult Concat2dDim0DiffInputDimsUint8Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat2dDim0DiffInputDimsTestImpl( + return Concat2dDim0DiffInputDimsTestImpl( workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult Concat2dDim1DiffInputDimsUint8Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat2dDim1DiffInputDimsTestImpl( + return Concat2dDim1DiffInputDimsTestImpl( workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult Concat3dDim0Uint8Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat3dDim0TestImpl(workloadFactory, memoryManager, 0.5f, -1); + return Concat3dDim0TestImpl(workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult Concat3dDim1Uint8Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& 
memoryManager) { - return Concat3dDim1TestImpl(workloadFactory, memoryManager, 0.5f, -1); + return Concat3dDim1TestImpl(workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult Concat3dDim2Uint8Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor) { - return Concat3dDim2TestImpl( + return Concat3dDim2TestImpl( workloadFactory, memoryManager, useSubtensor, 0.5f, -1); } LayerTestResult Concat3dDim0DiffInputDimsUint8Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat3dDim0TestImpl(workloadFactory, memoryManager, 0.5f, -1); + return Concat3dDim0TestImpl(workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult Concat3dDim1DiffInputDimsUint8Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat3dDim1DiffInputDimsTestImpl( + return Concat3dDim1DiffInputDimsTestImpl( workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult Concat3dDim2DiffInputDimsUint8Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor) { - return Concat3dDim2DiffInputDimsTestImpl( + return Concat3dDim2DiffInputDimsTestImpl( workloadFactory, memoryManager, useSubtensor, 0.5f, -1); } LayerTestResult Concat4dDim0Uint8Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat4dDim0TestImpl(workloadFactory, memoryManager, 0.5f, -1); + return Concat4dDim0TestImpl(workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult Concat4dDim1Uint8Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat4dDim1TestImpl(workloadFactory, memoryManager, 0.5f, -1); + return Concat4dDim1TestImpl(workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult Concat4dDim2Uint8Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat4dDim2TestImpl(workloadFactory, memoryManager, 0.5f, -1); + return Concat4dDim2TestImpl(workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult Concat4dDim3Uint8Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor) { - return Concat4dDim3TestImpl( + return Concat4dDim3TestImpl( workloadFactory, memoryManager, 0.5f, -1, useSubtensor); } LayerTestResult Concat4dDiffShapeDim0Uint8Test( - 
armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat4dDiffShapeDim0TestImpl( + return Concat4dDiffShapeDim0TestImpl( workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult Concat4dDiffShapeDim1Uint8Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat4dDiffShapeDim1TestImpl( + return Concat4dDiffShapeDim1TestImpl( workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult Concat4dDiffShapeDim2Uint8Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) { - return Concat4dDiffShapeDim2TestImpl( + return Concat4dDiffShapeDim2TestImpl( workloadFactory, memoryManager, 0.5f, -1); } LayerTestResult Concat4dDiffShapeDim3Uint8Test( - armnn::IWorkloadFactory& workloadFactory, - const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + IWorkloadFactory& workloadFactory, + const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor) { - return Concat4dDiffShapeDim3TestImpl( + return Concat4dDiffShapeDim3TestImpl( workloadFactory, memoryManager, 0.5f, -1, useSubtensor); } diff --git a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp index c3cacd5..3f22c31 100644 --- a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp @@ -6,6 +6,7 @@ #include "ConstantTestImpl.hpp" #include +#include #include #include @@ -53,43 +54,45 @@ LayerTestResult ConstantTestImpl( } auto input = MakeTensor(inputTensorInfo, std::vector( - QuantizedVector(qScale, qOffset, { - // Batch 0, Channel 0 - 235.0f, 46.0f, 178.0f, - 100.0f, 123.0f, 19.0f, - 172.0f, 74.0f, 250.0f, - 6.0f, 195.0f, 80.0f, - - // Batch 0, Channel 1 - 113.0f, 95.0f, 202.0f, - 77.0f, 114.0f, 71.0f, - 122.0f, 246.0f, 166.0f, - 82.0f, 28.0f, 37.0f, - - // Batch 0, Channel 2 - 56.0f, 170.0f, 162.0f, - 194.0f, 89.0f, 254.0f, - 12.0f, 209.0f, 200.0f, - 1.0f, 64.0f, 54.0f, - - // Batch 1, Channel 0 - 67.0f, 90.0f, 49.0f, - 7.0f, 163.0f, 18.0f, - 25.0f, 117.0f, 103.0f, - 247.0f, 59.0f, 189.0f, - - // Batch 1, Channel 1 - 239.0f, 104.0f, 199.0f, - 17.0f, 124.0f, 153.0f, - 222.0f, 217.0f, 75.0f, - 32.0f, 126.0f, 21.0f, - - // Batch 1, Channel 2 - 97.0f, 145.0f, 215.0f, - 115.0f, 116.0f, 238.0f, - 226.0f, 16.0f, 132.0f, - 92.0f, 125.0f, 88.0f, - }))); + armnnUtils::QuantizedVector( + { + // Batch 0, Channel 0 + 235.0f, 46.0f, 178.0f, + 100.0f, 123.0f, 19.0f, + 172.0f, 74.0f, 250.0f, + 6.0f, 195.0f, 80.0f, + + // Batch 0, Channel 1 + 113.0f, 95.0f, 202.0f, + 77.0f, 114.0f, 71.0f, + 122.0f, 246.0f, 166.0f, + 82.0f, 28.0f, 37.0f, + + // Batch 0, Channel 2 + 56.0f, 170.0f, 162.0f, + 194.0f, 89.0f, 254.0f, + 12.0f, 209.0f, 200.0f, + 1.0f, 64.0f, 54.0f, + + // Batch 1, Channel 0 + 67.0f, 90.0f, 49.0f, + 7.0f, 163.0f, 18.0f, + 25.0f, 117.0f, 103.0f, + 247.0f, 59.0f, 189.0f, + + // Batch 1, Channel 1 + 239.0f, 104.0f, 199.0f, + 17.0f, 124.0f, 153.0f, + 222.0f, 217.0f, 75.0f, + 32.0f, 126.0f, 21.0f, + + // Batch 1, Channel 2 + 97.0f, 
145.0f, 215.0f, + 115.0f, 116.0f, 238.0f, + 226.0f, 16.0f, 132.0f, + 92.0f, 125.0f, 88.0f, + }, + qScale, qOffset))); LayerTestResult result(outputTensorInfo); result.outputExpected = input; diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp index 01c1b18..198904e 100644 --- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp @@ -7,13 +7,13 @@ #include #include +#include #include #include #include -#include #include #include @@ -62,6 +62,8 @@ static std::vector ConvInput3x8x16({ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }); +using namespace armnnUtils; + // // Helper templates // @@ -73,7 +75,7 @@ boost::multi_array GetBias2(bool biasEnabled, float qScale) if(biasEnabled) { armnn::TensorInfo biasDesc({static_cast(Bias2.size())}, ArmnnType); - boost::multi_array bias = MakeTensor(biasDesc, QuantizedVector(qScale, 0.0f, Bias2)); + boost::multi_array bias = MakeTensor(biasDesc, QuantizedVector(Bias2, qScale, 0.0f)); return bias; } else @@ -89,7 +91,7 @@ boost::multi_array GetBias4(bool biasEnabled, float qScale) if(biasEnabled) { armnn::TensorInfo biasDesc({static_cast(Bias4.size())}, ArmnnType); - boost::multi_array bias = MakeTensor(biasDesc, QuantizedVector(qScale, 0.0f, Bias4)); + boost::multi_array bias = MakeTensor(biasDesc, QuantizedVector(Bias4, qScale, 0.0f)); return bias; } else @@ -105,7 +107,7 @@ boost::multi_array GetBias8(bool biasEnabled, float qScale) if(biasEnabled) { armnn::TensorInfo biasDesc({static_cast(Bias4.size())}, ArmnnType); - boost::multi_array bias = MakeTensor(biasDesc, QuantizedVector(qScale, 0.0f, Bias8)); + boost::multi_array bias = MakeTensor(biasDesc, QuantizedVector(Bias8, qScale, 0.0f)); return bias; } else @@ -492,35 +494,39 @@ LayerTestResult Convolution1dTestImpl( biasInfo.SetQuantizationOffset(0); } - std::vector inputData( - QuantizedVector(inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(), { - 5.0f, -2.0f, 2.5f, 0.0f, 1.0f, - -3.0f, 3.2f, 5.0f, 2.0f, 3.0f, - })); + std::vector inputData = QuantizedVector( + { + 5.0f, -2.0f, 2.5f, 0.0f, 1.0f, + -3.0f, 3.2f, 5.0f, 2.0f, 3.0f, + }, + inputInfo.GetQuantizationScale(), + inputInfo.GetQuantizationOffset()); - std::vector kernelData( - QuantizedVector(kernelInfo.GetQuantizationScale(), kernelInfo.GetQuantizationOffset(), { - 1.0f, 0.0f, 0.0f, - 0.0f, 2.0f, -1.5f, + std::vector kernelData = QuantizedVector( + { + 1.0f, 0.0f, 0.0f, + 0.0f, 2.0f, -1.5f, - 0.0f, 0.0f, 0.0f, - 0.2f, 0.2f, 0.2f, + 0.0f, 0.0f, 0.0f, + 0.2f, 0.2f, 0.2f, - 0.5f, 0.0f, 0.5f, - 0.0f, -1.0f, 0.0f - })); + 0.5f, 0.0f, 0.5f, + 0.0f, -1.0f, 0.0f + }, + kernelInfo.GetQuantizationScale(), + kernelInfo.GetQuantizationOffset()); - std::vector biasData( - QuantizedVector(biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset(), { - 1.0f, 0.0f, 0.0f - })); + std::vector biasData = + QuantizedVector({ 1.0f, 0.0f, 0.0f }, biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset()); - std::vector outputData( - QuantizedVector(outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), { - 4.5f, -10.8f, 5.0f + 6.4f - 7.5f, -2.0f + 10.0f -3.0f, 2.5f + 4.0f - 4.5f, 6.0f, 1.0f, + std::vector outputData = QuantizedVector( + { + 4.5f, -10.8f, 5.0f + 6.4f - 7.5f, -2.0f + 10.0f -3.0f, 2.5f + 4.0f - 4.5f, 6.0f, 1.0f, -0.6f, -0.6f + 0.64f, -0.6f + 0.64f + 1.0f, 0.64f + 1.0f + 0.4f, 1.0f + 0.4f + 0.6f, 0.4f + 0.6f, 0.6f, - 
2.5f, -1.0f + 3.0f, 1.25f - 3.2f + 2.5f, -1.0f - 5.0f, 1.25f + 0.5f - 2.0f, -3.0f, 0.5f - })); + 2.5f, -1.0f + 3.0f, 1.25f - 3.2f + 2.5f, -1.0f - 5.0f, 1.25f + 0.5f - 2.0f, -3.0f, 0.5f + }, + outputInfo.GetQuantizationScale(), + outputInfo.GetQuantizationOffset()); // Optionally apply bias to output image. if(biasEnabled) @@ -698,54 +704,55 @@ LayerTestResult SimpleConvolution2d3x5TestCommon( { // Use common single-batch 3-channel 16x8 image. armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType); - boost::multi_array input = MakeTensor(inputDesc, QuantizedVector(qScale, qOffset, ConvInput3x8x16)); + boost::multi_array input = MakeTensor(inputDesc, QuantizedVector(ConvInput3x8x16, qScale, qOffset)); // Use a 2-element batch with 3-channel 3x5 kernels. armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType); boost::multi_array kernel = MakeTensor(kernelDesc, std::vector( - QuantizedVector(qScale, qOffset, { - 1, 1, 1, + QuantizedVector({ + 1, 1, 1, 1, -1, 1, - 1, 1, 1, - 1, 1, 1, - 1, 1, 1, - - 0, 0, 0, - 0, 0, 0, - 0, 0, 0, - 0, 0, 0, - 0, 0, 0, - - 2, 2, 2, - 2, 2, 2, - 2, 2, 2, - 2, 2, 2, - 2, 2, 2, - - - 0, 0, 0, - 0, 0, 0, - 0, 0, 0, - 0, 0, 0, - 0, 0, 0, - - 1, 1, 1, - 1, 1, 1, - 1, 1, 1, - 1, 1, 1, - 1, 1, 1, - - 0, 0, 0, - 0, 0, 0, - 0, 0, 0, - 0, 0, 0, - 0, 0, 0 - }))); + 1, 1, 1, + 1, 1, 1, + 1, 1, 1, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 2, 2, 2, + 2, 2, 2, + 2, 2, 2, + 2, 2, 2, + 2, 2, 2, + + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 1, 1, 1, + 1, 1, 1, + 1, 1, 1, + 1, 1, 1, + 1, 1, 1, + + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + 0, 0, 0 + }, + qScale, qOffset))); // Expected output is 2 batch elements of a 1-channel 14x4 image. armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType); boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( - QuantizedVector(qScale, qOffset, { + QuantizedVector({ -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, @@ -757,7 +764,8 @@ LayerTestResult SimpleConvolution2d3x5TestCommon( 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - }))); + }, + qScale, qOffset))); return SimpleConvolution2dTestImpl( workloadFactory, @@ -785,42 +793,43 @@ LayerTestResult SimpleConvolution2d3x3TestCommon( // Use common single-batch 3-channel 16x8 image. armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType); - boost::multi_array input = MakeTensor(inputDesc, QuantizedVector(qScale, qOffset, ConvInput3x8x16)); + boost::multi_array input = MakeTensor(inputDesc, QuantizedVector(ConvInput3x8x16, qScale, qOffset)); // Use a 2-element batch of 3-channel 3x3 kernels. armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType); boost::multi_array kernel = MakeTensor(kernelDesc, std::vector( - QuantizedVector(qScale, qOffset, { - 1, 1, 1, + QuantizedVector({ + 1, 1, 1, 1, -1, 1, - 1, 1, 1, + 1, 1, 1, - 0, 0, 0, - 0, 0, 0, - 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, - 2, 2, 2, - 2, 2, 2, - 2, 2, 2, + 2, 2, 2, + 2, 2, 2, + 2, 2, 2, - 0, 0, 0, - 0, 0, 0, - 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, - 1, 1, 1, - 1, 1, 1, - 1, 1, 1, + 1, 1, 1, + 1, 1, 1, + 1, 1, 1, - 0, 0, 0, - 0, 0, 0, - 0, 0, 0 - }))); + 0, 0, 0, + 0, 0, 0, + 0, 0, 0 + }, + qScale, qOffset))); // Expected output is 1 batch of a 2-channel 14x6 image. 
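// Worked check of that shape: with a 3x3 kernel, stride 1 and no padding,
// width = 16 - 3 + 1 = 14 and height = 8 - 3 + 1 = 6, and each of the two
// kernel batch elements contributes one output channel, giving the 2-channel
// 14x6 image declared just below.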
armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType); boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( - QuantizedVector(qScale, qOffset, { + QuantizedVector({ -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f, @@ -834,7 +843,8 @@ LayerTestResult SimpleConvolution2d3x3TestCommon( 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - }))); + }, + qScale, qOffset))); return SimpleConvolution2dTestImpl( workloadFactory, @@ -860,19 +870,21 @@ LayerTestResult Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest // Use a single-batch 1-channel 3x3 image as input. armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType); boost::multi_array input = MakeTensor(inputDesc, std::vector( - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 11,21,31, 12,22,32, 13,23,33 - }))); + }, + qScale, qOffset))); // Use 1 batch of a 1-channel 2x2 kernel. armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType); boost::multi_array kernel = MakeTensor(kernelDesc, std::vector( - QuantizedVector(qScale, qOffset, { + QuantizedVector({ -11,-21, -12,-22, - }))); + }, + qScale, qOffset))); // Expected output is 1 batch of a 1-channel 6x8 image. // Manually calculated like this: @@ -885,7 +897,7 @@ LayerTestResult Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest //[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..] armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType); boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 0, 0, 0, 0, 0, 0, -242, -594, -934, -372, 0, 0, -495, -1190, -1850, -725, 0, 0, @@ -894,7 +906,8 @@ LayerTestResult Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - }))); + }, + qScale, qOffset))); return SimpleConvolution2dTestImpl( workloadFactory, @@ -924,35 +937,37 @@ LayerTestResult SimpleConvolution2dAsymmetricPaddingTestCommon( // Use a single-batch 1-channel 5x5 image as input. armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType); boost::multi_array input = MakeTensor(inputDesc, std::vector( - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 11,21,31,41,51, 12,22,32,42,52, 13,23,33,43,53, 14,24,34,44,54, 15,25,35,45,55, - }))); + }, qScale, qOffset))); // Use 1 batch of a 1-channel 4x4 kernel. armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType); boost::multi_array kernel = MakeTensor(kernelDesc, std::vector( - QuantizedVector(qScale, qOffset, { + QuantizedVector({ -11,-21,-31,-41, -12,-22,-32,-42, -13,-23,-33,-43, -14,-24,-34,-44, - }))); + }, + qScale, qOffset))); // Expected output is 1 batch of a 1-channel 5x5 image. 
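// An aside on the helper these tests now share: every call site in this patch
// reads values-first, e.g. QuantizedVector({ ... }, qScale, qOffset). A minimal
// sketch of that shape follows; the defaulted parameters, the clamping, and the
// handling of floating-point element types are illustrative assumptions, not a
// quote of src/armnnUtils/QuantizeHelper.hpp.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <vector>

template <typename T>
std::vector<T> QuantizedVector(const std::vector<float>& values,
                               float qScale = 1.0f, int32_t qOffset = 0)
{
    std::vector<T> quantized;
    quantized.reserve(values.size());
    for (float value : values)
    {
        // Affine quantization: q = round(value / scale) + offset, clamped to
        // T's representable range. (The real helper presumably passes
        // floating-point element types straight through instead of rounding,
        // which is what makes it usable for Float16 data as well.)
        float q = std::round(value / qScale) + static_cast<float>(qOffset);
        q = std::min(q, static_cast<float>(std::numeric_limits<T>::max()));
        q = std::max(q, static_cast<float>(std::numeric_limits<T>::lowest()));
        quantized.push_back(static_cast<T>(q));
    }
    return quantized;
}
// Usage matching the call sites in this file, e.g. for QuantisedAsymm8 data:
//   std::vector<uint8_t> data = QuantizedVector<uint8_t>({ 1.0f, 2.0f }, 0.5f, -1);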
armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType); std::vector myVec(outputDesc.GetNumElements(), 0); boost::multi_array expectedOutput = MakeTensor(outputDesc, std::vector( - QuantizedVector(qScale, qOffset, { + QuantizedVector({ -7140, -10580, -13940, -9300, -5230, -9590, -14120, -18520, -12290, -6860, -9980, -14560, -18960, -12560, -7000, -7518, -10904, -14144, -9318, -5152, -5032, -7256, -9376, -6142, -3368, - }))); + }, + qScale, qOffset))); return SimpleConvolution2dTestImpl( workloadFactory, @@ -1025,17 +1040,18 @@ LayerTestResult Convolution2d3x3DilationTestCommon( outputTensorInfo.SetQuantizationOffset(qOffset); auto input = MakeTensor(inputTensorInfo, - std::vector(QuantizedVector(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputNoQuantizedValues))); + std::vector(QuantizedVector(inputNoQuantizedValues, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset()))); auto kernel = MakeTensor(kernelTensorInfo, - std::vector(QuantizedVector(kernelTensorInfo.GetQuantizationScale(), - kernelTensorInfo.GetQuantizationOffset(), - kernelNoQuantizedValues))); - auto expectedOutput = MakeTensor(outputTensorInfo, - std::vector(QuantizedVector(outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset(), - outputExpectedNoQuantizedValues))); + std::vector(QuantizedVector(kernelNoQuantizedValues, + kernelTensorInfo.GetQuantizationScale(), + kernelTensorInfo.GetQuantizationOffset()))); + auto expectedOutput = + MakeTensor(outputTensorInfo, + std::vector(QuantizedVector(outputExpectedNoQuantizedValues, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset()))); return SimpleConvolution2dTestImpl( workloadFactory, @@ -1539,15 +1555,18 @@ LayerTestResult DepthwiseConvolution2dDepthMul1TestImpl( biasDesc.SetQuantizationOffset(0); } std::vector inputData = std::vector( - QuantizedVector(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), { - 1.f, 2.f, 1.f, - 2.f, 1.f, 2.f, - 1.f, 2.f, 1.f, - - 1.f, 2.f, 1.f, - 2.f, 1.f, 2.f, - 1.f, 2.f, 1.f, - })); + QuantizedVector({ + 1.f, 2.f, 1.f, + 2.f, 1.f, 2.f, + 1.f, 2.f, 1.f, + + 1.f, 2.f, 1.f, + 2.f, 1.f, 2.f, + 1.f, 2.f, 1.f, + }, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset())); + // at this point if we require it permute the input data const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; if (layout == armnn::DataLayout::NHWC) @@ -1558,27 +1577,32 @@ LayerTestResult DepthwiseConvolution2dDepthMul1TestImpl( } auto input = MakeTensor(inputTensorInfo, inputData); - std::vector biasV(QuantizedVector(biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(), - {0, 2})); + std::vector biasV(QuantizedVector({ 0, 2 }, + biasDesc.GetQuantizationScale(), + biasDesc.GetQuantizationOffset())); + auto bias = MakeTensor(biasDesc, biasV); std::vector kernelData = std::vector( - QuantizedVector(kernelDesc.GetQuantizationScale(), kernelDesc.GetQuantizationOffset(), { - 1.f, 0.f, 1.f, - 0.f, 0.f, 0.f, - -1.f, 0.f, -1.f, - - 1.f, 0.f, 1.f, - 0.f, 0.f, 0.f, - -1.f, 0.f, -1.f, - })); + QuantizedVector({ + 1.f, 0.f, 1.f, + 0.f, 0.f, 0.f, + -1.f, 0.f, -1.f, + + 1.f, 0.f, 1.f, + 0.f, 0.f, 0.f, + -1.f, 0.f, -1.f, + }, + kernelDesc.GetQuantizationScale(), + kernelDesc.GetQuantizationOffset())); + auto kernel = MakeTensor(kernelDesc, kernelData); // Manually calculated. 
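// Worked check of that manual calculation: each channel is a valid (no-pad,
// stride-1) 3x3 convolution over a 3x3 input, so it yields a single value:
//   1*1 + 0*2 + 1*1  +  0*2 + 0*1 + 0*2  +  (-1)*1 + 0*2 + (-1)*1 = 0
// Both channels see the same input/kernel pattern, hence the { 0.f, 0.f }
// pre-bias output below.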
std::vector outputImage( - QuantizedVector(outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset(), - {0.f, 0.f}) + QuantizedVector({ 0.f, 0.f }, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset()) ); // Optionally apply bias to output image. @@ -1686,24 +1710,27 @@ LayerTestResult DepthwiseConvolution2dTestImpl( // NOTE: originalInputData is in NCHW format std::vector originalInputData = std::vector( - QuantizedVector(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), { - 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, - 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, - 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, - 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, - 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, - 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, - 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, - 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, - 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - })); + QuantizedVector({ + 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, + 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, + 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, + 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, + 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, + 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, + 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f + }, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset())); + std::vector inputData = originalInputData; // at this point if we require it permute the input data const 
armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; @@ -1714,70 +1741,76 @@ LayerTestResult DepthwiseConvolution2dTestImpl( } auto input = MakeTensor(inputTensorInfo, inputData); - std::vector biasV(QuantizedVector(biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(), - {0, 2, 1, -1})); + std::vector biasV = QuantizedVector({ 0, 2, 1, -1 }, + biasDesc.GetQuantizationScale(), + biasDesc.GetQuantizationOffset()); + auto bias = MakeTensor(biasDesc, biasV); std::vector kernelData = std::vector( - QuantizedVector(kernelDesc.GetQuantizationScale(), kernelDesc.GetQuantizationOffset(), { - 1, 1, 1, - 1, -1, 1, - 1, 1, 1, - 1, 1, 1, - 1, 1, 1, - - 2, 2, 2, - 2, 2, 2, - 2, 2, 2, - 2, 2, 2, - 2, 2, 2, - - 0, 0, 0, - 0, -1, 0, - 0, 0, 0, - 0, 0, 0, - 0, 0, 0, - - 0, 0, 0, - 0, 0, 0, - 0, 1, 0, - 0, 0, 0, - 0, 0, 0 - - })); + QuantizedVector({ + 1, 1, 1, + 1, -1, 1, + 1, 1, 1, + 1, 1, 1, + 1, 1, 1, + + 2, 2, 2, + 2, 2, 2, + 2, 2, 2, + 2, 2, 2, + 2, 2, 2, + + 0, 0, 0, + 0, -1, 0, + 0, 0, 0, + 0, 0, 0, + 0, 0, 0, + + 0, 0, 0, + 0, 0, 0, + 0, 1, 0, + 0, 0, 0, + 0, 0, 0 + }, + kernelDesc.GetQuantizationScale(), + kernelDesc.GetQuantizationOffset())); + auto kernel = MakeTensor(kernelDesc, kernelData); // Manually calculated. std::vector originalOutputImage = std::vector( - QuantizedVector(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), { - 3.5f, 3.5f, 3.5f, 3.5f, 3.5f, 3.5f, 3.5f, - 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, - 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, - 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, - 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, - 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, + QuantizedVector({ + 3.5f, 3.5f, 3.5f, 3.5f, 3.5f, 3.5f, 3.5f, + 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, + 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, + 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, + 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, 6.5f, + 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, 5.0f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, - 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, - 8.0f, 8.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 8.0f, 8.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 10.0f, 10.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 10.0f, 10.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 10.0f, 10.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 10.0f, 10.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, - 8.0f, 8.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, - - 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, - 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, - 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, - 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, - 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, - 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f - })); + 8.0f, 8.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, + 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f + }, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset())); // Optionally apply bias to output image. if(biasEnabled) @@ -2016,8 +2049,7 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestCommon( // Use a single-batch 2-channel 5x5 image as input. 
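// A note on the NCHW -> NHWC handling in the tests above: an
// armnn::PermutationVector maps each source dimension to its destination slot,
// so { 0, 3, 1, 2 } sends N->0, C->3, H->1, W->2, i.e. a {N, C, H, W} shape
// such as { 1, 2, 5, 5 } becomes { 1, 5, 5, 2 }. A hedged usage sketch,
// assuming the armnnUtils permute helpers and illustrative buffer names:
//
//   const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
//   armnn::TensorInfo nhwcInfo = armnnUtils::Permuted(nchwInfo, NCHWToNHWC);
//   armnnUtils::Permute(nhwcInfo.GetShape(), NCHWToNHWC,
//                       nchwData.data(), nhwcData.data(), sizeof(T));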
armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType); auto input = MakeTensor(inputTensorInfo, std::vector( - QuantizedVector(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), - { + QuantizedVector({ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, @@ -2029,13 +2061,14 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestCommon( 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49 - }))); + }, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset()))); // Use a depth multiplier of 1 on a 2-channel 4x4 kernel. armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType); auto kernel = MakeTensor(kernelTensorInfo, std::vector( - QuantizedVector(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), - { + QuantizedVector({ 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, @@ -2045,14 +2078,15 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestCommon( 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1 - }))); + }, + kernelTensorInfo.GetQuantizationScale(), + kernelTensorInfo.GetQuantizationOffset()))); // Expected output is 1 batch of a 2-channel 5x5 image. // Calculated using the python tensorflow library with strideX=1, strideY=1. armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType); boost::multi_array expectedOutput = MakeTensor(outputTensorInfo, std::vector( - QuantizedVector(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), - { + QuantizedVector({ 1062, 1580, 1850, 1530, 1117, 2140, 3108, 3500, 2842, 2042, 3580, 5068, 5460, 4342, 3062, @@ -2064,7 +2098,9 @@ LayerTestResult DepthwiseConvolution2dAsymmetricTestCommon( 3390, 4886, 5022, 4068, 2916, 3566, 5056, 5182, 4133, 2922, 3100, 4352, 4452, 3517, 2465 - }))); + }, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset()))); return DepthwiseConvolution2dAsymmetricTestImpl( workloadFactory, @@ -2097,8 +2133,7 @@ LayerTestResult DepthwiseConvolution2dNhwcTestCommon( armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType); auto input = MakeTensor(inputTensorInfo, std::vector( - QuantizedVector(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), - { + QuantizedVector({ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, @@ -2110,12 +2145,13 @@ LayerTestResult DepthwiseConvolution2dNhwcTestCommon( 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49 - }))); + }, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset()))); armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType); auto kernel = MakeTensor(kernelTensorInfo, std::vector( - QuantizedVector(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), - { + QuantizedVector({ 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, @@ -2125,12 +2161,13 @@ LayerTestResult DepthwiseConvolution2dNhwcTestCommon( 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1 - }))); + }, + kernelTensorInfo.GetQuantizationScale(), + kernelTensorInfo.GetQuantizationOffset()))); armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType); boost::multi_array expectedOutput = MakeTensor(outputTensorInfo, std::vector( - QuantizedVector(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), - { + QuantizedVector({ 1062, 1580, 1850, 1530, 1117, 2140, 3108, 3500, 2842, 2042, 3580, 5068, 5460, 4342, 3062, @@ -2142,7 +2179,9 @@ LayerTestResult DepthwiseConvolution2dNhwcTestCommon( 3390, 4886, 5022, 4068, 2916, 3566, 5056, 5182, 4133, 2922, 3100, 4352, 
4452, 3517, 2465 - }))); + }, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset()))); return DepthwiseConvolution2dTestImpl( workloadFactory, @@ -2175,27 +2214,29 @@ LayerTestResult SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon( armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9}, ArmnnType); auto input = MakeTensor(inputTensorInfo, std::vector( - QuantizedVector(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), - { - 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1, 1, 1, 0, 0, 0, - 0, 0, 0, 1, 1, 1, 0, 0, 0, - 0, 0, 0, 1, 1, 1, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0 - }))); + QuantizedVector({ + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 1, 1, 0, 0, 0, + 0, 0, 0, 1, 1, 1, 0, 0, 0, + 0, 0, 0, 1, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0 + }, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset()))); armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType); auto kernel = MakeTensor(kernelTensorInfo, std::vector( - QuantizedVector(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), - { - 1, 2, 3, - 4, 5, 6, - 7, 8, 9 - }))); + QuantizedVector({ + 1, 2, 3, + 4, 5, 6, + 7, 8, 9 + }, + kernelTensorInfo.GetQuantizationScale(), + kernelTensorInfo.GetQuantizationOffset()))); uint32_t padLeft = 0; uint32_t padTop = 0; @@ -2209,12 +2250,13 @@ LayerTestResult SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTestCommon( // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s. 
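// Worked check of that comment: with dilation d the effective kernel extent is
//   kEff = (k - 1) * d + 1 = (3 - 1) * 3 + 1 = 7
// so for the 9x9 input with stride 1 and no padding,
//   outputSize = (9 - kEff) / stride + 1 = (9 - 7) / 1 + 1 = 3.
// Every output position samples the central 3x3 block of ones at exactly one
// tap, and that tap is always the kernel centre (value 5), hence the all-5s
// 3x3 expected output below.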
armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3}, ArmnnType); boost::multi_array expectedOutput = MakeTensor(outputTensorInfo, std::vector( - QuantizedVector(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), - { - 5, 5, 5, - 5, 5, 5, - 5, 5, 5 - }))); + QuantizedVector({ + 5, 5, 5, + 5, 5, 5, + 5, 5, 5 + }, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset()))); return DepthwiseConvolution2dTestImpl( workloadFactory, @@ -2284,17 +2326,18 @@ LayerTestResult DepthwiseConvolution2d3x3DilationTestCommon( outputTensorInfo.SetQuantizationOffset(qOffset); auto input = MakeTensor(inputTensorInfo, - std::vector(QuantizedVector(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputNoQuantizedValues))); + std::vector(QuantizedVector(inputNoQuantizedValues, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset()))); auto kernel = MakeTensor(kernelTensorInfo, - std::vector(QuantizedVector(kernelTensorInfo.GetQuantizationScale(), - kernelTensorInfo.GetQuantizationOffset(), - kernelNoQuantizedValues))); - auto expectedOutput = MakeTensor(outputTensorInfo, - std::vector(QuantizedVector(outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset(), - outputExpectedNoQuantizedValues))); + std::vector(QuantizedVector(kernelNoQuantizedValues, + kernelTensorInfo.GetQuantizationScale(), + kernelTensorInfo.GetQuantizationOffset()))); + auto expectedOutput = + MakeTensor(outputTensorInfo, + std::vector(QuantizedVector(outputExpectedNoQuantizedValues, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset()))); uint32_t padLeft = 0; uint32_t padTop = 0; diff --git a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp index 1997c4b..023bbae 100644 --- a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp @@ -5,6 +5,7 @@ #include "DebugTestImpl.hpp" +#include #include #include @@ -40,11 +41,11 @@ LayerTestResult DebugTestImpl( } boost::multi_array input = - MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, inputData)); + MakeTensor(inputTensorInfo, armnnUtils::QuantizedVector(inputData, qScale, qOffset)); LayerTestResult ret(outputTensorInfo); ret.outputExpected = - MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, outputExpectedData)); + MakeTensor(outputTensorInfo, armnnUtils::QuantizedVector(outputExpectedData, qScale, qOffset)); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); diff --git a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp index e21a4b6..4e8c938 100644 --- a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp @@ -5,7 +5,7 @@ #include "DepthToSpaceTestImpl.hpp" -#include +#include #include @@ -44,10 +44,12 @@ LayerTestResult DepthToSpaceTestImpl( outputInfo.SetQuantizationOffset(qOffset); } - boost::multi_array input = MakeTensor(inputInfo, QuantizedVector(qScale, qOffset, inputData)); + boost::multi_array input = + MakeTensor(inputInfo, armnnUtils::QuantizedVector(inputData, qScale, qOffset)); LayerTestResult result(outputInfo); - result.outputExpected = MakeTensor(outputInfo, QuantizedVector(qScale, qOffset, expectedOutputData)); + 
result.outputExpected = + MakeTensor(outputInfo, armnnUtils::QuantizedVector(expectedOutputData, qScale, qOffset)); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputInfo); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputInfo); diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp index c84b941..cf101ee 100644 --- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp @@ -7,6 +7,8 @@ #include +#include + #include #include @@ -191,15 +193,17 @@ LayerTestResult FullyConnectedLargeTestCommon( LayerTestResult result(outputTensorInfo); boost::multi_array input = MakeTensor(inputTensorInfo, - QuantizedVector(qScale, qOffset, { + armnnUtils::QuantizedVector({ 1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f, - }) + }, + qScale, qOffset) ); boost::multi_array weights = MakeTensor(weightsDesc, - QuantizedVector(qScale, qOffset, { + armnnUtils::QuantizedVector({ 2.0f, 3.0f, 4.0f, 5.0f, 6.0f - }) + }, + qScale, qOffset) ); std::vector biasValues({900000.f}); @@ -215,10 +219,7 @@ LayerTestResult FullyConnectedLargeTestCommon( ); result.outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(qScale, qOffset, { - 965432.0f, - }) - ); + armnnUtils::QuantizedVector({ 965432.0f }, qScale, qOffset)); return result; } diff --git a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp index 4e9cbbf..d25fcea 100644 --- a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp @@ -5,6 +5,7 @@ #include "InstanceNormalizationTestImpl.hpp" +#include #include #include @@ -14,7 +15,6 @@ #include #include -#include #include #include @@ -35,12 +35,12 @@ LayerTestResult InstanceNormTestImpl( float qScale = 0.0f, int32_t qOffset = 0) { - auto inputTensor = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, inputValues)); + auto inputTensor = MakeTensor(inputTensorInfo, + armnnUtils::QuantizedVector(inputValues, qScale, qOffset)); LayerTestResult result(outputTensorInfo); - - result.outputExpected = - MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, expectedOutputValues)); + result.outputExpected = MakeTensor(outputTensorInfo, + armnnUtils::QuantizedVector(expectedOutputValues, qScale, qOffset)); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); diff --git a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp index 5c75b6f..569f5af 100644 --- a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp @@ -6,6 +6,7 @@ #include "L2NormalizationTestImpl.hpp" #include +#include #include #include @@ -44,10 +45,10 @@ LayerTestResult L2NormalizationTestImpl( inputData = tmp; } - auto inputTensor = MakeTensor(inputTensorInfo, QuantizedVector( - inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); + auto inputTensor = MakeTensor(inputTensorInfo, + armnnUtils::QuantizedVector(inputData, + 
inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset())); std::vector expectedOutputData = expectedOutputValues; if (layout == armnn::DataLayout::NHWC) @@ -59,10 +60,11 @@ LayerTestResult L2NormalizationTestImpl( } LayerTestResult result(outputTensorInfo); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( - outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset(), - expectedOutputData)); + result.outputExpected = + MakeTensor(outputTensorInfo, + armnnUtils::QuantizedVector(expectedOutputData, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset())); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -693,16 +695,10 @@ LayerTestResult L2Normalization2dShapeTest( const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0); const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0); - auto inputTensor = MakeTensor(inputTensorInfo, QuantizedVector( - inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); + auto inputTensor = MakeTensor(inputTensorInfo, inputData); LayerTestResult result(outputTensorInfo); - result.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector( - outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset(), - expectedOutputData)); + result.outputExpected = MakeTensor(outputTensorInfo, expectedOutputData); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); diff --git a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp index 0b73d37..4c340c8 100644 --- a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp @@ -6,6 +6,7 @@ #include "LogSoftmaxTestImpl.hpp" #include +#include #include #include @@ -14,7 +15,6 @@ #include #include -#include #include #include @@ -39,7 +39,7 @@ LayerTestResult LogSoftmaxTestImpl( { LayerTestResult result(outputInfo); result.outputExpected = - MakeTensor(outputInfo, QuantizedVector(qScale, qOffset, expectedOutputValues)); + MakeTensor(outputInfo, armnnUtils::QuantizedVector(expectedOutputValues, qScale, qOffset)); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputInfo); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputInfo); @@ -54,7 +54,7 @@ LayerTestResult LogSoftmaxTestImpl( inputHandle->Allocate(); outputHandle->Allocate(); - auto inputTensor = MakeTensor(inputInfo, QuantizedVector(qScale, qOffset, inputValues)); + auto inputTensor = MakeTensor(inputInfo, armnnUtils::QuantizedVector(inputValues, qScale, qOffset)); CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin()); workload->Execute(); diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp index c07f623..6cea777 100644 --- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp @@ -5,11 +5,12 @@ #include "LstmTestImpl.hpp" +#include + #include #include -#include #include #include @@ -1963,13 +1964,19 @@ LayerTestResult 
LstmLayerInt16NoCifgNoPeepholeNoProjectionTest( const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8; armnn::TensorInfo inputDesc({2, 2}, datatype); - boost::multi_array input = MakeTensor(inputDesc, QuantizedVector(qScale, qOffset, - std::vector{2., 3., 3., 4.})); + boost::multi_array input = MakeTensor( + inputDesc, + armnnUtils::QuantizedVector({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset)); armnn::TensorInfo outputDesc({2, 4}, datatype); - boost::multi_array expectedOutput = MakeTensor(outputDesc, QuantizedVector(qScale, - qOffset, std::vector({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f, - -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}))); + boost::multi_array expectedOutput = MakeTensor( + outputDesc, + armnnUtils::QuantizedVector( + { + -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f, + -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f + }, + qScale, qOffset)); return LstmNoCifgNoPeepholeNoProjectionTestImpl( workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype); @@ -1987,14 +1994,21 @@ LayerTestResult LstmLayerInt16WithCifgWithPeepholeNoProjectionTest( const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8; armnn::TensorInfo inputDesc({ 2, 2 }, datatype); - boost::multi_array input = MakeTensor(inputDesc, QuantizedVector(qScale, qOffset, - std::vector({ 2., 3., 3., 4. }))); + boost::multi_array input = + MakeTensor( + inputDesc, + armnnUtils::QuantizedVector({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset)); armnn::TensorInfo outputDesc({ 2, 4 }, datatype); - boost::multi_array expectedOutput = MakeTensor(outputDesc, QuantizedVector(qScale, - qOffset, std::vector( - {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f, - -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}))); + boost::multi_array expectedOutput = + MakeTensor( + outputDesc, + armnnUtils::QuantizedVector( + { + -0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f, + -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f + }, + qScale, qOffset)); return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl( workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype); @@ -2011,20 +2025,32 @@ LayerTestResult LstmLayerInt16NoCifgWithPeepholeWithProjectionTest( const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8; armnn::TensorInfo inputDesc({ 2, 5 }, datatype); - boost::multi_array input = MakeTensor(inputDesc, QuantizedVector(qScale, - qOffset, std::vector( - {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f, - 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}))); + boost::multi_array input = + MakeTensor( + inputDesc, + armnnUtils::QuantizedVector( + { + 0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f, + 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f + }, + qScale, qOffset)); armnn::TensorInfo outputDesc({ 2, 16 }, datatype); - boost::multi_array expectedOutput = MakeTensor(outputDesc, QuantizedVector(qScale, - qOffset, std::vector( - {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f, - -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f, - -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f, - 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f, - -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f, - 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f}))); + boost::multi_array expectedOutput = + MakeTensor( + outputDesc, + armnnUtils::QuantizedVector( + { + -0.00396806f, 
0.02935200f, -0.00279226f, 0.01599770f, + -0.00835576f, -0.02117790f, 0.02835120f, -0.01145970f, + 0.00907307f, -0.02440040f, -0.01521910f, -0.02590630f, + 0.00914318f, 0.00415118f, 0.01714700f, 0.01342030f, + -0.01386900f, 0.02872680f, -0.00334693f, 0.00733398f, + -0.02879260f, -0.01869260f, 0.01936620f, -0.01154370f, + 0.00422612f, -0.03452320f, 0.00223253f, -0.00957321f, + 0.02106240f, 0.01333100f, 0.01509540f, 0.02168000f + }, + qScale, qOffset)); return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl( workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype); @@ -2040,13 +2066,20 @@ LayerTestResult LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16Const const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16 armnn::TensorInfo inputDesc({2, 2}, datatype); - boost::multi_array input = MakeTensor(inputDesc, QuantizedVector(qScale, - qOffset, std::vector{2., 3., 3., 4.})); + boost::multi_array input = + MakeTensor(inputDesc, + armnnUtils::QuantizedVector({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset)); armnn::TensorInfo outputDesc({2, 4}, datatype); - boost::multi_array expectedOutput = MakeTensor(outputDesc, QuantizedVector(qScale, - qOffset, std::vector({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f, - -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}))); + boost::multi_array expectedOutput = + MakeTensor( + outputDesc, + armnnUtils::QuantizedVector( + { + -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f, + -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f + }, + qScale, qOffset)); return LstmNoCifgNoPeepholeNoProjectionTestImpl( workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype); diff --git a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp index 82b772e..0f9a30e 100644 --- a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp @@ -5,6 +5,8 @@ #include "PadTestImpl.hpp" +#include + #include #include @@ -28,28 +30,27 @@ LayerTestResult Pad2dTestCommon( const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset); const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset); - std::vector inputValues( - QuantizedVector(qScale, qOffset, - { - // Height (3) x Width (3) - 4, 8, 6, - 7, 4, 4, - 3, 2, 4 - })); + std::vector inputValues = armnnUtils::QuantizedVector( + { + // Height (3) x Width (3) + 4, 8, 6, + 7, 4, 4, + 3, 2, 4 + }, + qScale, qOffset); auto p = customPaddingValue; - std::vector expectedOutputValues; - expectedOutputValues = ( - QuantizedVector(qScale, qOffset, - { - p, p, p, p, p, p, p, - p, p, p, p, p, p, p, - p, p, 4, 8, 6, p, p, - p, p, 7, 4, 4, p, p, - p, p, 3, 2, 4, p, p, - p, p, p, p, p, p, p, - p, p, p, p, p, p, p - })); + std::vector expectedOutputValues = armnnUtils::QuantizedVector( + { + p, p, p, p, p, p, p, + p, p, p, p, p, p, p, + p, p, 4, 8, 6, p, p, + p, p, 7, 4, 4, p, p, + p, p, 3, 2, 4, p, p, + p, p, p, p, p, p, p, + p, p, p, p, p, p, p + }, + qScale, qOffset); auto inputTensor = MakeTensor(inputTensorInfo, std::vector(inputValues)); @@ -100,41 +101,39 @@ LayerTestResult Pad3dTestCommon( const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset); const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset); - std::vector inputValues( - QuantizedVector(qScale,qOffset, - { - // Channel 0, Height (2) x Width (2) - 0, 
4, - 2, 5, - - // Channel 1, Height (2) x Width (2) - 6, 1, - 5, 2 - })); - - std::vector expectedOutputValues( - QuantizedVector(qScale,qOffset, - { - - 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, - 0, 0, 0, 4, 0, 0, - 0, 0, 2, 5, 0, 0, - 0, 0, 0, 0, 0, 0, - - 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, - 0, 0, 6, 1, 0, 0, - 0, 0, 5, 2, 0, 0, - 0, 0, 0, 0, 0, 0, - - 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0 - - })); + std::vector inputValues = armnnUtils::QuantizedVector( + { + // Channel 0, Height (2) x Width (2) + 0, 4, + 2, 5, + + // Channel 1, Height (2) x Width (2) + 6, 1, + 5, 2 + }, + qScale, qOffset); + + std::vector expectedOutputValues = armnnUtils::QuantizedVector( + { + 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + 0, 0, 0, 4, 0, 0, + 0, 0, 2, 5, 0, 0, + 0, 0, 0, 0, 0, 0, + + 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + 0, 0, 6, 1, 0, 0, + 0, 0, 5, 2, 0, 0, + 0, 0, 0, 0, 0, 0, + + 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 + }, + qScale, qOffset); auto inputTensor = MakeTensor(inputTensorInfo, std::vector(inputValues)); @@ -185,193 +184,193 @@ LayerTestResult Pad4dTestCommon( const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset); const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset); - std::vector inputValues( - QuantizedVector(qScale,qOffset, - { - // Batch 0, Channel 0, Height (3) x Width (2) - 0, 1, - 2, 3, - 4, 5, - - // Batch 0, Channel 1, Height (3) x Width (2) - 6, 7, - 8, 9, - 10, 11, - - // Batch 1, Channel 0, Height (3) x Width (2) - 12, 13, - 14, 15, - 16, 17, - - // Batch 1, Channel 1, Height (3) x Width (2) - 18, 19, - 20, 21, - 22, 23 - })); - - std::vector expectedOutputValues( - QuantizedVector(qScale,qOffset, - { - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 1, 0, - 0, 2, 3, 0, - 0, 4, 5, 0, - 0, 0, 0, 0, - - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 6, 7, 0, - 0, 8, 9, 0, - 0, 10, 11, 0, - 0, 0, 0, 0, - - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 12, 13, 0, - 0, 14, 15, 0, - 0, 16, 17, 0, - 0, 0, 0, 0, - - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 18, 19, 0, - 0, 20, 21, 0, - 0, 22, 23, 0, - 0, 0, 0, 0, - - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - - 0, 0, 0, 0, - 0, 0, 0, 0, - 
0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0 - })); + std::vector inputValues = armnnUtils::QuantizedVector( + { + // Batch 0, Channel 0, Height (3) x Width (2) + 0, 1, + 2, 3, + 4, 5, + + // Batch 0, Channel 1, Height (3) x Width (2) + 6, 7, + 8, 9, + 10, 11, + + // Batch 1, Channel 0, Height (3) x Width (2) + 12, 13, + 14, 15, + 16, 17, + + // Batch 1, Channel 1, Height (3) x Width (2) + 18, 19, + 20, 21, + 22, 23 + }, + qScale, qOffset); + + std::vector expectedOutputValues = armnnUtils::QuantizedVector( + { + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 1, 0, + 0, 2, 3, 0, + 0, 4, 5, 0, + 0, 0, 0, 0, + + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 6, 7, 0, + 0, 8, 9, 0, + 0, 10, 11, 0, + 0, 0, 0, 0, + + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 12, 13, 0, + 0, 14, 15, 0, + 0, 16, 17, 0, + 0, 0, 0, 0, + + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 18, 19, 0, + 0, 20, 21, 0, + 0, 22, 23, 0, + 0, 0, 0, 0, + + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0 + }, + qScale, qOffset); auto inputTensor = MakeTensor(inputTensorInfo, std::vector(inputValues)); diff --git a/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp index ef48c97..fe0d076 100644 --- a/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp @@ -12,7 +12,6 @@ #include #include -#include #include #include diff --git a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp index f250fa5..fcc8980 100644 --- a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp @@ 
-9,12 +9,12 @@ #include #include +#include #include #include #include -#include #include #include @@ -25,6 +25,8 @@ namespace { +using namespace armnnUtils; + template> LayerTestResult SimplePooling2dTestImpl( armnn::IWorkloadFactory& workloadFactory, @@ -187,7 +189,7 @@ LayerTestResult SimpleMaxPooling2dSize3x3Stride2x4TestCommon( inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end()); std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator); - auto input = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, inputData)); + auto input = MakeTensor(inputTensorInfo, QuantizedVector(inputData, qScale, qOffset)); // These were calculated manually. auto shape(GetTensorShapeAsArray<4>(outputTensorInfo)); @@ -195,7 +197,7 @@ LayerTestResult SimpleMaxPooling2dSize3x3Stride2x4TestCommon( if (forceNoPadding) { outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 8.0f, 8.0f, 8.0f, 9.0f, 7.0f, 9.0f, 9.0f, 9.0f, 9.0f, @@ -211,12 +213,13 @@ LayerTestResult SimpleMaxPooling2dSize3x3Stride2x4TestCommon( 0.0f, 0.0f, -3.0f, -1.0f, 0.0f, 0.0f, -1.0f, -1.0f, -1.0f - })); + }, + qScale, qOffset)); } else { outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f, 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f, 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f, @@ -232,7 +235,8 @@ LayerTestResult SimpleMaxPooling2dSize3x3Stride2x4TestCommon( 0.0f, 0.0f, 0.0f, 0.0f,-3.0f, 0.0f, 0.0f,-1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,-1.0f,-1.0f,-1.0f,-1.0f, 0.0f - })); + }, + qScale, qOffset)); } return SimplePooling2dTestImpl( @@ -267,7 +271,7 @@ LayerTestResult SimpleMaxPooling2dTestCommon( } std::vector inputData( - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 1.0f, 2.0f, 5.0f, 6.0f, 3.0f, 4.0f, 7.0f, 8.0f, 9.0f, 10.0f, 13.0f, 14.0f, @@ -277,16 +281,18 @@ LayerTestResult SimpleMaxPooling2dTestCommon( 19.0f, 20.0f, 23.0f, 24.0f, 25.0f, 26.0f, 29.0f, 30.0f, 27.0f, 28.0f, 31.0f, 32.0f, - })); + }, + qScale, qOffset)); std::vector outputData( - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 4.0f, 8.0f, 12.0f, 16.0f, 20.0f, 24.0f, 28.0f, 32.0f, - })); + }, + qScale, qOffset)); const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; if (dataLayout == armnn::DataLayout::NHWC) @@ -336,7 +342,7 @@ LayerTestResult SimpleAveragePooling2dTestCommon( } std::vector inputData( - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 2.0f, 2.0f, 6.0f, 6.0f, 4.0f, 4.0f, 8.0f, 8.0f, 10.0f, 12.0f, 14.0f, 16.0f, @@ -346,16 +352,18 @@ LayerTestResult SimpleAveragePooling2dTestCommon( 20.0f, 18.0f, 22.0f, 24.0f, 26.0f, 28.0f, 0.0f, 0.0f, 26.0f, 28.0f, 0.0f, 0.0f, - })); + }, + qScale, qOffset)); std::vector outputData( - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 3.0f, 7.0f, 11.0f, 15.0f, 19.0f, 23.0f, 27.0f, 0.0f, - })); + }, + qScale, qOffset)); const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; if (dataLayout == armnn::DataLayout::NHWC) @@ -447,7 +455,7 @@ LayerTestResult SimpleL2Pooling2dTestCommon( armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType); std::vector inputData( - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 1.0f, 7.0f, 5.0f, 5.0f, 1.0f, 7.0f, 5.0f, 5.0f, 3.0f, 3.0f, 1.0f, 1.0f, @@ -457,16 +465,18 @@ LayerTestResult SimpleL2Pooling2dTestCommon( 1.0f, 7.0f, 2.0f, 0.0f, 0.0f, 2.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, - })); + }, + qScale, 
qOffset)); std::vector outputData( - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 5.0f, 5.0f, 3.0f, 1.0f, 5.0f, 1.0f, 1.0f, 1.0f, - })); + }, + qScale, qOffset)); const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; if (dataLayout == armnn::DataLayout::NHWC) @@ -503,19 +513,21 @@ LayerTestResult L2Pooling2dSize3Stride1TestCommon( armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType); auto input = MakeTensor(inputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, - })); + }, + qScale, qOffset)); armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType); auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 3.0f, 3.0f, 3.0f, 3.0f, - })); + }, + qScale, qOffset)); return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); @@ -536,7 +548,7 @@ LayerTestResult L2Pooling2dSize3Stride3TestCommon( armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType); auto input = MakeTensor(inputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, @@ -546,15 +558,17 @@ LayerTestResult L2Pooling2dSize3Stride3TestCommon( 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, - })); + }, + qScale, qOffset)); armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType); auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, 3.0f, - })); + }, + qScale, qOffset)); return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); @@ -575,7 +589,7 @@ LayerTestResult L2Pooling2dSize3Stride4TestCommon( armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType); auto input = MakeTensor(inputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f, 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f, 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f, @@ -583,14 +597,16 @@ LayerTestResult L2Pooling2dSize3Stride4TestCommon( 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f, 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f, 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f, - })); + }, + qScale, qOffset)); armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType); auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 3.0f, 3.0f, 3.0f, 3.0f, - })); + }, + qScale, qOffset)); return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); @@ -611,7 +627,7 @@ LayerTestResult L2Pooling2dSize7TestCommon( armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType); auto input = MakeTensor(inputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f, @@ -619,13 +635,15 @@ LayerTestResult L2Pooling2dSize7TestCommon( 0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, - })); + }, + qScale, qOffset)); armnn::TensorInfo 
outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType); auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 3.0f, - })); + }, + qScale, qOffset)); return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); @@ -646,7 +664,7 @@ LayerTestResult L2Pooling2dSize9TestCommon( armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType); auto input = MakeTensor(inputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, @@ -656,13 +674,15 @@ LayerTestResult L2Pooling2dSize9TestCommon( 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, - })); + }, + qScale, qOffset)); armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType); auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 3.0f, - })); + }, + qScale, qOffset)); return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); @@ -693,15 +713,17 @@ LayerTestResult AsymmetricNonSquarePooling2dTestCommon( // Construct input data. auto input = MakeTensor(inputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 1.0f, 3.0f, 4.0f, - })); + }, + qScale, qOffset)); // These were calculated manually. auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 0.0f, 3.0f, 0.0f, 3.0f, - })); + }, + qScale, qOffset)); return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); @@ -883,11 +905,11 @@ LayerTestResult SimpleMaxPooling2dSize2x2Stride2x2TestCommon( outputTensorInfo.SetQuantizationOffset(qOffset); } - auto input = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, inputData)); + auto input = MakeTensor(inputTensorInfo, QuantizedVector(inputData, qScale, qOffset)); auto outputExpected = MakeTensor(outputTensorInfo, - forceNoPadding ? QuantizedVector(qScale, qOffset, expectedOutputDataNoPadding) : - QuantizedVector(qScale, qOffset, expectedOutputDataWithPadding)); + forceNoPadding ? QuantizedVector(expectedOutputDataNoPadding, qScale, qOffset) : + QuantizedVector(expectedOutputDataWithPadding, qScale, qOffset)); return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); @@ -961,11 +983,11 @@ LayerTestResult IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon( outputTensorInfo.SetQuantizationOffset(qOffset); } - auto input = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, inputData)); + auto input = MakeTensor(inputTensorInfo, QuantizedVector(inputData, qScale, qOffset)); auto outputExpected = MakeTensor(outputTensorInfo, - forceNoPadding ? QuantizedVector(qScale, qOffset, expectedOutputDataNoPadding) : - QuantizedVector(qScale, qOffset, expectedOutputDataWithPadding)); + forceNoPadding ? 
QuantizedVector(expectedOutputDataNoPadding, qScale, qOffset) : + QuantizedVector(expectedOutputDataWithPadding, qScale, qOffset)); return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); @@ -1002,19 +1024,21 @@ LayerTestResult IgnorePaddingSimpleMaxPooling2dTestCommon( } auto input = MakeTensor(inputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ -1.0f, -2.0f, 3.0f, 4.0f, -1.0f, -2.0f, 3.0f, 4.0f, 1.0f, 2.0f, -3.0f, -4.0f, 1.0f, 2.0f, -3.0f, -4.0f, - })); + }, + qScale, qOffset)); auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ -1.0f, 3.0f, 4.0f, 1.0f, 3.0f, 4.0f, 1.0f, 2.0f, -4.0f, - })); + }, + qScale, qOffset)); return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); @@ -1050,20 +1074,22 @@ LayerTestResult IgnorePaddingMaxPooling2dSize3TestCommon( } auto input = MakeTensor(inputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ -1.0f, -2.0f, 3.0f, 4.0f, -1.0f, -2.0f, 3.0f, 4.0f, 1.0f, 2.0f, -3.0f, -4.0f, 1.0f, 2.0f, -3.0f, -4.0f, - })); + }, + qScale, qOffset)); auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ -1.0f, 3.0f, 4.0f, 4.0f, 2.0f, 3.0f, 4.0f, 4.0f, 2.0f, 3.0f, 4.0f, 4.0f, 2.0f, 2.0f, 2.0f, -3.0f, - })); + }, + qScale, qOffset)); return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); @@ -1099,19 +1125,21 @@ LayerTestResult IgnorePaddingSimpleAveragePooling2dTestCommon( } auto input = MakeTensor(inputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 12.0f, 20.0f, 32.0f, 40.0f, 12.0f, 20.0f, 32.0f, 40.0f, 12.0f, 20.0f, 32.0f, 40.0f, 12.0f, 20.0f, 32.0f, 40.0f, - })); + }, + qScale, qOffset)); auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 3.0f, 13.0f, 10.0f, 6.0f, 26.0f, 20.0f, 3.0f, 13.0f, 10.0f, - })); + }, + qScale, qOffset)); return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); @@ -1148,18 +1176,20 @@ LayerTestResult IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon( } auto input = MakeTensor(inputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, - })); + }, + qScale, qOffset)); auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 2.0f, 3.5f, 2.0f, 3.5f - })); + }, + qScale, qOffset)); return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); @@ -1195,20 +1225,22 @@ LayerTestResult IgnorePaddingAveragePooling2dSize3TestCommon( } auto input = MakeTensor(inputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 9.0f, 27.0f, 18.0f, 36.0f, 18.0f, 9.0f, 18.0f, 9.0f, 27.0f, 18.0f, 9.0f, 27.0f, 9.0f, 27.0f, 9.0f, 18.0f, - })); + }, + qScale, qOffset)); auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 7.0f, 11.0f, 13.0f, 9.0f, 12.0f, 17.0f, 19.0f, 13.0f, 12.0f, 16.0f, 16.0f, 10.0f, 9.0f, 11.0f, 12.0f, 7.0f, - })); + }, + qScale, qOffset)); return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); @@ -1244,19 +1276,21 @@ LayerTestResult 
IgnorePaddingSimpleL2Pooling2dTestCommon( } auto input = MakeTensor(inputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 2.0f, 4.0f, 8.0f, 16.0f, 4.0f, 2.0f, 2.0f, 4.0f, 8.0f, 2.0f, 4.0f, 2.0f, 16.0f, 2.0f, 2.0f, 8.0f, - })); + }, + qScale, qOffset)); auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 1.0f, 4.4721f, 8.0f, 4.4721f, 2.6457f, 2.236f, 8.0f, 1.4142f, 4.0f, - })); + }, + qScale, qOffset)); return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); @@ -1292,20 +1326,22 @@ LayerTestResult IgnorePaddingL2Pooling2dSize3TestCommon( } auto input = MakeTensor(inputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, - })); + }, + qScale, qOffset)); auto outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(qScale, qOffset, { + QuantizedVector({ 1.0540f, 1.7638f, 2.5385f, 2.3570f, 1.2909f, 2.1602f, 3.1091f, 2.8867f, 1.2909f, 2.1602f, 3.1091f, 2.8867f, 1.0540f, 1.7638f, 2.5385f, 2.3570f, - })); + }, + qScale, qOffset)); return SimplePooling2dTestImpl( workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected); diff --git a/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp index 18a5bd0..dc9b908 100644 --- a/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp @@ -7,6 +7,7 @@ #include "LayerTestResult.hpp" +#include #include #include @@ -57,18 +58,22 @@ LayerTestResult PreluTest( 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, -1.0f, -2.0f, 0.0f, -2.0f, -4.0f }; - auto input = MakeTensor(inputTensorInfo, QuantizedVector(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); - auto alpha = MakeTensor(alphaTensorInfo, QuantizedVector(alphaTensorInfo.GetQuantizationScale(), - alphaTensorInfo.GetQuantizationOffset(), - alphaData)); + auto input = MakeTensor(inputTensorInfo, + armnnUtils::QuantizedVector(inputData, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset())); + + auto alpha = MakeTensor(alphaTensorInfo, + armnnUtils::QuantizedVector(alphaData, + alphaTensorInfo.GetQuantizationScale(), + alphaTensorInfo.GetQuantizationOffset())); LayerTestResult result(outputTensorInfo); - result.outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset(), - outputExpectedData)); + result.outputExpected = + MakeTensor(outputTensorInfo, + armnnUtils::QuantizedVector(outputExpectedData, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset())); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr alphaHandle = workloadFactory.CreateTensorHandle(alphaTensorInfo); diff --git a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp index bb2392f..56ce51a 100644 --- a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp +++ b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp @@ -8,6 +8,7 @@ #include "LayerTestResult.hpp" #include +#include #include #include @@ -76,9 +77,10 @@ LayerTestResult ResizeBilinearNopTest( inputData = tmp; } - auto input = 
MakeTensor(inputTensorInfo, QuantizedVector(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); + auto input = MakeTensor(inputTensorInfo, + armnnUtils::QuantizedVector(inputData, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset())); LayerTestResult result(outputTensorInfo); result.outputExpected = input; @@ -174,15 +176,16 @@ LayerTestResult SimpleResizeBilinearTest( outputData = tmp1; } - auto input = MakeTensor(inputTensorInfo, QuantizedVector(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); + auto input = MakeTensor(inputTensorInfo, + armnnUtils::QuantizedVector(inputData, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset())); LayerTestResult result(outputTensorInfo); result.outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset(), - outputData)); + armnnUtils::QuantizedVector(outputData, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset())); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -278,15 +281,16 @@ LayerTestResult ResizeBilinearSqMinTest( outputData = tmp1; } - auto input = MakeTensor(inputTensorInfo, QuantizedVector(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); + auto input = MakeTensor(inputTensorInfo, + armnnUtils::QuantizedVector(inputData, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset())); LayerTestResult result(outputTensorInfo); result.outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset(), - outputData)); + armnnUtils::QuantizedVector(outputData, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset())); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -377,15 +381,16 @@ LayerTestResult ResizeBilinearMinTest( outputData = tmp1; } - auto input = MakeTensor(inputTensorInfo, QuantizedVector(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); + auto input = MakeTensor(inputTensorInfo, + armnnUtils::QuantizedVector(inputData, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset())); LayerTestResult result(outputTensorInfo); result.outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset(), - outputData)); + armnnUtils::QuantizedVector(outputData, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset())); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -484,15 +489,16 @@ LayerTestResult ResizeBilinearMagTest( outputData = tmp1; } - auto input = MakeTensor(inputTensorInfo, QuantizedVector(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); + auto input = MakeTensor(inputTensorInfo, + armnnUtils::QuantizedVector(inputData, + inputTensorInfo.GetQuantizationScale(), + 
inputTensorInfo.GetQuantizationOffset())); LayerTestResult result(outputTensorInfo); result.outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset(), - outputData)); + armnnUtils::QuantizedVector(outputData, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset())); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -573,9 +579,10 @@ LayerTestResult ResizeNearestNeighborNopTest( inputData = tmp; } - auto input = MakeTensor(inputTensorInfo, QuantizedVector(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); + auto input = MakeTensor(inputTensorInfo, + armnnUtils::QuantizedVector(inputData, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset())); LayerTestResult result(outputTensorInfo); result.outputExpected = input; @@ -670,15 +677,16 @@ LayerTestResult SimpleResizeNearestNeighborTest( outputData = tmp1; } - auto input = MakeTensor(inputTensorInfo, QuantizedVector(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); + auto input = MakeTensor(inputTensorInfo, + armnnUtils::QuantizedVector(inputData, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset())); LayerTestResult result(outputTensorInfo); result.outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset(), - outputData)); + armnnUtils::QuantizedVector(outputData, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset())); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -773,15 +781,16 @@ LayerTestResult ResizeNearestNeighborSqMinTest( outputData = tmp1; } - auto input = MakeTensor(inputTensorInfo, QuantizedVector(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); + auto input = MakeTensor(inputTensorInfo, + armnnUtils::QuantizedVector(inputData, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset())); LayerTestResult result(outputTensorInfo); result.outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset(), - outputData)); + armnnUtils::QuantizedVector(outputData, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset())); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -871,15 +880,16 @@ LayerTestResult ResizeNearestNeighborMinTest( outputData = tmp1; } - auto input = MakeTensor(inputTensorInfo, QuantizedVector(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); + auto input = MakeTensor(inputTensorInfo, + armnnUtils::QuantizedVector(inputData, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset())); LayerTestResult result(outputTensorInfo); result.outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset(), - outputData)); + 
armnnUtils::QuantizedVector(outputData, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset())); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -978,15 +988,16 @@ LayerTestResult ResizeNearestNeighborMagTest( outputData = tmp1; } - auto input = MakeTensor(inputTensorInfo, QuantizedVector(inputTensorInfo.GetQuantizationScale(), - inputTensorInfo.GetQuantizationOffset(), - inputData)); + auto input = MakeTensor(inputTensorInfo, + armnnUtils::QuantizedVector(inputData, + inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset())); LayerTestResult result(outputTensorInfo); result.outputExpected = MakeTensor(outputTensorInfo, - QuantizedVector(outputTensorInfo.GetQuantizationScale(), - outputTensorInfo.GetQuantizationOffset(), - outputData)); + armnnUtils::QuantizedVector(outputData, + outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset())); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); diff --git a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp index f0479c8..a60b189 100644 --- a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp @@ -5,6 +5,7 @@ #include "SliceTestImpl.hpp" +#include #include #include @@ -39,11 +40,11 @@ LayerTestResult SliceTestImpl( } boost::multi_array input = - MakeTensor(inputInfo, QuantizedVector(qScale, qOffset, inputData)); + MakeTensor(inputInfo, armnnUtils::QuantizedVector(inputData, qScale, qOffset)); LayerTestResult result(outputInfo); result.outputExpected = - MakeTensor(outputInfo, QuantizedVector(qScale, qOffset, expectedOutputData)); + MakeTensor(outputInfo, armnnUtils::QuantizedVector(expectedOutputData, qScale, qOffset)); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputInfo); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputInfo); diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp index c0b62aa..a5f6477 100644 --- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp @@ -5,13 +5,13 @@ #include "SoftmaxTestImpl.hpp" +#include #include #include #include -#include #include #include @@ -85,8 +85,7 @@ LayerTestResult SimpleSoftmaxBaseTestImpl( LayerTestResult ret(outputTensorInfo); // Each row is independently softmax'd. 
- auto input = MakeTensor(inputTensorInfo, std::vector( - QuantizedVector(qScale, qOffset, inputData))); + auto input = MakeTensor(inputTensorInfo, armnnUtils::QuantizedVector(inputData, qScale, qOffset)); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); @@ -111,8 +110,7 @@ LayerTestResult SimpleSoftmaxBaseTestImpl( CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get()); - std::vector expectedOutput = std::vector( - QuantizedVector(qScale, qOffset, outputData)); + std::vector expectedOutput = armnnUtils::QuantizedVector(outputData, qScale, qOffset); ret.outputExpected = MakeTensor(outputTensorInfo, expectedOutput); return ret; diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp index 094ed23..f815604 100644 --- a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp @@ -6,6 +6,7 @@ #include "SpaceToBatchNdTestImpl.hpp" #include +#include #include #include @@ -55,10 +56,12 @@ LayerTestResult SpaceToBatchNdTestImpl( outputTensorInfo.SetQuantizationOffset(qOffset); } - boost::multi_array input = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, inputData)); + boost::multi_array input = MakeTensor(inputTensorInfo, + armnnUtils::QuantizedVector(inputData, qScale, qOffset)); LayerTestResult ret(outputTensorInfo); - ret.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, outputExpectedData)); + ret.outputExpected = MakeTensor(outputTensorInfo, + armnnUtils::QuantizedVector(outputExpectedData, qScale, qOffset)); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp index 48e157d..0541323 100644 --- a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp @@ -6,6 +6,7 @@ #include "SpaceToDepthTestImpl.hpp" #include +#include #include #include @@ -56,10 +57,12 @@ LayerTestResult SpaceToDepthTestImpl( outputTensorInfo.SetQuantizationOffset(qOffset); } - boost::multi_array input = MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, inputData)); + boost::multi_array input = MakeTensor(inputTensorInfo, + armnnUtils::QuantizedVector(inputData, qScale, qOffset)); LayerTestResult ret(outputTensorInfo); - ret.outputExpected = MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, outputExpectedData)); + ret.outputExpected = MakeTensor(outputTensorInfo, + armnnUtils::QuantizedVector(outputExpectedData, qScale, qOffset)); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); diff --git a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp index 1716091..7aebdd0 100644 --- a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp @@ -5,11 +5,11 @@ #include "SplitterTestImpl.hpp" +#include #include #include -#include 
#include #include @@ -80,7 +80,7 @@ std::vector> SplitterTestCommon( LayerTestResult ret4(outputTensorInfo4); auto input = MakeTensor(inputTensorInfo, std::vector( - QuantizedVector(qScale, qOffset, { + armnnUtils::QuantizedVector({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, @@ -101,24 +101,26 @@ std::vector> SplitterTestCommon( 76.0f, 77.0f, 78.0f, 79.0f, 80.0f, 81.0f, 82.0f, 83.0f, 84.0f, 85.0f, 86.0f, 87.0f, 88.0f, 89.0f, 90.0f, - }) + }, + qScale, qOffset) )); // Channel 0 of the original input. ret1.outputExpected = MakeTensor(outputTensorInfo1, std::vector( - QuantizedVector(qScale, qOffset, { + armnnUtils::QuantizedVector({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, - }) + }, + qScale, qOffset) )); // Channel 1 & 2 of the original input. ret2.outputExpected = MakeTensor(outputTensorInfo2, std::vector( - QuantizedVector(qScale, qOffset, { + armnnUtils::QuantizedVector({ 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, @@ -132,31 +134,34 @@ std::vector> SplitterTestCommon( 76.0f, 77.0f, 78.0f, 79.0f, 80.0f, 81.0f, 82.0f, 83.0f, 84.0f, 85.0f, 86.0f, 87.0f, 88.0f, 89.0f, 90.0f, - }) + }, + qScale, qOffset) )); // Channel 0 of return 2 (i.e. channels 1 and 2 of the original input). ret3.outputExpected = MakeTensor(outputTensorInfo3, std::vector( - QuantizedVector(qScale, qOffset, { + armnnUtils::QuantizedVector({ 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0f, - }) + }, + qScale, qOffset) )); // Channel 1 of return 2. 
ret4.outputExpected = MakeTensor(outputTensorInfo4, std::vector( - QuantizedVector(qScale, qOffset, { + armnnUtils::QuantizedVector({ 61.0f, 62.0f, 63.0f, 64.0f, 65.0f, 66.0f, 67.0f, 68.0f, 69.0f, 70.0f, 71.0f, 72.0f, 73.0f, 74.0f, 75.0f, 76.0f, 77.0f, 78.0f, 79.0f, 80.0f, 81.0f, 82.0f, 83.0f, 84.0f, 85.0f, 86.0f, 87.0f, 88.0f, 89.0f, 90.0f, - }) + }, + qScale, qOffset) )); // NOTE: as a corollary of the splitting of x and y restriction the x and y values of the view origins @@ -253,29 +258,31 @@ LayerTestResult CopyViaSplitterTestImpl( float qScale, int32_t qOffset) { const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, ArmnnType, qScale, qOffset); - auto input = MakeTensor(tensorInfo, QuantizedVector(qScale, qOffset, - { - 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, - 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, - 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, - 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, - 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, - 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, - - 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, - 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, - 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, - 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, - 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, - 56.0f, 57.0f, 58.0f, 59.0f, 60.0f, - - 61.0f, 62.0f, 63.0f, 64.0f, 65.0f, - 66.0f, 67.0f, 68.0f, 69.0f, 70.0f, - 71.0f, 72.0f, 73.0f, 74.0f, 75.0f, - 76.0f, 77.0f, 78.0f, 79.0f, 80.0f, - 81.0f, 82.0f, 83.0f, 84.0f, 85.0f, - 86.0f, 87.0f, 88.0f, 89.0f, 90.0f, - })); + auto input = MakeTensor( + tensorInfo, + armnnUtils::QuantizedVector({ + 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, + 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, + 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, + 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, + 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, + 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, + + 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, + 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, + 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, + 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, + 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, + 56.0f, 57.0f, 58.0f, 59.0f, 60.0f, + + 61.0f, 62.0f, 63.0f, 64.0f, 65.0f, + 66.0f, 67.0f, 68.0f, 69.0f, 70.0f, + 71.0f, 72.0f, 73.0f, 74.0f, 75.0f, + 76.0f, 77.0f, 78.0f, 79.0f, 80.0f, + 81.0f, 82.0f, 83.0f, 84.0f, 85.0f, + 86.0f, 87.0f, 88.0f, 89.0f, 90.0f, + }, + qScale, qOffset)); std::vector origin = { 0, 0, 0 }; armnn::SplitterQueueDescriptor::ViewOrigin window(origin); diff --git a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp index b32e622..515b5a0 100644 --- a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp +++ b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp @@ -5,6 +5,7 @@ #include "StridedSliceTestImpl.hpp" +#include #include #include @@ -39,11 +40,11 @@ LayerTestResult StridedSliceTestImpl( } boost::multi_array input = - MakeTensor(inputTensorInfo, QuantizedVector(qScale, qOffset, inputData)); + MakeTensor(inputTensorInfo, armnnUtils::QuantizedVector(inputData, qScale, qOffset)); LayerTestResult ret(outputTensorInfo); ret.outputExpected = - MakeTensor(outputTensorInfo, QuantizedVector(qScale, qOffset, outputExpectedData)); + MakeTensor(outputTensorInfo, armnnUtils::QuantizedVector(outputExpectedData, qScale, qOffset)); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); diff --git a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.hpp index 7391f9c..a2b477c 100644 --- a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.hpp +++ 
b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.hpp @@ -8,12 +8,12 @@ #include #include +#include #include #include #include -#include #include #include @@ -146,14 +146,16 @@ LayerTestResult TransposeConvolution2dTest( TensorData input = { inputInfo, - QuantizedVector(inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(), inputData) + armnnUtils::QuantizedVector(inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset()) }; // set up weights TensorData weights = { weightsInfo, - QuantizedVector(weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(), weightsData) + armnnUtils::QuantizedVector(weightsData, + weightsInfo.GetQuantizationScale(), + weightsInfo.GetQuantizationOffset()) }; // set up biases @@ -164,7 +166,9 @@ LayerTestResult TransposeConvolution2dTest( TensorData biases = { biasesInfo, - QuantizedVector(biasesInfo.GetQuantizationScale(), biasesInfo.GetQuantizationOffset(), biasesData) + armnnUtils::QuantizedVector(biasesData, + biasesInfo.GetQuantizationScale(), + biasesInfo.GetQuantizationOffset()) }; optionalBiases = Optional>(biases); @@ -186,9 +190,9 @@ LayerTestResult TransposeConvolution2dTest( LayerTestResult testResult(outputInfo); testResult.output = MakeTensor(outputInfo, output.second); testResult.outputExpected = MakeTensor(outputInfo, - QuantizedVector(outputInfo.GetQuantizationScale(), - outputInfo.GetQuantizationOffset(), - expectedOutputData)); + armnnUtils::QuantizedVector(expectedOutputData, + outputInfo.GetQuantizationScale(), + outputInfo.GetQuantizationOffset())); return testResult; } diff --git a/src/backends/cl/test/Fp16SupportTest.cpp b/src/backends/cl/test/Fp16SupportTest.cpp index dac1ebc..f117c92 100644 --- a/src/backends/cl/test/Fp16SupportTest.cpp +++ b/src/backends/cl/test/Fp16SupportTest.cpp @@ -12,7 +12,6 @@ #include #include #include -#include #include #include diff --git a/src/backends/cl/test/OpenClTimerTest.cpp b/src/backends/cl/test/OpenClTimerTest.cpp index 1eeb9ed..13620c4 100644 --- a/src/backends/cl/test/OpenClTimerTest.cpp +++ b/src/backends/cl/test/OpenClTimerTest.cpp @@ -51,40 +51,27 @@ BOOST_AUTO_TEST_CASE(OpenClTimerBatchNorm) const unsigned int height = 3; const unsigned int channels = 2; const unsigned int num = 1; - int32_t qOffset = 0; - float qScale = 0.f; - TensorInfo inputTensorInfo({num, channels, height, width}, DataType::Float32); + TensorInfo inputTensorInfo( {num, channels, height, width}, DataType::Float32); TensorInfo outputTensorInfo({num, channels, height, width}, DataType::Float32); TensorInfo tensorInfo({channels}, DataType::Float32); - // Set quantization parameters if the requested type is a quantized type. 
- if(IsQuantizedType()) - { - inputTensorInfo.SetQuantizationScale(qScale); - inputTensorInfo.SetQuantizationOffset(qOffset); - outputTensorInfo.SetQuantizationScale(qScale); - outputTensorInfo.SetQuantizationOffset(qOffset); - tensorInfo.SetQuantizationScale(qScale); - tensorInfo.SetQuantizationOffset(qOffset); - } - auto input = MakeTensor(inputTensorInfo, - QuantizedVector(qScale, qOffset, - { - 1.f, 4.f, - 4.f, 2.f, - 1.f, 6.f, - - 1.f, 1.f, - 4.f, 1.f, - -2.f, 4.f - })); + { + 1.f, 4.f, + 4.f, 2.f, + 1.f, 6.f, + + 1.f, 1.f, + 4.f, 1.f, + -2.f, 4.f + }); + // these values are per-channel of the input - auto mean = MakeTensor(tensorInfo, QuantizedVector(qScale, qOffset, {3, -2})); - auto variance = MakeTensor(tensorInfo, QuantizedVector(qScale, qOffset, {4, 9})); - auto beta = MakeTensor(tensorInfo, QuantizedVector(qScale, qOffset, {3, 2})); - auto gamma = MakeTensor(tensorInfo, QuantizedVector(qScale, qOffset, {2, 1})); + auto mean = MakeTensor(tensorInfo, { 3.f, -2.f }); + auto variance = MakeTensor(tensorInfo, { 4.f, 9.f }); + auto beta = MakeTensor(tensorInfo, { 3.f, 2.f }); + auto gamma = MakeTensor(tensorInfo, { 2.f, 1.f }); std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); diff --git a/tests/DeepSpeechV1Database.hpp b/tests/DeepSpeechV1Database.hpp index 037c810..182f373 100644 --- a/tests/DeepSpeechV1Database.hpp +++ b/tests/DeepSpeechV1Database.hpp @@ -11,7 +11,6 @@ #include #include -#include #include #include @@ -202,4 +201,3 @@ std::unique_ptr DeepSpeechV1Database::GetTestCaseData( } } // anonymous namespace - diff --git a/tests/MobileNetSsdDatabase.hpp b/tests/MobileNetSsdDatabase.hpp index 349d1ad..1a99ed7 100644 --- a/tests/MobileNetSsdDatabase.hpp +++ b/tests/MobileNetSsdDatabase.hpp @@ -2,24 +2,23 @@ // Copyright © 2017 Arm Ltd. All rights reserved. // SPDX-License-Identifier: MIT // + #pragma once +#include "InferenceTestImage.hpp" #include "ObjectDetectionCommon.hpp" -#include -#include -#include +#include #include -#include #include #include #include +#include #include - -#include "InferenceTestImage.hpp" +#include namespace { @@ -97,7 +96,7 @@ std::unique_ptr MobileNetSsdDatabase::GetTestCaseData( // Get image data as a vector of floats std::vector floatImageData = GetImageDataAsNormalizedFloats(ImageChannelLayout::Rgb, image); - imageData = QuantizedVector(m_Scale, m_Offset, floatImageData); + imageData = armnnUtils::QuantizedVector(floatImageData, m_Scale, m_Offset); } catch (const InferenceTestImageException& e) { -- 2.7.4
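
The parameter reordering above is mechanical at every call site: the value list becomes the first argument to QuantizedVector, with qScale and qOffset trailing. As a reading aid, here is a minimal self-contained sketch of a helper with that shape. It is illustrative only, not the shipped armnnUtils implementation; in particular, the default values for qScale and qOffset and the pass-through behaviour for floating-point element types are assumptions of the sketch.

// QuantizedVectorSketch.cpp -- illustrative sketch, not Arm NN source.
#include <cmath>
#include <cstdint>
#include <type_traits>
#include <vector>

template <typename T>
std::vector<T> QuantizedVector(const std::vector<float>& values,
                               float qScale = 1.0f,  // assumed default
                               int32_t qOffset = 0)  // assumed default
{
    std::vector<T> result;
    result.reserve(values.size());
    for (float v : values)
    {
        if constexpr (std::is_floating_point_v<T>)
        {
            // Non-quantized element types pass through unchanged, so
            // float-typed tests can omit qScale/qOffset entirely.
            result.push_back(static_cast<T>(v));
        }
        else
        {
            // Affine quantization: q = round(v / scale) + offset.
            // A production helper would also clamp to T's numeric limits.
            result.push_back(static_cast<T>(std::round(v / qScale) + static_cast<float>(qOffset)));
        }
    }
    return result;
}

int main()
{
    // New argument order (data first); the old order was (qScale, qOffset, data).
    auto q = QuantizedVector<uint8_t>({ 1.0f, 2.0f, 3.0f }, 0.5f, 10);
    // Trailing defaults keep non-quantized call sites short.
    auto f = QuantizedVector<float>({ 1.0f, 2.0f, 3.0f });
    return static_cast<int>(q[0]) + static_cast<int>(f[0]);
}

Every hunk in this patch is an instance of the same substitution: QuantizedVector(qScale, qOffset, data) becomes armnnUtils::QuantizedVector(data, qScale, qOffset), leaving the surrounding test logic untouched.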