IVGCVSW-2144: Adding TensorUtils class
author Nina Drozd <nina.drozd@arm.com>
Mon, 19 Nov 2018 13:03:36 +0000 (13:03 +0000)
committer Aron Virginas-Tar <aron.virginas-tar@arm.com>
Tue, 20 Nov 2018 13:04:34 +0000 (13:04 +0000)
* helper methods for creating TensorShape and TensorInfo objects

Change-Id: I371fc7aea08ca6bbb9c205a143ce36e8353a1c48

Android.mk
CMakeLists.txt
src/armnn/test/TensorHelpers.hpp
src/armnnUtils/TensorUtils.cpp [new file with mode: 0644]
src/armnnUtils/TensorUtils.hpp [new file with mode: 0644]
src/backends/backendsCommon/test/Conv2dTestImpl.hpp
src/backends/backendsCommon/test/LayerTests.cpp
src/backends/backendsCommon/test/Pooling2dTestImpl.hpp

index af585af..988d6f0 100644 (file)
@@ -76,6 +76,7 @@ LOCAL_SRC_FILES := \
         src/armnnUtils/Logging.cpp \
         src/armnnUtils/Permute.cpp \
         src/armnnUtils/ParserHelper.cpp \
+        src/armnnUtils/TensorUtils.cpp \
         src/armnn/layers/ActivationLayer.cpp \
         src/armnn/layers/AdditionLayer.cpp \
         src/armnn/layers/ArithmeticBaseLayer.cpp \
index 4f13496..c6af71b 100644 (file)
@@ -46,6 +46,8 @@ list(APPEND armnnUtils_sources
     src/armnnUtils/VerificationHelpers.cpp
     src/armnnUtils/ParserHelper.hpp
     src/armnnUtils/ParserHelper.cpp
+    src/armnnUtils/TensorUtils.hpp
+    src/armnnUtils/TensorUtils.cpp
     )
 if(BUILD_TF_PARSER OR BUILD_CAFFE_PARSER)
     list(APPEND armnnUtils_sources
index 238232a..06818d3 100644 (file)
@@ -210,22 +210,3 @@ boost::multi_array<T, n> MakeRandomTensor(const armnn::TensorInfo& tensorInfo,
     int32_t qOffset = tensorInfo.GetQuantizationOffset();
     return MakeTensor<T, n>(tensorInfo, QuantizedVector<T>(qScale, qOffset, init));
 }
-
-template<typename T>
-armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches,
-                                unsigned int numberOfChannels,
-                                unsigned int height,
-                                unsigned int width,
-                                const armnn::DataLayout dataLayout)
-{
-    switch (dataLayout)
-    {
-        case armnn::DataLayout::NCHW:
-            return armnn::TensorInfo({numberOfBatches, numberOfChannels, height, width}, armnn::GetDataType<T>());
-        case armnn::DataLayout::NHWC:
-            return armnn::TensorInfo({numberOfBatches, height, width, numberOfChannels}, armnn::GetDataType<T>());
-        default:
-            throw armnn::InvalidArgumentException("unknown data layout ["
-                                                  + std::to_string(static_cast<int>(dataLayout)) + "]");
-    }
-}
diff --git a/src/armnnUtils/TensorUtils.cpp b/src/armnnUtils/TensorUtils.cpp
new file mode 100644 (file)
index 0000000..2c25eec
--- /dev/null
@@ -0,0 +1,31 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "TensorUtils.hpp"
+
+namespace armnnUtils
+{
+
+armnn::TensorShape GetTensorShape(unsigned int numberOfBatches,
+                                  unsigned int numberOfChannels,
+                                  unsigned int height,
+                                  unsigned int width,
+                                  const armnn::DataLayout dataLayout)
+{
+    switch (dataLayout)
+    {
+        case armnn::DataLayout::NCHW:
+            return armnn::TensorShape({numberOfBatches, numberOfChannels, height, width});
+        case armnn::DataLayout::NHWC:
+            return armnn::TensorShape({numberOfBatches, height, width, numberOfChannels});
+        default:
+            throw armnn::InvalidArgumentException("Unknown data layout ["
+                                                  + std::to_string(static_cast<int>(dataLayout)) +
+                                                  "]", CHECK_LOCATION());
+    }
+}
+
+}
+
diff --git a/src/armnnUtils/TensorUtils.hpp b/src/armnnUtils/TensorUtils.hpp
new file mode 100644 (file)
index 0000000..6461b37
--- /dev/null
@@ -0,0 +1,37 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/TypesUtils.hpp>
+
+namespace armnnUtils
+{
+armnn::TensorShape GetTensorShape(unsigned int numberOfBatches,
+                                  unsigned int numberOfChannels,
+                                  unsigned int height,
+                                  unsigned int width,
+                                  const armnn::DataLayout dataLayout);
+
+template<typename T>
+armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches,
+                                unsigned int numberOfChannels,
+                                unsigned int height,
+                                unsigned int width,
+                                const armnn::DataLayout dataLayout)
+{
+    switch (dataLayout)
+    {
+        case armnn::DataLayout::NCHW:
+            return armnn::TensorInfo({numberOfBatches, numberOfChannels, height, width}, armnn::GetDataType<T>());
+        case armnn::DataLayout::NHWC:
+            return armnn::TensorInfo({numberOfBatches, height, width, numberOfChannels}, armnn::GetDataType<T>());
+        default:
+            throw armnn::InvalidArgumentException("Unknown data layout ["
+                                                  + std::to_string(static_cast<int>(dataLayout)) +
+                                                  "]", CHECK_LOCATION());
+    }
+}
+} // namespace armnnUtils
\ No newline at end of file
index 6685a8e..d137c80 100755 (executable)
@@ -5,6 +5,7 @@
 #pragma once
 
 #include "WorkloadTestUtils.hpp"
+#include "TensorUtils.hpp"
 
 #include <string>
 #include <armnn/ArmNN.hpp>
@@ -108,10 +109,12 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
 
 
     // Note these tensors will use two (identical) batches.
-    armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(2*inputNum, inputChannels, inputHeight, inputWidth, layout);
-    armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(
-            2*outputNum, outputChannels, outputHeight, outputWidth, layout);
-    armnn::TensorInfo kernelDesc = GetTensorInfo<T>(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout);
+    armnn::TensorInfo inputTensorInfo =
+            armnnUtils::GetTensorInfo<T>(2*inputNum, inputChannels, inputHeight, inputWidth, layout);
+    armnn::TensorInfo outputTensorInfo =
+            armnnUtils::GetTensorInfo<T>(2*outputNum, outputChannels, outputHeight, outputWidth, layout);
+    armnn::TensorInfo kernelDesc =
+            armnnUtils::GetTensorInfo<T>(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout);
     armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
 
     // Set quantization parameters if the requested type is a quantized type.
@@ -354,9 +357,12 @@ LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
     BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
 
     // Creates the tensors.
-    armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(inputNum, inputChannels, inputHeight, inputWidth, layout);
-    armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(outputNum, outputChannels, outputHeight, outputWidth, layout);
-    armnn::TensorInfo kernelDesc = GetTensorInfo<T>(kernelChanMul, kernelChannels, kernelHeight, kernelWidth, layout);
+    armnn::TensorInfo inputTensorInfo =
+            armnnUtils::GetTensorInfo<T>(inputNum, inputChannels, inputHeight, inputWidth, layout);
+    armnn::TensorInfo outputTensorInfo =
+            armnnUtils::GetTensorInfo<T>(outputNum, outputChannels, outputHeight, outputWidth, layout);
+    armnn::TensorInfo kernelDesc =
+            armnnUtils::GetTensorInfo<T>(kernelChanMul, kernelChannels, kernelHeight, kernelWidth, layout);
     armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
 
     // Set quantization parameters if the requested type is a quantized type.
@@ -483,9 +489,11 @@ LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
     unsigned int outputChannels = kernelChannels;
     unsigned int outputNum = inputNum;
 
-    armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(inputNum, inputChannels, inputHeight, inputWidth, layout);
-    armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(outputNum, outputChannels, outputHeight, outputWidth, layout);
-    armnn::TensorInfo kernelDesc = GetTensorInfo<T>(1, outputChannels, kernelHeight, kernelWidth, layout);
+    armnn::TensorInfo inputTensorInfo =
+            armnnUtils::GetTensorInfo<T>(inputNum, inputChannels, inputHeight, inputWidth, layout);
+    armnn::TensorInfo outputTensorInfo =
+            armnnUtils::GetTensorInfo<T>(outputNum, outputChannels, outputHeight, outputWidth, layout);
+    armnn::TensorInfo kernelDesc = armnnUtils::GetTensorInfo<T>(1, outputChannels, kernelHeight, kernelWidth, layout);
     armnn::TensorInfo biasDesc({ outputChannels }, armnn::GetDataType<B>());
 
     // Set quantization parameters if the requested type is a quantized type.
@@ -629,11 +637,11 @@ LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
     unsigned int outputChannels  = inputChannels * depthMultiplier;
     unsigned int outputBatchSize = inputBatchSize;
 
-    armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(
+    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<T>(
             inputBatchSize, inputChannels, inputHeight, inputWidth, layout);
-    armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(
+    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(
             outputBatchSize, outputChannels, outputHeight, outputWidth, layout);
-    armnn::TensorInfo kernelDesc = GetTensorInfo<T>(
+    armnn::TensorInfo kernelDesc = armnnUtils::GetTensorInfo<T>(
             depthMultiplier, inputChannels, kernelHeight, kernelWidth, layout);
     armnn::TensorInfo biasDesc({outputChannels}, armnn::GetDataType<B>());
 
index ecd09ca..f10d14e 100755 (executable)
@@ -4,6 +4,7 @@
 //
 #include "LayerTests.hpp"
 #include "WorkloadTestUtils.hpp"
+#include "TensorUtils.hpp"
 
 #include "test/TensorHelpers.hpp"
 #include "TensorCopyUtils.hpp"
@@ -68,24 +69,6 @@ static std::vector<float> ConvInput3x8x16({
 // 2-channel bias used by a number of Conv2d tests.
 static std::vector<float> Bias2({0, 2});
 
-armnn::TensorShape GetTestTensorShape(unsigned int numberOfBatches,
-                                      unsigned int numberOfChannels,
-                                      unsigned int height,
-                                      unsigned int width,
-                                      const armnn::DataLayoutIndexed& dataLayout)
-{
-    switch (dataLayout.GetDataLayout())
-    {
-        case armnn::DataLayout::NCHW:
-            return armnn::TensorShape({numberOfBatches, numberOfChannels, height, width});
-        case armnn::DataLayout::NHWC:
-            return armnn::TensorShape({numberOfBatches, height, width, numberOfChannels});
-        default:
-            throw armnn::InvalidArgumentException("unknown data layout ["
-                                                  + std::to_string(static_cast<int>(dataLayout.GetDataLayout())) + "]");
-    }
-}
-
 // Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
 template<typename T>
 boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
@@ -3859,8 +3842,8 @@ LayerTestResult<float, 4> ResizeBilinearNopTest(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout)
 {
-    const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
-    const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
+    const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
+    const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
 
     std::vector<float> inputData({
         1.0f, 2.0f, 3.0f, 4.0f,
@@ -3913,8 +3896,8 @@ LayerTestResult<float, 4> SimpleResizeBilinearTest(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout)
 {
-    const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
-    const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 1, 1, dataLayout);
+    const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
+    const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 1, 1, dataLayout);
 
     std::vector<float> inputData({
           1.0f, 255.0f,
@@ -3979,8 +3962,8 @@ LayerTestResult<float, 4> ResizeBilinearSqMinTest(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout)
 {
-    const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
-    const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
+    const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
+    const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
 
     std::vector<float> inputData({
         1.0f, 2.0f, 3.0f, 4.0f,
@@ -4045,8 +4028,8 @@ LayerTestResult<float, 4> ResizeBilinearMinTest(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout)
 {
-    const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
-    const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 2, 3, dataLayout);
+    const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
+    const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 3, dataLayout);
 
     std::vector<float> inputData({
           1.0f,   2.0f,   3.0f,   5.0f,   8.0f,
@@ -4109,8 +4092,8 @@ LayerTestResult<float, 4> ResizeBilinearMagTest(
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     const armnn::DataLayout dataLayout)
 {
-    const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 3, 2, dataLayout);
-    const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
+    const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 2, dataLayout);
+    const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
 
     std::vector<float> inputData({
           1.0f,   2.0f,
@@ -4741,7 +4724,7 @@ LayerTestResult<float, 4> L2Normalization1dTest(
     unsigned int width = 1;
 
 
-    const armnn::TensorShape inputOutputShape = GetTestTensorShape(
+    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
             numberOfBatches, numberOfChannels, height, width, layout);
     std::vector<float> inputValues
     {
@@ -4810,7 +4793,7 @@ LayerTestResult<float, 4> L2Normalization2dTest(
     unsigned int height = 1;
     unsigned int width = 5;
 
-    const armnn::TensorShape inputOutputShape = GetTestTensorShape(
+    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
             numberOfBatches, numberOfChannels, height, width, layout);
     std::vector<float> inputValues
     {
@@ -4855,7 +4838,7 @@ LayerTestResult<float, 4> L2Normalization3dTest(
     unsigned int height = 4;
     unsigned int width = 3;
 
-    const armnn::TensorShape inputOutputShape = GetTestTensorShape(
+    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
             numberOfBatches, numberOfChannels, height, width, layout);
     std::vector<float> inputValues
     {
@@ -4920,7 +4903,7 @@ LayerTestResult<float, 4> L2Normalization4dTest(
     unsigned int height = 4;
     unsigned int width = 3;
 
-    const armnn::TensorShape inputOutputShape = GetTestTensorShape(
+    const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
             numberOfBatches, numberOfChannels, height, width, layout);
     std::vector<float> inputValues
     {
index 9050fc6..0f33ac0 100644 (file)
@@ -5,6 +5,7 @@
 #pragma once
 
 #include "WorkloadTestUtils.hpp"
+#include "TensorUtils.hpp"
 
 #include "QuantizeHelper.hpp"
 
@@ -50,10 +51,10 @@ LayerTestResult<T, 4> SimplePooling2dTestImpl(
     unsigned int outputChannels  = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
     unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
 
-    armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(inputBatchSize, inputChannels, inputHeight,
-                                                         inputWidth, dataLayout);
-    armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(outputBatchSize, outputChannels, outputHeight,
-                                                          outputWidth, dataLayout);
+    armnn::TensorInfo inputTensorInfo  = armnnUtils::GetTensorInfo<T>(inputBatchSize, inputChannels, inputHeight,
+                                                                      inputWidth, dataLayout);
+    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(outputBatchSize, outputChannels, outputHeight,
+                                                                      outputWidth, dataLayout);
 
     // Set quantization parameters if the requested type is a quantized type.
     if(armnn::IsQuantizedType<T>())
@@ -252,8 +253,8 @@ LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
     descriptor.m_DataLayout = dataLayout;
 
-    armnn::TensorInfo inputTensorInfo  = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
-    armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
+    armnn::TensorInfo inputTensorInfo  = armnnUtils::GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
+    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
 
     // Set quantization parameters if the requested type is a quantized type.
     if(armnn::IsQuantizedType<T>())
@@ -321,8 +322,8 @@ LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
     descriptor.m_DataLayout = dataLayout;
 
-    armnn::TensorInfo inputTensorInfo  = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
-    armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
+    armnn::TensorInfo inputTensorInfo  = armnnUtils::GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
+    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
 
     // Set quantization parameters if the requested type is a quantized type.
     if(armnn::IsQuantizedType<T>())
@@ -441,8 +442,8 @@ LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
     descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
     descriptor.m_DataLayout = dataLayout;
 
-    armnn::TensorInfo inputTensorInfo  = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
-    armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
+    armnn::TensorInfo inputTensorInfo  = armnnUtils::GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
+    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
 
     std::vector<T> inputData(
         QuantizedVector<T>(qScale, qOffset, {