IVGCVSW-5383 TfLiteDelegate: Implement Pad and PadV2 operators
author    Narumol Prangnawarat <narumol.prangnawarat@arm.com>
          Thu, 17 Dec 2020 12:17:58 +0000 (12:17 +0000)
committer Jim Flynn <jim.flynn@arm.com>
          Thu, 17 Dec 2020 15:39:37 +0000 (15:39 +0000)
 * Add support for the Pad and PadV2 operators to the Arm NN delegate
 * Add a dimension check to the CompareOutputData test utility
 * Add unit tests

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I9d00eb08f71e791498908fcbdb9de561e1c01aef

delegate/CMakeLists.txt
delegate/TensorFlowLiteDelegateSupport.md
delegate/src/Pad.hpp
delegate/src/test/PadTest.cpp [new file with mode: 0644]
delegate/src/test/PadTestHelper.hpp [new file with mode: 0644]
delegate/src/test/TestUtils.hpp

diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 9be2f44..c052be2 100644
@@ -117,6 +117,8 @@ if(BUILD_UNIT_TESTS)
         src/test/GatherTestHelper.hpp
         src/test/LogicalTest.cpp
         src/test/LogicalTestHelper.hpp
+        src/test/PadTest.cpp
+        src/test/PadTestHelper.hpp
         src/test/Pooling2dTest.cpp
         src/test/Pooling2dTestHelper.hpp
         src/test/QuantizationTest.cpp
diff --git a/delegate/TensorFlowLiteDelegateSupport.md b/delegate/TensorFlowLiteDelegateSupport.md
index 7531834..e506b85 100644
@@ -68,6 +68,8 @@ The Arm NN SDK TensorFlow Lite delegate currently supports the following operators
 
 * NOT_EQUAL
 
+* PAD
+
 * QUANTIZE
 
 * RESHAPE
diff --git a/delegate/src/Pad.hpp b/delegate/src/Pad.hpp
index 2134232..6149819 100644
@@ -5,8 +5,6 @@
 
 #pragma once
 
-#include <armnn/utility/IgnoreUnused.hpp>
-
 #include <tensorflow/lite/builtin_ops.h>
 #include <tensorflow/lite/c/builtin_op_data.h>
 #include <tensorflow/lite/c/common.h>
@@ -19,15 +17,121 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
                               TfLiteContext* tfLiteContext,
                               TfLiteNode* tfLiteNode,
                               int nodeIndex,
-                              int32_t padOperatorCode)
+                              int32_t tfLitePadOperatorCode)
 {
-    armnn::IgnoreUnused(delegateData,
-                        tfLiteContext,
-                        tfLiteNode,
-                        nodeIndex,
-                        padOperatorCode);
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    switch(tfLitePadOperatorCode)
+    {
+        case kTfLiteBuiltinPad:
+            TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+            break;
+        case kTfLiteBuiltinPadv2:
+            TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
+            break;
+        default:
+            return kTfLiteError;
+    }
+
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+    const TfLiteTensor& tfLitepaddingTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+
+    if (IsDynamicTensor(tfLiteInputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            tfLitePadOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+    if (IsDynamicTensor(tfLiteOutputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+            tfLitePadOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& paddingTensorInfo = GetTensorInfoForTfLiteTensor(tfLitepaddingTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+    // Get the padding data from the input tensor
+    auto* paddingData = tflite::GetTensorData<int32_t>(&tfLitepaddingTensor);
+
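+    // The padding tensor holds one [before, after] pair per input dimension;
+    // convert each pair into a PadList entry of the descriptor.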
+    size_t step = 2;
+    armnn::PadDescriptor descriptor;
+    for (unsigned int i = 0; i < paddingTensorInfo.GetNumElements() / step; ++i)
+    {
+        descriptor.m_PadList.emplace_back(paddingData[i * step], paddingData[i * step + 1]);
+    }
+
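+    // PAD on quantized data pads with the zero point (the quantized representation of 0.0f),
+    // so take the pad value from the input's quantization offset.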
+    if (tfLitePadOperatorCode == kTfLiteBuiltinPad && inputTensorInfo.IsQuantized())
+    {
+        descriptor.m_PadValue = inputTensorInfo.GetQuantizationOffset();
+    }
+    else if (tfLitePadOperatorCode == kTfLiteBuiltinPadv2)
+    {
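+        // PADV2 supplies the padding value as a third input tensor,
+        // which is expected to hold exactly one element.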
+        const TfLiteTensor& tfLitepaddingValue = tfLiteTensors[tfLiteNode->inputs->data[2]];
+        armnn::TensorInfo paddingValueTensorInfo = GetTensorInfoForTfLiteTensor(tfLitepaddingValue);
+        if (paddingValueTensorInfo.GetNumElements() != 1)
+        {
+            TF_LITE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnDelegate: Multiple padding values are not supported in operator #%d node #%d: ",
+                tfLitePadOperatorCode, nodeIndex);
+            return kTfLiteError;
+        }
+        // Get the padding value from the input tensor
+        switch (tfLitepaddingValue.type)
+        {
+            case kTfLiteFloat32:
+                descriptor.m_PadValue = tflite::GetTensorData<float>(&tfLitepaddingValue)[0];
+                break;
+            case kTfLiteUInt8:
+                descriptor.m_PadValue = tflite::GetTensorData<uint8_t>(&tfLitepaddingValue)[0];
+                break;
+            case kTfLiteInt8:
+                descriptor.m_PadValue = tflite::GetTensorData<int8_t>(&tfLitepaddingValue)[0];
+                break;
+            case kTfLiteInt16:
+                descriptor.m_PadValue = tflite::GetTensorData<int16_t>(&tfLitepaddingValue)[0];
+                break;
+            default:
+                TF_LITE_MAYBE_KERNEL_LOG(
+                    tfLiteContext,
+                    "TfLiteArmnnDelegate: Padding value datatype is not supported in operator #%d node #%d: ",
+                    tfLitePadOperatorCode, nodeIndex);
+                return kTfLiteError;
+        }
+    }
+
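+    // No network to build yet: this is the support-checking phase, so just ask
+    // the backends whether a Pad layer with this descriptor is supported.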
+    if (!delegateData.m_Network)
+    {
+        bool isSupported = false;
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsPadSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   inputTensorInfo,
+                                   outputTensorInfo,
+                                   descriptor);
+
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
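+    // Add the Pad layer to the network, set the output tensor info and connect the slots.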
+    armnn::IConnectableLayer* padLayer = delegateData.m_Network->AddPadLayer(descriptor);
+    ARMNN_ASSERT(padLayer != nullptr);
+
+    armnn::IOutputSlot& outputSlot = padLayer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
 
-    return kTfLiteError;
+    return Connect(padLayer, tfLiteNode, delegateData);
 }
 
 } // namespace armnnDelegate
diff --git a/delegate/src/test/PadTest.cpp b/delegate/src/test/PadTest.cpp
new file mode 100644
index 0000000..4721b68
--- /dev/null
@@ -0,0 +1,606 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "PadTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void Pad2dTest(std::vector<armnn::BackendId>& backends,
+               tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
+               float pad = 0.0f)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 2, 2, 2 };
+    std::vector<int32_t> outputShape { 3, 5, 6 };
+    std::vector<int32_t> paddingShape { 3, 2 };
+
+    std::vector<float> inputValues = { 0.0f,  4.0f,
+                                       2.0f, -5.0f,
+                                       6.0f,  1.0f,
+                                       5.0f, -2.0f };
+
+    std::vector<float> expectedOutputValues = { pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, 0.0f, 4.0f, pad, pad,
+                                                pad, pad, 2.0f, -5.0f, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, 6.0f, 1.0f, pad, pad,
+                                                pad, pad, 5.0f, -2.0f, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad };
+
+    std::vector<int32_t> paddingDim = { 0, 1, 2, 1, 2, 2 };
+
+    PadTest<float>(padOperatorCode,
+                   ::tflite::TensorType_FLOAT32,
+                   backends,
+                   inputShape,
+                   paddingShape,
+                   outputShape,
+                   inputValues,
+                   paddingDim,
+                   expectedOutputValues,
+                   pad);
+}
+
+void Pad3dTest(std::vector<armnn::BackendId>& backends,
+               tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
+               float pad = 0.0f)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 2, 2, 2 };
+    std::vector<int32_t> outputShape { 3, 5, 6 };
+    std::vector<int32_t> paddingShape { 3, 2 };
+
+    std::vector<float> inputValues = { 0.0f, 4.0f,
+                                       2.0f, 5.0f,
+                                       6.0f, 1.0f,
+                                       5.0f, 2.0f };
+
+    std::vector<float> expectedOutputValues = { pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, 0.0f, 4.0f, pad, pad,
+                                                pad, pad, 2.0f, 5.0f, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, 6.0f, 1.0f, pad, pad,
+                                                pad, pad, 5.0f, 2.0f, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad };
+
+    std::vector<int32_t> paddingDim = { 0, 1, 2, 1, 2, 2 };
+
+    PadTest<float>(padOperatorCode,
+                   ::tflite::TensorType_FLOAT32,
+                   backends,
+                   inputShape,
+                   paddingShape,
+                   outputShape,
+                   inputValues,
+                   paddingDim,
+                   expectedOutputValues,
+                   pad);
+}
+
+void Pad4dTest(std::vector<armnn::BackendId>& backends,
+               tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
+               float pad = 0.0f)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 2, 2, 3, 2 };
+    std::vector<int32_t> outputShape { 4, 5, 7, 4 };
+    std::vector<int32_t> paddingShape { 4, 2 };
+
+    std::vector<float> inputValues = { 0.0f,  1.0f,
+                                       2.0f,  3.0f,
+                                       4.0f,  5.0f,
+
+                                       6.0f,  7.0f,
+                                       8.0f,  9.0f,
+                                       10.0f, 11.0f,
+
+                                       12.0f, 13.0f,
+                                       14.0f, 15.0f,
+                                       16.0f, 17.0f,
+
+                                       18.0f, 19.0f,
+                                       20.0f, 21.0f,
+                                       22.0f, 23.0f };
+
+    std::vector<float> expectedOutputValues = { pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, 0.0f, 1.0f, pad,
+                                                pad, 2.0f, 3.0f, pad,
+                                                pad, 4.0f, 5.0f, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, 6.0f, 7.0f, pad,
+                                                pad, 8.0f, 9.0f, pad,
+                                                pad, 10.0f, 11.0f, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, 12.0f, 13.0f, pad,
+                                                pad, 14.0f, 15.0f, pad,
+                                                pad, 16.0f, 17.0f, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, 18.0f, 19.0f, pad,
+                                                pad, 20.0f, 21.0f, pad,
+                                                pad, 22.0f, 23.0f, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad };
+
+    std::vector<int32_t> paddingDim = { 1, 1, 2, 1, 3, 1, 1, 1 };
+
+    PadTest<float>(padOperatorCode,
+                   ::tflite::TensorType_FLOAT32,
+                   backends,
+                   inputShape,
+                   paddingShape,
+                   outputShape,
+                   inputValues,
+                   paddingDim,
+                   expectedOutputValues,
+                   pad);
+}
+
+void PadInt8Test(std::vector<armnn::BackendId>& backends,
+                 tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
+                 int8_t paddingValue = 0,
+                 int8_t p = 3,
+                 float quantizationScale = -2.0f,
+                 int32_t quantizationOffset = 3)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 2, 2, 2 };
+    std::vector<int32_t> outputShape { 3, 5, 6 };
+    std::vector<int32_t> paddingShape { 3, 2 };
+
+    std::vector<int8_t> inputValues = { 0,  4,
+                                        2, -5,
+                                        6,  1,
+                                        5, -2 };
+
+    std::vector<int8_t> expectedOutputValues = { p, p, p, p, p, p,
+                                                 p, p, p, p, p, p,
+                                                 p, p, 0, 4, p, p,
+                                                 p, p, 2, -5, p, p,
+                                                 p, p, p, p, p, p,
+
+                                                 p, p, p, p, p, p,
+                                                 p, p, p, p, p, p,
+                                                 p, p, 6, 1, p, p,
+                                                 p, p, 5, -2, p, p,
+                                                 p, p, p, p, p, p,
+
+                                                 p, p, p, p, p, p,
+                                                 p, p, p, p, p, p,
+                                                 p, p, p, p, p, p,
+                                                 p, p, p, p, p, p,
+                                                 p, p, p, p, p, p };
+
+    std::vector<int32_t> paddingDim = { 0, 1, 2, 1, 2, 2 };
+
+    PadTest<int8_t>(padOperatorCode,
+                    ::tflite::TensorType_INT8,
+                    backends,
+                    inputShape,
+                    paddingShape,
+                    outputShape,
+                    inputValues,
+                    paddingDim,
+                    expectedOutputValues,
+                    paddingValue,
+                    quantizationScale,
+                    quantizationOffset);
+}
+
+void PadUint8Test(std::vector<armnn::BackendId>& backends,
+                  tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
+                  uint8_t paddingValue = 0,
+                  uint8_t p = 3,
+                  float quantizationScale = -2.0f,
+                  int32_t quantizationOffset = 3)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 2, 2, 2 };
+    std::vector<int32_t> outputShape { 3, 5, 6 };
+    std::vector<int32_t> paddingShape { 3, 2 };
+
+    std::vector<uint8_t> inputValues = { 0, 4,
+                                         2, 5,
+                                         6, 1,
+                                         5, 2 };
+
+    std::vector<uint8_t> expectedOutputValues = { p, p, p, p, p, p,
+                                                  p, p, p, p, p, p,
+                                                  p, p, 0, 4, p, p,
+                                                  p, p, 2, 5, p, p,
+                                                  p, p, p, p, p, p,
+
+                                                  p, p, p, p, p, p,
+                                                  p, p, p, p, p, p,
+                                                  p, p, 6, 1, p, p,
+                                                  p, p, 5, 2, p, p,
+                                                  p, p, p, p, p, p,
+
+                                                  p, p, p, p, p, p,
+                                                  p, p, p, p, p, p,
+                                                  p, p, p, p, p, p,
+                                                  p, p, p, p, p, p,
+                                                  p, p, p, p, p, p };
+
+    std::vector<int32_t> paddingDim = { 0, 1, 2, 1, 2, 2 };
+
+    PadTest<uint8_t>(padOperatorCode,
+                     ::tflite::TensorType_UINT8,
+                     backends,
+                     inputShape,
+                     paddingShape,
+                     outputShape,
+                     inputValues,
+                     paddingDim,
+                     expectedOutputValues,
+                     paddingValue,
+                     quantizationScale,
+                     quantizationOffset);
+}
+
+TEST_SUITE("Pad_CpuRefTests")
+{
+
+TEST_CASE ("Pad2d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    Pad2dTest(backends);
+}
+
+TEST_CASE ("Pad3d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    Pad3dTest(backends);
+}
+
+TEST_CASE ("Pad4d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    Pad4dTest(backends);
+}
+
+TEST_CASE ("Pad_Int8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    PadInt8Test(backends);
+}
+
+TEST_CASE ("Pad_Uint8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    PadUint8Test(backends);
+}
+
+TEST_CASE ("PadV22d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    Pad2dTest(backends, tflite::BuiltinOperator_PADV2, -2.5);
+}
+
+TEST_CASE ("PadV23d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    Pad3dTest(backends, tflite::BuiltinOperator_PADV2, 2.0);
+}
+
+TEST_CASE ("PadV24d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    Pad4dTest(backends, tflite::BuiltinOperator_PADV2, -1.33);
+}
+
+TEST_CASE ("PadV2_Int8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    PadInt8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
+}
+
+TEST_CASE ("PadV2_Uint8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    PadUint8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
+}
+
+} // TEST_SUITE("Pad_CpuRefTests")
+
+TEST_SUITE("Pad_CpuAccTests")
+{
+
+TEST_CASE ("Pad2d_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    Pad2dTest(backends);
+}
+
+TEST_CASE ("Pad3d_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    Pad3dTest(backends);
+}
+
+TEST_CASE ("Pad4d_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    Pad4dTest(backends);
+}
+
+TEST_CASE ("Pad_Int8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    PadInt8Test(backends);
+}
+
+TEST_CASE ("Pad_Uint8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    PadUint8Test(backends);
+}
+
+TEST_CASE ("PadV22d_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    Pad2dTest(backends, tflite::BuiltinOperator_PADV2, -2.5);
+}
+
+TEST_CASE ("PadV23d_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    Pad3dTest(backends, tflite::BuiltinOperator_PADV2, 2.0);
+}
+
+TEST_CASE ("PadV24d_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    Pad4dTest(backends, tflite::BuiltinOperator_PADV2, -1.33);
+}
+
+TEST_CASE ("PadV2_Int8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    PadInt8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
+}
+
+TEST_CASE ("PadV2_Uint8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    PadUint8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
+}
+
+} // TEST_SUITE("Pad_CpuAccTests")
+
+TEST_SUITE("Pad_GpuAccTests")
+{
+
+TEST_CASE ("Pad2d_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    Pad2dTest(backends);
+}
+
+TEST_CASE ("Pad3d_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    Pad3dTest(backends);
+}
+
+TEST_CASE ("Pad4d_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    Pad4dTest(backends);
+}
+
+TEST_CASE ("Pad_Int8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    PadInt8Test(backends);
+}
+
+TEST_CASE ("Pad_Uint8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    PadUint8Test(backends);
+}
+
+TEST_CASE ("PadV22d_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    Pad2dTest(backends, tflite::BuiltinOperator_PADV2, -2.5);
+}
+
+TEST_CASE ("PadV23d_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    Pad3dTest(backends, tflite::BuiltinOperator_PADV2, 2.0);
+}
+
+TEST_CASE ("PadV24d_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    Pad4dTest(backends, tflite::BuiltinOperator_PADV2, -1.33);
+}
+
+TEST_CASE ("PadV2_Int8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    PadInt8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
+}
+
+TEST_CASE ("PadV2_Uint8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    PadUint8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
+}
+
+} // TEST_SUITE("Pad_GpuAccTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/test/PadTestHelper.hpp b/delegate/src/test/PadTestHelper.hpp
new file mode 100644
index 0000000..7221ded
--- /dev/null
@@ -0,0 +1,214 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+template <typename T>
+std::vector<char> CreatePadTfLiteModel(
+    tflite::BuiltinOperator padOperatorCode,
+    tflite::TensorType tensorType,
+    const std::vector<int32_t>& inputTensorShape,
+    const std::vector<int32_t>& paddingTensorShape,
+    const std::vector<int32_t>& outputTensorShape,
+    const std::vector<int32_t>& paddingDim,
+    const std::vector<T> paddingValue,
+    float quantScale = 1.0f,
+    int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    auto inputTensor = CreateTensor(flatBufferBuilder,
+                                    flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                            inputTensorShape.size()),
+                                    tensorType,
+                                    0,
+                                    flatBufferBuilder.CreateString("input"),
+                                    quantizationParameters);
+
+    auto paddingTensor = CreateTensor(flatBufferBuilder,
+                                      flatBufferBuilder.CreateVector<int32_t>(paddingTensorShape.data(),
+                                                                              paddingTensorShape.size()),
+                                      tflite::TensorType_INT32,
+                                      1,
+                                      flatBufferBuilder.CreateString("padding"));
+
+    auto outputTensor = CreateTensor(flatBufferBuilder,
+                                     flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                             outputTensorShape.size()),
+                                     tensorType,
+                                     2,
+                                     flatBufferBuilder.CreateString("output"),
+                                     quantizationParameters);
+
+    std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, paddingTensor, outputTensor};
+
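+    // Buffer 0 is left empty and referenced by the input tensor, buffer 1 holds the
+    // padding pairs and buffer 2 backs the output tensor.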
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(paddingDim.data()),
+                                                     sizeof(int32_t) * paddingDim.size())));
+    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+
+    std::vector<int32_t> operatorInputs;
+    std::vector<int> subgraphInputs;
+
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_PadOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions;
+
+    if (padOperatorCode == tflite::BuiltinOperator_PAD)
+    {
+        operatorInputs = {{ 0, 1 }};
+        subgraphInputs = {{ 0, 1 }};
+        operatorBuiltinOptions = CreatePadOptions(flatBufferBuilder).Union();
+
+    }
+    else if (padOperatorCode == tflite::BuiltinOperator_PADV2)
+    {
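+        // PADV2 models carry the scalar padding value as a third input tensor,
+        // backed by its own buffer (index 3).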
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(paddingValue.data()),
+                                                        sizeof(T))));
+
+        const std::vector<int32_t> shape = { 1 };
+        auto padValueTensor = CreateTensor(flatBufferBuilder,
+                                           flatBufferBuilder.CreateVector<int32_t>(shape.data(),
+                                                                                   shape.size()),
+                                           tensorType,
+                                           3,
+                                           flatBufferBuilder.CreateString("paddingValue"),
+                                           quantizationParameters);
+
+        tensors.push_back(padValueTensor);
+
+        operatorInputs = {{ 0, 1, 3 }};
+        subgraphInputs = {{ 0, 1, 3 }};
+
+        operatorBuiltinOptionsType = BuiltinOptions_PadV2Options;
+        operatorBuiltinOptions = CreatePadV2Options(flatBufferBuilder).Union();
+    }
+
+    // create operator
+    const std::vector<int32_t> operatorOutputs{{ 2 }};
+    flatbuffers::Offset <Operator> redefineOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
+
+    const std::vector<int> subgraphOutputs{{ 2 }};
+    flatbuffers::Offset <SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&redefineOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Pad Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                                                         padOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void PadTest(tflite::BuiltinOperator padOperatorCode,
+             tflite::TensorType tensorType,
+             const std::vector<armnn::BackendId>& backends,
+             const std::vector<int32_t>& inputShape,
+             const std::vector<int32_t>& paddingShape,
+             std::vector<int32_t>& outputShape,
+             std::vector<T>& inputValues,
+             std::vector<int32_t>& paddingDim,
+             std::vector<T>& expectedOutputValues,
+             T paddingValue,
+             float quantScale = 1.0f,
+             int quantOffset  = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreatePadTfLiteModel<T>(padOperatorCode,
+                                                            tensorType,
+                                                            inputShape,
+                                                            paddingShape,
+                                                            outputShape,
+                                                            paddingDim,
+                                                            {paddingValue},
+                                                            quantScale,
+                                                            quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    CHECK(tfLiteModel != nullptr);
+
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+          (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+          (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
+    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/src/test/TestUtils.hpp b/delegate/src/test/TestUtils.hpp
index 284eaa7..b165920 100644
@@ -61,6 +61,9 @@ void CompareOutputData(std::unique_ptr<tflite::Interpreter>& tfLiteInterpreter,
     auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId);
     auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
 
+    CHECK(expectedOutputShape.size() == tfLiteDelegateOutputTensor->dims->size);
+    CHECK(expectedOutputShape.size() == armnnDelegateOutputTensor->dims->size);
+
     for (size_t i = 0; i < expectedOutputShape.size(); i++)
     {
         CHECK(expectedOutputShape[i] == armnnDelegateOutputTensor->dims->data[i]);