IVGCVSW-5387 TfLiteDelegate: Implement the Pooling operators
author     Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Mon, 9 Nov 2020 18:42:11 +0000 (18:42 +0000)
committer  Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Tue, 10 Nov 2020 17:00:23 +0000 (17:00 +0000)
 * Implement the MaxPool2d operator
 * Add QAsymmS8 support to the armnn delegate
 * Add unit tests

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I1815ade6ccda3e9331bd3a68e164be0f6947e9df

delegate/CMakeLists.txt
delegate/src/DelegateUtils.hpp
delegate/src/Pooling.hpp
delegate/src/test/Pooling2dTest.cpp [new file with mode: 0644]
delegate/src/test/Pooling2dTestHelper.hpp [new file with mode: 0644]

index 0a1a3e4..e05a0ba 100644 (file)
@@ -95,6 +95,8 @@ list(APPEND armnnDelegate_unittest_sources
         src/test/ElementwiseBinaryTestHelper.hpp
         src/test/ElementwiseUnaryTest.cpp
         src/test/ElementwiseUnaryTestHelper.hpp
+        src/test/Pooling2dTest.cpp
+        src/test/Pooling2dTestHelper.hpp
         src/test/QuantizationTest.cpp
         src/test/QuantizationTestHelper.hpp)
 
@@ -159,4 +161,4 @@ export(
         FILE        ${CMAKE_CURRENT_BINARY_DIR}/ArmnnDelegateTargets.cmake
         NAMESPACE   ArmnnDelegate::)
 
-####################################################
\ No newline at end of file
+####################################################
index f12aee9..729a8b4 100644 (file)
@@ -317,7 +317,16 @@ armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor)
             type = armnn::DataType::QAsymmU8;
             break;
         case kTfLiteInt8:
-            type = armnn::DataType::QSymmS8;
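+            // TfLite int8 can be symmetric (zero point 0) or asymmetric, so
+            // choose the Arm NN data type from the quantization parameters.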
+            if (tfLiteTensor.params.zero_point == 0)
+            {
+                type = armnn::DataType::QSymmS8;
+            }
+            else
+            {
+                type = armnn::DataType::QAsymmS8;
+            }
             break;
         case kTfLiteInt16:
             type = armnn::DataType::QSymmS16;
index ff3d668..28e26f6 100644 (file)
 namespace armnnDelegate
 {
 
+void CalcPadding(uint32_t inputSize,
+                 uint32_t filterSize,
+                 uint32_t stride,
+                 uint32_t dilation,
+                 uint32_t& paddingFront,
+                 uint32_t& paddingBack,
+                 TfLitePadding padding)
+{
+    paddingFront = 0;
+    paddingBack = 0;
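+    // kTfLitePaddingSame: the output covers ceil(inputSize / stride) positions
+    // and any padding needed to achieve that is split evenly, with the odd
+    // element going to the back edge. E.g. inputSize=3, filterSize=2, stride=2
+    // gives outputSize=2 and temp=4, so paddingFront=0 and paddingBack=1.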
+    if (padding == kTfLitePaddingSame)
+    {
+        uint32_t outputSize = (inputSize + stride - 1) / stride;
+        uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
+        uint32_t temp = (outputSize - 1) * stride + dilatedSize;
+        if (temp > inputSize)
+        {
+            paddingFront = (temp - inputSize) / 2;
+            paddingBack = (temp - inputSize) - paddingFront;
+        }
+    }
+}
+
 TfLiteStatus VisitPoolingOperator(DelegateData& delegateData,
                                   TfLiteContext* tfLiteContext,
                                   TfLiteNode* tfLiteNode,
                                   int nodeIndex,
-                                  int32_t poolingOperatorCode)
+                                  int32_t tfLitePoolingOperatorCode)
 {
-    return kTfLiteError;
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+    if (IsDynamicTensor(tfLiteInputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            tfLitePoolingOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+    if (IsDynamicTensor(tfLiteOutputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+            tfLitePoolingOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+    armnn::PoolingAlgorithm poolingAlgorithm;
+    switch(tfLitePoolingOperatorCode)
+    {
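+        // Only Max pooling is wired up in this patch; any other pooling
+        // builtin is reported back to TfLite as unsupported.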
+        case kTfLiteBuiltinMaxPool2d:
+            poolingAlgorithm = armnn::PoolingAlgorithm::Max;
+            break;
+        default:
+            return kTfLiteError;
+    }
+
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = poolingAlgorithm;
+
+    auto* params = reinterpret_cast<TfLitePoolParams*>(tfLiteNode->builtin_data);
+    descriptor.m_PoolWidth = params->filter_width;
+    descriptor.m_PoolHeight = params->filter_height;
+    descriptor.m_StrideX = params->stride_width;
+    descriptor.m_StrideY = params->stride_height;
+    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
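+    // TfLite tensors are NHWC, so dimension 1 is height and dimension 2 is width.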
+    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
+    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];
+
+    CalcPadding(inputHeight, descriptor.m_PoolHeight, descriptor.m_StrideY, 1u,
+                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
+    CalcPadding(inputWidth, descriptor.m_PoolWidth, descriptor.m_StrideX, 1u,
+                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsPooling2dSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   inputTensorInfo,
+                                   outputTensorInfo,
+                                   descriptor);
+    };
+
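+    // A null network means this call is only a backend support check, so run
+    // the validation function and translate the result to a TfLite status.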
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling2dLayer(descriptor);
+    ARMNN_ASSERT(poolingLayer != nullptr);
+
+    armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+    Connect(poolingLayer, tfLiteNode, delegateData);
+
+    // Check activation
+    TfLiteFusedActivation activationType = params->activation;
+    return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
 }
 
 } // namespace armnnDelegate
diff --git a/delegate/src/test/Pooling2dTest.cpp b/delegate/src/test/Pooling2dTest.cpp
new file mode 100644 (file)
index 0000000..3671b0b
--- /dev/null
@@ -0,0 +1,366 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Pooling2dTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void MaxPool2dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 1, 2, 1 };
+
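+    // One 3x4 channel in NHWC layout, laid out row by row.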
+    std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+                                       8.0f, 12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, -11.0f };
+
+    std::vector<float> expectedOutputValues = { 12.0f, 7.0f };
+
+    Pooling2dTest<float>(tflite::BuiltinOperator_MAX_POOL_2D,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         ::tflite::Padding_VALID,
+                         2,
+                         2,
+                         2,
+                         2);
+}
+
+void MaxPool2dInt8PaddingValidTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 1, 2, 1 };
+
+    std::vector<int8_t> inputValues = { -5, 8, -10, 7,
+                                        8, 12, -15, 2,
+                                        3, -4, -1, -11 };
+
+    std::vector<int8_t> expectedOutputValues = { 12, 7 };
+
+    Pooling2dTest<int8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
+                          ::tflite::TensorType_INT8,
+                          backends,
+                          inputShape,
+                          outputShape,
+                          inputValues,
+                          expectedOutputValues,
+                          ::tflite::Padding_VALID,
+                          2,
+                          2,
+                          2,
+                          2,
+                          tflite::ActivationFunctionType_NONE,
+                          2.5f,
+                          1);
+}
+
+void MaxPool2dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+                                       8.0f, 12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, -11.0f };
+
+    std::vector<float> expectedOutputValues = { 12.0f, 7.0f, 3.0f, -1.0f };
+
+    Pooling2dTest<float>(tflite::BuiltinOperator_MAX_POOL_2D,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         ::tflite::Padding_SAME,
+                         2,
+                         2,
+                         2,
+                         2);
+}
+
+void MaxPool2dInt8PaddingSameTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    std::vector<int8_t> inputValues = { -5, 8, -10, 7,
+                                        8, 12, -15, 2,
+                                        3, -4, -1, -11 };
+
+    std::vector<int8_t> expectedOutputValues = { 12, 7, 3, -1 };
+
+    Pooling2dTest<int8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
+                          ::tflite::TensorType_INT8,
+                          backends,
+                          inputShape,
+                          outputShape,
+                          inputValues,
+                          expectedOutputValues,
+                          ::tflite::Padding_SAME,
+                          2,
+                          2,
+                          2,
+                          2,
+                          tflite::ActivationFunctionType_NONE,
+                          2.5f,
+                          1);
+}
+
+void MaxPool2dFP32ReluTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 3, 1 };
+
+    std::vector<float> inputValues = { -5.0f, -8.0f, -10.0f, 7.0f,
+                                       -8.0f, -12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, -11.0f };
+
+    std::vector<float> expectedOutputValues = { 0.0f, 0.0f, 7.0f, 3.0f, 0.0f, 2.0f };
+
+    Pooling2dTest<float>(tflite::BuiltinOperator_MAX_POOL_2D,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         ::tflite::Padding_VALID,
+                         1,
+                         1,
+                         2,
+                         2,
+                         ::tflite::ActivationFunctionType_RELU);
+}
+
+void MaxPool2dInt8ReluTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 3, 1 };
+
+    std::vector<int8_t> inputValues = { -5, -8, -10, 7,
+                                        -8, -12, -15, 2,
+                                        3, -4, -1, -11 };
+
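+    // quantOffset is 1, so the quantised value 1 represents real 0.0f and
+    // ReLU clamps every negative window maximum up to 1.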
+    std::vector<int8_t> expectedOutputValues = { 1, 1, 7, 3, 1, 2 };
+
+    Pooling2dTest<int8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
+                          ::tflite::TensorType_INT8,
+                          backends,
+                          inputShape,
+                          outputShape,
+                          inputValues,
+                          expectedOutputValues,
+                          ::tflite::Padding_VALID,
+                          1,
+                          1,
+                          2,
+                          2,
+                          ::tflite::ActivationFunctionType_RELU,
+                          2.0f,
+                          1);
+}
+
+void MaxPool2dFP32Relu6Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    std::vector<float> inputValues = { -5.0f, -8.0f, -10.0f, 7.0f,
+                                       -8.0f, -12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, -11.0f };
+
+    std::vector<float> expectedOutputValues = { 0.0f, 0.0f, 3.0f, 0.0f };
+
+    Pooling2dTest<float>(tflite::BuiltinOperator_MAX_POOL_2D,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         ::tflite::Padding_SAME,
+                         2,
+                         2,
+                         1,
+                         1,
+                         ::tflite::ActivationFunctionType_RELU6);
+}
+
+void MaxPool2dInt8Relu6Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    std::vector<int8_t> inputValues = { -5, -8, -10, 7,
+                                        -8, -12, -15, 2,
+                                        3, -4, -1, -11 };
+
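+    // With scale 2.0 and zero point 1, RELU6 clamps results to the quantised
+    // range [1, 4] (real 0.0f to 6.0f).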
+    std::vector<int8_t> expectedOutputValues = { 1, 1, 3, 1 };
+
+    Pooling2dTest<int8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
+                          ::tflite::TensorType_INT8,
+                          backends,
+                          inputShape,
+                          outputShape,
+                          inputValues,
+                          expectedOutputValues,
+                          ::tflite::Padding_SAME,
+                          2,
+                          2,
+                          1,
+                          1,
+                          ::tflite::ActivationFunctionType_RELU6,
+                          2.0f,
+                          1);
+}
+
+TEST_SUITE("Pooling2dTest")
+{
+
+TEST_CASE ("MaxPooling2d_FP32_PaddingValid_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_PaddingValid_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_PaddingValid_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dInt8PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_PaddingValid_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dInt8PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_PaddingSame_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_PaddingSame_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_PaddingSame_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dInt8PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_PaddingSame_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dInt8PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_Relu_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dFP32ReluTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_Relu_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dFP32ReluTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_Relu_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dInt8ReluTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_Relu_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dInt8ReluTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_Relu6_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dFP32Relu6Test(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_Relu6_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dFP32Relu6Test(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_Relu6_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dInt8Relu6Test(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_Relu6_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    MaxPool2dInt8Relu6Test(backends);
+}
+
+} // TEST_SUITE("Pooling2dTest")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/test/Pooling2dTestHelper.hpp b/delegate/src/test/Pooling2dTestHelper.hpp
new file mode 100644 (file)
index 0000000..a344650
--- /dev/null
@@ -0,0 +1,213 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
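+// Builds a single-operator TfLite flatbuffer model containing one Pooling2d
+// node with the given shapes, padding, strides, filter and fused activation.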
+std::vector<char> CreatePooling2dTfLiteModel(
+    tflite::BuiltinOperator poolingOperatorCode,
+    tflite::TensorType tensorType,
+    const std::vector <int32_t>& inputTensorShape,
+    const std::vector <int32_t>& outputTensorShape,
+    tflite::Padding padding = tflite::Padding_SAME,
+    int32_t strideWidth = 0,
+    int32_t strideHeight = 0,
+    int32_t filterWidth = 0,
+    int32_t filterHeight = 0,
+    tflite::ActivationFunctionType fusedActivation = tflite::ActivationFunctionType_NONE,
+    float quantScale = 1.0f,
+    int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
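+    // Buffer 0 is the empty sentinel buffer that the TfLite schema expects.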
+    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                      inputTensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("input"),
+                              quantizationParameters);
+
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    // Create the pooling operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_Pool2DOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreatePool2DOptions(flatBufferBuilder,
+                                                                           padding,
+                                                                           strideWidth,
+                                                                           strideHeight,
+                                                                           filterWidth,
+                                                                           filterHeight,
+                                                                           fusedActivation).Union();
+
+    const std::vector<int32_t> operatorInputs{{0}};
+    const std::vector<int32_t> operatorOutputs{{1}};
+    flatbuffers::Offset <Operator> poolingOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
+
+    const std::vector<int> subgraphInputs{{0}};
+    const std::vector<int> subgraphOutputs{{1}};
+    flatbuffers::Offset <SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&poolingOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Pooling2d Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, poolingOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
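+// Runs the model above through both a delegate-backed interpreter and the
+// reference TfLite interpreter, then checks that shapes and outputs match.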
+template <typename T>
+void Pooling2dTest(tflite::BuiltinOperator poolingOperatorCode,
+                   tflite::TensorType tensorType,
+                   std::vector<armnn::BackendId>& backends,
+                   std::vector<int32_t>& inputShape,
+                   std::vector<int32_t>& outputShape,
+                   std::vector<T>& inputValues,
+                   std::vector<T>& expectedOutputValues,
+                   tflite::Padding padding = tflite::Padding_SAME,
+                   int32_t strideWidth = 0,
+                   int32_t strideHeight = 0,
+                   int32_t filterWidth = 0,
+                   int32_t filterHeight = 0,
+                   tflite::ActivationFunctionType fusedActivation = tflite::ActivationFunctionType_NONE,
+                   float quantScale = 1.0f,
+                   int quantOffset  = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreatePooling2dTfLiteModel(poolingOperatorCode,
+                                                               tensorType,
+                                                               inputShape,
+                                                               outputShape,
+                                                               padding,
+                                                               strideWidth,
+                                                               strideHeight,
+                                                               filterWidth,
+                                                               filterHeight,
+                                                               fusedActivation,
+                                                               quantScale,
+                                                               quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    CHECK(tfLiteModel != nullptr);
+    // Create two TfLite interpreters: one for the Arm NN delegate, one as reference
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
+    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        tfLiteDelegateInputData[i] = inputValues[i];
+    }
+
+    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        armnnDelegateInputData[i] = inputValues[i];
+    }
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+    // Compare output data
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
+    auto tfLiteDelegateOutputTensor = tfLiteInterpreter->tensor(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
+    auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId);
+
+    for (int i = 0; i < tfLiteDelegateOutputTensor->dims->size; i++)
+    {
+        CHECK(outputShape[i] == armnnDelegateOutputTensor->dims->data[i]);
+        CHECK(tfLiteDelegateOutputTensor->dims->data[i] == armnnDelegateOutputTensor->dims->data[i]);
+    }
+
+    for (size_t i = 0; i < expectedOutputValues.size(); i++)
+    {
+        CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
+        CHECK(tfLiteDelegateOutputData[i] == expectedOutputValues[i]);
+        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
+    }
+}
+
+} // anonymous namespace