IVGCVSW-5380 'TfLiteDelegate: Implement the Comparison operators'
author Sadik Armagan <sadik.armagan@arm.com>
Mon, 9 Nov 2020 08:26:22 +0000 (08:26 +0000)
committer Sadik Armagan <sadik.armagan@arm.com>
Tue, 10 Nov 2020 15:08:17 +0000 (15:08 +0000)
* Implemented Comparison Operators (EQUAL, NOT_EQUAL, GREATER, GREATER_EQUAL, LESS, LESS_EQUAL)
* Added unit tests

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Icdc0f7c6a286a8364a2770b26d15e8958291dc2b

delegate/CMakeLists.txt
delegate/src/Comparison.hpp
delegate/src/DelegateUtils.hpp
delegate/src/test/ComparisonTest.cpp [new file with mode: 0644]
delegate/src/test/ComparisonTestHelper.hpp [new file with mode: 0644]

diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index acce828..8149764 100644 (file)
@@ -89,6 +89,8 @@ target_include_directories(armnnDelegate
 set(armnnDelegate_unittest_sources)
 list(APPEND armnnDelegate_unittest_sources
         src/test/ArmnnDelegateTest.cpp
+        src/test/ComparisonTest.cpp
+        src/test/ComparisonTestHelper.hpp
         src/test/ElementwiseBinaryTest.cpp
         src/test/ElementwiseBinaryTestHelper.hpp
         src/test/ElementwiseUnaryTest.cpp
diff --git a/delegate/src/Comparison.hpp b/delegate/src/Comparison.hpp
index 19d8de1..f787a22 100644 (file)
@@ -5,6 +5,8 @@
 
 #pragma once
 
+#include "DelegateUtils.hpp"
+
 #include <tensorflow/lite/builtin_ops.h>
 #include <tensorflow/lite/c/builtin_op_data.h>
 #include <tensorflow/lite/c/common.h>
@@ -17,9 +19,110 @@ TfLiteStatus VisitComparisonOperator(DelegateData& delegateData,
                                      TfLiteContext* tfLiteContext,
                                      TfLiteNode* tfLiteNode,
                                      int nodeIndex,
-                                     int32_t comparisonOperatorCode)
+                                     int32_t tfLiteComparisonOperatorCode)
 {
-    return kTfLiteError;
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+    const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
+    if (IsDynamicTensor(tfLiteInputTensor0))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            tfLiteComparisonOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteTensor& tfLiteInputTensor1 = tfLiteTensors[tfLiteNode->inputs->data[1]];
+    if (IsDynamicTensor(tfLiteInputTensor1))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
+            tfLiteComparisonOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+    if (IsDynamicTensor(tfLiteOutputTensor))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext,
+            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
+            tfLiteComparisonOperatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
+    const armnn::TensorInfo& inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+    armnn::ComparisonOperation comparisonOperation = armnn::ComparisonOperation::Equal;
+    switch(tfLiteComparisonOperatorCode)
+    {
+        case kTfLiteBuiltinEqual:
+            comparisonOperation = armnn::ComparisonOperation::Equal;
+            break;
+        case kTfLiteBuiltinGreater:
+            comparisonOperation = armnn::ComparisonOperation::Greater;
+            break;
+        case kTfLiteBuiltinGreaterEqual:
+            comparisonOperation = armnn::ComparisonOperation::GreaterOrEqual;
+            break;
+        case kTfLiteBuiltinLess:
+            comparisonOperation = armnn::ComparisonOperation::Less;
+            break;
+        case kTfLiteBuiltinLessEqual:
+            comparisonOperation = armnn::ComparisonOperation::LessOrEqual;
+            break;
+        case kTfLiteBuiltinNotEqual:
+            comparisonOperation = armnn::ComparisonOperation::NotEqual;
+            break;
+        default:
+            return kTfLiteError;
+    }
+
+    armnn::ComparisonDescriptor descriptor(comparisonOperation);
+    bool isSupported = false;
+
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsComparisonSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   inputTensorInfo0,
+                                   inputTensorInfo1,
+                                   outputTensorInfo,
+                                   descriptor);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* comparisonLayer = delegateData.m_Network->AddComparisonLayer(descriptor);
+    ARMNN_ASSERT(comparisonLayer != nullptr);
+
+    armnn::IOutputSlot& outputSlot = comparisonLayer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    auto reshapeLayer = BroadcastTensor(inputTensorInfo0,
+                                        inputTensorInfo1,
+                                        comparisonLayer,
+                                        tfLiteContext,
+                                        tfLiteNode,
+                                        delegateData);
+    if (!reshapeLayer)
+    {
+        return kTfLiteError;
+    }
+    return kTfLiteOk;
 }
 
 } // namespace armnnDelegate
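
A note on dispatch, for orientation: VisitComparisonOperator is shared by all six comparison builtins, which the delegate routes to it from its per-node switch on the TfLite operator code. A minimal sketch of that routing, assuming a VisitNode-style dispatcher (the name and signature here are illustrative, not copied from this change):

    // Hypothetical excerpt of the delegate's per-node dispatch. Every
    // comparison builtin funnels into the same visit function, passing its
    // own operator code so one implementation covers all six operators.
    TfLiteStatus VisitNode(armnnDelegate::DelegateData& delegateData,
                           TfLiteContext* tfLiteContext,
                           TfLiteRegistration* tfLiteRegistration,
                           TfLiteNode* tfLiteNode,
                           int nodeIndex)
    {
        switch (tfLiteRegistration->builtin_code)
        {
            case kTfLiteBuiltinEqual:
            case kTfLiteBuiltinGreater:
            case kTfLiteBuiltinGreaterEqual:
            case kTfLiteBuiltinLess:
            case kTfLiteBuiltinLessEqual:
            case kTfLiteBuiltinNotEqual:
                return VisitComparisonOperator(delegateData,
                                               tfLiteContext,
                                               tfLiteNode,
                                               nodeIndex,
                                               tfLiteRegistration->builtin_code);
            default:
                return kTfLiteError;
        }
    }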
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index fca6a6c..00279f6 100644 (file)
@@ -139,14 +139,7 @@ armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
     if (inputDimensions0 == inputDimensions1)
     {
         auto status = Connect(startLayer, tfLiteNode, delegateData);
-        if(status == kTfLiteOk)
-        {
-            return startLayer;
-        }
-        else
-        {
-            return nullptr;
-        }
+        return status == kTfLiteOk ? startLayer : nullptr;
     }
 
     unsigned int biggerInputDimensions = std::max(inputDimensions0, inputDimensions1);
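
The simplified fast path above returns the start layer directly when both inputs share the same rank, which is the case for every broadcast test in this change (all shapes are four-dimensional); the backends then broadcast element-wise on their own. A reshape layer is only inserted when the ranks differ, conceptually by left-padding the smaller shape with 1s. A standalone sketch of that padding idea (illustrative only, not the helper's actual code; assumes targetRank is at least the input's rank):

    #include <vector>
    #include <armnn/Tensor.hpp>

    // Left-pad a shape with 1s until it reaches targetRank, so broadcasting
    // can proceed dimension-by-dimension against the higher-rank input.
    armnn::TensorShape ExpandToRank(const armnn::TensorShape& shape, unsigned int targetRank)
    {
        const unsigned int rankDifference = targetRank - shape.GetNumDimensions();
        std::vector<unsigned int> dimensions(targetRank, 1u);
        for (unsigned int i = 0; i < shape.GetNumDimensions(); ++i)
        {
            dimensions[rankDifference + i] = shape[i];
        }
        return armnn::TensorShape(targetRank, dimensions.data());
    }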
diff --git a/delegate/src/test/ComparisonTest.cpp b/delegate/src/test/ComparisonTest.cpp
new file mode 100644 (file)
index 0000000..0826535
--- /dev/null
@@ -0,0 +1,754 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ComparisonTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void EqualFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<float> input0Values =
+    {
+        1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f,
+        3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
+    };
+
+    std::vector<float> input1Values =
+    {
+        1.f, 1.f, 1.f, 1.f, 3.f, 3.f, 3.f, 3.f,
+        5.f, 5.f, 5.f, 5.f, 4.f, 4.f, 4.f, 4.f
+    };
+
+    std::vector<bool> expectedOutputValues =
+    {
+        1, 1, 1, 1, 0, 0, 0, 0,
+        0, 0, 0, 0, 1, 1, 1, 1
+    };
+
+    ComparisonTest<float>(tflite::BuiltinOperator_EQUAL,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void EqualBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
+
+    std::vector<float> input0Values
+    {
+        1.f, 2.f, 3.f,  4.f,  5.f,  6.f,
+        7.f, 8.f, 9.f, 10.f, 11.f, 12.f
+    };
+    std::vector<float> input1Values { 4.f, 5.f, 6.f };
+    // Set expected output data
+    std::vector<bool> expectedOutputValues
+    {
+        0, 0, 0, 1, 1, 1,
+        0, 0, 0, 0, 0, 0
+    };
+    ComparisonTest<float>(tflite::BuiltinOperator_EQUAL,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void EqualInt32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<int32_t> input0Values = { 1, 5, 6, 4 };
+
+    std::vector<int32_t> input1Values = { 1, 3, 9, 4 };
+
+    std::vector<bool> expectedOutputValues = { 1, 0, 0, 1 };
+
+    ComparisonTest<int32_t>(tflite::BuiltinOperator_EQUAL,
+                            ::tflite::TensorType_INT32,
+                            backends,
+                            input0Shape,
+                            input1Shape,
+                            expectedOutputShape,
+                            input0Values,
+                            input1Values,
+                            expectedOutputValues);
+}
+
+void NotEqualFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<float> input0Values =
+    {
+        1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f,
+        3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
+    };
+
+    std::vector<float> input1Values =
+    {
+        1.f, 1.f, 1.f, 1.f, 3.f, 3.f, 3.f, 3.f,
+        5.f, 5.f, 5.f, 5.f, 4.f, 4.f, 4.f, 4.f
+    };
+
+    std::vector<bool> expectedOutputValues =
+    {
+        0, 0, 0, 0, 1, 1, 1, 1,
+        1, 1, 1, 1, 0, 0, 0, 0
+    };
+
+    ComparisonTest<float>(tflite::BuiltinOperator_NOT_EQUAL,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void NotEqualBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
+
+    std::vector<float> input0Values
+    {
+        1.f, 2.f, 3.f,  4.f,  5.f,  6.f,
+        7.f, 8.f, 9.f, 10.f, 11.f, 12.f
+    };
+    std::vector<float> input1Values { 4.f, 5.f, 6.f };
+    // Set expected output data
+    std::vector<bool> expectedOutputValues
+    {
+        1, 1, 1, 0, 0, 0,
+        1, 1, 1, 1, 1, 1
+    };
+    ComparisonTest<float>(tflite::BuiltinOperator_NOT_EQUAL,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void NotEqualInt32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<int32_t> input0Values = { 1, 5, 6, 4 };
+
+    std::vector<int32_t> input1Values = { 1, 3, 9, 4 };
+
+    std::vector<bool> expectedOutputValues = { 0, 1, 1, 0 };
+
+    ComparisonTest<int32_t>(tflite::BuiltinOperator_NOT_EQUAL,
+                            ::tflite::TensorType_INT32,
+                            backends,
+                            input0Shape,
+                            input1Shape,
+                            expectedOutputShape,
+                            input0Values,
+                            input1Values,
+                            expectedOutputValues);
+}
+
+void GreaterFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<float> input0Values = { 1, 5, 6, 4 };
+
+    std::vector<float> input1Values = { 1, 3, 9, 4 };
+
+    std::vector<bool> expectedOutputValues = { 0, 1, 0, 0 };
+
+    ComparisonTest<float>(tflite::BuiltinOperator_GREATER,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void GreaterBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
+
+    std::vector<float> input0Values
+    {
+        1.f, 2.f, 3.f,  4.f,  5.f,  6.f,
+        7.f, 8.f, 9.f, 10.f, 11.f, 12.f
+    };
+    std::vector<float> input1Values { 4.f, 5.f, 6.f };
+
+    std::vector<bool> expectedOutputValues
+    {
+        0, 0, 0, 0, 0, 0,
+        1, 1, 1, 1, 1, 1
+    };
+    ComparisonTest<float>(tflite::BuiltinOperator_GREATER,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void GreaterInt32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<int32_t> input0Values = { 1, 5, 6, 4 };
+
+    std::vector<int32_t> input1Values = { 1, 3, 9, 4 };
+
+    std::vector<bool> expectedOutputValues = { 0, 1, 0, 0 };
+
+    ComparisonTest<int32_t>(tflite::BuiltinOperator_GREATER,
+                            ::tflite::TensorType_INT32,
+                            backends,
+                            input0Shape,
+                            input1Shape,
+                            expectedOutputShape,
+                            input0Values,
+                            input1Values,
+                            expectedOutputValues);
+}
+
+void GreaterEqualFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<float> input0Values = { 1.f, 5.f, 6.f, 4.f };
+
+    std::vector<float> input1Values = { 1.f, 3.f, 9.f, 4.f };
+
+    std::vector<bool> expectedOutputValues = { true, true, false, true };
+
+    ComparisonTest<float>(tflite::BuiltinOperator_GREATER_EQUAL,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void GreaterEqualBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
+
+    std::vector<float> input0Values
+    {
+        1.f, 2.f, 3.f,  4.f,  5.f,  6.f,
+        7.f, 8.f, 9.f, 10.f, 11.f, 12.f
+    };
+    std::vector<float> input1Values { 4.f, 5.f, 6.f };
+    // Set expected output data
+    std::vector<bool> expectedOutputValues
+    {
+        0, 0, 0, 1, 1, 1,
+        1, 1, 1, 1, 1, 1
+    };
+
+    ComparisonTest<float>(tflite::BuiltinOperator_GREATER_EQUAL,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void GreaterEqualInt32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<int32_t> input0Values = { 1, 5, 6, 3 };
+
+    std::vector<int32_t> input1Values = { 1, 3, 9, 4 };
+
+    std::vector<bool> expectedOutputValues = { 1, 1, 0, 0 };
+
+    ComparisonTest<int32_t>(tflite::BuiltinOperator_GREATER_EQUAL,
+                            ::tflite::TensorType_INT32,
+                            backends,
+                            input0Shape,
+                            input1Shape,
+                            expectedOutputShape,
+                            input0Values,
+                            input1Values,
+                            expectedOutputValues);
+}
+
+void LessFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<float> input0Values = { 1.f, 5.f, 6.f, 4.f };
+
+    std::vector<float> input1Values = { 1.f, 3.f, 9.f, 4.f };
+
+    std::vector<bool> expectedOutputValues = { false, false, true, false };
+
+    ComparisonTest<float>(tflite::BuiltinOperator_LESS,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void LessBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
+
+    std::vector<float> input0Values
+    {
+        1.f, 2.f, 3.f,  4.f,  5.f,  6.f,
+        7.f, 8.f, 9.f, 10.f, 11.f, 12.f
+    };
+    std::vector<float> input1Values { 4.f, 5.f, 6.f };
+
+    std::vector<bool> expectedOutputValues
+    {
+        true, true, true, false, false, false,
+        false, false, false, false, false, false
+    };
+
+    ComparisonTest<float>(tflite::BuiltinOperator_LESS,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void LessInt32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<int32_t> input0Values = { 1, 5, 6, 3 };
+
+    std::vector<int32_t> input1Values = { 1, 3, 9, 4 };
+
+    std::vector<bool> expectedOutputValues = { false, false, true, true };
+
+    ComparisonTest<int32_t>(tflite::BuiltinOperator_LESS,
+                            ::tflite::TensorType_INT32,
+                            backends,
+                            input0Shape,
+                            input1Shape,
+                            expectedOutputShape,
+                            input0Values,
+                            input1Values,
+                            expectedOutputValues);
+}
+
+void LessEqualFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<float> input0Values = { 1.f, 5.f, 6.f, 4.f };
+
+    std::vector<float> input1Values = { 1.f, 3.f, 9.f, 4.f };
+
+    std::vector<bool> expectedOutputValues = { true, false, true, true };
+
+    ComparisonTest<float>(tflite::BuiltinOperator_LESS_EQUAL,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void LessEqualBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
+
+    std::vector<float> input0Values
+    {
+        1.f, 2.f, 3.f,  4.f,  5.f,  6.f,
+        7.f, 8.f, 9.f, 10.f, 11.f, 12.f
+    };
+    std::vector<float> input1Values { 4.f, 5.f, 6.f };
+
+    std::vector<bool> expectedOutputValues
+    {
+        true, true, true, true, true, true,
+        false, false, false, false, false, false
+    };
+
+    ComparisonTest<float>(tflite::BuiltinOperator_LESS_EQUAL,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void LessEqualInt32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<int32_t> input0Values = { 1, 5, 6, 3 };
+
+    std::vector<int32_t> input1Values = { 1, 3, 9, 4 };
+
+    std::vector<bool> expectedOutputValues = { true, false, true, true };
+
+    ComparisonTest<int32_t>(tflite::BuiltinOperator_LESS_EQUAL,
+                            ::tflite::TensorType_INT32,
+                            backends,
+                            input0Shape,
+                            input1Shape,
+                            expectedOutputShape,
+                            input0Values,
+                            input1Values,
+                            expectedOutputValues);
+}
+
+TEST_SUITE("ComparisonTest")
+{
+
+TEST_CASE ("EQUAL_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    EqualFP32Test(backends);
+}
+
+TEST_CASE ("EQUAL_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    EqualFP32Test(backends);
+}
+
+TEST_CASE ("EQUAL_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    EqualBroadcastTest(backends);
+}
+
+TEST_CASE ("EQUAL_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    EqualBroadcastTest(backends);
+}
+
+TEST_CASE ("EQUAL_INT32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    EqualInt32Test(backends);
+}
+
+TEST_CASE ("EQUAL_INT32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    EqualInt32Test(backends);
+}
+
+TEST_CASE ("NOT_EQUAL_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    NotEqualFP32Test(backends);
+}
+
+TEST_CASE ("NOT_EQUAL_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    NotEqualFP32Test(backends);
+}
+
+TEST_CASE ("NOT_EQUAL_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    NotEqualBroadcastTest(backends);
+}
+
+TEST_CASE ("NOT_EQUAL_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    NotEqualBroadcastTest(backends);
+}
+
+TEST_CASE ("NOT_EQUAL_INT32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    NotEqualInt32Test(backends);
+}
+
+TEST_CASE ("NOT_EQUAL_INT32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    NotEqualInt32Test(backends);
+}
+
+TEST_CASE ("GREATER_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    GreaterFP32Test(backends);
+}
+
+TEST_CASE ("GREATER_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    GreaterFP32Test(backends);
+}
+
+TEST_CASE ("GREATER_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    GreaterBroadcastTest(backends);
+}
+
+TEST_CASE ("GREATER_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    GreaterBroadcastTest(backends);
+}
+
+TEST_CASE ("GREATER_INT32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    GreaterInt32Test(backends);
+}
+
+TEST_CASE ("GREATER_INT32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    GreaterInt32Test(backends);
+}
+
+TEST_CASE ("GREATER_EQUAL_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    GreaterEqualFP32Test(backends);
+}
+
+TEST_CASE ("GREATER_EQUAL_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    GreaterEqualFP32Test(backends);
+}
+
+TEST_CASE ("GREATER_EQUAL_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    GreaterEqualBroadcastTest(backends);
+}
+
+TEST_CASE ("GREATER_EQUAL_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    GreaterEqualBroadcastTest(backends);
+}
+
+TEST_CASE ("GREATER_EQUAL_INT32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    GreaterEqualInt32Test(backends);
+}
+
+TEST_CASE ("GREATER_EQUAL_INT32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    GreaterEqualInt32Test(backends);
+}
+
+TEST_CASE ("LESS_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    LessFP32Test(backends);
+}
+
+TEST_CASE ("LESS_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    LessFP32Test(backends);
+}
+
+TEST_CASE ("LESS_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    LessBroadcastTest(backends);
+}
+
+TEST_CASE ("LESS_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    LessBroadcastTest(backends);
+}
+
+TEST_CASE ("LESS_INT32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    LessInt32Test(backends);
+}
+
+TEST_CASE ("LESS_INT32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    LessInt32Test(backends);
+}
+
+TEST_CASE ("LESS_EQUAL_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    LessEqualFP32Test(backends);
+}
+
+TEST_CASE ("LESS_EQUAL_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    LessEqualFP32Test(backends);
+}
+
+TEST_CASE ("LESS_EQUAL_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    LessEqualBroadcastTest(backends);
+}
+
+TEST_CASE ("LESS_EQUAL_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    LessEqualBroadcastTest(backends);
+}
+
+TEST_CASE ("LESS_EQUAL_INT32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    LessEqualInt32Test(backends);
+}
+
+TEST_CASE ("LESS_EQUAL_INT32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::CpuRef };
+    LessEqualInt32Test(backends);
+}
+
+} // End TEST_SUITE("ComparisonTest")
+
+} // namespace armnnDelegate
\ No newline at end of file
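
In each broadcast test above, the {1,1,1,3} input repeats along the first three axes, so its three values are compared against every group of three values in the {1,2,2,3} input. A self-contained sketch reproducing EqualBroadcastTest's expected output (the modular indexing models the innermost-axis broadcast and is an assumption of this sketch, not delegate code):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    int main()
    {
        // Inputs copied from EqualBroadcastTest above.
        std::vector<float> input0 { 1.f, 2.f, 3.f,  4.f,  5.f,  6.f,
                                    7.f, 8.f, 9.f, 10.f, 11.f, 12.f };
        std::vector<float> input1 { 4.f, 5.f, 6.f };

        // input1 has shape {1,1,1,3}: element i of input0 is compared
        // against element i % 3 of input1.
        std::vector<bool> output;
        for (std::size_t i = 0; i < input0.size(); ++i)
        {
            output.push_back(input0[i] == input1[i % input1.size()]);
        }

        std::vector<bool> expected { 0, 0, 0, 1, 1, 1,
                                     0, 0, 0, 0, 0, 0 };
        assert(output == expected);
        return 0;
    }

The suite can also be run on its own through doctest's --test-suite filter on the delegate unit test binary.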
diff --git a/delegate/src/test/ComparisonTestHelper.hpp b/delegate/src/test/ComparisonTestHelper.hpp
new file mode 100644 (file)
index 0000000..0011c76
--- /dev/null
@@ -0,0 +1,236 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+std::vector<char> CreateComparisonTfLiteModel(tflite::BuiltinOperator comparisonOperatorCode,
+                                              tflite::TensorType tensorType,
+                                              const std::vector <int32_t>& input0TensorShape,
+                                              const std::vector <int32_t>& input1TensorShape,
+                                              const std::vector <int32_t>& outputTensorShape,
+                                              float quantScale = 1.0f,
+                                              int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
+                                                                      input0TensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("input_0"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
+                                                                      input1TensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("input_1"),
+                              quantizationParameters);
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              ::tflite::TensorType_BOOL,
+                              0);
+
+    // Create the operator, defaulting to Equal options
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_EqualOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreateEqualOptions(flatBufferBuilder).Union();
+    switch (comparisonOperatorCode)
+    {
+        case BuiltinOperator_EQUAL:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_EqualOptions;
+            operatorBuiltinOptions = CreateEqualOptions(flatBufferBuilder).Union();
+            break;
+        }
+        case BuiltinOperator_NOT_EQUAL:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_NotEqualOptions;
+            operatorBuiltinOptions = CreateNotEqualOptions(flatBufferBuilder).Union();
+            break;
+        }
+        case BuiltinOperator_GREATER:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_GreaterOptions;
+            operatorBuiltinOptions = CreateGreaterOptions(flatBufferBuilder).Union();
+            break;
+        }
+        case BuiltinOperator_GREATER_EQUAL:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_GreaterEqualOptions;
+            operatorBuiltinOptions = CreateGreaterEqualOptions(flatBufferBuilder).Union();
+            break;
+        }
+        case BuiltinOperator_LESS:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_LessOptions;
+            operatorBuiltinOptions = CreateLessOptions(flatBufferBuilder).Union();
+            break;
+        }
+        case BuiltinOperator_LESS_EQUAL:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_LessEqualOptions;
+            operatorBuiltinOptions = CreateLessEqualOptions(flatBufferBuilder).Union();
+            break;
+        }
+        default:
+            break;
+    }
+    const std::vector<int32_t> operatorInputs { 0, 1 };
+    const std::vector<int32_t> operatorOutputs { 2 };
+    flatbuffers::Offset <Operator> comparisonOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
+
+    const std::vector<int> subgraphInputs { 0, 1 };
+    const std::vector<int> subgraphOutputs { 2 };
+    flatbuffers::Offset <SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&comparisonOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Comparison Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, comparisonOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void ComparisonTest(tflite::BuiltinOperator comparisonOperatorCode,
+                    tflite::TensorType tensorType,
+                    std::vector<armnn::BackendId>& backends,
+                    std::vector<int32_t>& input0Shape,
+                    std::vector<int32_t>& input1Shape,
+                    std::vector<int32_t>& outputShape,
+                    std::vector<T>& input0Values,
+                    std::vector<T>& input1Values,
+                    std::vector<bool>& expectedOutputValues,
+                    float quantScale = 1.0f,
+                    int quantOffset  = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateComparisonTfLiteModel(comparisonOperatorCode,
+                                                                tensorType,
+                                                                input0Shape,
+                                                                input1Shape,
+                                                                outputShape,
+                                                                quantScale,
+                                                                quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    auto tfLiteDelegateInput0Id = tfLiteInterpreter->inputs()[0];
+    auto tfLiteDelegateInput0Data = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInput0Id);
+    for (unsigned int i = 0; i < input0Values.size(); ++i)
+    {
+        tfLiteDelegateInput0Data[i] = input0Values[i];
+    }
+
+    auto tfLiteDelegateInput1Id = tfLiteInterpreter->inputs()[1];
+    auto tfLiteDelegateInput1Data = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInput1Id);
+    for (unsigned int i = 0; i < input1Values.size(); ++i)
+    {
+        tfLiteDelegateInput1Data[i] = input1Values[i];
+    }
+
+    auto armnnDelegateInput0Id = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateInput0Data = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInput0Id);
+    for (unsigned int i = 0; i < input0Values.size(); ++i)
+    {
+        armnnDelegateInput0Data[i] = input0Values[i];
+    }
+
+    auto armnnDelegateInput1Id = armnnDelegateInterpreter->inputs()[1];
+    auto armnnDelegateInput1Data = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInput1Id);
+    for (unsigned int i = 0; i < input1Values.size(); ++i)
+    {
+        armnnDelegateInput1Data[i] = input1Values[i];
+    }
+
+    // Run inference on both interpreters
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+    // Compare output data
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<bool>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<bool>(armnnDelegateOutputId);
+
+    for (size_t i = 0; i < expectedOutputValues.size(); i++)
+    {
+        CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
+        CHECK(tfLiteDelegateOutputData[i] == expectedOutputValues[i]);
+        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
+    }
+}
+
+} // anonymous namespace
\ No newline at end of file
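
The helper's delegate setup doubles as a template for application code. A minimal sketch, assuming an interpreter already built from a model exactly as in ComparisonTest above:

    #include <memory>
    #include <vector>

    #include <armnn_delegate.hpp>

    // ... build `interpreter` from a model as in ComparisonTest ...

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
                                               armnn::Compute::CpuRef };
    armnnDelegate::DelegateOptions delegateOptions(backends);

    // Keep the delegate alive for as long as the interpreter uses it.
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);

    interpreter->ModifyGraphWithDelegate(theArmnnDelegate.get());

    // Supported nodes, now including the six comparison operators, run
    // through Arm NN; anything unsupported falls back to the TfLite kernels.
    interpreter->Invoke();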