IVGCVSW-5625 Add support for Float16 to Delegate
author    Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Fri, 18 Dec 2020 16:13:06 +0000 (16:13 +0000)
committer Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Wed, 13 Jan 2021 11:53:53 +0000 (11:53 +0000)
 * Add Float16 unit tests for Reshape
 * Remove the unsupported Int16 pad-value data type from Pad

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: Ib1804bb6e708a0552fb40d05fe8a6511936f9793
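
For reference, the Float16 plumbing below rests on two types: half_float::half from Arm NN's bundled third-party half library (aliased to Half in the tests) and TfLite's TfLiteFloat16, which carries the raw binary16 bit pattern in a uint16_t. A minimal sketch of the bridge between them, using the same float2half call as the change itself (the helper name here is illustrative, not part of the commit):

    #include <half/half.hpp>
    #include <limits>
    #include <tensorflow/lite/c/common.h>

    using Half = half_float::half;

    // Pack a Half into the raw binary16 payload a TfLiteFloat16 tensor stores.
    TfLiteFloat16 ToTfLiteFloat16(Half value)
    {
        TfLiteFloat16 packed;
        // float2half returns the IEEE 754 binary16 bit pattern of the value.
        packed.data = half_float::detail::float2half<std::round_indeterminate, float>(value);
        return packed;
    }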

delegate/CMakeLists.txt
delegate/src/Pad.hpp
delegate/src/test/PadTestHelper.hpp
delegate/src/test/RedefineTestHelper.hpp
delegate/src/test/ReshapeTest.cpp
delegate/src/test/TestUtils.cpp
delegate/src/test/TestUtils.hpp

diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index c052be2..9f64353 100644
@@ -93,6 +93,15 @@ target_compile_options(flatbuffer_headers INTERFACE -Wno-sign-conversion)
 
 target_link_libraries(armnnDelegate PUBLIC flatbuffer_headers)
 
+# Add libraries from armnn third-party libraries
+# Third-party header files are not warning clean
+# We can't change compilation flags on header files directly, so we need to add them to an interface library first
+add_library(thirdparty_headers INTERFACE)
+target_include_directories(thirdparty_headers INTERFACE $<BUILD_INTERFACE:${ARMNN_SOURCE_DIR}/third-party>
+                                                        $<INSTALL_INTERFACE:include/thirdparty_headers>)
+
+target_compile_options(thirdparty_headers INTERFACE -Wno-old-style-cast)
+
 option(BUILD_UNIT_TESTS "Build unit tests" ON)
 if(BUILD_UNIT_TESTS)
     set(armnnDelegate_unittest_sources)
@@ -140,7 +149,7 @@ if(BUILD_UNIT_TESTS)
     target_include_directories(DelegateUnitTests PRIVATE third-party)
 
     # Add half library from armnn third-party libraries
-    target_include_directories(DelegateUnitTests PRIVATE ${ARMNN_SOURCE_DIR}/third-party)
+    target_link_libraries(DelegateUnitTests PRIVATE thirdparty_headers)
 
     target_link_libraries(DelegateUnitTests PRIVATE armnnDelegate)
     target_link_libraries(DelegateUnitTests PRIVATE Armnn::armnnUtils)
diff --git a/delegate/src/Pad.hpp b/delegate/src/Pad.hpp
index 6149819..431b8d3 100644
@@ -98,9 +98,6 @@ TfLiteStatus VisitPadOperator(DelegateData& delegateData,
             case kTfLiteInt8:
                 descriptor.m_PadValue = tflite::GetTensorData<int8>(&tfLitepaddingValue)[0];
                 break;
-            case kTfLiteInt16:
-                descriptor.m_PadValue = tflite::GetTensorData<int16>(&tfLitepaddingValue)[0];
-                break;
             default:
                 TF_LITE_MAYBE_KERNEL_LOG(
                     tfLiteContext,
diff --git a/delegate/src/test/PadTestHelper.hpp b/delegate/src/test/PadTestHelper.hpp
index 7221ded..025d13d 100644
@@ -73,7 +73,7 @@ std::vector<char> CreatePadTfLiteModel(
     buffers.push_back(
         CreateBuffer(flatBufferBuilder,
                      flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(paddingDim.data()),
-                                                     sizeof(int32_t) * paddingDim.size())));
+                                                    sizeof(int32_t) * paddingDim.size())));
     buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
 
     std::vector<int32_t> operatorInputs;
diff --git a/delegate/src/test/RedefineTestHelper.hpp b/delegate/src/test/RedefineTestHelper.hpp
index ca8246c..6f06157 100644
@@ -136,7 +136,7 @@ void RedefineTest(tflite::BuiltinOperator redefineOperatorCode,
                   tflite::TensorType tensorType,
                   const std::vector<armnn::BackendId>& backends,
                   const std::vector<int32_t>& inputShape,
-                  const std::vector<int32_t>& outputShape,
+                  std::vector<int32_t>& outputShape,
                   std::vector<T>& inputValues,
                   std::vector<T>& expectedOutputValues,
                   std::vector<int32_t>& targetShape,
@@ -186,28 +186,7 @@ void RedefineTest(tflite::BuiltinOperator redefineOperatorCode,
     CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
     CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
 
-    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
-    auto tfLiteDelegateOutputTensor = tfLiteInterpreter->tensor(tfLiteDelegateOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
-    auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId);
-
-    CHECK(outputShape.size() == tfLiteDelegateOutputTensor->dims->size);
-    CHECK(outputShape.size() == armnnDelegateOutputTensor->dims->size);
-
-    for (size_t i = 0; i < static_cast<size_t>(tfLiteDelegateOutputTensor->dims->size); i++)
-    {
-        CHECK(outputShape[i] == armnnDelegateOutputTensor->dims->data[i]);
-        CHECK(tfLiteDelegateOutputTensor->dims->data[i] == armnnDelegateOutputTensor->dims->data[i]);
-    }
-
-    for (size_t i = 0; i < expectedOutputValues.size(); i++)
-    {
-        CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
-        CHECK(tfLiteDelegateOutputData[i] == expectedOutputValues[i]);
-        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
-    }
+    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
 }
 
 } // anonymous namespace
\ No newline at end of file
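
The hunk above collapses the per-dimension and per-element CHECKs into a single call to armnnDelegate::CompareOutputData<T>. A sketch of the generic template that call resolves to, reconstructed from the removed block (the real declaration lives in delegate/src/test/TestUtils.hpp; the outputIndex default is assumed from the four-argument call site):

    #include <doctest/doctest.h>
    #include <tensorflow/lite/interpreter.h>

    template <typename T>
    void CompareOutputData(std::unique_ptr<tflite::Interpreter>& tfLiteInterpreter,
                           std::unique_ptr<tflite::Interpreter>& armnnDelegateInterpreter,
                           std::vector<int32_t>& expectedOutputShape,
                           std::vector<T>& expectedOutputValues,
                           unsigned int outputIndex = 0)
    {
        auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
        auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
        auto tfLiteDelegateOutputTensor = tfLiteInterpreter->tensor(tfLiteDelegateOutputId);
        auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[outputIndex];
        auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
        auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId);

        // The shapes from both runs must match the expectation and each other.
        CHECK(expectedOutputShape.size() == tfLiteDelegateOutputTensor->dims->size);
        CHECK(expectedOutputShape.size() == armnnDelegateOutputTensor->dims->size);
        for (size_t i = 0; i < expectedOutputShape.size(); i++)
        {
            CHECK(expectedOutputShape[i] == armnnDelegateOutputTensor->dims->data[i]);
            CHECK(tfLiteDelegateOutputTensor->dims->data[i] == armnnDelegateOutputTensor->dims->data[i]);
        }

        // Values must match element-wise; CompareData picks the right overload per T.
        CompareData(expectedOutputValues.data(), armnnDelegateOutputData, expectedOutputValues.size());
        CompareData(tfLiteDelegateOutputData, expectedOutputValues.data(), expectedOutputValues.size());
        CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
    }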
diff --git a/delegate/src/test/ReshapeTest.cpp b/delegate/src/test/ReshapeTest.cpp
index 715fed6..11449e2 100644
 
 #include <doctest/doctest.h>
 
+#include <half/half.hpp>
+
+using Half = half_float::half;
+
 namespace armnnDelegate
 {
 
@@ -41,6 +45,34 @@ void ReshapeSimpleTest(std::vector<armnn::BackendId>& backends, bool useOption =
                         useOption);
 }
 
+using namespace half_float::literal;
+
+void ReshapeSimpleFloat16Test(std::vector<armnn::BackendId>& backends, bool useOption = true)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 3, 2, 2 };
+    std::vector<int32_t> targetShape { 1, 3, 2, 2 };
+
+    std::vector<Half> inputValues = { 5._h, -8._h, -10._h, 7._h,
+                                      8._h, 12._h, -15._h, 2._h,
+                                      3._h, -4._h, -1._h, -11._h };
+
+    std::vector<Half> expectedOutputValues = { 5._h, -8._h, -10._h, 7._h,
+                                               8._h, 12._h, -15._h, 2._h,
+                                               3._h, -4._h, -1._h, -11._h };
+
+    RedefineTest<Half>(tflite::BuiltinOperator_RESHAPE,
+                       ::tflite::TensorType_FLOAT16,
+                       backends,
+                       inputShape,
+                       outputShape,
+                       inputValues,
+                       expectedOutputValues,
+                       targetShape,
+                       useOption);
+}
+
 void ReshapeReduceDimTest(std::vector<armnn::BackendId>& backends, bool useOption = true)
 {
     // Set input data
@@ -242,6 +274,12 @@ TEST_CASE ("Reshape_Uint8_GpuAcc_Test")
     ReshapeUint8Test(backends);
 }
 
+TEST_CASE ("Reshape_Float16_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ReshapeSimpleFloat16Test(backends);
+}
+
 TEST_CASE ("Reshape_Simple_ShapeTensor_GpuAcc_Test")
 {
     std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
@@ -278,6 +316,12 @@ TEST_CASE ("Reshape_Uint8_ShapeTensor_GpuAcc_Test")
     ReshapeUint8Test(backends, false);
 }
 
+TEST_CASE ("Reshape_Float16_ShapeTensor_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ReshapeSimpleFloat16Test(backends, false);
+}
+
 } // TEST_SUITE("Reshape_GpuAccTests")
 
 TEST_SUITE("Reshape_CpuAccTests")
@@ -319,6 +363,12 @@ TEST_CASE ("Reshape_Uint8_CpuAcc_Test")
     ReshapeUint8Test(backends);
 }
 
+TEST_CASE ("Reshape_Float16_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ReshapeSimpleFloat16Test(backends);
+}
+
 TEST_CASE ("Reshape_Simple_ShapeTensor_CpuAcc_Test")
 {
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
@@ -355,6 +405,12 @@ TEST_CASE ("Reshape_Uint8_ShapeTensor_CpuAcc_Test")
     ReshapeUint8Test(backends, false);
 }
 
+TEST_CASE ("Reshape_Float16_ShapeTensor_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ReshapeSimpleFloat16Test(backends, false);
+}
+
 } // TEST_SUITE("Reshape_CpuAccTests")
 
 TEST_SUITE("Reshape_CpuRefTests")
@@ -402,6 +458,12 @@ TEST_CASE ("Reshape_Int16_CpuRef_Test")
     ReshapeInt16Test(backends);
 }
 
+TEST_CASE ("Reshape_Float16_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ReshapeSimpleFloat16Test(backends);
+}
+
 TEST_CASE ("Reshape_Simple_ShapeTensor_CpuRef_Test")
 {
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
@@ -444,6 +506,12 @@ TEST_CASE ("Reshape_Int16_ShapeTensor_CpuRef_Test")
     ReshapeInt16Test(backends, false);
 }
 
+TEST_CASE ("Reshape_Float16_ShapeTensor_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ReshapeSimpleFloat16Test(backends, false);
+}
+
 } // TEST_SUITE("Reshape_CpuRefTests")
 
 } // namespace armnnDelegate
\ No newline at end of file
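
A note on the test data above: the _h suffix is half_float's user-defined literal, pulled in by the "using namespace half_float::literal;" line, so Float16 constants can be written directly rather than cast from float. A quick standalone illustration:

    #include <half/half.hpp>

    using namespace half_float::literal;

    half_float::half value = -10._h; // a binary16 constant, no cast required
    float widened = value;           // half converts implicitly back to float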
diff --git a/delegate/src/test/TestUtils.cpp b/delegate/src/test/TestUtils.cpp
index 31c05a6..2787147 100644
@@ -8,8 +8,6 @@
 namespace armnnDelegate
 {
 
-
-
 void CompareData(bool tensor1[], bool tensor2[], size_t tensorSize)
 {
     auto compareBool = [](auto a, auto b) {return (((a == 0) && (b == 0)) || ((a != 0) && (b != 0)));};
@@ -63,4 +61,69 @@ void CompareData(int8_t tensor1[], int8_t tensor2[], size_t tensorSize)
     }
 }
 
+void CompareData(Half tensor1[], Half tensor2[], size_t tensorSize)
+{
+    for (size_t i = 0; i < tensorSize; i++)
+    {
+        CHECK(tensor1[i] == doctest::Approx(tensor2[i]));
+    }
+}
+
+void CompareData(TfLiteFloat16 tensor1[], TfLiteFloat16 tensor2[], size_t tensorSize)
+{
+    for (size_t i = 0; i < tensorSize; i++)
+    {
+        CHECK(tensor1[i].data == tensor2[i].data);
+    }
+}
+
+void CompareData(TfLiteFloat16 tensor1[], Half tensor2[], size_t tensorSize)
+{
+    for (size_t i = 0; i < tensorSize; i++)
+    {
+        CHECK(tensor1[i].data == half_float::detail::float2half<std::round_indeterminate, float>(tensor2[i]));
+    }
+}
+
+template <>
+void CompareOutputData(std::unique_ptr<tflite::Interpreter>& tfLiteInterpreter,
+                       std::unique_ptr<tflite::Interpreter>& armnnDelegateInterpreter,
+                       std::vector<int32_t>& expectedOutputShape,
+                       std::vector<Half>& expectedOutputValues,
+                       unsigned int outputIndex)
+{
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
+    auto tfLiteDelegateOutputTensor = tfLiteInterpreter->tensor(tfLiteDelegateOutputId);
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<TfLiteFloat16>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[outputIndex];
+    auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId);
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<TfLiteFloat16>(armnnDelegateOutputId);
+
+    CHECK(expectedOutputShape.size() == tfLiteDelegateOutputTensor->dims->size);
+    CHECK(expectedOutputShape.size() == armnnDelegateOutputTensor->dims->size);
+
+    for (size_t i = 0; i < expectedOutputShape.size(); i++)
+    {
+        CHECK(armnnDelegateOutputTensor->dims->data[i] == expectedOutputShape[i]);
+        CHECK(tfLiteDelegateOutputTensor->dims->data[i] == expectedOutputShape[i]);
+        CHECK(tfLiteDelegateOutputTensor->dims->data[i] == armnnDelegateOutputTensor->dims->data[i]);
+    }
+
+    armnnDelegate::CompareData(armnnDelegateOutputData, expectedOutputValues.data(), expectedOutputValues.size());
+    armnnDelegate::CompareData(tfLiteDelegateOutputData, expectedOutputValues.data(), expectedOutputValues.size());
+    armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
+}
+
+template <>
+void FillInput<Half>(std::unique_ptr<tflite::Interpreter>& interpreter, int inputIndex, std::vector<Half>& inputValues)
+{
+    auto tfLiteDelegateInputId = interpreter->inputs()[inputIndex];
+    auto tfLiteDelegateInputData = interpreter->typed_tensor<TfLiteFloat16>(tfLiteDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        tfLiteDelegateInputData[i].data = half_float::detail::float2half<std::round_indeterminate, float>(inputValues[i]);
+    }
+}
+
 } // namespace armnnDelegate
\ No newline at end of file
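
Taken together, the new specializations let a Float16 test follow the same shape as the existing ones: FillInput<Half> packs Half values into the interpreters' TfLiteFloat16 input tensors, and CompareOutputData<Half> checks both runs against the expected values, and against each other, at binary16 precision. An illustrative fragment, assuming tfLiteInterpreter and armnnDelegateInterpreter have already been built the usual way and half_float::literal is in scope:

    std::vector<Half> inputValues = { 1._h, 2._h, 3._h, 4._h };
    std::vector<Half> expectedOutputValues = inputValues;
    std::vector<int32_t> expectedOutputShape = { 1, 2, 2, 1 };

    armnnDelegate::FillInput<Half>(tfLiteInterpreter, 0, inputValues);
    armnnDelegate::FillInput<Half>(armnnDelegateInterpreter, 0, inputValues);

    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);

    armnnDelegate::CompareOutputData<Half>(tfLiteInterpreter, armnnDelegateInterpreter,
                                           expectedOutputShape, expectedOutputValues);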
diff --git a/delegate/src/test/TestUtils.hpp b/delegate/src/test/TestUtils.hpp
index b165920..ad7600d 100644
@@ -5,10 +5,15 @@
 
 #pragma once
 
+#include <tensorflow/lite/c/common.h>
 #include <tensorflow/lite/interpreter.h>
 
 #include <doctest/doctest.h>
 
+#include <half/half.hpp>
+
+using Half = half_float::half;
+
 namespace armnnDelegate
 {
 
@@ -43,6 +48,14 @@ void CompareData(uint8_t tensor1[], uint8_t tensor2[], size_t tensorSize);
 /// Can be used to compare int16_t data coming from a tflite interpreter with a tolerance of 1
 void CompareData(int16_t tensor1[], int16_t tensor2[], size_t tensorSize);
 
+/// Can be used to compare Half (Float16) data using doctest::Approx, whose default tolerance is std::numeric_limits<float>::epsilon() * 100
+void CompareData(Half tensor1[], Half tensor2[], size_t tensorSize);
+
+/// Can be used to compare TfLiteFloat16 data coming from a tflite interpreter
+void CompareData(TfLiteFloat16 tensor1[], TfLiteFloat16 tensor2[], size_t tensorSize);
+
+/// Can be used to compare Half (Float16) data and TfLiteFloat16 data coming from a tflite interpreter
+void CompareData(TfLiteFloat16 tensor1[], Half tensor2[], size_t tensorSize);
 
 /// Can be used to compare the output tensor shape and values
 /// from armnnDelegateInterpreter and tfLiteInterpreter.