src/test/Pooling2dTestHelper.hpp
src/test/QuantizationTest.cpp
src/test/QuantizationTestHelper.hpp
+ src/test/RedefineTestHelper.hpp
+ src/test/ReshapeTest.cpp
src/test/ResizeTest.cpp
src/test/ResizeTestHelper.hpp
src/test/SoftmaxTest.cpp
#include <armnn/utility/IgnoreUnused.hpp>
+#include "DelegateUtils.hpp"
+
#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
+#include <numeric>
namespace armnnDelegate
{
+TfLiteStatus CreateOutputTensorShape(const armnn::TensorInfo& inputTensorInfo,
+ const std::vector<int32_t>& targetShape,
+ armnn::ReshapeDescriptor& reshapeDesc)
+{
+ std::vector<unsigned int> outputDims(targetShape.begin(), targetShape.end());
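+ // A single -1 in the target shape marks a dimension whose size is inferred from the total number of input elements.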
+ const auto stretchDim = std::find(targetShape.begin(), targetShape.end(), -1);
+
+ if (stretchDim != targetShape.end())
+ {
+ if (std::find(std::next(stretchDim), targetShape.end(), -1) != targetShape.end())
+ {
+ // More than one stretch dimension (-1) is invalid; the caller logs the error when kTfLiteError is returned.
+ return kTfLiteError;
+ }
+
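+ // Starting the product at -1 cancels the single -1 entry in targetShape, leaving the product of the explicitly specified dimensions.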
+ auto targetNumElements =
+ armnn::numeric_cast<unsigned int>(
+ std::accumulate(targetShape.begin(), targetShape.end(), -1, std::multiplies<int32_t>()));
+
+ auto stretchIndex = static_cast<size_t>(std::distance(targetShape.begin(), stretchDim));
+ outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
+ }
+
+ armnn::TensorShape outputShape = armnn::TensorShape(static_cast<unsigned int>(outputDims.size()),
+ outputDims.data());
+ reshapeDesc.m_TargetShape = outputShape;
+ return kTfLiteOk;
+}
+
TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
TfLiteContext* tfLiteContext,
TfLiteNode* tfLiteNode,
int nodeIndex,
int32_t operatorCode)
{
- armnn::IgnoreUnused(delegateData,
- tfLiteContext,
- tfLiteNode,
- nodeIndex,
- operatorCode);
+ auto numInputs = tfLiteNode->inputs->size;
- return kTfLiteError;
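+
+ // Reshape accepts either one input (target shape given via ReshapeOptions) or two inputs (target shape supplied by a second tensor).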
+ if (numInputs == 2)
+ {
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ }
+ else
+ {
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+ }
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (IsDynamicTensor(tfLiteInputTensor0))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in "
+ "operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic output tensors are not supported in "
+ "operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+ armnn::ReshapeDescriptor reshapeDesc;
+
+ // The new shape can be defined either by a second input tensor or by a builtin option; we need to check for both.
+ if (numInputs == 2)
+ {
+ const TfLiteTensor& tfLiteShapeInputTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (IsDynamicTensor(tfLiteShapeInputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in "
+ "operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ // Get the shape data out of the input tensor
+ std::vector<int32_t> targetShape;
+ auto* shapeTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteShapeInputTensor);
+ auto shapeTensorNumValues = tfLiteShapeInputTensor.dims->data[0];
+ for (auto i = 0; i < shapeTensorNumValues; ++i)
+ {
+ targetShape.push_back(shapeTensorDataPtr[i]);
+ }
+
+ // Use the data to create the required tensor shape.
+ if (CreateOutputTensorShape(inputTensorInfo0, targetShape, reshapeDesc) != kTfLiteOk)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
+ "TfLiteArmnnDelegate: At most one component of shape can be -1 in: "
+ "operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ }
+ else if (tfLiteNode->builtin_data)
+ {
+ std::vector<int32_t> targetShape;
+ TfLiteReshapeParams* reshapeOptions =
+ reinterpret_cast<TfLiteReshapeParams*>(tfLiteNode->builtin_data);
+ for (int i = 0; i < reshapeOptions->num_dimensions; ++i)
+ {
+ targetShape.push_back(reshapeOptions->shape[i]);
+ }
+ if (CreateOutputTensorShape(inputTensorInfo0, targetShape, reshapeDesc) != kTfLiteOk)
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
+ "TfLiteArmnnDelegate: At most one component of shape can be -1 in: "
+ "operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+ }
+ else
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
+ "Target shape not defined in reshape parameters or input tensor. "
+ "At least one method required in operator #%d node #%d: ",
+ operatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ bool isSupported = false;
+ auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ tfLiteContext,
+ IsReshapeSupported,
+ delegateData.m_Backends,
+ isSupported,
+ inputTensorInfo0,
+ outInfo,
+ reshapeDesc);
+ };
+
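+ // If there is no network yet, the delegate is only being asked whether the operator is supported; validate and return.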
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+ ARMNN_ASSERT(layer != nullptr);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ // Connect
+ return Connect(layer, tfLiteNode, delegateData);
}
TfLiteStatus VisitSqueezeOperator(DelegateData& delegateData,
--- /dev/null
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+std::vector<char> CreateRedefineTfLiteModel(
+ tflite::BuiltinOperator redefineOperatorCode,
+ tflite::TensorType tensorType,
+ const std::vector<int32_t>& inputTensorShape,
+ const std::vector<int32_t>& outputTensorShape,
+ const std::vector<int32_t>& targetShape,
+ bool useOption = true,
+ float quantScale = 1.0f,
+ int quantOffset = 0)
+{
+ using namespace tflite;
+ flatbuffers::FlatBufferBuilder flatBufferBuilder;
+ std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
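+ // Two empty buffers: index 0 backs the input tensor and index 1 backs the output tensor.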
+ buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+ buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+
+ auto quantizationParameters =
+ CreateQuantizationParameters(flatBufferBuilder,
+ 0,
+ 0,
+ flatBufferBuilder.CreateVector<float>({ quantScale }),
+ flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+ auto inputTensor = CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+ inputTensorShape.size()),
+ tensorType,
+ 0,
+ flatBufferBuilder.CreateString("input"),
+ quantizationParameters);
+
+ auto outputTensor = CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+ outputTensorShape.size()),
+ tensorType,
+ 1,
+ flatBufferBuilder.CreateString("output"),
+ quantizationParameters);
+
+ std::vector<flatbuffers::Offset<Tensor>> tensors;
+ std::vector<int32_t> operatorInputs;
+ std::vector<int> subgraphInputs;
+ flatbuffers::Offset<void> operatorBuiltinOptions;
+
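+ // The target shape is passed either through ReshapeOptions (useOption == true) or as an extra int32 shape input tensor (useOption == false).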
+ if (useOption)
+ {
+ tensors = { inputTensor, outputTensor };
+ operatorInputs = {{0}};
+ subgraphInputs = {{0}};
+ operatorBuiltinOptions = CreateReshapeOptions(
+ flatBufferBuilder,
+ flatBufferBuilder.CreateVector(targetShape.data(), targetShape.size())).Union();
+ }
+ else
+ {
+ buffers.push_back(
+ CreateBuffer(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(targetShape.data()),
+ sizeof(int32_t) * targetShape.size())));
+ int32_t size = static_cast<int32_t>(targetShape.size());
+ auto shapeTensor = CreateTensor(flatBufferBuilder,
+ flatBufferBuilder.CreateVector<int32_t>( { size } ),
+ tflite::TensorType_INT32,
+ 2,
+ flatBufferBuilder.CreateString("shape"));
+ tensors = { inputTensor, outputTensor, shapeTensor };
+ operatorInputs = {{ 0, 2 }};
+ subgraphInputs = {{ 0, 2 }};
+ operatorBuiltinOptions = CreateReshapeOptions(flatBufferBuilder).Union();
+ }
+
+ // create operator
+ tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_ReshapeOptions;
+
+ const std::vector<int32_t> operatorOutputs{{1}};
+ flatbuffers::Offset <Operator> redefineOperator =
+ CreateOperator(flatBufferBuilder,
+ 0,
+ flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+ operatorBuiltinOptionsType,
+ operatorBuiltinOptions);
+
+ const std::vector<int> subgraphOutputs{{1}};
+ flatbuffers::Offset <SubGraph> subgraph =
+ CreateSubGraph(flatBufferBuilder,
+ flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+ flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+ flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+ flatBufferBuilder.CreateVector(&redefineOperator, 1));
+
+ flatbuffers::Offset <flatbuffers::String> modelDescription =
+ flatBufferBuilder.CreateString("ArmnnDelegate: Reshape Operator Model");
+ flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
+ redefineOperatorCode);
+
+ flatbuffers::Offset <Model> flatbufferModel =
+ CreateModel(flatBufferBuilder,
+ TFLITE_SCHEMA_VERSION,
+ flatBufferBuilder.CreateVector(&operatorCode, 1),
+ flatBufferBuilder.CreateVector(&subgraph, 1),
+ modelDescription,
+ flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+ flatBufferBuilder.Finish(flatbufferModel);
+
+ return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+ flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void RedefineTest(tflite::BuiltinOperator redefineOperatorCode,
+ tflite::TensorType tensorType,
+ const std::vector<armnn::BackendId>& backends,
+ const std::vector<int32_t>& inputShape,
+ const std::vector<int32_t>& outputShape,
+ std::vector<T>& inputValues,
+ std::vector<T>& expectedOutputValues,
+ std::vector<int32_t>& targetShape,
+ bool useOption = true,
+ float quantScale = 1.0f,
+ int quantOffset = 0)
+{
+ using namespace tflite;
+ std::vector<char> modelBuffer = CreateRedefineTfLiteModel(redefineOperatorCode,
+ tensorType,
+ inputShape,
+ outputShape,
+ targetShape,
+ useOption,
+ quantScale,
+ quantOffset);
+
+ const Model* tfLiteModel = GetModel(modelBuffer.data());
+ CHECK(tfLiteModel != nullptr);
+ // Create two TfLite interpreters: one to run with the Arm NN delegate and one as the reference
+ std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+ CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+ (&armnnDelegateInterpreter) == kTfLiteOk);
+ CHECK(armnnDelegateInterpreter != nullptr);
+ CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+ std::unique_ptr<Interpreter> tfLiteInterpreter;
+ CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+ (&tfLiteInterpreter) == kTfLiteOk);
+ CHECK(tfLiteInterpreter != nullptr);
+ CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+ // Create the ArmNN Delegate
+ armnnDelegate::DelegateOptions delegateOptions(backends);
+ std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+ theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+ armnnDelegate::TfLiteArmnnDelegateDelete);
+ CHECK(theArmnnDelegate != nullptr);
+ // Modify armnnDelegateInterpreter to use armnnDelegate
+ CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+ // Set input data
+ armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
+ armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
+
+ // Run inference on both interpreters
+ CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+ CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+ auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+ auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
+ auto tfLiteDelegateOutputTensor = tfLiteInterpreter->tensor(tfLiteDelegateOutputId);
+ auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+ auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
+ auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId);
+
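+ // The output shape from both interpreters should match the expected shape.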
+ CHECK(outputShape.size() == tfLiteDelegateOutputTensor->dims->size);
+ CHECK(outputShape.size() == armnnDelegateOutputTensor->dims->size);
+
+ for (size_t i = 0; i < static_cast<size_t>(tfLiteDelegateOutputTensor->dims->size); i++)
+ {
+ CHECK(outputShape[i] == armnnDelegateOutputTensor->dims->data[i]);
+ CHECK(tfLiteDelegateOutputTensor->dims->data[i] == armnnDelegateOutputTensor->dims->data[i]);
+ }
+
+ for (size_t i = 0; i < expectedOutputValues.size(); i++)
+ {
+ CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
+ CHECK(tfLiteDelegateOutputData[i] == expectedOutputValues[i]);
+ CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
+ }
+}
+
+} // anonymous namespace
\ No newline at end of file
--- /dev/null
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RedefineTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/schema/schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
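+ // Each helper below builds a small Reshape model, runs it on the reference TfLite runtime and with the Arm NN delegate, and compares the results.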
+void ReshapeSimpleTest(std::vector<armnn::BackendId>& backends, bool useOption = true)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 3, 2, 2 };
+ std::vector<int32_t> targetShape { 1, 3, 2, 2 };
+
+ std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+ 8.0f, 12.0f, -15.0f, 2.0f,
+ 3.0f, -4.0f, -1.0f, -11.0f };
+
+ std::vector<float> expectedOutputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+ 8.0f, 12.0f, -15.0f, 2.0f,
+ 3.0f, -4.0f, -1.0f, -11.0f };
+
+ RedefineTest<float>(tflite::BuiltinOperator_RESHAPE,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ targetShape,
+ useOption);
+}
+
+void ReshapeReduceDimTest(std::vector<armnn::BackendId>& backends, bool useOption = true)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 1, 4, 3 };
+ std::vector<int32_t> targetShape { 1, 4, 3 };
+
+ std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+ 8.0f, 12.0f, -15.0f, 2.0f,
+ 3.0f, -4.0f, -1.0f, -11.0f };
+
+ std::vector<float> expectedOutputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+ 8.0f, 12.0f, -15.0f, 2.0f,
+ 3.0f, -4.0f, -1.0f, -11.0f };
+
+ RedefineTest<float>(tflite::BuiltinOperator_RESHAPE,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ targetShape,
+ useOption);
+}
+
+void ReshapeFlattenTest(std::vector<armnn::BackendId>& backends, bool useOption = true)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 6, 2 };
+ std::vector<int32_t> targetShape { -1, 2 };
+
+ std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+ 8.0f, 12.0f, -15.0f, 2.0f,
+ 3.0f, -4.0f, -1.0f, -11.0f };
+
+ std::vector<float> expectedOutputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+ 8.0f, 12.0f, -15.0f, 2.0f,
+ 3.0f, -4.0f, -1.0f, -11.0f };
+
+ RedefineTest<float>(tflite::BuiltinOperator_RESHAPE,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ targetShape,
+ useOption);
+}
+
+void ReshapeFlattenAllTest(std::vector<armnn::BackendId>& backends, bool useOption = true)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 12 };
+ std::vector<int32_t> targetShape { -1 };
+
+ std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+ 8.0f, 12.0f, -15.0f, 2.0f,
+ 3.0f, -4.0f, -1.0f, -11.0f };
+
+ std::vector<float> expectedOutputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+ 8.0f, 12.0f, -15.0f, 2.0f,
+ 3.0f, -4.0f, -1.0f, -11.0f };
+
+ RedefineTest<float>(tflite::BuiltinOperator_RESHAPE,
+ ::tflite::TensorType_FLOAT32,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ targetShape,
+ useOption);
+}
+
+void ReshapeInt8Test(std::vector<armnn::BackendId>& backends, bool useOption = true)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 6, 2 };
+ std::vector<int32_t> targetShape { -1, 2 };
+
+ std::vector<int8_t> inputValues = { -5, 8, -10, 7,
+ 8, 12, -15, 2,
+ 3, -4, -1, -11 };
+
+ std::vector<int8_t> expectedOutputValues = { -5, 8, -10, 7,
+ 8, 12, -15, 2,
+ 3, -4, -1, -11 };
+
+ RedefineTest<int8_t>(tflite::BuiltinOperator_RESHAPE,
+ ::tflite::TensorType_INT8,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ targetShape,
+ useOption,
+ 2.5f,
+ 1);
+}
+
+void ReshapeUint8Test(std::vector<armnn::BackendId>& backends, bool useOption = true)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 6, 2 };
+ std::vector<int32_t> targetShape { -1, 2 };
+
+ std::vector<uint8_t> inputValues = { 5, 8, 10, 7,
+ 8, 12, 15, 2,
+ 3, 4, 1, 11 };
+
+ std::vector<uint8_t> expectedOutputValues = { 5, 8, 10, 7,
+ 8, 12, 15, 2,
+ 3, 4, 1, 11 };
+
+ RedefineTest<uint8_t>(tflite::BuiltinOperator_RESHAPE,
+ ::tflite::TensorType_UINT8,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ targetShape,
+ useOption,
+ 2.5f,
+ 1);
+}
+
+void ReshapeInt16Test(std::vector<armnn::BackendId>& backends, bool useOption = true)
+{
+ // Set input data
+ std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+ std::vector<int32_t> outputShape { 6, 2 };
+ std::vector<int32_t> targetShape { -1, 2 };
+
+ std::vector<int16_t> inputValues = { -5, 8, -10, 7,
+ 8, 12, -15, 2,
+ 3, -4, -1, -11 };
+
+ std::vector<int16_t> expectedOutputValues = { -5, 8, -10, 7,
+ 8, 12, -15, 2,
+ 3, -4, -1, -11 };
+
+ RedefineTest<int16_t>(tflite::BuiltinOperator_RESHAPE,
+ ::tflite::TensorType_INT16,
+ backends,
+ inputShape,
+ outputShape,
+ inputValues,
+ expectedOutputValues,
+ targetShape,
+ useOption,
+ 2.5f,
+ 0);
+}
+
+TEST_SUITE("Reshape_GpuAccTests")
+{
+
+TEST_CASE ("Reshape_Simple_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ ReshapeSimpleTest(backends);
+}
+
+TEST_CASE ("Reshape_ReduceDimension_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ ReshapeReduceDimTest(backends);
+}
+
+TEST_CASE ("Reshape_Flatten_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ ReshapeFlattenTest(backends);
+}
+
+TEST_CASE ("Reshape_FlattenAll_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ ReshapeFlattenAllTest(backends);
+}
+
+TEST_CASE ("Reshape_Int8_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ ReshapeInt8Test(backends);
+}
+
+TEST_CASE ("Reshape_Uint8_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ ReshapeUint8Test(backends);
+}
+
+TEST_CASE ("Reshape_Simple_ShapeTensor_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ ReshapeSimpleTest(backends, false);
+}
+
+TEST_CASE ("Reshape_ReduceDimension_ShapeTensor_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ ReshapeReduceDimTest(backends, false);
+}
+
+TEST_CASE ("Reshape_Flatten_ShapeTensor_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ ReshapeFlattenTest(backends, false);
+}
+
+TEST_CASE ("Reshape_FlattenAll_ShapeTensor_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ ReshapeFlattenAllTest(backends, false);
+}
+
+TEST_CASE ("Reshape_Int8_ShapeTensor_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ ReshapeInt8Test(backends, false);
+}
+
+TEST_CASE ("Reshape_Uint8_ShapeTensor_GpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ ReshapeUint8Test(backends, false);
+}
+
+} // TEST_SUITE("Reshape_GpuAccTests")
+
+TEST_SUITE("Reshape_CpuAccTests")
+{
+
+TEST_CASE ("Reshape_Simple_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ ReshapeSimpleTest(backends);
+}
+
+TEST_CASE ("Reshape_ReduceDimension_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ ReshapeReduceDimTest(backends);
+}
+
+TEST_CASE ("Reshape_Flatten_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ ReshapeFlattenTest(backends);
+}
+
+TEST_CASE ("Reshape_FlattenAll_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ ReshapeFlattenAllTest(backends);
+}
+
+TEST_CASE ("Reshape_Int8_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ ReshapeInt8Test(backends);
+}
+
+TEST_CASE ("Reshape_Uint8_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ ReshapeUint8Test(backends);
+}
+
+TEST_CASE ("Reshape_Simple_ShapeTensor_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ ReshapeSimpleTest(backends, false);
+}
+
+TEST_CASE ("Reshape_ReduceDimension_ShapeTensor_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ ReshapeReduceDimTest(backends, false);
+}
+
+TEST_CASE ("Reshape_Flatten_ShapeTensor_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ ReshapeFlattenTest(backends, false);
+}
+
+TEST_CASE ("Reshape_FlattenAll_ShapeTensor_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ ReshapeFlattenAllTest(backends, false);
+}
+
+TEST_CASE ("Reshape_Int8_ShapeTensor_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ ReshapeInt8Test(backends, false);
+}
+
+TEST_CASE ("Reshape_Uint8_ShapeTensor_CpuAcc_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ ReshapeUint8Test(backends, false);
+}
+
+} // TEST_SUITE("Reshape_CpuAccTests")
+
+TEST_SUITE("Reshape_CpuRefTests")
+{
+
+TEST_CASE ("Reshape_Simple_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ ReshapeSimpleTest(backends);
+}
+
+TEST_CASE ("Reshape_ReduceDimension_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ ReshapeReduceDimTest(backends);
+}
+
+TEST_CASE ("Reshape_Flatten_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ ReshapeFlattenTest(backends);
+}
+
+TEST_CASE ("Reshape_FlattenAll_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ ReshapeFlattenAllTest(backends);
+}
+
+TEST_CASE ("Reshape_Int8_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ ReshapeInt8Test(backends);
+}
+
+TEST_CASE ("Reshape_Uint8_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ ReshapeUint8Test(backends);
+}
+
+TEST_CASE ("Reshape_Int16_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ ReshapeInt16Test(backends);
+}
+
+TEST_CASE ("Reshape_Simple_ShapeTensor_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ ReshapeSimpleTest(backends, false);
+}
+
+TEST_CASE ("Reshape_ReduceDimension_ShapeTensor_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ ReshapeReduceDimTest(backends, false);
+}
+
+TEST_CASE ("Reshape_Flatten_ShapeTensor_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ ReshapeFlattenTest(backends, false);
+}
+
+TEST_CASE ("Reshape_FlattenAll_ShapeTensor_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ ReshapeFlattenAllTest(backends, false);
+}
+
+TEST_CASE ("Reshape_Int8_ShapeTensor_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ ReshapeInt8Test(backends, false);
+}
+
+TEST_CASE ("Reshape_Uint8_ShapeTensor_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ ReshapeUint8Test(backends, false);
+}
+
+TEST_CASE ("Reshape_Int16_ShapeTensor_CpuRef_Test")
+{
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ ReshapeInt16Test(backends, false);
+}
+
+} // TEST_SUITE("Reshape_CpuRefTests")
+
+} // namespace armnnDelegate
\ No newline at end of file