src/armnnTfLiteParser/test/TransposeConv.cpp
src/armnnTfLiteParser/test/Transpose.cpp
src/armnnTfLiteParser/test/Unpack.cpp
+ src/armnnTfLiteParser/test/Unsupported.cpp
src/armnnTfLiteParser/test/LoadModel.cpp
src/armnnTfLiteParser/test/GetBuffer.cpp
src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp
#include "armnn/NetworkFwd.hpp"
#include "armnn/Tensor.hpp"
#include "armnn/INetwork.hpp"
+#include "armnn/Optional.hpp"
#include <memory>
#include <map>
class ITfLiteParser
{
public:
- static ITfLiteParser* CreateRaw();
- static ITfLiteParserPtr Create();
+ struct TfLiteParserOptions
+ {
+ TfLiteParserOptions()
+ : m_StandInLayerForUnsupported(false) {}
+
+ bool m_StandInLayerForUnsupported;
+ };
+
+ static ITfLiteParser* CreateRaw(const armnn::Optional<TfLiteParserOptions>& options = armnn::EmptyOptional());
+ static ITfLiteParserPtr Create(const armnn::Optional<TfLiteParserOptions>& options = armnn::EmptyOptional());
static void Destroy(ITfLiteParser* parser);
/// Create the network from a flatbuffers binary file on disk
} // <anonymous>
-TfLiteParser::TfLiteParser()
-: m_Network(nullptr, nullptr)
+// Options are stored for the parser's lifetime so ParseUnsupportedOperator can
+// consult m_StandInLayerForUnsupported when it meets an unregistered operator.
+TfLiteParser::TfLiteParser(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
+: m_Options(options)
+, m_Network(nullptr, nullptr)
, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
{
// register supported operators
- m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
- m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
- m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
- m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
- m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
- m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseDetectionPostProcess;
- m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
- m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
- m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParser::ParseL2Normalization;
- m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
- m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
- m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
- m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
- m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
- m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
- m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
- m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
- m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
- m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
- m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
- m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
- m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
- m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
- m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
- m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
- m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
- m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
- m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
- m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParser::ParseTranspose;
- m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv;
- m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;
+ m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D] = &TfLiteParser::ParseAveragePool2D;
+ m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND] = &TfLiteParser::ParseBatchToSpaceND;
+ m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION] = &TfLiteParser::ParseConcatenation;
+ m_ParserFunctions[tflite::BuiltinOperator_CONV_2D] = &TfLiteParser::ParseConv2D;
+ m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D] = &TfLiteParser::ParseDepthwiseConv2D;
+ // CUSTOM now routes through ParseCustomOperator, which dispatches on the
+ // operator's custom_code string (DetectionPostProcess moves to that map).
+ m_ParserFunctions[tflite::BuiltinOperator_CUSTOM] = &TfLiteParser::ParseCustomOperator;
+ m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED] = &TfLiteParser::ParseFullyConnected;
+ m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC] = &TfLiteParser::ParseLogistic;
+ m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION] = &TfLiteParser::ParseL2Normalization;
+ m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D] = &TfLiteParser::ParseMaxPool2D;
+ m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM] = &TfLiteParser::ParseMaximum;
+ m_ParserFunctions[tflite::BuiltinOperator_MINIMUM] = &TfLiteParser::ParseMinimum;
+ m_ParserFunctions[tflite::BuiltinOperator_RELU] = &TfLiteParser::ParseRelu;
+ m_ParserFunctions[tflite::BuiltinOperator_RELU6] = &TfLiteParser::ParseRelu6;
+ m_ParserFunctions[tflite::BuiltinOperator_RESHAPE] = &TfLiteParser::ParseReshape;
+ m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR] = &TfLiteParser::ParseResizeBilinear;
+ m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX] = &TfLiteParser::ParseSoftmax;
+ m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] = &TfLiteParser::ParseSpaceToBatchND;
+ m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE] = &TfLiteParser::ParseSqueeze;
+ m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE] = &TfLiteParser::ParseStridedSlice;
+ m_ParserFunctions[tflite::BuiltinOperator_SUB] = &TfLiteParser::ParseSub;
+ m_ParserFunctions[tflite::BuiltinOperator_ADD] = &TfLiteParser::ParseAdd;
+ m_ParserFunctions[tflite::BuiltinOperator_MUL] = &TfLiteParser::ParseMul;
+ m_ParserFunctions[tflite::BuiltinOperator_MEAN] = &TfLiteParser::ParseMean;
+ m_ParserFunctions[tflite::BuiltinOperator_PACK] = &TfLiteParser::ParsePack;
+ m_ParserFunctions[tflite::BuiltinOperator_PAD] = &TfLiteParser::ParsePad;
+ m_ParserFunctions[tflite::BuiltinOperator_SPLIT] = &TfLiteParser::ParseSplit;
+ m_ParserFunctions[tflite::BuiltinOperator_TANH] = &TfLiteParser::ParseTanH;
+ m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE] = &TfLiteParser::ParseTranspose;
+ m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV] = &TfLiteParser::ParseTransposeConv;
+ m_ParserFunctions[tflite::BuiltinOperator_UNPACK] = &TfLiteParser::ParseUnpack;
+
+ // register supported custom operators
+ m_CustomParserFunctions["TFLite_Detection_PostProcess"] = &TfLiteParser::ParseDetectionPostProcess;
}
void TfLiteParser::ResetParser()
tensorSlots.inputSlots.push_back(slot);
}
+// Dispatches a CUSTOM builtin operator by its custom_code string. Custom codes
+// without a registered parser fall through to ParseUnsupportedOperator.
+void TfLiteParser::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    // NOTE: By default we presume the custom operator is not supported
+    auto customParserFunction = &TfLiteParser::ParseUnsupportedOperator;
+
+    // Identify custom code defined for custom operator
+    const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
+    const auto& customCode = m_Model->operator_codes[operatorPtr->opcode_index]->custom_code;
+
+    // Find parser function that corresponds to custom code (if any)
+    auto iterator = m_CustomParserFunctions.find(customCode);
+    if (iterator != m_CustomParserFunctions.end())
+    {
+        customParserFunction = iterator->second;
+    }
+
+    // Run parser function
+    (this->*customParserFunction)(subgraphIndex, operatorIndex);
+}
+
+// Handles an operator with no registered parser function. By default it throws
+// ParseException; when m_StandInLayerForUnsupported is set, it instead inserts
+// a non-executable StandInLayer so the rest of the network can still be built.
void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
{
    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
-    //
+
    auto opcodeIndex = operatorPtr->opcode_index;
-    auto opcode = m_Model->operator_codes[opcodeIndex]->builtin_code;
+    auto opcode      = m_Model->operator_codes[opcodeIndex]->builtin_code;
-    throw ParseException(
-        boost::str(
-            boost::format("Operator not supported. "
-                          "subgraph:%1% operator:%2% "
-                          "opcode_index:%3% opcode:%4% / %5% %6%") %
-                          subgraphIndex %
-                          operatorIndex %
-                          opcodeIndex %
-                          opcode %
-                          tflite::EnumNameBuiltinOperator(opcode) %
-                          CHECK_LOCATION().AsString()));
+    if (!m_Options || !m_Options.value().m_StandInLayerForUnsupported)
+    {
+        // Do not add StandInLayer, throw ParseException instead
+        throw ParseException(
+            boost::str(
+                boost::format("Operator not supported. "
+                              "subgraph:%1% operator:%2% "
+                              "opcode_index:%3% opcode:%4% / %5% %6%") %
+                              subgraphIndex %
+                              operatorIndex %
+                              opcodeIndex %
+                              opcode %
+                              tflite::EnumNameBuiltinOperator(opcode) %
+                              CHECK_LOCATION().AsString()));
+    }
+
+    auto inputs  = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+
+    const unsigned int numInputs  = boost::numeric_cast<unsigned int>(inputs.size());
+    const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputs.size());
+
+    StandInDescriptor descriptor(numInputs, numOutputs);
+    auto layerName = boost::str(boost::format("StandIn:%1%:%2%:%3%") % subgraphIndex % operatorIndex % opcode);
+
+    // Add a non-executable StandInLayer as a placeholder for any unsupported operator
+    IConnectableLayer* layer = m_Network->AddStandInLayer(descriptor, layerName.c_str());
+    for (unsigned int i = 0u; i < numOutputs; ++i)
+    {
+        // Output infos come straight from the model; nothing is inferred here
+        layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i]));
+    }
+
+    auto inputTensorIds  = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    auto outputTensorIds = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+
+    RegisterInputSlots(subgraphIndex, operatorIndex, layer, inputTensorIds);
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
}
void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
return result;
}
-ITfLiteParser* ITfLiteParser::CreateRaw()
+// Raw factory: caller owns the returned parser and must release it via Destroy
+ITfLiteParser* ITfLiteParser::CreateRaw(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
{
-    return new TfLiteParser();
+    return new TfLiteParser(options);
}
-ITfLiteParserPtr ITfLiteParser::Create()
+// Smart-pointer factory: the returned ITfLiteParserPtr cleans up via Destroy
+ITfLiteParserPtr ITfLiteParser::Create(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
{
-    return ITfLiteParserPtr(CreateRaw(), &ITfLiteParser::Destroy);
+    return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
}
void ITfLiteParser::Destroy(ITfLiteParser* parser)
#include <schema_generated.h>
#include <functional>
+#include <unordered_map>
#include <vector>
namespace armnnTfLiteParser
/// Return the output tensor names for a given subgraph
virtual std::vector<std::string> GetSubgraphOutputTensorNames(size_t subgraphId) const override;
- TfLiteParser();
+ TfLiteParser(const armnn::Optional<ITfLiteParser::TfLiteParserOptions>& options = armnn::EmptyOptional());
virtual ~TfLiteParser() {}
public:
// signature for the parser functions
using OperatorParsingFunction = void(TfLiteParser::*)(size_t subgraphIndex, size_t operatorIndex);
+ void ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex);
void ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex);
+
void ParseActivation(size_t subgraphIndex, size_t operatorIndex, armnn::ActivationFunction activationType);
void ParseAdd(size_t subgraphIndex, size_t operatorIndex);
void ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex);
armnn::TensorInfo& tensorInfo,
armnn::Optional<armnn::PermutationVector&> permutationVector);
+ // Settings for configuring the TfLiteParser
+ armnn::Optional<ITfLiteParser::TfLiteParserOptions> m_Options;
+
/// The network we're building. Gets cleared after it is passed to the user
armnn::INetworkPtr m_Network;
- std::vector<OperatorParsingFunction> m_ParserFunctions;
ModelPtr m_Model;
+ std::vector<OperatorParsingFunction> m_ParserFunctions;
+ std::unordered_map<std::string, OperatorParsingFunction> m_CustomParserFunctions;
+
/// A mapping of an output slot to each of the input slots it should be connected to
/// The outputSlot is from the layer that creates this tensor as one of its ouputs
/// The inputSlots are from the layers that use this tensor as one of their inputs
#include <iostream>
using armnnTfLiteParser::ITfLiteParser;
-using TensorRawPtr = const tflite::TensorT *;
+using armnnTfLiteParser::ITfLiteParserPtr;
+using TensorRawPtr = const tflite::TensorT *;
struct ParserFlatbuffersFixture
{
    ParserFlatbuffersFixture() :
-        m_Parser(ITfLiteParser::Create()),
+        m_Parser(nullptr, &ITfLiteParser::Destroy),
        m_Runtime(armnn::IRuntime::Create(armnn::IRuntime::CreationOptions())),
        m_NetworkIdentifier(-1)
    {
+        // Two-phase construction: the unique_ptr is first created empty (with
+        // its deleter) so the parser can be built with options in the body.
+        // Stand-in substitution is enabled so test graphs containing
+        // unsupported operators parse into StandInLayers instead of throwing.
+        ITfLiteParser::TfLiteParserOptions options;
+        options.m_StandInLayerForUnsupported = true;
+
+        m_Parser.reset(ITfLiteParser::CreateRaw(armnn::Optional<ITfLiteParser::TfLiteParserOptions>(options)));
    }
std::vector<uint8_t> m_GraphBinary;
- std::string m_JsonString;
- std::unique_ptr<ITfLiteParser, void (*)(ITfLiteParser *parser)> m_Parser;
- armnn::IRuntimePtr m_Runtime;
- armnn::NetworkId m_NetworkIdentifier;
+ std::string m_JsonString;
+ ITfLiteParserPtr m_Parser;
+ armnn::IRuntimePtr m_Runtime;
+ armnn::NetworkId m_NetworkIdentifier;
/// If the single-input-single-output overload of Setup() is called, these will store the input and output name
/// so they don't need to be passed to the single-input-single-output overload of RunTest().
}
}
}
-}
\ No newline at end of file
+}
--- /dev/null
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <armnn/LayerVisitorBase.hpp>
+
+#include <layers/StandInLayer.hpp>
+
+#include <boost/assert.hpp>
+#include <boost/polymorphic_cast.hpp>
+#include <boost/test/unit_test.hpp>
+
+#include <sstream>
+#include <string>
+#include <vector>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+using namespace armnn;
+
+// Visitor that checks a parsed network: the unsupported operator must have
+// been replaced by a StandInLayer whose descriptor, slot counts and tensor
+// infos match the expected input/output infos supplied at construction.
+class StandInLayerVerifier : public LayerVisitorBase<VisitorThrowingPolicy>
+{
+public:
+    StandInLayerVerifier(const std::vector<TensorInfo>& inputInfos,
+                         const std::vector<TensorInfo>& outputInfos)
+        : LayerVisitorBase<VisitorThrowingPolicy>()
+        , m_InputInfos(inputInfos)
+        , m_OutputInfos(outputInfos) {}
+
+    // Input/Output layers are expected in the graph; nothing to verify.
+    // Parameter names are omitted: a named-but-unused parameter would trip
+    // -Werror=unused-parameter builds.
+    void VisitInputLayer(const IConnectableLayer*, LayerBindingId, const char*) override {}
+
+    void VisitOutputLayer(const IConnectableLayer*, LayerBindingId, const char*) override {}
+
+    void VisitStandInLayer(const IConnectableLayer* layer,
+                           const StandInDescriptor& descriptor,
+                           const char*) override
+    {
+        unsigned int numInputs = boost::numeric_cast<unsigned int>(m_InputInfos.size());
+        BOOST_CHECK(descriptor.m_NumInputs == numInputs);
+        BOOST_CHECK(layer->GetNumInputSlots() == numInputs);
+
+        unsigned int numOutputs = boost::numeric_cast<unsigned int>(m_OutputInfos.size());
+        BOOST_CHECK(descriptor.m_NumOutputs == numOutputs);
+        BOOST_CHECK(layer->GetNumOutputSlots() == numOutputs);
+
+        const StandInLayer* standInLayer = boost::polymorphic_downcast<const StandInLayer*>(layer);
+        for (unsigned int i = 0u; i < numInputs; ++i)
+        {
+            // Each input slot must be wired up and carry the expected info
+            const OutputSlot* connectedSlot = standInLayer->GetInputSlot(i).GetConnectedOutputSlot();
+            BOOST_CHECK(connectedSlot != nullptr);
+
+            const TensorInfo& inputInfo = connectedSlot->GetTensorInfo();
+            BOOST_CHECK(inputInfo == m_InputInfos[i]);
+        }
+
+        for (unsigned int i = 0u; i < numOutputs; ++i)
+        {
+            const TensorInfo& outputInfo = layer->GetOutputSlot(i).GetTensorInfo();
+            BOOST_CHECK(outputInfo == m_OutputInfos[i]);
+        }
+    }
+
+private:
+    std::vector<TensorInfo> m_InputInfos;
+    std::vector<TensorInfo> m_OutputInfos;
+};
+
+// Fixture that assembles, as a JSON model, a TfLite graph containing a single
+// custom operator ("DummyCustomOperator") for which the parser has no
+// registered function, so parsing exercises the StandInLayer fallback path.
+// RunTest() then walks the parsed network with StandInLayerVerifier.
+class DummyCustomFixture : public ParserFlatbuffersFixture
+{
+public:
+    explicit DummyCustomFixture(const std::vector<TensorInfo>& inputInfos,
+                                const std::vector<TensorInfo>& outputInfos)
+        : ParserFlatbuffersFixture()
+        , m_StandInLayerVerifier(inputInfos, outputInfos)
+    {
+        const unsigned int numInputs = boost::numeric_cast<unsigned int>(inputInfos.size());
+        BOOST_ASSERT(numInputs > 0);
+
+        const unsigned int numOutputs = boost::numeric_cast<unsigned int>(outputInfos.size());
+        BOOST_ASSERT(numOutputs > 0);
+
+        m_JsonString = R"(
+            {
+                "version": 3,
+                "operator_codes": [{
+                    "builtin_code": "CUSTOM",
+                    "custom_code": "DummyCustomOperator"
+                }],
+                "subgraphs": [ {
+                    "tensors": [)";
+
+        // Add input tensors
+        for (unsigned int i = 0u; i < numInputs; ++i)
+        {
+            const TensorInfo& inputInfo = inputInfos[i];
+            m_JsonString += R"(
+                    {
+                        "shape": )" + GetTensorShapeAsString(inputInfo.GetShape()) + R"(,
+                        "type": )" + GetDataTypeAsString(inputInfo.GetDataType()) + R"(,
+                        "buffer": 0,
+                        "name": "inputTensor)" + std::to_string(i) + R"(",
+                        "quantization": {
+                            "min": [ 0.0 ],
+                            "max": [ 255.0 ],
+                            "scale": [ )" + std::to_string(inputInfo.GetQuantizationScale()) + R"( ],
+                            "zero_point": [ )" + std::to_string(inputInfo.GetQuantizationOffset()) + R"( ],
+                        }
+                    },)";
+        }
+
+        // Add output tensors
+        for (unsigned int i = 0u; i < numOutputs; ++i)
+        {
+            const TensorInfo& outputInfo = outputInfos[i];
+            m_JsonString += R"(
+                    {
+                        "shape": )" + GetTensorShapeAsString(outputInfo.GetShape()) + R"(,
+                        "type": )" + GetDataTypeAsString(outputInfo.GetDataType()) + R"(,
+                        "buffer": 0,
+                        "name": "outputTensor)" + std::to_string(i) + R"(",
+                        "quantization": {
+                            "min": [ 0.0 ],
+                            "max": [ 255.0 ],
+                            "scale": [ )" + std::to_string(outputInfo.GetQuantizationScale()) + R"( ],
+                            "zero_point": [ )" + std::to_string(outputInfo.GetQuantizationOffset()) + R"( ],
+                        }
+                    })";
+
+            // Comma only between entries, never after the last one
+            if (i + 1 < numOutputs)
+            {
+                m_JsonString += ",";
+            }
+        }
+
+        const std::string inputIndices = GetIndicesAsString(0u, numInputs - 1u);
+        const std::string outputIndices = GetIndicesAsString(numInputs, numInputs + numOutputs - 1u);
+
+        // Add dummy custom operator
+        m_JsonString += R"(],
+                    "inputs": )" + inputIndices + R"(,
+                    "outputs": )" + outputIndices + R"(,
+                    "operators": [
+                        {
+                            "opcode_index": 0,
+                            "inputs": )" + inputIndices + R"(,
+                            "outputs": )" + outputIndices + R"(,
+                            "builtin_options_type": 0,
+                            "custom_options": [ ],
+                            "custom_options_format": "FLEXBUFFERS"
+                        }
+                    ],
+                } ],
+                "buffers" : [
+                    { },
+                    { }
+                ]
+            }
+        )";
+
+        ReadStringToBinary();
+    }
+
+    // Parses the model and verifies the resulting network via the visitor
+    void RunTest()
+    {
+        INetworkPtr network = m_Parser->CreateNetworkFromBinary(m_GraphBinary);
+        network->Accept(m_StandInLayerVerifier);
+    }
+
+private:
+    // Formats a TensorShape as a JSON array, e.g. "[ 1, 2 ]"
+    static std::string GetTensorShapeAsString(const TensorShape& tensorShape)
+    {
+        std::stringstream stream;
+        stream << "[ ";
+        for (unsigned int i = 0u; i < tensorShape.GetNumDimensions(); ++i)
+        {
+            stream << tensorShape[i];
+            if (i + 1 < tensorShape.GetNumDimensions())
+            {
+                stream << ",";
+            }
+            stream << " ";
+        }
+        stream << "]";
+
+        return stream.str();
+    }
+
+    // Maps an armnn::DataType to its TfLite schema JSON type string
+    static std::string GetDataTypeAsString(DataType dataType)
+    {
+        switch (dataType)
+        {
+            case DataType::Float32: return "FLOAT32";
+            case DataType::QuantisedAsymm8: return "UINT8";
+            default: return "UNKNOWN";
+        }
+    }
+
+    // Formats the inclusive index range [first, last] as a JSON array
+    static std::string GetIndicesAsString(unsigned int first, unsigned int last)
+    {
+        std::stringstream stream;
+        stream << "[ ";
+        for (unsigned int i = first; i <= last ; ++i)
+        {
+            stream << i;
+            if (i + 1 <= last)
+            {
+                stream << ",";
+            }
+            stream << " ";
+        }
+        stream << "]";
+
+        return stream.str();
+    }
+
+    StandInLayerVerifier m_StandInLayerVerifier;
+};
+
+// Convenience fixture: one Float32 input (1x1) and one Float32 output (2x2)
+class DummyCustom1Input1OutputFixture : public DummyCustomFixture
+{
+public:
+    DummyCustom1Input1OutputFixture()
+        : DummyCustomFixture({ TensorInfo({ 1, 1 }, DataType::Float32) },
+                             { TensorInfo({ 2, 2 }, DataType::Float32) }) {}
+};
+
+// Convenience fixture: two Float32 inputs (1x1, 2x2) and one Float32 output (3x3)
+class DummyCustom2Inputs1OutputFixture : public DummyCustomFixture
+{
+public:
+    DummyCustom2Inputs1OutputFixture()
+        : DummyCustomFixture({ TensorInfo({ 1, 1 }, DataType::Float32), TensorInfo({ 2, 2 }, DataType::Float32) },
+                             { TensorInfo({ 3, 3 }, DataType::Float32) }) {}
+};
+
+// Each case parses a model containing the unsupported custom operator and
+// checks (via StandInLayerVerifier inside RunTest) that a StandInLayer with
+// the expected slots and tensor infos was inserted in its place.
+BOOST_FIXTURE_TEST_CASE(UnsupportedCustomOperator1Input1Output, DummyCustom1Input1OutputFixture)
+{
+    RunTest();
+}
+
+BOOST_FIXTURE_TEST_CASE(UnsupportedCustomOperator2Inputs1Output, DummyCustom2Inputs1OutputFixture)
+{
+    RunTest();
+}
+
+BOOST_AUTO_TEST_SUITE_END()