set(armnn_serializer_sources)
list(APPEND armnn_serializer_sources
include/armnnSerializer/ISerializer.hpp
- include/armnnDeserializeParser/IDeserializeParser.hpp
+ include/armnnDeserializer/IDeserializer.hpp
src/armnnSerializer/Schema_generated.h
src/armnnSerializer/Serializer.hpp
src/armnnSerializer/Serializer.cpp
src/armnnSerializer/SerializerUtils.hpp
src/armnnSerializer/SerializerUtils.cpp
- src/armnnDeserializeParser/DeserializeParser.hpp
- src/armnnDeserializeParser/DeserializeParser.cpp
+ src/armnnDeserializer/Deserializer.hpp
+ src/armnnDeserializer/Deserializer.cpp
)
add_library_ex(armnnSerializer SHARED ${armnn_serializer_sources})
list(APPEND unittest_sources
src/armnnSerializer/Schema_generated.h
src/armnnSerializer/test/SerializerTests.cpp
- src/armnnDeserializeParser/test/DeserializeAdd.cpp
- src/armnnDeserializeParser/test/DeserializeConvolution2d.cpp
- src/armnnDeserializeParser/test/DeserializeMultiplication.cpp
- src/armnnDeserializeParser/test/DeserializePooling2d.cpp
- src/armnnDeserializeParser/test/DeserializeReshape.cpp
- src/armnnDeserializeParser/test/ParserFlatbuffersSerializeFixture.hpp
- src/armnnDeserializeParser/test/SchemaSerialize.s
+ src/armnnDeserializer/test/DeserializeAdd.cpp
+ src/armnnDeserializer/test/DeserializeConvolution2d.cpp
+ src/armnnDeserializer/test/DeserializeMultiplication.cpp
+ src/armnnDeserializer/test/DeserializePooling2d.cpp
+ src/armnnDeserializer/test/DeserializeReshape.cpp
+ src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
+ src/armnnDeserializer/test/SchemaSerialize.s
)
- set_source_files_properties(src/armnnDeserializeParser/test/SchemaSerialize.s PROPERTIES COMPILE_FLAGS "-x assembler-with-cpp")
+ set_source_files_properties(src/armnnDeserializer/test/SchemaSerialize.s PROPERTIES COMPILE_FLAGS "-x assembler-with-cpp")
endif()
if(BUILD_ONNX_PARSER)
#include <map>
#include <vector>
-namespace armnnDeserializeParser
+namespace armnnDeserializer
{
-
using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>;
-class IDeserializeParser;
-using IDeserializeParserPtr = std::unique_ptr<IDeserializeParser, void(*)(IDeserializeParser* parser)>;
+class IDeserializer;
+using IDeserializerPtr = std::unique_ptr<IDeserializer, void(*)(IDeserializer* parser)>;
-class IDeserializeParser
+class IDeserializer
{
public:
- static IDeserializeParser* CreateRaw();
- static IDeserializeParserPtr Create();
- static void Destroy(IDeserializeParser* parser);
+ static IDeserializer* CreateRaw();
+ static IDeserializerPtr Create();
+ static void Destroy(IDeserializer* parser);
/// Create an input network from binary file contents
virtual armnn::INetworkPtr CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent) = 0;
const std::string& name) const = 0;
protected:
- virtual ~IDeserializeParser() {};
+ virtual ~IDeserializer() {};
};
-}
\ No newline at end of file
+} //namespace armnnDeserializer
\ No newline at end of file
+++ /dev/null
-# The Arm NN Deserialize parser
-
-The `armnnDeserializeParser` is a library for loading neural networks defined by Arm NN FlatBuffers files
-into the Arm NN runtime.
-
-For more information about the layers that are supported, and the networks that have been tested,
-see [DeserializeSupport.md](./DeserializeSupport.md)
\ No newline at end of file
// SPDX-License-Identifier: MIT
//
-#include "DeserializeParser.hpp"
+#include "Deserializer.hpp"
#include <armnn/ArmNN.hpp>
#include <armnn/Exceptions.hpp>
using armnn::ParseException;
using namespace armnn;
-using namespace armnn::armnnSerializer;
+using namespace armnnSerializer;
-namespace armnnDeserializeParser
+namespace armnnDeserializer
{
namespace
const uint32_t VIRTUAL_LAYER_ID = std::numeric_limits<uint32_t>::max();
- void CheckGraph(const DeserializeParser::GraphPtr& graph,
+ void CheckGraph(const Deserializer::GraphPtr& graph,
unsigned int layersIndex,
const CheckLocation& location)
{
}
}
-void CheckLayers(const DeserializeParser::GraphPtr& graph,
+void CheckLayers(const Deserializer::GraphPtr& graph,
unsigned int layersIndex,
unsigned int layerIndex,
const CheckLocation& location)
}
}
-void CheckTensorPtr(DeserializeParser::TensorRawPtr rawPtr,
+void CheckTensorPtr(Deserializer::TensorRawPtr rawPtr,
const CheckLocation& location)
{
if (rawPtr == nullptr)
}
}
-void CheckConstTensorPtr(DeserializeParser::ConstTensorRawPtr rawPtr,
+void CheckConstTensorPtr(Deserializer::ConstTensorRawPtr rawPtr,
const CheckLocation& location)
{
if (rawPtr == nullptr)
return true;
}
-DeserializeParser::DeserializeParser()
+Deserializer::Deserializer()
: m_Network(nullptr, nullptr),
//May require LayerType_Max to be included
-m_ParserFunctions(Layer_MAX+1, &DeserializeParser::ParseUnsupportedLayer)
+m_ParserFunctions(Layer_MAX+1, &Deserializer::ParseUnsupportedLayer)
{
// register supported layers
- m_ParserFunctions[Layer_AdditionLayer] = &DeserializeParser::ParseAdd;
- m_ParserFunctions[Layer_Convolution2dLayer] = &DeserializeParser::ParseConvolution2d;
- m_ParserFunctions[Layer_DepthwiseConvolution2dLayer] = &DeserializeParser::ParseDepthwiseConvolution2d;
- m_ParserFunctions[Layer_MultiplicationLayer] = &DeserializeParser::ParseMultiplication;
- m_ParserFunctions[Layer_Pooling2dLayer] = &DeserializeParser::ParsePooling2d;
- m_ParserFunctions[Layer_ReshapeLayer] = &DeserializeParser::ParseReshape;
- m_ParserFunctions[Layer_SoftmaxLayer] = &DeserializeParser::ParseSoftmax;
+ m_ParserFunctions[Layer_AdditionLayer] = &Deserializer::ParseAdd;
+ m_ParserFunctions[Layer_Convolution2dLayer] = &Deserializer::ParseConvolution2d;
+ m_ParserFunctions[Layer_DepthwiseConvolution2dLayer] = &Deserializer::ParseDepthwiseConvolution2d;
+ m_ParserFunctions[Layer_MultiplicationLayer] = &Deserializer::ParseMultiplication;
+ m_ParserFunctions[Layer_Pooling2dLayer] = &Deserializer::ParsePooling2d;
+ m_ParserFunctions[Layer_ReshapeLayer] = &Deserializer::ParseReshape;
+ m_ParserFunctions[Layer_SoftmaxLayer] = &Deserializer::ParseSoftmax;
}
-DeserializeParser::LayerBaseRawPtr DeserializeParser::GetBaseLayer(const GraphPtr& graphPtr, unsigned int layerIndex)
+Deserializer::LayerBaseRawPtr Deserializer::GetBaseLayer(const GraphPtr& graphPtr, unsigned int layerIndex)
{
auto layerType = graphPtr->layers()->Get(layerIndex)->layer_type();
}
}
-int32_t DeserializeParser::GetBindingLayerInfo(const GraphPtr& graphPtr, unsigned int layerIndex)
+int32_t Deserializer::GetBindingLayerInfo(const GraphPtr& graphPtr, unsigned int layerIndex)
{
auto layerType = graphPtr->layers()->Get(layerIndex)->layer_type();
return 0;
}
-armnn::DataLayout ToDataLayout(armnn::armnnSerializer::DataLayout dataLayout)
+armnn::DataLayout ToDataLayout(armnnSerializer::DataLayout dataLayout)
{
switch (dataLayout)
{
- case armnn::armnnSerializer::DataLayout::DataLayout_NHWC:
+ case armnnSerializer::DataLayout::DataLayout_NHWC:
return armnn::DataLayout::NHWC;
- case armnn::armnnSerializer::DataLayout::DataLayout_NCHW:
+ case armnnSerializer::DataLayout::DataLayout_NCHW:
default:
return armnn::DataLayout::NCHW;
}
}
-armnn::TensorInfo ToTensorInfo(DeserializeParser::TensorRawPtr tensorPtr)
+armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)
{
armnn::DataType type;
CHECK_TENSOR_PTR(tensorPtr);
return result;
}
-armnn::ConstTensor ToConstTensor(DeserializeParser::ConstTensorRawPtr constTensorPtr)
+armnn::ConstTensor ToConstTensor(Deserializer::ConstTensorRawPtr constTensorPtr)
{
CHECK_CONST_TENSOR_PTR(constTensorPtr);
armnn::TensorInfo tensorInfo = ToTensorInfo(constTensorPtr->info());
}
}
-DeserializeParser::LayerBaseRawPtrVector DeserializeParser::GetGraphInputs(const GraphPtr& graphPtr)
+Deserializer::LayerBaseRawPtrVector Deserializer::GetGraphInputs(const GraphPtr& graphPtr)
{
CHECK_GRAPH(graphPtr, 0);
return result;
}
-DeserializeParser::LayerBaseRawPtrVector DeserializeParser::GetGraphOutputs(const GraphPtr& graphPtr)
+Deserializer::LayerBaseRawPtrVector Deserializer::GetGraphOutputs(const GraphPtr& graphPtr)
{
CHECK_GRAPH(graphPtr, 0);
const auto& numOutputs = graphPtr->outputIds()->size();
return result;
}
-DeserializeParser::TensorRawPtrVector DeserializeParser::GetInputs(const GraphPtr& graphPtr,
+Deserializer::TensorRawPtrVector Deserializer::GetInputs(const GraphPtr& graphPtr,
unsigned int layerIndex)
{
CHECK_LAYERS(graphPtr, 0, layerIndex);
return result;
}
-DeserializeParser::TensorRawPtrVector DeserializeParser::GetOutputs(const GraphPtr& graphPtr,
+Deserializer::TensorRawPtrVector Deserializer::GetOutputs(const GraphPtr& graphPtr,
unsigned int layerIndex)
{
CHECK_LAYERS(graphPtr, 0, layerIndex);
return result;
}
-void DeserializeParser::ParseUnsupportedLayer(unsigned int layerIndex)
+void Deserializer::ParseUnsupportedLayer(unsigned int layerIndex)
{
CHECK_LAYERS(m_Graph, 0, layerIndex);
const auto layerName = GetBaseLayer(m_Graph, layerIndex)->layerName()->c_str();
CHECK_LOCATION().AsString()));
}
-void DeserializeParser::ResetParser()
+void Deserializer::ResetParser()
{
m_Network = armnn::INetworkPtr(nullptr, nullptr);
m_Graph = nullptr;
}
-IDeserializeParser* IDeserializeParser::CreateRaw()
+IDeserializer* IDeserializer::CreateRaw()
{
- return new DeserializeParser();
+ return new Deserializer();
}
-IDeserializeParserPtr IDeserializeParser::Create()
+IDeserializerPtr IDeserializer::Create()
{
- return IDeserializeParserPtr(CreateRaw(), &IDeserializeParser::Destroy);
+ return IDeserializerPtr(CreateRaw(), &IDeserializer::Destroy);
}
-void IDeserializeParser::Destroy(IDeserializeParser* parser)
+void IDeserializer::Destroy(IDeserializer* parser)
{
delete parser;
}
-INetworkPtr DeserializeParser::CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent)
+INetworkPtr Deserializer::CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent)
{
ResetParser();
m_Graph = LoadGraphFromBinary(binaryContent.data(), binaryContent.size());
return CreateNetworkFromGraph();
}
-armnn::INetworkPtr DeserializeParser::CreateNetworkFromBinary(std::istream& binaryContent)
+armnn::INetworkPtr Deserializer::CreateNetworkFromBinary(std::istream& binaryContent)
{
ResetParser();
m_Graph = LoadGraphFromBinary(binaryContent);
return CreateNetworkFromGraph();
}
-DeserializeParser::GraphPtr DeserializeParser::LoadGraphFromBinary(const uint8_t* binaryContent, size_t len)
+Deserializer::GraphPtr Deserializer::LoadGraphFromBinary(const uint8_t* binaryContent, size_t len)
{
if (binaryContent == nullptr)
{
return GetSerializedGraph(binaryContent);
}
-DeserializeParser::GraphPtr DeserializeParser::LoadGraphFromBinary(std::istream& binaryContent)
+Deserializer::GraphPtr Deserializer::LoadGraphFromBinary(std::istream& binaryContent)
{
std::string content((std::istreambuf_iterator<char>(binaryContent)), std::istreambuf_iterator<char>());
return GetSerializedGraph(content.data());
}
-INetworkPtr DeserializeParser::CreateNetworkFromGraph()
+INetworkPtr Deserializer::CreateNetworkFromGraph()
{
m_Network = INetwork::Create();
BOOST_ASSERT(m_Graph != nullptr);
return std::move(m_Network);
}
-BindingPointInfo DeserializeParser::GetNetworkInputBindingInfo(unsigned int layerIndex,
+BindingPointInfo Deserializer::GetNetworkInputBindingInfo(unsigned int layerIndex,
const std::string& name) const
{
CHECK_LAYERS(m_Graph, 0, layerIndex);
CHECK_LOCATION().AsString()));
}
-BindingPointInfo DeserializeParser::GetNetworkOutputBindingInfo(unsigned int layerIndex,
+BindingPointInfo Deserializer::GetNetworkOutputBindingInfo(unsigned int layerIndex,
const std::string& name) const
{
CHECK_LAYERS(m_Graph, 0, layerIndex);
CHECK_LOCATION().AsString()));
}
-void DeserializeParser::SetupInputLayers()
+void Deserializer::SetupInputLayers()
{
CHECK_GRAPH(m_Graph, 0);
auto inputs = GetGraphInputs(m_Graph);
}
}
-void DeserializeParser::SetupOutputLayers()
+void Deserializer::SetupOutputLayers()
{
CHECK_GRAPH(m_Graph, 0);
auto outputs = GetGraphOutputs(m_Graph);
}
}
-void DeserializeParser::RegisterOutputSlots(uint32_t layerIndex,
+void Deserializer::RegisterOutputSlots(uint32_t layerIndex,
IConnectableLayer* layer)
{
CHECK_LAYERS(m_Graph, 0, layerIndex);
}
}
-void DeserializeParser::RegisterInputSlots(uint32_t layerIndex,
+void Deserializer::RegisterInputSlots(uint32_t layerIndex,
armnn::IConnectableLayer* layer)
{
CHECK_LAYERS(m_Graph, 0, layerIndex);
}
}
-void DeserializeParser::RegisterInputSlotOfConnection(uint32_t connectionIndex,
+void Deserializer::RegisterInputSlotOfConnection(uint32_t connectionIndex,
armnn::IInputSlot* slot)
{
BOOST_ASSERT(m_GraphConnections[0].size() > connectionIndex);
slots.inputSlots.push_back(slot);
}
-void DeserializeParser::RegisterOutputSlotOfConnection(uint32_t connectionIndex,
+void Deserializer::RegisterOutputSlotOfConnection(uint32_t connectionIndex,
armnn::IOutputSlot* slot)
{
BOOST_ASSERT(m_GraphConnections[0].size() > connectionIndex);
slots.outputSlot = slot;
}
-void DeserializeParser::ParseAdd(unsigned int layerIndex)
+void Deserializer::ParseAdd(unsigned int layerIndex)
{
CHECK_LAYERS(m_Graph, 0, layerIndex);
auto inputs = GetInputs(m_Graph, layerIndex);
RegisterOutputSlots(layerIndex, layer);
}
-void DeserializeParser::ParseConvolution2d(unsigned int layerIndex)
+void Deserializer::ParseConvolution2d(unsigned int layerIndex)
{
CHECK_LAYERS(m_Graph, 0, layerIndex);
auto inputs = GetInputs(m_Graph, layerIndex);
RegisterOutputSlots(layerIndex, layer);
}
-void DeserializeParser::ParseDepthwiseConvolution2d(unsigned int layerIndex)
+void Deserializer::ParseDepthwiseConvolution2d(unsigned int layerIndex)
{
CHECK_LAYERS(m_Graph, 0, layerIndex);
auto inputs = GetInputs(m_Graph, layerIndex);
RegisterOutputSlots(layerIndex, layer);
}
-void DeserializeParser::ParseMultiplication(unsigned int layerIndex)
+void Deserializer::ParseMultiplication(unsigned int layerIndex)
{
CHECK_LAYERS(m_Graph, 0, layerIndex);
auto inputs = GetInputs(m_Graph, layerIndex);
RegisterOutputSlots(layerIndex, layer);
}
-armnn::Pooling2dDescriptor DeserializeParser::GetPoolingDescriptor(DeserializeParser::PoolingDescriptor pooling2dDesc,
+armnn::Pooling2dDescriptor Deserializer::GetPoolingDescriptor(Deserializer::PoolingDescriptor pooling2dDesc,
unsigned int layerIndex)
{
armnn::Pooling2dDescriptor desc;
return desc;
}
-void DeserializeParser::ParsePooling2d(unsigned int layerIndex)
+void Deserializer::ParsePooling2d(unsigned int layerIndex)
{
CHECK_LAYERS(m_Graph, 0, layerIndex);
RegisterOutputSlots(layerIndex, layer);
}
-armnn::TensorInfo DeserializeParser::OutputShapeOfReshape(const armnn::TensorInfo& inputTensorInfo,
+armnn::TensorInfo Deserializer::OutputShapeOfReshape(const armnn::TensorInfo& inputTensorInfo,
const std::vector<uint32_t>& targetDimsIn)
{
std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
return reshapeInfo;
}
-void DeserializeParser::ParseReshape(unsigned int layerIndex)
+void Deserializer::ParseReshape(unsigned int layerIndex)
{
CHECK_LAYERS(m_Graph, 0, layerIndex);
auto inputs = GetInputs(m_Graph, layerIndex);
const auto targetDims = m_Graph->layers()->Get(layerIndex)->layer_as_ReshapeLayer()->descriptor()->targetShape();
std::vector<uint32_t> outputDims(targetDims->begin(), targetDims->begin() + targetDims->size());
- armnn::TensorInfo reshapeOutputTensorInfo = DeserializeParser::OutputShapeOfReshape(inputTensorInfo, outputDims);
+ armnn::TensorInfo reshapeOutputTensorInfo = Deserializer::OutputShapeOfReshape(inputTensorInfo, outputDims);
const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
const std::vector<uint32_t> expectedDims(outputs[0]->dimensions()->begin(),
RegisterOutputSlots(layerIndex, layer);
}
-void DeserializeParser::ParseSoftmax(unsigned int layerIndex)
+void Deserializer::ParseSoftmax(unsigned int layerIndex)
{
CHECK_LAYERS(m_Graph, 0, layerIndex);
- DeserializeParser::TensorRawPtrVector inputs = GetInputs(m_Graph, layerIndex);
+ Deserializer::TensorRawPtrVector inputs = GetInputs(m_Graph, layerIndex);
CHECK_VALID_SIZE(inputs.size(), 1);
- DeserializeParser::TensorRawPtrVector outputs = GetOutputs(m_Graph, layerIndex);
+ Deserializer::TensorRawPtrVector outputs = GetOutputs(m_Graph, layerIndex);
CHECK_VALID_SIZE(outputs.size(), 1);
armnn::SoftmaxDescriptor descriptor;
RegisterOutputSlots(layerIndex, layer);
}
-} // namespace armnnDeserializeParser
+} // namespace armnnDeserializer
#pragma once
#include "armnn/INetwork.hpp"
-#include "armnnDeserializeParser/IDeserializeParser.hpp"
+#include "armnnDeserializer/IDeserializer.hpp"
#include <Schema_generated.h>
-namespace armnnDeserializeParser
+namespace armnnDeserializer
{
-class DeserializeParser : public IDeserializeParser
+class Deserializer : public IDeserializer
{
public:
// Shorthands for deserializer types
- using ConstTensorRawPtr = const armnn::armnnSerializer::ConstTensor *;
- using GraphPtr = const armnn::armnnSerializer::SerializedGraph *;
- using TensorRawPtr = const armnn::armnnSerializer::TensorInfo *;
- using PoolingDescriptor = const armnn::armnnSerializer::Pooling2dDescriptor *;
+ using ConstTensorRawPtr = const armnnSerializer::ConstTensor *;
+ using GraphPtr = const armnnSerializer::SerializedGraph *;
+ using TensorRawPtr = const armnnSerializer::TensorInfo *;
+ using PoolingDescriptor = const armnnSerializer::Pooling2dDescriptor *;
using TensorRawPtrVector = std::vector<TensorRawPtr>;
- using LayerRawPtr = const armnn::armnnSerializer::LayerBase *;
- using LayerBaseRawPtr = const armnn::armnnSerializer::LayerBase *;
+ using LayerRawPtr = const armnnSerializer::LayerBase *;
+ using LayerBaseRawPtr = const armnnSerializer::LayerBase *;
using LayerBaseRawPtrVector = std::vector<LayerBaseRawPtr>;
public:
/// Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name
BindingPointInfo GetNetworkOutputBindingInfo(unsigned int layerId, const std::string& name) const override;
- DeserializeParser();
- ~DeserializeParser() {}
+ Deserializer();
+ ~Deserializer() {}
public:
// testable helpers
private:
// No copying allowed until it is wanted and properly implemented
- DeserializeParser(const DeserializeParser&) = delete;
- DeserializeParser& operator=(const DeserializeParser&) = delete;
+ Deserializer(const Deserializer&) = delete;
+ Deserializer& operator=(const Deserializer&) = delete;
/// Create the network from an already loaded flatbuffers graph
armnn::INetworkPtr CreateNetworkFromGraph();
// signature for the parser functions
- using LayerParsingFunction = void(DeserializeParser::*)(unsigned int layerIndex);
+ using LayerParsingFunction = void(Deserializer::*)(unsigned int layerIndex);
void ParseUnsupportedLayer(unsigned int layerIndex);
void ParseAdd(unsigned int layerIndex);
std::vector<Connection> m_GraphConnections;
};
-}
+} //namespace armnnDeserializer
--- /dev/null
+# The Arm NN Deserializer
+
+The `armnnDeserializer` is a library for loading neural networks serialized in the Arm NN
+FlatBuffers format into the Arm NN runtime.
+
+For more information about the layers that are supported, and the networks that have been tested,
+see [DeserializerSupport.md](./DeserializerSupport.md).
\ No newline at end of file
#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersSerializeFixture.hpp"
-#include "../DeserializeParser.hpp"
+#include "../Deserializer.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(DeserializeParser)
+BOOST_AUTO_TEST_SUITE(Deserializer)
struct AddFixture : public ParserFlatbuffersSerializeFixture
{
#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersSerializeFixture.hpp"
-#include "../DeserializeParser.hpp"
+#include "../Deserializer.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(DeserializeParser)
+BOOST_AUTO_TEST_SUITE(Deserializer)
struct Convolution2dFixture : public ParserFlatbuffersSerializeFixture
{
#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersSerializeFixture.hpp"
-#include "../DeserializeParser.hpp"
+#include "../Deserializer.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(DeserializeParser)
+BOOST_AUTO_TEST_SUITE(Deserializer)
struct MultiplicationFixture : public ParserFlatbuffersSerializeFixture
{
#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersSerializeFixture.hpp"
-#include "../DeserializeParser.hpp"
+#include "../Deserializer.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(DeserializeParser)
+BOOST_AUTO_TEST_SUITE(Deserializer)
struct Pooling2dFixture : public ParserFlatbuffersSerializeFixture
{
#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersSerializeFixture.hpp"
-#include "../DeserializeParser.hpp"
+#include "../Deserializer.hpp"
#include <string>
#include <iostream>
-BOOST_AUTO_TEST_SUITE(DeserializeParser)
+BOOST_AUTO_TEST_SUITE(Deserializer)
struct ReshapeFixture : public ParserFlatbuffersSerializeFixture
{
#include "SchemaSerialize.hpp"
#include <armnn/IRuntime.hpp>
-#include <armnnDeserializeParser/IDeserializeParser.hpp>
+#include <armnnDeserializer/IDeserializer.hpp>
#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <Schema_generated.h>
-using armnnDeserializeParser::IDeserializeParser;
-using TensorRawPtr = armnn::armnnSerializer::TensorInfo*;
+using armnnDeserializer::IDeserializer;
+using TensorRawPtr = armnnSerializer::TensorInfo*;
struct ParserFlatbuffersSerializeFixture
{
ParserFlatbuffersSerializeFixture() :
- m_Parser(IDeserializeParser::Create()),
+ m_Parser(IDeserializer::Create()),
m_Runtime(armnn::IRuntime::Create(armnn::IRuntime::CreationOptions())),
m_NetworkIdentifier(-1)
{
std::vector<uint8_t> m_GraphBinary;
std::string m_JsonString;
- std::unique_ptr<IDeserializeParser, void (*)(IDeserializeParser* parser)> m_Parser;
+ std::unique_ptr<IDeserializer, void (*)(IDeserializer* parser)> m_Parser;
armnn::IRuntimePtr m_Runtime;
armnn::NetworkId m_NetworkIdentifier;
const std::map<std::string, std::vector<DataType>>& expectedOutputData);
void CheckTensors(const TensorRawPtr& tensors, size_t shapeSize, const std::vector<int32_t>& shape,
- armnn::armnnSerializer::TensorInfo tensorType, const std::string& name,
+ armnnSerializer::TensorInfo tensorType, const std::string& name,
const float scale, const int64_t zeroPoint)
{
BOOST_CHECK_EQUAL(shapeSize, tensors->dimensions()->size());
// SPDX-License-Identifier: MIT
//
-namespace armnn.armnnSerializer;
+namespace armnnSerializer;
file_identifier "ARMN";
using namespace armnn;
namespace fb = flatbuffers;
-namespace serializer = armnn::armnnSerializer;
+namespace serializer = armnnSerializer;
namespace armnnSerializer
{
}
// Build FlatBuffer for Input Layer
-void SerializerVisitor::VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name)
+void SerializerVisitor::VisitInputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
{
// Create FlatBuffer BaseLayer
auto flatBufferInputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Input);
}
// Build FlatBuffer for Output Layer
-void SerializerVisitor::VisitOutputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name)
+void SerializerVisitor::VisitOutputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
{
// Create FlatBuffer BaseLayer
auto flatBufferOutputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Output);
}
// Build FlatBuffer for Addition Layer
-void SerializerVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const char* name)
+void SerializerVisitor::VisitAdditionLayer(const armnn::IConnectableLayer* layer, const char* name)
{
// Create FlatBuffer BaseLayer
auto flatBufferAdditionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Addition);
}
// Build FlatBuffer for Convolution2dLayer
-void SerializerVisitor::VisitConvolution2dLayer(const IConnectableLayer* layer,
- const Convolution2dDescriptor& descriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
+void SerializerVisitor::VisitConvolution2dLayer(const armnn::IConnectableLayer* layer,
+ const armnn::Convolution2dDescriptor& descriptor,
+ const armnn::ConstTensor& weights,
+ const armnn::Optional<armnn::ConstTensor>& biases,
const char* name)
{
// Create FlatBuffer BaseLayer
CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_Convolution2dLayer);
}
-void SerializerVisitor::VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
- const DepthwiseConvolution2dDescriptor& descriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
+void SerializerVisitor::VisitDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
+ const armnn::DepthwiseConvolution2dDescriptor& descriptor,
+ const armnn::ConstTensor& weights,
+ const armnn::Optional<armnn::ConstTensor>& biases,
const char* name)
{
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthwiseConvolution2d);
}
// Build FlatBuffer for Multiplication Layer
-void SerializerVisitor::VisitMultiplicationLayer(const IConnectableLayer* layer, const char* name)
+void SerializerVisitor::VisitMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
{
// Create FlatBuffer BaseLayer
auto flatBufferMultiplicationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Multiplication);
}
// Build FlatBuffer for Reshape Layer
-void SerializerVisitor::VisitReshapeLayer(const IConnectableLayer* layer,
+void SerializerVisitor::VisitReshapeLayer(const armnn::IConnectableLayer* layer,
const armnn::ReshapeDescriptor& reshapeDescriptor,
const char* name)
{
}
// Build FlatBuffer for Softmax Layer
-void SerializerVisitor::VisitSoftmaxLayer(const IConnectableLayer* layer,
- const SoftmaxDescriptor& softmaxDescriptor,
+void SerializerVisitor::VisitSoftmaxLayer(const armnn::IConnectableLayer* layer,
+ const armnn::SoftmaxDescriptor& softmaxDescriptor,
const char* name)
{
// Create FlatBuffer BaseLayer
CreateAnyLayer(flatBufferSoftmaxLayer.o, serializer::Layer::Layer_SoftmaxLayer);
}
-void SerializerVisitor::VisitPooling2dLayer(const IConnectableLayer* layer,
- const Pooling2dDescriptor& pooling2dDescriptor,
+void SerializerVisitor::VisitPooling2dLayer(const armnn::IConnectableLayer* layer,
+ const armnn::Pooling2dDescriptor& pooling2dDescriptor,
const char* name)
{
auto fbPooling2dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling2d);
CreateAnyLayer(fbPooling2dLayer.o, serializer::Layer::Layer_Pooling2dLayer);
}
-fb::Offset<serializer::LayerBase> SerializerVisitor::CreateLayerBase(const IConnectableLayer* layer,
+fb::Offset<serializer::LayerBase> SerializerVisitor::CreateLayerBase(const armnn::IConnectableLayer* layer,
const serializer::LayerType layerType)
{
std::vector<fb::Offset<serializer::InputSlot>> inputSlots = CreateInputSlots(layer);
void SerializerVisitor::CreateAnyLayer(const flatbuffers::Offset<void>& layer, const serializer::Layer serializerLayer)
{
- auto anyLayer = armnn::armnnSerializer::CreateAnyLayer(m_flatBufferBuilder,
- serializerLayer,
- layer);
+ auto anyLayer = armnnSerializer::CreateAnyLayer(m_flatBufferBuilder, serializerLayer, layer);
m_serializedLayers.push_back(anyLayer);
}
return fbVector;
}
-flatbuffers::Offset<serializer::ConstTensor> SerializerVisitor::CreateConstTensorInfo(const ConstTensor& constTensor)
+flatbuffers::Offset<serializer::ConstTensor>
+ SerializerVisitor::CreateConstTensorInfo(const armnn::ConstTensor& constTensor)
{
- TensorInfo tensorInfo = constTensor.GetInfo();
+ armnn::TensorInfo tensorInfo = constTensor.GetInfo();
// Get the dimensions
std::vector<unsigned int> shape;
switch (tensorInfo.GetDataType())
{
- case DataType::Float32:
- case DataType::Signed32:
+ case armnn::DataType::Float32:
+ case armnn::DataType::Signed32:
{
auto fbVector = CreateDataVector<int32_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
flatbuffers::Offset<serializer::IntData> flatBuffersData = serializer::CreateIntData(
fbPayload = flatBuffersData.o;
break;
}
- case DataType::Float16:
+ case armnn::DataType::Float16:
{
auto fbVector = CreateDataVector<int16_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
flatbuffers::Offset<serializer::ShortData> flatBuffersData = serializer::CreateShortData(
fbPayload = flatBuffersData.o;
break;
}
- case DataType::QuantisedAsymm8:
- case DataType::Boolean:
+ case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::Boolean:
default:
{
auto fbVector = CreateDataVector<int8_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
return flatBufferConstTensor;
}
-std::vector<fb::Offset<serializer::InputSlot>> SerializerVisitor::CreateInputSlots(const IConnectableLayer* layer)
+std::vector<fb::Offset<serializer::InputSlot>>
+ SerializerVisitor::CreateInputSlots(const armnn::IConnectableLayer* layer)
{
std::vector<fb::Offset<serializer::InputSlot>> inputSlots;
return inputSlots;
}
-std::vector<fb::Offset<serializer::OutputSlot>> SerializerVisitor::CreateOutputSlots(const IConnectableLayer* layer)
+std::vector<fb::Offset<serializer::OutputSlot>>
+ SerializerVisitor::CreateOutputSlots(const armnn::IConnectableLayer* layer)
{
std::vector<fb::Offset<serializer::OutputSlot>> outputSlots;
for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
{
const IOutputSlot& outputSlot = layer->GetOutputSlot(slotIndex);
- const TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
+ const armnn::TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
// Get the dimensions
std::vector<unsigned int> shape;
return m_outputIds;
}
- std::vector<flatbuffers::Offset<armnn::armnnSerializer::AnyLayer>>& GetSerializedLayers()
+ std::vector<flatbuffers::Offset<armnnSerializer::AnyLayer>>& GetSerializedLayers()
{
return m_serializedLayers;
}
private:
/// Creates the Input Slots and Output Slots and LayerBase for the layer.
- flatbuffers::Offset<armnn::armnnSerializer::LayerBase> CreateLayerBase(
+ flatbuffers::Offset<armnnSerializer::LayerBase> CreateLayerBase(
const armnn::IConnectableLayer* layer,
- const armnn::armnnSerializer::LayerType layerType);
+ const armnnSerializer::LayerType layerType);
/// Creates the serializer AnyLayer for the layer and adds it to m_serializedLayers.
- void CreateAnyLayer(const flatbuffers::Offset<void>& layer, const armnn::armnnSerializer::Layer serializerLayer);
+ void CreateAnyLayer(const flatbuffers::Offset<void>& layer, const armnnSerializer::Layer serializerLayer);
/// Creates the serializer ConstTensor for the armnn ConstTensor.
- flatbuffers::Offset<armnn::armnnSerializer::ConstTensor> CreateConstTensorInfo(
+ flatbuffers::Offset<armnnSerializer::ConstTensor> CreateConstTensorInfo(
const armnn::ConstTensor& constTensor);
template <typename T>
uint32_t GetSerializedId(unsigned int guid);
/// Creates the serializer InputSlots for the layer.
- std::vector<flatbuffers::Offset<armnn::armnnSerializer::InputSlot>> CreateInputSlots(
+ std::vector<flatbuffers::Offset<armnnSerializer::InputSlot>> CreateInputSlots(
const armnn::IConnectableLayer* layer);
/// Creates the serializer OutputSlots for the layer.
- std::vector<flatbuffers::Offset<armnn::armnnSerializer::OutputSlot>> CreateOutputSlots(
+ std::vector<flatbuffers::Offset<armnnSerializer::OutputSlot>> CreateOutputSlots(
const armnn::IConnectableLayer* layer);
/// FlatBufferBuilder to create our layers' FlatBuffers.
flatbuffers::FlatBufferBuilder m_flatBufferBuilder;
/// AnyLayers required by the SerializedGraph.
- std::vector<flatbuffers::Offset<armnn::armnnSerializer::AnyLayer>> m_serializedLayers;
+ std::vector<flatbuffers::Offset<armnnSerializer::AnyLayer>> m_serializedLayers;
/// Guids of all Input Layers required by the SerializedGraph.
std::vector<unsigned int> m_inputIds;
{
using namespace armnn;
-namespace serializer = armnn::armnnSerializer;
-serializer::ConstTensorData GetFlatBufferConstTensorData(DataType dataType)
+armnnSerializer::ConstTensorData GetFlatBufferConstTensorData(armnn::DataType dataType)
{
switch (dataType)
{
- case DataType::Float32:
- case DataType::Signed32:
- return serializer::ConstTensorData::ConstTensorData_IntData;
- case DataType::Float16:
- return serializer::ConstTensorData::ConstTensorData_ShortData;
- case DataType::QuantisedAsymm8:
- case DataType::Boolean:
- return serializer::ConstTensorData::ConstTensorData_ByteData;
+ case armnn::DataType::Float32:
+ case armnn::DataType::Signed32:
+ return armnnSerializer::ConstTensorData::ConstTensorData_IntData;
+ case armnn::DataType::Float16:
+ return armnnSerializer::ConstTensorData::ConstTensorData_ShortData;
+ case armnn::DataType::QuantisedAsymm8:
+ case armnn::DataType::Boolean:
+ return armnnSerializer::ConstTensorData::ConstTensorData_ByteData;
default:
- return serializer::ConstTensorData::ConstTensorData_NONE;
+ return armnnSerializer::ConstTensorData::ConstTensorData_NONE;
}
}
-serializer::DataType GetFlatBufferDataType(DataType dataType)
+armnnSerializer::DataType GetFlatBufferDataType(armnn::DataType dataType)
{
switch (dataType)
{
- case DataType::Float32:
- return serializer::DataType::DataType_Float32;
- case DataType::Float16:
- return serializer::DataType::DataType_Float16;
- case DataType::Signed32:
- return serializer::DataType::DataType_Signed32;
- case DataType::QuantisedAsymm8:
- return serializer::DataType::DataType_QuantisedAsymm8;
- case DataType::Boolean:
- return serializer::DataType::DataType_Boolean;
+ case armnn::DataType::Float32:
+ return armnnSerializer::DataType::DataType_Float32;
+ case armnn::DataType::Float16:
+ return armnnSerializer::DataType::DataType_Float16;
+ case armnn::DataType::Signed32:
+ return armnnSerializer::DataType::DataType_Signed32;
+ case armnn::DataType::QuantisedAsymm8:
+ return armnnSerializer::DataType::DataType_QuantisedAsymm8;
+ case armnn::DataType::Boolean:
+ return armnnSerializer::DataType::DataType_Boolean;
default:
- return serializer::DataType::DataType_Float16;
+ return armnnSerializer::DataType::DataType_Float16;
}
}
-serializer::DataLayout GetFlatBufferDataLayout(DataLayout dataLayout)
+armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout)
{
switch (dataLayout)
{
- case DataLayout::NHWC:
- return serializer::DataLayout::DataLayout_NHWC;
- case DataLayout::NCHW:
+ case armnn::DataLayout::NHWC:
+ return armnnSerializer::DataLayout::DataLayout_NHWC;
+ case armnn::DataLayout::NCHW:
default:
- return serializer::DataLayout::DataLayout_NCHW;
+ return armnnSerializer::DataLayout::DataLayout_NCHW;
}
}
-serializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(PoolingAlgorithm poolingAlgorithm)
+armnnSerializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(armnn::PoolingAlgorithm poolingAlgorithm)
{
switch (poolingAlgorithm)
{
- case PoolingAlgorithm::Average:
- return serializer::PoolingAlgorithm::PoolingAlgorithm_Average;
- case PoolingAlgorithm::L2:
- return serializer::PoolingAlgorithm::PoolingAlgorithm_L2;
- case PoolingAlgorithm::Max:
+ case armnn::PoolingAlgorithm::Average:
+ return armnnSerializer::PoolingAlgorithm::PoolingAlgorithm_Average;
+ case armnn::PoolingAlgorithm::L2:
+ return armnnSerializer::PoolingAlgorithm::PoolingAlgorithm_L2;
+ case armnn::PoolingAlgorithm::Max:
default:
- return serializer::PoolingAlgorithm::PoolingAlgorithm_Max;
+ return armnnSerializer::PoolingAlgorithm::PoolingAlgorithm_Max;
}
}
-serializer::OutputShapeRounding GetFlatBufferOutputShapeRounding(OutputShapeRounding outputShapeRounding)
+armnnSerializer::OutputShapeRounding GetFlatBufferOutputShapeRounding(armnn::OutputShapeRounding outputShapeRounding)
{
switch (outputShapeRounding)
{
- case OutputShapeRounding::Ceiling:
- return serializer::OutputShapeRounding::OutputShapeRounding_Ceiling;
- case OutputShapeRounding::Floor:
+ case armnn::OutputShapeRounding::Ceiling:
+ return armnnSerializer::OutputShapeRounding::OutputShapeRounding_Ceiling;
+ case armnn::OutputShapeRounding::Floor:
default:
- return serializer::OutputShapeRounding::OutputShapeRounding_Floor;
+ return armnnSerializer::OutputShapeRounding::OutputShapeRounding_Floor;
}
}
-serializer::PaddingMethod GetFlatBufferPaddingMethod(PaddingMethod paddingMethod)
+armnnSerializer::PaddingMethod GetFlatBufferPaddingMethod(armnn::PaddingMethod paddingMethod)
{
switch (paddingMethod)
{
- case PaddingMethod::IgnoreValue:
- return serializer::PaddingMethod::PaddingMethod_IgnoreValue;
- case PaddingMethod::Exclude:
+ case armnn::PaddingMethod::IgnoreValue:
+ return armnnSerializer::PaddingMethod::PaddingMethod_IgnoreValue;
+ case armnn::PaddingMethod::Exclude:
default:
- return serializer::PaddingMethod::PaddingMethod_Exclude;
+ return armnnSerializer::PaddingMethod::PaddingMethod_Exclude;
}
}
namespace armnnSerializer
{
-armnn::armnnSerializer::ConstTensorData GetFlatBufferConstTensorData(armnn::DataType dataType);
+armnnSerializer::ConstTensorData GetFlatBufferConstTensorData(armnn::DataType dataType);
-armnn::armnnSerializer::DataType GetFlatBufferDataType(armnn::DataType dataType);
+armnnSerializer::DataType GetFlatBufferDataType(armnn::DataType dataType);
-armnn::armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout);
+armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout);
-armnn::armnnSerializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(armnn::PoolingAlgorithm poolingAlgorithm);
+armnnSerializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(armnn::PoolingAlgorithm poolingAlgorithm);
-armnn::armnnSerializer::OutputShapeRounding GetFlatBufferOutputShapeRounding(
+armnnSerializer::OutputShapeRounding GetFlatBufferOutputShapeRounding(
armnn::OutputShapeRounding outputShapeRounding);
-armnn::armnnSerializer::PaddingMethod GetFlatBufferPaddingMethod(armnn::PaddingMethod paddingMethod);
+armnnSerializer::PaddingMethod GetFlatBufferPaddingMethod(armnn::PaddingMethod paddingMethod);
} // namespace armnnSerializer
\ No newline at end of file
#include "../Serializer.hpp"
-#include <armnnDeserializeParser/IDeserializeParser.hpp>
+#include <armnnDeserializer/IDeserializer.hpp>
#include <random>
#include <sstream>
#include <boost/test/unit_test.hpp>
#include <flatbuffers/idl.h>
-using armnnDeserializeParser::IDeserializeParser;
+using armnnDeserializer::IDeserializer;
namespace
{
armnn::INetworkPtr DeserializeNetwork(const std::string& serializerString)
{
std::vector<std::uint8_t> const serializerVector{serializerString.begin(), serializerString.end()};
- return armnnDeserializeParser::IDeserializeParser::Create()->CreateNetworkFromBinary(serializerVector);
+ return IDeserializer::Create()->CreateNetworkFromBinary(serializerVector);
}
std::string SerializeNetwork(const armnn::INetwork& network)
#include <armnn/TypesUtils.hpp>
#if defined(ARMNN_SERIALIZER)
-#include "armnnDeserializeParser/IDeserializeParser.hpp"
+#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_CAFFE_PARSER)
#include "armnnCaffeParser/ICaffeParser.hpp"
if (modelFormat.find("armnn") != std::string::npos)
{
#if defined(ARMNN_SERIALIZER)
- return MainImpl<armnnDeserializeParser::IDeserializeParser, float>(
+ return MainImpl<armnnDeserializer::IDeserializer, float>(
modelPath.c_str(), isModelBinary, computeDevice,
inputNamesVector, inputTensorShapes,
inputTensorDataFilePathsVector, inputTypesVector,
#include <armnn/ArmNN.hpp>
#if defined(ARMNN_SERIALIZER)
-#include "armnnDeserializeParser/IDeserializeParser.hpp"
+#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_TF_LITE_PARSER)
#include <armnnTfLiteParser/ITfLiteParser.hpp>
#if defined(ARMNN_SERIALIZER)
template <>
-struct CreateNetworkImpl<armnnDeserializeParser::IDeserializeParser>
+struct CreateNetworkImpl<armnnDeserializer::IDeserializer>
{
public:
- using IParser = armnnDeserializeParser::IDeserializeParser;
+ using IParser = armnnDeserializer::IDeserializer;
using Params = InferenceModelInternal::Params;
using BindingPointInfo = InferenceModelInternal::BindingPointInfo;