2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // See LICENSE file in the project root for full license information.
5 #include "TfParser.hpp"
7 #include <armnn/INetwork.hpp>
8 #include <armnn/Utils.hpp>
9 #include <armnn/TypesUtils.hpp>
10 #include <armnn/Exceptions.hpp>
11 #include <armnn/Descriptors.hpp>
13 #include <GraphTopologicalSort.hpp>
14 #include <Permute.hpp>
15 #include <VerificationHelpers.hpp>
17 #include <google/protobuf/io/zero_copy_stream_impl.h>
18 #include <google/protobuf/text_format.h>
20 #include "tensorflow/core/framework/graph.pb.h"
21 #include "tensorflow/core/framework/node_def.pb.h"
22 #include "tensorflow/core/framework/types.pb.h"
23 #include "tensorflow/core/framework/tensor.pb.h"
24 #include "tensorflow/core/framework/tensor_shape.pb.h"
26 #include <boost/assert.hpp>
27 #include <boost/format.hpp>
28 #include <boost/core/ignore_unused.hpp>
29 #include <boost/log/trivial.hpp>
30 #include <boost/numeric/conversion/cast.hpp>
31 #include <boost/polymorphic_cast.hpp>
38 using namespace armnn;
40 namespace armnnTfParser
// Permutation vectors translating between TensorFlow's NHWC data layout and
// ArmNN's native NCHW layout (and back again for deswizzling outputs).
const PermutationVector NHWCToArmNN = { 0, 2, 3, 1 };
const PermutationVector ArmNNToNHWC = { 0, 3, 1, 2 };
48 IConnectableLayer* AddSwizzleLayer(INetwork& network, IOutputSlot& input, const PermutationVector& mapping,
49 const std::string& name)
51 // Adds swizzle layer.
52 IConnectableLayer* const layer = network.AddPermuteLayer(mapping, name.c_str());
54 // Connects intput to swizzle layer.
55 input.Connect(layer->GetInputSlot(0));
57 // Sets up swizzled output.
58 const TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mapping);
59 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
64 IConnectableLayer* SwizzleInDeswizzleOut(INetwork& network, IOutputSlot& input, IConnectableLayer& layer,
65 const std::string& name)
67 // Adds swizzle layer.
68 IConnectableLayer* const swizzleLayer = AddSwizzleLayer(network, input, NHWCToArmNN, "swizzle_for-" + name);
70 // Connects swizzledInput to layer.
71 swizzleLayer->GetOutputSlot(0).Connect(layer.GetInputSlot(0));
73 // Adds deswizzle layer.
74 IConnectableLayer* const deswizzleLayer = AddSwizzleLayer(network, layer.GetOutputSlot(0), ArmNNToNHWC,
75 "deswizzle_for-" + name);
77 return deswizzleLayer;
80 template <typename Callable>
81 void ReadMandatoryNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
82 const std::string& attribName,
83 tensorflow::AttrValue::ValueCase expectedValueCase,
86 auto iter = nodeDef.attr().find(attribName);
87 if (iter != nodeDef.attr().end())
89 const auto& attrValue = iter->second;
90 if (attrValue.value_case() == expectedValueCase)
99 "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
100 "but found %4% instead %5%")
103 % static_cast<int>(expectedValueCase)
104 % static_cast<int>(attrValue.value_case())
105 % CHECK_LOCATION().AsString()));
110 throw ParseException(
113 "Could not find required attribute %1% in node %2% %3%")
116 % CHECK_LOCATION().AsString()));
120 template <typename Callable>
121 void ReadOptionalNodeAttributeImpl(const tensorflow::NodeDef& nodeDef,
122 const std::string& attribName,
123 tensorflow::AttrValue::ValueCase expectedValueCase,
126 auto iter = nodeDef.attr().find(attribName);
127 if (iter != nodeDef.attr().end())
129 const auto& attrValue = iter->second;
130 if (attrValue.value_case() == expectedValueCase)
136 throw ParseException(
139 "Attribute %1% of node %2% expected to have %3% as tensorflow::AttrValue::ValueCase, "
140 "but found %4% instead %5%")
143 % static_cast<int>(expectedValueCase)
144 % static_cast<int>(attrValue.value_case())
145 % CHECK_LOCATION().AsString()));
150 float ReadMandatoryNodeFloatAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
152 float attribValue = 0.0f;
153 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kF,
154 [&attribValue](const tensorflow::AttrValue& attrValue)
156 attribValue = attrValue.f();
161 uint32_t ReadMandatoryNodeUint32Attribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
163 uint32_t attribValue = 0u;
164 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kI,
165 [&attribValue](const tensorflow::AttrValue& attrValue)
167 attribValue = static_cast<uint32_t>(attrValue.i());
172 std::string ReadMandatoryNodeStringAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
174 std::string attribValue = "";
175 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kS,
176 [&attribValue](const tensorflow::AttrValue& attrValue)
178 attribValue = attrValue.s();
183 std::vector<uint32_t> ReadMandatoryNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
184 const std::string& name)
186 std::vector<uint32_t> attriList;
187 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
188 [&attriList](const tensorflow::AttrValue& attrValue)
190 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
192 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
199 std::vector<uint32_t> ReadOptionalNodeUint32ListAttribute(const tensorflow::NodeDef& nodeDef,
200 const std::string& name)
202 std::vector<uint32_t> attriList;
203 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kList,
204 [&attriList](const tensorflow::AttrValue& attrValue)
206 for (int attriNum = 0; attriNum < attrValue.list().i_size(); ++attriNum)
208 attriList.push_back(static_cast<uint32_t>(attrValue.list().i(attriNum)));
215 bool ReadOptionalNodeBoolAttribute(const tensorflow::NodeDef& nodeDef,
216 const std::string& name,
217 bool defaultValue = false)
219 bool attribValue = defaultValue;
220 ReadOptionalNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kB,
221 [&attribValue](const tensorflow::AttrValue& attrValue)
223 attribValue = attrValue.b();
228 tensorflow::DataType ReadMandatoryNodeTypeAttribute(const tensorflow::NodeDef& nodeDef, const std::string& name)
230 tensorflow::DataType attribValue = tensorflow::DT_INVALID;
231 ReadMandatoryNodeAttributeImpl(nodeDef, name, tensorflow::AttrValue::kType,
232 [&attribValue](const tensorflow::AttrValue& attrValue)
234 attribValue = attrValue.type();
239 TensorInfo PrepareReshape(const TensorInfo& input, const std::vector<int32_t>& targetDims)
241 std::vector<unsigned int> outDims(targetDims.begin(), targetDims.end());
242 const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);
244 if (stretchDim != targetDims.end())
246 if (std::find(std::next(stretchDim), targetDims.end(), -1) != targetDims.end())
248 throw ParseException(
251 "At most one component of shape can be -1 %1%")
252 % CHECK_LOCATION().AsString()));
255 auto targetNumElements =
256 boost::numeric_cast<unsigned int>(
257 std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<int32_t>()));
258 auto stretchIndex = static_cast<size_t>(std::distance(targetDims.begin(), stretchDim));
259 outDims[stretchIndex] = input.GetNumElements() / targetNumElements;
262 TensorInfo reshapeInfo = input;
263 reshapeInfo.SetShape(TensorShape{ static_cast<unsigned int>(outDims.size()), outDims.data() });
268 // We need the input0Slot to guide the reshape for input1Slot.
269 IOutputSlot* BroadcastForAddandMul(IOutputSlot* input0Slot, IOutputSlot* input1Slot, bool isNHWC, INetwork& m_Network,
270 const tensorflow::NodeDef& nodeDef)
272 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
273 const TensorInfo inputTensorInfo = input0Slot->GetTensorInfo();
274 const unsigned int matchDim = inputTensorInfo.GetNumDimensions() - (isNHWC ? 1 : 3);
275 std::array<unsigned int, MaxNumOfTensorDimensions> reshapedDimensions;
276 std::fill_n(reshapedDimensions.begin(), inputTensorInfo.GetNumDimensions(), 1);
277 reshapedDimensions[matchDim] = input1Info.GetShape()[0];
279 armnn::TensorInfo reshapedInfo = input1Info;
280 reshapedInfo.SetShape(TensorShape{ inputTensorInfo.GetNumDimensions(), reshapedDimensions.data() });
282 const std::string reshapeLayerName = "reshape_for-" + nodeDef.name();
283 ReshapeDescriptor reshapeDesc;
284 reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
285 IConnectableLayer* const reshapeLayer = m_Network.AddReshapeLayer(reshapeDesc, reshapeLayerName.c_str());
287 input1Slot->Connect(reshapeLayer->GetInputSlot(0));
288 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
290 input1Slot = &reshapeLayer->GetOutputSlot(0);
295 OutputId ParseOutputId(const std::string & name)
297 unsigned int outputNum = 0;
298 size_t colonPos = name.find_last_of(":");
299 if (colonPos != std::string::npos)
301 int n = std::stoi(name.substr(colonPos+1));
304 throw ParseException(
307 "Output tensor id is out of range for %1% %2%")
309 % CHECK_LOCATION().AsString()));
311 outputNum = static_cast<unsigned int>(n);
313 return OutputId(name.substr(0,colonPos),outputNum);
// Validates that a node's data_format attribute is one of the two supported
// layouts; throws ParseException otherwise.
#define CHECK_DATA_FORMAT(NODE_DEF, FORMAT, NODE_TYPE) \
    if( FORMAT != "NHWC" && FORMAT != "NCHW" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Unsupported data format %1% passed for %2% node %3%. " \
                    "Only NHWC and NCHW supported %4%") \
                    % FORMAT \
                    % NODE_TYPE \
                    % NODE_DEF.name() \
                    % CHECK_LOCATION().AsString())); \
    }
// Validates that a node's padding attribute is one of the two supported
// TensorFlow padding schemes; throws ParseException otherwise.
#define CHECK_PADDING_TYPE(NODE_DEF, PADDING) \
    if(PADDING != "SAME" && PADDING != "VALID" ) \
    { \
        throw ParseException( \
            boost::str( \
                boost::format( \
                    "Only 'SAME' and 'VALID' padding supported. Got %1% for %2% %3%") \
                    % PADDING \
                    % NODE_DEF.name() \
                    % CHECK_LOCATION().AsString())); \
    }
344 const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_OperationNameToParsingFunctions = {
345 { "Const", &TfParser::ParseConst },
346 { "Add", &TfParser::ParseAdd },
347 { "BiasAdd", &TfParser::ParseBiasAdd },
348 { "Identity", &TfParser::ParseIdentity },
349 { "Conv2D", &TfParser::ParseConv2D },
350 { "DepthwiseConv2dNative", &TfParser::ParseDepthwiseConv2D },
351 { "FusedBatchNorm", &TfParser::ParseFusedBatchNorm },
352 { "ConcatV2", &TfParser::ParseConcat },
353 { "LRN", &TfParser::ParseLrn },
354 { "MatMul", &TfParser::ParseMatMul },
355 { "Mul", &TfParser::ParseMul },
356 { "Placeholder", &TfParser::ParsePlaceholder },
357 { "Relu", &TfParser::ParseRelu },
358 { "Relu6", &TfParser::ParseRelu6 },
359 { "Reshape", &TfParser::ParseReshape },
360 { "ResizeBilinear", &TfParser::ParseResizeBilinear },
361 { "Shape", &TfParser::ParseShape },
362 { "Squeeze", &TfParser::ParseSqueeze },
363 { "Sigmoid", &TfParser::ParseSigmoid },
364 { "Softmax", &TfParser::ParseSoftmax },
365 { "Softplus", &TfParser::ParseSoftplus },
366 { "Tanh", &TfParser::ParseTanh },
367 { "MaxPool", &TfParser::ParseMaxPool },
368 { "AvgPool", &TfParser::ParseAvgPool },
369 { "Maximum", &TfParser::ParseMaximum },
372 ITfParser* ITfParser::CreateRaw()
374 return new TfParser();
377 ITfParserPtr ITfParser::Create()
379 return ITfParserPtr(CreateRaw(), &ITfParser::Destroy);
382 void ITfParser::Destroy(ITfParser* parser)
/// Computes TensorFlow 'SAME' padding for one spatial dimension.
/// When samePadding is false both paddings are zero ('VALID' behaviour).
/// Extra padding is split evenly, with any odd remainder added at the back,
/// matching TensorFlow's convention.
inline void CalculateSamePadding(uint32_t inputSize, uint32_t stride,
                                 uint32_t filterSize, bool samePadding,
                                 uint32_t* paddingFront, uint32_t* paddingBack) {
    *paddingFront = 0;
    *paddingBack = 0;

    if (samePadding) {
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t temp = (outputSize - 1) * stride + filterSize;
        if (temp > inputSize) {
            *paddingFront = (temp - inputSize) / 2;
            *paddingBack = (temp - inputSize) - *paddingFront;
        }
    }
}
403 void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
406 CalculateSamePadding(input, stride, kernel, samePadding, &outPadHead, &outPadTail);
409 /// An Abstract base class which represents a single tensorflow operation (node)
410 /// that has been (potentially partially) converted to Armnn.
411 /// It may not yet have been fully converted into actual Armnn layers.
412 class ParsedTfOperation
415 ParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
421 virtual ~ParsedTfOperation() {};
423 const tensorflow::NodeDef& GetNode() const { return m_Node; }
425 /// Gets the ArmNN IOutputSlot corresponding to the given output index of the Tensorflow operation.
426 /// This may result in the creation of Armnn layers if this was deferred (e.g. see ParsedConstTfOperation).
427 virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) = 0;
429 /// If this operation is an Identity then this will follow return the 'parent' operation (recursively).
430 virtual ParsedTfOperation* ResolveIdentityOperations()
437 const tensorflow::NodeDef& m_Node;
440 /// An ParsedTfOperation where the Armnn equivalent is a single layer,
441 /// with output slots that correspond directly to the Tf node outputs.
442 class SingleLayerParsedTfOperation : public ParsedTfOperation
445 SingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node, IConnectableLayer* layer)
446 : ParsedTfOperation(parser, node)
451 IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
453 BOOST_ASSERT(m_Layer);
454 // Assumes one-to-one mapping between Tf and armnn output slots.
455 unsigned int armnnOutputSlotIdx = tfOutputIndex;
456 if (armnnOutputSlotIdx >= m_Layer->GetNumOutputSlots())
458 throw ParseException(
461 "The requested output slot #%1% "
462 "for %2% does not exist %3%")
465 % CHECK_LOCATION().AsString()));
467 return m_Layer->GetOutputSlot(armnnOutputSlotIdx);
471 IConnectableLayer* m_Layer;
474 /// A SingleLayerParsedTfOperation for deferred layer creation.
475 class DeferredSingleLayerParsedTfOperation : public SingleLayerParsedTfOperation
478 DeferredSingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
479 : SingleLayerParsedTfOperation(parser, node, nullptr)
483 IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
487 CreateLayerDeferred();
489 return SingleLayerParsedTfOperation::ResolveArmnnOutputSlot(tfOutputIndex);
493 virtual void CreateLayerDeferred() = 0;
498 : m_Network(nullptr, nullptr)
503 const tensorflow::NodeDef* TfParser::ResolveIdentityNode(const tensorflow::NodeDef* nodeDef)
505 if (nodeDef->op() != "Identity")
510 if (nodeDef->input_size() != 1)
512 throw ParseException(
515 "Identity node should have a single input! %1% has %2% inputs %3%")
517 % nodeDef->input_size()
518 % CHECK_LOCATION().AsString()));
521 auto it = m_NodesByName.find(nodeDef->input(0));
522 if (it != m_NodesByName.end())
524 const tensorflow::NodeDef* inputNode = it->second;
525 return ResolveIdentityNode(inputNode);
529 throw ParseException(
532 "Cannot find what the Identity node %1% is linked to! %2%")
534 % CHECK_LOCATION().AsString()));
538 std::vector<OutputOfConstNodeDef>
539 TfParser::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
541 std::vector<OutputOfConstNodeDef> ret;
543 if (nodeDef.op() == "Const")
545 // For some reason const node can have "Control Inputs". We ignore them for now.
549 ret.reserve(boost::numeric_cast<size_t>(nodeDef.input_size()));
550 for (int j = 0; j < nodeDef.input_size(); ++j)
552 OutputId outputId = ParseOutputId(nodeDef.input(j));
554 if (nodeDef.input(j)[0] == '^') // I couldn't find a better test for control inputs.
556 throw ParseException(
559 "Node '%1%' has Control Input '%2%' for input #%3% which is unsupported. %4%")
563 % CHECK_LOCATION().AsString()));
566 auto inputIt = m_NodesByName.find(outputId.m_IndexedValue);
567 if (inputIt == m_NodesByName.end())
569 throw ParseException(
572 "Can't find node '%1%', which is listed as an input of '%2%' %3%")
575 % CHECK_LOCATION().AsString()));
577 ret.push_back(OutputOfConstNodeDef(inputIt->second,outputId.m_Index));
583 std::vector<OutputOfParsedTfOperation>
584 TfParser::GetInputParsedTfOperationsChecked(const tensorflow::NodeDef& nodeDef,
585 std::size_t expectedNumInputs)
587 // Fetches the tensorflow nodes connected as inputs and validate the size.
588 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
589 const std::size_t numInputs = nodes.size();
590 if (numInputs != expectedNumInputs)
592 throw ParseException(
595 "Unexpected number of inputs for node %1%. Expected %2%, found %3% %4%")
599 % CHECK_LOCATION().AsString()));
601 // Fetches the corresponding ParsedTfOperation operations
602 std::vector<OutputOfParsedTfOperation> result;
603 for (auto&& node : nodes)
605 auto it = m_ParsedTfOperations.find(node.m_IndexedValue->name());
606 if (it == m_ParsedTfOperations.end())
608 throw ParseException(
611 "Node with name '%1%' has not been parsed %2%")
612 % node.m_IndexedValue->name()
613 % CHECK_LOCATION().AsString()));
615 ParsedTfOperation* parsedOp = it->second.get();
616 // Transparently 'skip' any Identity operations. This simplifies the logic inside the ParseXXX() functions.
617 parsedOp = parsedOp->ResolveIdentityOperations();
618 result.push_back(OutputOfParsedTfOperation(parsedOp,node.m_Index));
623 ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
625 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
627 // If one of the inputs is a MatMul and the other is a const, then we handle both nodes
628 // together as FullyConnected.
629 if (inputs[0].m_IndexedValue->GetNode().op() == "MatMul" &&
630 HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
632 IConnectableLayer* layer =
633 AddFullyConnectedLayer(inputs[0].m_IndexedValue->GetNode(),
634 &nodeDef,nodeDef.name().c_str());
635 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
637 else if (HasParsedConstTensor<float>(inputs[0].m_IndexedValue->GetNode().name()) &&
638 inputs[1].m_IndexedValue->GetNode().op() == "MatMul")
640 IConnectableLayer* layer =
641 AddFullyConnectedLayer(inputs[1].m_IndexedValue->GetNode(),
642 &nodeDef,nodeDef.name().c_str());
643 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
647 // Otherwise it's just a regular addition.
648 return AddAdditionLayer(nodeDef);
652 ParsedTfOperationPtr TfParser::ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
654 return AddAdditionLayer(nodeDef, true);
657 /// An ParsedTfOperation which forwards to another (used for Identity nodes).
658 class ParsedIdentityTfOperation : public ParsedTfOperation
661 ParsedIdentityTfOperation(TfParser* parser, const tensorflow::NodeDef& node, ParsedTfOperation* representative)
662 : ParsedTfOperation(parser, node)
663 , m_Representative(representative)
667 virtual IOutputSlot& ResolveArmnnOutputSlot(unsigned int tfOutputIndex) override
669 BOOST_ASSERT(m_Representative);
670 return m_Representative->ResolveArmnnOutputSlot(tfOutputIndex);
673 virtual ParsedTfOperation* ResolveIdentityOperations() override
675 return m_Representative->ResolveIdentityOperations();
679 ParsedTfOperation* m_Representative;
682 ParsedTfOperationPtr TfParser::ParseIdentity(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
684 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
685 // Any requests for the output slots of this node should be forwarded to the node connected as input.
686 return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
689 /// An ParsedTfOperation for a Const node.
690 /// Creation of the armnn ConstLayer is deferred until it is actually needed, because Const nodes are mostly used
691 /// for weight inputs to MatMul/Conv2D nodes and in these cases armnn doesn't need a ConstLayer.
692 template <typename T>
693 class ParsedConstTfOperation : public DeferredSingleLayerParsedTfOperation
696 ParsedConstTfOperation(TfParser* parser, const tensorflow::NodeDef& node,
697 const T* tensorData, const TensorInfo& tensorInfo)
698 : DeferredSingleLayerParsedTfOperation(parser, node),
699 m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
700 m_TensorInfo(tensorInfo)
702 BOOST_ASSERT(tensorInfo.GetDataType() == GetDataType<T>());
705 void CreateLayerDeferred() override
707 BOOST_ASSERT(m_Layer == nullptr);
708 m_Layer = m_Parser->m_Network->AddConstantLayer(ConstTensor(m_TensorInfo, m_Storage), m_Node.name().c_str());
709 m_Layer->GetOutputSlot(0).SetTensorInfo(m_TensorInfo);
712 ConstTensor GetConstTensor(bool swizzleForConvolutionWeights, std::vector<T>& outputTensorData) const
714 // Mappings from TensorFlow filter tensors to the ArmNN filter tensors.
715 // Tensorflow weights are [H, W, In, Out].
716 // ArmNN weights are [Out, In, H, W].
717 static const PermutationVector HWIOToOIHW = {2, 3, 1, 0};
719 const TensorInfo outInfo = swizzleForConvolutionWeights
720 ? armnnUtils::Permuted(m_TensorInfo, HWIOToOIHW)
723 outputTensorData.resize(m_TensorInfo.GetNumElements());
725 // Copies or swizzles from the permanent storage into the storage the caller provided.
726 if (swizzleForConvolutionWeights)
728 armnnUtils::Permute(outInfo.GetShape(), HWIOToOIHW, m_Storage.data(), outputTensorData.data());
732 memcpy(outputTensorData.data(), m_Storage.data(), m_TensorInfo.GetNumBytes());
734 // Updates the result to point to the user provided storage.
735 ConstTensor constTensor(outInfo, outputTensorData);
740 ///< Manages the lifetime of the tensor data.
741 std::vector<T> m_Storage;
742 ///< Describes the layout of the tensor and points to the data in m_Storage.
743 TensorInfo m_TensorInfo;
746 DataType ConvertTfTensorDataType(const tensorflow::DataType tfDataType,
747 const tensorflow::NodeDef& nodeDef)
751 case tensorflow::DT_FLOAT:
752 return DataType::Float32;
754 case tensorflow::DT_INT32:
755 return DataType::Signed32;
758 throw ParseException(
761 "Unknown DataType %1% for node %2% %3%")
762 % tensorflow::DataType_Name(tfDataType)
764 % CHECK_LOCATION().AsString()));
768 struct ParseTfTensorValueList
770 template<typename DataType>
772 const tensorflow::TensorProto& tfTensor,
773 unsigned int dstElements,
774 std::vector<int8_t>& outputData);
776 template <typename DataType>
777 static void ReadData(const void* srcData, unsigned int numSrcElements,
778 std::vector<int8_t>& dstData, unsigned int numDstElements)
780 // If there are no entries in the list, perform no action.
781 if (numSrcElements == 0)
786 // If no size was provided, use the length of the value list.
787 if (numDstElements == 0)
789 numDstElements = numSrcElements;
793 dstData.resize(std::max(numSrcElements, numDstElements) * sizeof(DataType));
795 const DataType* srcTensor = reinterpret_cast<const DataType*>(srcData);
796 DataType* dstTensor = reinterpret_cast<DataType*>(dstData.data());
798 // Copies the value list entries into the destination.
799 std::copy(srcTensor, srcTensor + numSrcElements, dstTensor);
801 if (numDstElements > numSrcElements)
803 // Uses the last element in the list to fill the remaining entries.
804 std::fill(dstTensor + numSrcElements, dstTensor + numDstElements, srcTensor[numSrcElements - 1]);
811 void ParseTfTensorValueList::Parse<float>(const tensorflow::TensorProto& tfTensor,
812 unsigned int dstElements, std::vector<int8_t>& outputData)
814 ReadData<float>(tfTensor.float_val().data(), static_cast<unsigned int>(tfTensor.float_val_size()),
815 outputData, dstElements);
819 void ParseTfTensorValueList::Parse<int32_t>(const tensorflow::TensorProto& tfTensor,
820 unsigned int dstElements, std::vector<int8_t>& outputData)
822 ReadData<int32_t>(tfTensor.int_val().data(), static_cast<unsigned int>(tfTensor.int_val_size()),
823 outputData, dstElements);
826 template <template<typename> class OperatorType, typename T = int8_t>
827 struct MakeTfOperation
829 template<typename DataType, class... Args>
830 inline static std::unique_ptr<OperatorType<DataType>> Parse(TfParser* parser, const tensorflow::NodeDef& node,
833 return std::make_unique<OperatorType<DataType>>(parser, node, std::forward<Args>(args)...);
838 struct MakeTfOperation<ParsedConstTfOperation>
840 template<typename DataType, class... Args>
841 inline static std::unique_ptr<ParsedConstTfOperation<DataType>> Parse(TfParser* parser,
842 const tensorflow::NodeDef& node, const std::vector<int8_t>& tensorData, const TensorInfo& tensorInfo)
844 return std::make_unique<ParsedConstTfOperation<DataType>>(parser, node,
845 reinterpret_cast<const DataType*>(tensorData.data()), tensorInfo);
849 template <class FuncType>
850 struct InvokeParseFunction
852 template<class ResType, class... Args>
853 inline static ResType Result(DataType dataType, Args&&... args)
855 if (dataType == DataType::Float32)
857 return FuncType::template Parse<float>(std::forward<Args>(args)...);
859 else if (dataType == DataType::Signed32)
861 return FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
867 template<class... Args>
868 inline static void Result(DataType dataType, Args&&... args)
870 if (dataType == DataType::Float32)
872 FuncType::template Parse<float>(std::forward<Args>(args)...);
874 else if (dataType == DataType::Signed32)
876 FuncType::template Parse<int32_t>(std::forward<Args>(args)...);
881 ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
883 BOOST_ASSERT(nodeDef.op() == "Const");
885 if (nodeDef.attr().count("value") == 0)
887 throw ParseException(
890 "Value not found for Const node - %1% %2%")
892 % CHECK_LOCATION().AsString()));
895 const tensorflow::TensorProto& tfTensor = nodeDef.attr().at("value").tensor();
896 const tensorflow::TensorShapeProto& tfTensorShape = tfTensor.tensor_shape();
897 const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "dtype");
899 const auto GetDimensionSize = [](auto& d) { return d.size(); };
901 std::vector<unsigned int> dimensionSizes;
902 std::transform(tfTensorShape.dim().begin(), tfTensorShape.dim().end(),
903 std::back_inserter(dimensionSizes), GetDimensionSize);
905 // Calculates number of elements.
906 const DataType dataType = ConvertTfTensorDataType(tfDataType, nodeDef);
907 unsigned int numElements = 0U;
909 if (!dimensionSizes.empty())
911 numElements = std::accumulate(dimensionSizes.begin(), dimensionSizes.end(),
912 1U, std::multiplies<unsigned int>());
915 std::vector<int8_t> tensorData;
917 // Get tensor data from the list of values attribute.
918 if (tfTensor.tensor_content().empty())
920 InvokeParseFunction<ParseTfTensorValueList>::Result<void>(dataType, tfTensor, numElements, tensorData);
922 // If the tensor shape is not defined, but there is a value list, then interpret the data as a 1D
923 // tensor of the provided number of elements.
924 if (numElements == 0)
926 const unsigned int tfNumElements =
927 static_cast<unsigned int>(tensorData.size()) / GetDataTypeSize(dataType);
928 dimensionSizes.push_back(tfNumElements);
931 // Gets tensor data from tensor content attribute.
934 tensorData.assign(tfTensor.tensor_content().begin(), tfTensor.tensor_content().end());
936 // Checks if a tensor shape is defined for the tensor content.
937 if (numElements == 0)
939 throw ParseException(
942 "No tensor shape found for Const node - %1% %2%")
944 % CHECK_LOCATION().AsString()));
948 // Const node requires at least a list of values or a content attribute.
949 if (tensorData.empty())
951 throw ParseException(
954 "No tensor data found for Const node - %1% %2%")
956 % CHECK_LOCATION().AsString()));
959 const TensorInfo tensorInfo(static_cast<unsigned int>(dimensionSizes.size()),
960 dimensionSizes.data(),
963 // If we have a list of values, then the length of the list must be
964 // less than or equal to the number of elements implied by the shape argument.
965 if (tensorData.size() > tensorInfo.GetNumBytes())
967 throw ParseException(
970 "Number of elements (%1%) should be less than or equal "
971 "to the number of elements implied by the shape argument (%2%) for Const node - %3% %4%")
972 % (tensorData.size() / GetDataTypeSize(dataType))
973 % tensorInfo.GetNumElements()
975 % CHECK_LOCATION().AsString()));
978 return InvokeParseFunction<MakeTfOperation<ParsedConstTfOperation>>::Result<ParsedTfOperationPtr>(
979 dataType, this, nodeDef, tensorData, tensorInfo);
982 template<typename Type>
983 bool TfParser::HasParsedConstTensor(const std::string & nodeName) const
985 auto it = m_ParsedTfOperations.find(nodeName);
986 if (it == m_ParsedTfOperations.end() ||
987 dynamic_cast<ParsedConstTfOperation<Type>*>(it->second.get()) == nullptr)
997 ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
998 const tensorflow::GraphDef& graphDef)
1000 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1001 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1002 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
1004 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1006 throw ParseException(
1009 "ArmNN only supports Convolution layers with constant weights for %1%, input %2% %3%")
1011 % inputs[1].m_IndexedValue->GetNode().name()
1012 % CHECK_LOCATION().AsString()));
1014 ParsedConstTfOperation<float>* weightNode =
1015 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1017 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
1018 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
1019 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
1021 // Read the dilations, if present - only [1,1,1,1] (the default) is supported.
1022 std::vector<uint32_t> dilations = ReadOptionalNodeUint32ListAttribute(nodeDef, "dilations");
1023 if (!dilations.empty())
1025 for (auto dilation : dilations)
1029 throw ParseException(
1032 "ArmNN only supports Convolution layers with dilations [1,1,1,1] for %1% %2%")
1034 % CHECK_LOCATION().AsString()));
1039 Convolution2dDescriptor desc;
1040 desc.m_BiasEnabled = false;
1042 CHECK_DATA_FORMAT(nodeDef, dataFormat, "Conv2D");
1044 if (dataFormat == "NHWC")
1046 desc.m_StrideX = strides[2];
1047 desc.m_StrideY = strides[1];
1048 // Swizzles input to supported memory layout.
1049 inputTensorInfo = armnnUtils::Permuted(inputSlot.GetTensorInfo(), NHWCToArmNN);
1051 else if (dataFormat == "NCHW")
1053 desc.m_StrideX = strides[3];
1054 desc.m_StrideY = strides[2];
1057 uint32_t inputHeight = inputTensorInfo.GetShape()[2];
1058 uint32_t inputWidth = inputTensorInfo.GetShape()[3];
1060 std::vector<float> outputTensorData;
1062 ConstTensor weightTensor = weightNode->GetConstTensor(true, outputTensorData);
1064 uint32_t weightHeight = weightTensor.GetShape()[2];
1065 uint32_t weightWidth = weightTensor.GetShape()[3];
1067 bool padding = false;
1068 TensorInfo outputInfo;
1070 CHECK_PADDING_TYPE(nodeDef, paddingString);
1072 if (paddingString == "SAME")
1075 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1076 weightTensor.GetShape()[0],
1077 static_cast<uint32_t>(ceil(
1078 static_cast<float>(inputHeight) /
1079 static_cast<float>(desc.m_StrideY))),
1080 static_cast<uint32_t>(ceil(
1081 static_cast<float>(inputWidth) /
1082 static_cast<float>(desc.m_StrideX)))
1083 }, DataType::Float32);
1085 else if (paddingString == "VALID")
1088 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1089 weightTensor.GetShape()[0],
1090 static_cast<uint32_t>(ceil(
1091 static_cast<float>(inputHeight - weightHeight + 1) /
1092 static_cast<float>(desc.m_StrideY))),
1093 static_cast<uint32_t>(ceil(
1094 static_cast<float>(inputWidth - weightWidth + 1) /
1095 static_cast<float>(desc.m_StrideX)))
1096 }, DataType::Float32);
1099 CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
1100 CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);
1102 IConnectableLayer* layer = m_Network->AddConvolution2dLayer(desc, weightTensor, nodeDef.name().c_str());
1103 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1105 if (dataFormat == "NHWC")
1107 layer = SwizzleInDeswizzleOut(*m_Network, inputSlot, *layer, nodeDef.name());
1111 inputSlot.Connect(layer->GetInputSlot(0));
1114 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
// Parses a TensorFlow DepthwiseConv2dNative node into an ArmNN DepthwiseConvolution2d layer.
// Inputs: [0] the data tensor, [1] the weights (must be a parse-time constant, else throws).
// Supports "NHWC" (input swizzled to ArmNN's NCHW layout) and "NCHW" data formats.
1117 ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
1118 const tensorflow::GraphDef& graphDef)
1120 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1121 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1122 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
// Weights must be known at parse time so they can be packaged into the ArmNN layer.
1124 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1126 throw ParseException(
1129 "ArmNN only supports Depthwise Convolution layer with constant weights. "
1130 "Non const input found %1% for node %2% %3%")
1131 % inputs[1].m_IndexedValue->GetNode().name()
1133 % CHECK_LOCATION().AsString()));
1135 ParsedConstTfOperation<float>* weightNode =
1136 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1139 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
1140 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
1141 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
1143 DepthwiseConvolution2dDescriptor desc;
1144 desc.m_BiasEnabled = false;
1146 CHECK_DATA_FORMAT(nodeDef, dataFormat, "DepthwiseConv2dNative");
// Strides arrive in the node's data_format order; pick out X/Y accordingly.
1148 if (dataFormat == "NHWC")
1150 desc.m_StrideX = strides[2];
1151 desc.m_StrideY = strides[1];
1152 // Swizzles input to supported memory layout.
1153 inputTensorInfo = armnnUtils::Permuted(inputSlot.GetTensorInfo(), NHWCToArmNN);
1155 else if (dataFormat == "NCHW")
1157 desc.m_StrideX = strides[3];
1158 desc.m_StrideY = strides[2];
// From here on inputTensorInfo is NCHW, so [2] is height and [3] is width.
1161 uint32_t inputHeight = inputTensorInfo.GetShape()[2];
1162 uint32_t inputWidth = inputTensorInfo.GetShape()[3];
1164 std::vector<float> outputTensorData;
// NOTE(review): the 'true' presumably asks GetConstTensor to permute the weights into
// ArmNN's ordering (spatial dims end up at [2]/[3] below) — confirm against its definition.
1166 ConstTensor weightTensor = weightNode->GetConstTensor(true, outputTensorData);
1168 uint32_t weightHeight = weightTensor.GetShape()[2];
1169 uint32_t weightWidth = weightTensor.GetShape()[3];
1171 bool padding = false;
1172 TensorInfo outputInfo;
1174 CHECK_PADDING_TYPE(nodeDef, paddingString);
// NOTE(review): `padding` is presumably assigned inside the SAME/VALID branches on lines
// not visible here before the CalcPadding calls — confirm.
// SAME padding: output spatial dims = ceil(input / stride).
// Output channel count is the product of the first two weight dimensions
// (channel multiplier * input channels for a depthwise convolution).
1176 if (paddingString == "SAME")
1179 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1180 weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
1181 static_cast<uint32_t>(ceil(
1182 static_cast<float>(inputHeight) /
1183 static_cast<float>(desc.m_StrideY))),
1184 static_cast<uint32_t>(ceil(
1185 static_cast<float>(inputWidth) /
1186 static_cast<float>(desc.m_StrideX)))
1187 }, DataType::Float32);
// VALID padding: output spatial dims = ceil((input - kernel + 1) / stride).
1189 else if (paddingString == "VALID")
1192 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
1193 weightTensor.GetShape()[0] * weightTensor.GetShape()[1],
1194 static_cast<uint32_t>(ceil(
1195 static_cast<float>(inputHeight - weightHeight + 1) /
1196 static_cast<float>(desc.m_StrideY))),
1197 static_cast<uint32_t>(ceil(
1198 static_cast<float>(inputWidth - weightWidth + 1) /
1199 static_cast<float>(desc.m_StrideX)))
1200 }, DataType::Float32);
1203 CalcPadding(inputHeight, weightHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, padding);
1204 CalcPadding(inputWidth, weightWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, padding);
1206 IConnectableLayer* layer = m_Network->AddDepthwiseConvolution2dLayer(desc, weightTensor, nodeDef.name().c_str());
1207 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
// NHWC graphs need the data swizzled into NCHW before the layer and back after it;
// NCHW graphs connect directly.
1209 if (dataFormat == "NHWC")
1211 layer = SwizzleInDeswizzleOut(*m_Network, inputSlot, *layer, nodeDef.name());
1215 inputSlot.Connect(layer->GetInputSlot(0));
1218 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
// Parses a TensorFlow FusedBatchNorm node into an ArmNN BatchNormalization layer.
// Inputs: [0] data, [1] scale (gamma), [2] offset (beta), [3] mean, [4] variance.
// All four statistics tensors must be parse-time constants; otherwise a ParseException
// is thrown identifying the offending input.
1221 ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
1222 const tensorflow::GraphDef& graphDef)
1224 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);
1226 if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
1228 throw ParseException(
1231 "ArmNN only supports FusedBatchNormalization layers with constant scale. "
1232 "Input %1%. Node %2% %3%")
1233 % inputs[1].m_IndexedValue->GetNode().name()
1235 % CHECK_LOCATION().AsString()));
1237 ParsedConstTfOperation<float>* scaleNode =
1238 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[1].m_IndexedValue);
1240 if (!HasParsedConstTensor<float>(inputs[2].m_IndexedValue->GetNode().name()))
1242 throw ParseException(
1245 "ArmNN only supports FusedBatchNormalization layers with constant offset. "
1246 "Input %1%. Node %2% %3%")
1247 % inputs[2].m_IndexedValue->GetNode().name()
1249 % CHECK_LOCATION().AsString()));
1251 ParsedConstTfOperation<float>* offsetNode =
1252 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[2].m_IndexedValue);
1254 if (!HasParsedConstTensor<float>(inputs[3].m_IndexedValue->GetNode().name()))
1256 throw ParseException(
1259 "ArmNN only supports FusedBatchNormalization layers with constant mean. "
1260 "Input %1%. Node %2% %3%")
1261 % inputs[3].m_IndexedValue->GetNode().name()
1263 % CHECK_LOCATION().AsString()));
1265 ParsedConstTfOperation<float>* meanNode =
1266 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[3].m_IndexedValue);
1268 if (!HasParsedConstTensor<float>(inputs[4].m_IndexedValue->GetNode().name()))
1270 throw ParseException(
1273 "ArmNN only supports FusedBatchNormalization layers with constant variance. "
1274 "Input %1%. Node %2% %3%")
1275 % inputs[4].m_IndexedValue->GetNode().name()
1277 % CHECK_LOCATION().AsString()));
1279 ParsedConstTfOperation<float>* varianceNode =
1280 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(inputs[4].m_IndexedValue);
1282 // The descriptor only has the epsilon attribute.
1283 BatchNormalizationDescriptor desc;
1284 desc.m_Eps = ReadMandatoryNodeFloatAttribute(nodeDef, "epsilon");
1286 // Data for the parsed tensor args (scale, offset, mean, variance) must be stored
1287 // locally until the layer is added.
1288 std::vector<float> scaleTensorData;
1289 ConstTensor scaleTensor = scaleNode->GetConstTensor(false, scaleTensorData);
1291 std::vector<float> offsetTensorData;
1292 ConstTensor offsetTensor = offsetNode->GetConstTensor(false, offsetTensorData);
1294 std::vector<float> meanTensorData;
1295 ConstTensor meanTensor = meanNode->GetConstTensor(false, meanTensorData);
1297 std::vector<float> varianceTensorData;
1298 ConstTensor varianceTensor = varianceNode->GetConstTensor(false, varianceTensorData);
// The statistics tensors are passed to the layer on argument lines elided from this view.
1300 IConnectableLayer* layer = m_Network->AddBatchNormalizationLayer(desc,
1305 nodeDef.name().c_str());
1307 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1309 const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
// NHWC input is swizzled to ArmNN's layout around the layer; NCHW connects directly
// and keeps the producer's tensor info on the output slot.
1311 if (dataFormat == "NHWC")
1313 const TensorInfo outputTensorInfo = armnnUtils::Permuted(inputSlot.GetTensorInfo(), NHWCToArmNN);
1314 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1315 layer = SwizzleInDeswizzleOut(*m_Network, inputSlot, *layer, nodeDef.name());
1319 layer->GetOutputSlot(0).SetTensorInfo(inputSlot.GetTensorInfo());
1320 inputSlot.Connect(layer->GetInputSlot(0));
1323 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
// Checks whether mulNodeDef is the mul(alpha, x) half of a leakyrelu = max(mul(alpha, x), x)
// pattern. alphaLayerIndex selects which of the Mul's two inputs is expected to be the
// scalar constant alpha; the other input must be the same node as otherOp. On a match,
// fills desc with LeakyReLu parameters (m_A = alpha) and points *outputOfLeakyRelu at
// the x operand's ArmNN output slot.
1326 bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef,
1327 size_t alphaLayerIndex,
1328 const OutputOfParsedTfOperation& otherOp,
1329 armnn::IOutputSlot** outputOfLeakyRelu,
1330 armnn::ActivationDescriptor & desc)
1332 const tensorflow::NodeDef& otherNodeDef = otherOp.m_IndexedValue->GetNode();
1334 // Verifying all these assumptions hold:
1336 // 1, the mulNodeDef is an elementwise multiplication node "Mul"
1337 // 2, the alphaLayerIndex selects a constant node from the inputs of the "Mul" node
1338 // 3, the inputLayerIndex selects a layer which has the same name as otherNodeDef
1341 if (mulNodeDef.op() == "Mul")
1343 size_t otherLayerIndex = (alphaLayerIndex == 0 ? 1 : 0);
1344 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(mulNodeDef, 2);
// The two indices must be a permutation of {0, 1}.
1346 BOOST_ASSERT(inputs.size() == 2);
1347 BOOST_ASSERT((otherLayerIndex == 0 || alphaLayerIndex == 0));
1348 BOOST_ASSERT((otherLayerIndex == 1 || alphaLayerIndex == 1));
1349 BOOST_ASSERT(((otherLayerIndex + alphaLayerIndex) == 1));
1351 if (inputs[otherLayerIndex].m_IndexedValue->GetNode().name() == otherNodeDef.name())
1353 if (HasParsedConstTensor<float>(inputs[alphaLayerIndex].m_IndexedValue->GetNode().name()))
1355 ParsedConstTfOperation<float>* alpha =
1356 boost::polymorphic_downcast<ParsedConstTfOperation<float> *>(
1357 inputs[alphaLayerIndex].m_IndexedValue);
1359 std::vector<float> const_data;
1360 ConstTensor const_tensor = alpha->GetConstTensor(false, const_data);
// Alpha must be a single scalar value.
1362 if (const_data.size() == 1)
1364 desc.m_Function = ActivationFunction::LeakyReLu;
1365 desc.m_A = const_data[0];
1367 *outputOfLeakyRelu = &(otherOp.m_IndexedValue->ResolveArmnnOutputSlot(otherOp.m_Index));
1376 // For max nodes, we only support those as part of a leaky relu, i.e.,
1377 // as part of a max(mul(a, x), x) expression. We thus need to
1378 // identify one input as a multiplication with a scalar constant,
1379 // extract the constant and the two inputs, verify that the two other
1380 // inputs are the same node, and then create a leaky relu node.
1382 ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
1383 const tensorflow::GraphDef& graphDef)
1385 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1386 auto inputNode0 = inputs[0].m_IndexedValue->GetNode();
1387 auto inputNode1 = inputs[1].m_IndexedValue->GetNode();
1388 IOutputSlot* outputOfLeakyRelu = nullptr;
1390 ActivationDescriptor desc;
1392 // There are four possible scenarios we need to support (respectively below):
1393 // 1, max(mul(a, x), x)
1394 // 2, max(mul(x, a), x)
1395 // 3, max(x, mul(a, x))
1396 // 4, max(x, mul(x, a))
1398 if (IsSupportedLeakyReluPattern(inputNode0, 0, inputs[1], &outputOfLeakyRelu, desc) ||
1399 IsSupportedLeakyReluPattern(inputNode0, 1, inputs[1], &outputOfLeakyRelu, desc) ||
1400 IsSupportedLeakyReluPattern(inputNode1, 0, inputs[0], &outputOfLeakyRelu, desc) ||
1401 IsSupportedLeakyReluPattern(inputNode1, 1, inputs[0], &outputOfLeakyRelu, desc))
1403 BOOST_ASSERT(outputOfLeakyRelu != nullptr);
1405 IConnectableLayer* const layer = m_Network->AddActivationLayer(desc, nodeDef.name().c_str());
1406 outputOfLeakyRelu->Connect(layer->GetInputSlot(0));
1407 layer->GetOutputSlot(0).SetTensorInfo(outputOfLeakyRelu->GetTensorInfo());
1408 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1412 throw ParseException(
1415 "ArmNN currenly offers limited support for Maximum node when it can be fused to "
1416 "form a LeakyRelu activation as leakyrelu=max(mul(alpha, X), X). "
1419 % CHECK_LOCATION().AsString()));
// Parses a TF ConcatV2 node into an ArmNN Merger layer. The final input carries the
// concatenation axis and must be a parse-time int32 constant. Only channel-dimension
// concatenation is supported: axis 3 (NHWC) or axis 1 (NCHW); axis 0 or 2 throws.
1423 ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
1424 const tensorflow::GraphDef& graphDef)
1426 std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
1427 // In tensorflow, we have the last input of the Concat layer as the axis for concatenation.
1428 unsigned int numInputs = static_cast<unsigned int>(nodes.size());
1429 unsigned int numConcatView = numInputs - 1;
1431 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), MaxNumOfTensorDimensions);
1432 std::vector<unsigned int>mergeDimSizes(MaxNumOfTensorDimensions, 0u);
// Running offset of each view along the concat dimension.
1434 unsigned int mergeDim = 0;
1435 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, numInputs);
1437 // The last input is the axis for concatenation.
1438 if (!HasParsedConstTensor<int32_t>(inputs[numInputs - 1].m_IndexedValue->GetNode().name()))
1440 throw ParseException(
1443 "ArmNN only supports Concat with constant axis. "
1444 "Input %1%. Node %2% %3%")
1445 % inputs[numInputs - 1].m_IndexedValue->GetNode().name()
1447 % CHECK_LOCATION().AsString()));
1449 ParsedConstTfOperation<int32_t>* shapeNode =
1450 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[numInputs - 1].m_IndexedValue);
1452 std::vector<int32_t> axisTensorData;
1453 ConstTensor axisTensor = shapeNode->GetConstTensor(false, axisTensorData);
1455 // This concatDim indicates the data format: 3 is the NHWC, 1 is the NCHW.
1456 const unsigned int concatDimInput = static_cast<unsigned int>(axisTensorData[0]);
1458 // Armnn supports concatenation along the channel dimension for data formats NHWC and NCHW.
1459 if (concatDimInput == 0 || concatDimInput == 2)
1461 throw ParseException(
1464 "Dimension %1% for concatenation is not supported by Armnn. "
1468 % CHECK_LOCATION().AsString()));
1471 // This is the only concatDim we support in armnn.
1472 const unsigned int concatDim = 1;
// First pass: compute each view's origin inside the merged (NCHW) output tensor.
1473 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
// NOTE(review): original comment was truncated here — verify the resolved slot/index
// pairing against ResolveArmnnOutputSlot's contract.
1475 // Need to double check whether it should be
1476 IOutputSlot& inputSlot =
1477 inputs[viewIndex].m_IndexedValue->ResolveArmnnOutputSlot(inputs[viewIndex].m_Index);
1478 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
1480 if (inputTensorInfo.GetNumDimensions() != MaxNumOfTensorDimensions)
1482 throw ParseException(
1485 "The number of dimensions: %1% for input tensors of the "
1486 "concatenation op should be %2% for Node %3% %4%")
1487 % inputTensorInfo.GetNumDimensions()
1488 % MaxNumOfTensorDimensions
1490 % CHECK_LOCATION().AsString()));
// NHWC inputs are described in the ArmNN (NCHW) layout for origin bookkeeping.
1493 if (concatDimInput == 3)
1495 inputTensorInfo = armnnUtils::Permuted(inputTensorInfo, NHWCToArmNN)
1498 for (unsigned int dim = 0; dim < MaxNumOfTensorDimensions; ++dim)
1500 mergeDimSizes[dim] = inputTensorInfo.GetShape()[dim];
1503 for (unsigned int j = 0; j < concatDim; ++j)
1505 concatDescriptor.SetViewOriginCoord(viewIndex, j, 0);
// The concat dimension's origin is the running offset; all other coords are 0.
1508 concatDescriptor.SetViewOriginCoord(viewIndex, concatDim, mergeDim);
1509 mergeDim += mergeDimSizes[concatDim];
1511 for (unsigned int j = concatDim+1; j < MaxNumOfTensorDimensions; ++j)
1513 concatDescriptor.SetViewOriginCoord(viewIndex, j, 0);
// The merged output's concat dimension is the sum of all views' sizes.
1517 mergeDimSizes[concatDim] = mergeDim;
1518 armnn::IConnectableLayer *layer = m_Network->AddMergerLayer(concatDescriptor, nodeDef.name().c_str());
1520 layer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(MaxNumOfTensorDimensions, mergeDimSizes.data(),
1521 DataType::Float32));
// Second pass: wire each input to its view slot, swizzling NHWC inputs into NCHW.
1523 for (unsigned int v = 0; v < numConcatView; ++v)
1525 IOutputSlot& inputSlot = inputs[v].m_IndexedValue->ResolveArmnnOutputSlot(inputs[v].m_Index);
1526 if (concatDimInput == 3)
1528 IConnectableLayer* const swizzleLayer = AddSwizzleLayer(*m_Network, inputSlot, NHWCToArmNN,
1529 "swizzle_for-" + nodeDef.name());
1530 swizzleLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(v));
1534 inputSlot.Connect(layer->GetInputSlot(v));
// For NHWC graphs, deswizzle the merged result back to NHWC for downstream consumers.
1538 if (concatDimInput == 3)
1540 IConnectableLayer* const deswizzleLayer = AddSwizzleLayer(*m_Network, layer->GetOutputSlot(0), ArmNNToNHWC,
1541 "deswizzle_for-" + nodeDef.name());
1542 layer = deswizzleLayer;
1545 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
// Parses a TF Shape node. Because shapes are static at parse time, the result is
// materialized immediately as a const int32 tensor rather than as a runtime layer.
1548 ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
1549 const tensorflow::GraphDef& graphDef)
1551 // Note: the Shape layer is handled in a special way, because:
1552 // 1. ARMNN doesn't support int32 tensors which it outputs.
1553 // 2. ARMNN works with statically shaped tensors which are known at parse time.
1554 // 3. because of 1. and 2. we treat the output of Shape as a temporary const int32
1555 // tensor which may be used as an input to other ops, most likely a Reshape.
1557 const tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "out_type");
1558 if (tfDataType != tensorflow::DT_INT32)
1560 throw ParseException(
1563 "Armnn only supports DT_INT32 as out_type. Got %1% for Node %2% %3%")
1564 % tensorflow::DataType_Name(tfDataType)
1566 % CHECK_LOCATION().AsString()));
1569 const std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1570 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1571 const TensorInfo& prevLayerTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1572 unsigned int prevLayerDimensions = prevLayerTensorInfo.GetNumDimensions();
// Copy the producer's dimensions into an int32 buffer, one entry per dimension.
1574 std::vector<int32_t> shapeTensorData;
1575 shapeTensorData.reserve(prevLayerDimensions);
1577 for (unsigned int i=0; i<prevLayerDimensions; ++i)
1579 shapeTensorData.push_back(static_cast<int32_t>(prevLayerTensorInfo.GetShape()[i]));
// The result is a 1-D Signed32 tensor of length prevLayerDimensions.
1582 TensorInfo shapeTensorInfo(1, &prevLayerDimensions, DataType::Signed32);
1584 return std::make_unique<ParsedConstTfOperation<int32_t>>(this,
1586 &shapeTensorData[0],
// Parses a TF Reshape node into an ArmNN Reshape layer. The target shape (input [1])
// must be a parse-time int32 constant; PrepareReshape resolves it against the input's
// TensorInfo (e.g. for inferred dimensions).
1590 ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
1591 const tensorflow::GraphDef& graphDef)
1593 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1594 ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
1596 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
1598 throw ParseException(
1601 "ArmNN only supports Reshape layers with constant shapes. "
1602 "Input %1% Node %2% %3%")
1603 % inputs[1].m_IndexedValue->GetNode().name()
1605 % CHECK_LOCATION().AsString()));
1607 ParsedConstTfOperation<int32_t>* shapeNode =
1608 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
1610 armnn::IOutputSlot& prevLayerOutputSlot = inputNode->ResolveArmnnOutputSlot(inputs[0].m_Index);
1611 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1613 std::vector<int32_t> shapeTensorData;
1614 ConstTensor shapeTensor = shapeNode->GetConstTensor(false, shapeTensorData);
1615 const TensorInfo outputTensorInfo = PrepareReshape(inputTensorInfo, shapeTensorData);
1617 TensorShape targetShape = outputTensorInfo.GetShape();
1618 ReshapeDescriptor reshapeDesc;
1619 reshapeDesc.m_TargetShape = targetShape;
1621 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1622 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1623 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1625 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
// Parses a TF ResizeBilinear node into an ArmNN ResizeBilinear layer. The target size
// (input [1]) must be a parse-time int32 constant of [height, width], and align_corners
// must be false (or absent).
1628 ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
1629 const tensorflow::GraphDef& graphDef)
1631 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
1633 if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
1635 throw ParseException(
1638 "ArmNN only supports ResizeBilinear layers with constant sizes. "
1639 "Input %1%. Node %2% %3%")
1640 % inputs[1].m_IndexedValue->GetNode().name()
1642 % CHECK_LOCATION().AsString()));
1644 ParsedConstTfOperation<int32_t>* sizeNode =
1645 boost::polymorphic_downcast<ParsedConstTfOperation<int32_t>*>(inputs[1].m_IndexedValue);
1647 // Checks the align_corners attribute is not set.
1648 if (ReadOptionalNodeBoolAttribute(nodeDef, "align_corners", false))
1650 throw ParseException(
1653 "ArmNN only supports ResizeBilinear layers with align_corners set to false. "
1656 % CHECK_LOCATION().AsString()));
1659 // Data for the parsed tensor args (size) must be stored locally.
1660 std::vector<int32_t> sizeTensorData;
1661 ConstTensor sizeTensor = sizeNode->GetConstTensor(false, sizeTensorData);
1663 // The descriptor only has target height and width attributes, which we get from the size tensor.
1664 ResizeBilinearDescriptor desc;
1665 desc.m_TargetHeight = static_cast<uint32_t> (sizeTensorData[0]);
1666 desc.m_TargetWidth = static_cast<uint32_t> (sizeTensorData[1]);
1668 IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc, nodeDef.name().c_str());
1670 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1671 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
1672 // The input shape is always in BHWC format, this will be swizzled below; for now,
1673 // get the batch and channels to make up the ArmNN output shape with the target size.
1674 unsigned int outBatch = inputTensorInfo.GetShape()[0];
1675 unsigned int outChannels = inputTensorInfo.GetShape()[3];
1676 unsigned int outHeight = desc.m_TargetHeight;
1677 unsigned int outWidth = desc.m_TargetWidth;
// The ArmNN-side output shape is BCHW: batch/channels from the input, H/W from the target.
1678 TensorShape outShape({outBatch, outChannels, outHeight, outWidth});
1679 // The output DataType is always Float32, regardless of the input DataType.
1680 const TensorInfo outputTensorInfo(outShape, armnn::DataType::Float32);
1681 layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
1683 // TensorFlow ResizeBilinear input is always in BHWC format, so add swizzle and deswizzle layers.
1684 layer = SwizzleInDeswizzleOut(*m_Network, inputSlot, *layer, nodeDef.name());
1686 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
// Computes the output TensorInfo of a TF Squeeze node: removes the size-1 dimensions
// listed in "squeeze_dims" (or all size-1 dimensions when the attribute is empty).
// Only DT_FLOAT and DT_INT32 element types are accepted, and at most 4 dimensions.
1689 TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo inputTensorInfo)
1691 BOOST_ASSERT(nodeDef.op() == "Squeeze");
1692 tensorflow::DataType tfDataType = ReadMandatoryNodeTypeAttribute(nodeDef, "T");
// NOTE(review): `type` is declared on a line elided from this view (presumably
// `DataType type;`) — confirm.
1695 if (tfDataType == tensorflow::DT_FLOAT)
1697 type = DataType::Float32;
1699 else if (tfDataType == tensorflow::DT_INT32)
1701 type = DataType::Signed32;
1705 throw ParseException(
1707 boost::format("Unsupported DataType %1% for Squeeze operation %2% %3%")
1708 % tensorflow::DataType_Name(tfDataType)
1710 % CHECK_LOCATION().AsString()));
1714 if (inputTensorInfo.GetNumDimensions() > 4)
1716 throw ParseException(
1719 "Unsupported number of dimensions: %1% for input shape for Squeeze %2% %3%")
1720 % inputTensorInfo.GetNumDimensions()
1722 % CHECK_LOCATION().AsString()));
1725 std::vector<uint32_t> squeezeDims = ReadOptionalNodeUint32ListAttribute(nodeDef, "squeeze_dims");
1726 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
// An empty squeeze_dims means "squeeze every dimension", per TF semantics.
1728 if (squeezeDims.empty())
1730 squeezeDims.assign(dimensionSequence,
1731 dimensionSequence+inputTensorInfo.GetNumDimensions());
1734 std::vector<uint32_t> outputDims;
1735 for(unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
// Keep a dimension when it is not listed for squeezing, or when its size is not 1.
1737 bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
1738 auto currentDimension = inputTensorInfo.GetShape()[i];
1739 if (skipSqueeze || currentDimension != 1)
1741 outputDims.push_back(currentDimension);
1745 if (outputDims.size() > 4)
1747 throw ParseException(
1750 "Unsupported number of dimensions: %1% for output shape for Squeeze %2% %3%")
1753 % CHECK_LOCATION().AsString()));
1756 TensorShape outShape = TensorShape(static_cast<unsigned int>(outputDims.size()),
// The output keeps the input's quantization info; only shape and data type change.
1759 TensorInfo outTensorInfo = inputTensorInfo;
1760 outTensorInfo.SetShape(outShape);
1761 outTensorInfo.SetDataType(type);
1763 return outTensorInfo;
1766 ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1768 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1770 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1771 TensorInfo inputTensorInfo = prevLayerOutputSlot.GetTensorInfo();
1773 TensorInfo outputInfo;
1774 outputInfo = OutputShapeOfSqueeze(nodeDef, inputTensorInfo);
1776 ReshapeDescriptor reshapeDesc;
1777 reshapeDesc.m_TargetShape = outputInfo.GetShape();
1778 IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, nodeDef.name().c_str());
1779 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1780 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1782 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1785 ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1787 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1789 NormalizationDescriptor normalizationDescriptor;
1790 normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
1791 normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
1792 normalizationDescriptor.m_Alpha = ReadMandatoryNodeFloatAttribute(nodeDef, "alpha");
1793 normalizationDescriptor.m_Beta = ReadMandatoryNodeFloatAttribute(nodeDef, "beta");
1794 normalizationDescriptor.m_K = ReadMandatoryNodeFloatAttribute(nodeDef, "bias");
1795 normalizationDescriptor.m_NormSize = ReadMandatoryNodeUint32Attribute(nodeDef, "depth_radius");
1797 // The window size must be an odd value. For a window size of (2 * n + 1), TensorFlow defines depth_radius = n.
1798 normalizationDescriptor.m_NormSize = normalizationDescriptor.m_NormSize * 2 + 1;
1800 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1802 IConnectableLayer* layer = m_Network->AddNormalizationLayer(normalizationDescriptor,
1803 nodeDef.name().c_str());
1805 const TensorInfo permutedInfo = armnnUtils::Permuted(prevLayerOutputSlot.GetTensorInfo(), NHWCToArmNN);
1806 layer->GetOutputSlot(0).SetTensorInfo(permutedInfo);
1808 layer = SwizzleInDeswizzleOut(*m_Network, prevLayerOutputSlot, *layer, nodeDef.name());
1810 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1813 /// An ParsedTfOperation for a MatMul node.
1814 /// Creation of the armnn FullyConnected layer is deferred until it is actually needed, because
1815 /// MatMul nodes are often used for the first part of a biased FullyConnected (MatMul followed
1816 /// by Add) and in these cases armnn doesn't need a separate layer for the MatMul.
1818 class ParsedMatMulTfOperation : public DeferredSingleLayerParsedTfOperation
1821 ParsedMatMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
1822 : DeferredSingleLayerParsedTfOperation(parser, node)
1826 void CreateLayerDeferred() override
1828 BOOST_ASSERT(m_Layer == nullptr);
1829 m_Layer = m_Parser->AddFullyConnectedLayer(m_Node, nullptr, m_Node.name().c_str());
1833 ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1835 // Defers the creation of the layer (see ParsedMatMulTfOperation).
1836 return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
1839 /// An ParsedTfOperation for a Mul node.
1840 /// Creation of the armnn Mul layer is deferred until it is actually needed, because Mul nodes
1841 /// are also used for the first part of a leaky relu activation function (Mul followed by Maximum)
1842 /// and in these cases armnn doesn't need a separate layer for the Mul.
1844 class ParsedMulTfOperation : public DeferredSingleLayerParsedTfOperation
1847 ParsedMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
1848 : DeferredSingleLayerParsedTfOperation(parser, node)
1852 void CreateLayerDeferred() override
1854 BOOST_ASSERT(m_Layer == nullptr);
1855 m_Layer = m_Parser->AddMultiplicationLayer(m_Node);
1859 ParsedTfOperationPtr TfParser::ParseMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1861 boost::ignore_unused(graphDef);
1863 return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
1866 ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
1867 const tensorflow::GraphDef& graphDef)
1869 boost::ignore_unused(graphDef);
1871 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
1873 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkInputsBindingInfo.size());
1875 auto it = m_InputShapes.find(nodeDef.name());
1876 if (it == m_InputShapes.end())
1878 throw ParseException(
1881 "Missing input shape for Placeholder '%1%' %2%")
1883 % CHECK_LOCATION().AsString()));
1885 TensorInfo tensorInfo(it->second, DataType::Float32);
1887 IConnectableLayer* const layer = m_Network->AddInputLayer(layerId, nodeDef.name().c_str());
1889 layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1891 TrackInputBinding(layer, layerId, tensorInfo);
1893 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1896 ParsedTfOperationPtr TfParser::ParseRelu(const tensorflow::NodeDef& nodeDef,
1897 const tensorflow::GraphDef& graphDef)
1899 boost::ignore_unused(graphDef);
1901 ActivationDescriptor activationDesc;
1902 activationDesc.m_Function = ActivationFunction::ReLu;
1903 return AddActivationLayer(nodeDef, activationDesc);
1906 ParsedTfOperationPtr TfParser::ParseRelu6(const tensorflow::NodeDef& nodeDef,
1907 const tensorflow::GraphDef& graphDef)
1909 boost::ignore_unused(graphDef);
1911 ActivationDescriptor activationDesc;
1912 activationDesc.m_Function = ActivationFunction::BoundedReLu;
1913 activationDesc.m_A = 6.0f;
1914 activationDesc.m_B = 0.0f;
1916 return AddActivationLayer(nodeDef, activationDesc);
1919 ParsedTfOperationPtr TfParser::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
1920 const tensorflow::GraphDef& graphDef)
1922 boost::ignore_unused(graphDef);
1924 ActivationDescriptor activationDesc;
1925 activationDesc.m_Function = ActivationFunction::Sigmoid;
1927 return AddActivationLayer(nodeDef, activationDesc);
1930 ParsedTfOperationPtr TfParser::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
1931 const tensorflow::GraphDef& graphDef)
1933 boost::ignore_unused(graphDef);
1935 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1937 SoftmaxDescriptor softmaxDescriptor;
1938 IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(softmaxDescriptor, nodeDef.name().c_str());
1940 IOutputSlot& prevLayerSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1941 prevLayerSlot.Connect(layer->GetInputSlot(0));
1942 layer->GetOutputSlot(0).SetTensorInfo(prevLayerSlot.GetTensorInfo());
1944 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1947 ParsedTfOperationPtr TfParser::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
1948 const tensorflow::GraphDef& graphDef)
1950 boost::ignore_unused(graphDef);
1952 ActivationDescriptor activationDesc;
1953 activationDesc.m_Function = ActivationFunction::SoftReLu;
1955 return AddActivationLayer(nodeDef, activationDesc);
1958 ParsedTfOperationPtr TfParser::ParseTanh(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
1960 boost::ignore_unused(graphDef);
1962 ActivationDescriptor activationDesc;
1963 activationDesc.m_Function = ActivationFunction::TanH;
1964 activationDesc.m_A = 1.0f;
1965 activationDesc.m_B = 1.0f;
1967 return AddActivationLayer(nodeDef, activationDesc);
1970 ParsedTfOperationPtr TfParser::AddActivationLayer(const tensorflow::NodeDef& nodeDef,
1971 ActivationDescriptor& activationDesc)
1973 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1975 IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, nodeDef.name().c_str());
1977 IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
1978 prevLayerOutputSlot.Connect(layer->GetInputSlot(0));
1979 layer->GetOutputSlot(0).SetTensorInfo(prevLayerOutputSlot.GetTensorInfo());
1980 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
1983 ParsedTfOperationPtr TfParser::ParseMaxPool(const tensorflow::NodeDef& nodeDef,
1984 const tensorflow::GraphDef& graphDef)
1986 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Max);
1989 ParsedTfOperationPtr TfParser::ParseAvgPool(const tensorflow::NodeDef& nodeDef,
1990 const tensorflow::GraphDef& graphDef)
1992 return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Average);
// Shared handler for TF "MaxPool" / "AvgPool": builds an ArmNN Pooling2d layer
// from the node's padding/strides/ksize attributes. NHWC graphs are swizzled
// into ArmNN's NCHW layout (and back) around the pooling layer.
// Output is assumed Float32.
1995 ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
1996 const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
1998 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
1999 IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2000 TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
// NOTE(review): dead check — GetInputParsedTfOperationsChecked(nodeDef, 1)
// already enforces a single input, and inputs[0] was dereferenced above.
// Harmless, but it could be removed (or hoisted above the dereference).
2002 if (inputs.size() != 1)
2004 throw ParseException(
2007 "2D Pooling expects one input!. Got %1% for Node %2% %3%")
2010 % CHECK_LOCATION().AsString()));
// Mandatory TF attributes: padding scheme, data layout, strides, window size.
2013 std::string paddingString = ReadMandatoryNodeStringAttribute(nodeDef, "padding");
2014 std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
2015 std::vector<uint32_t> strides = ReadMandatoryNodeUint32ListAttribute(nodeDef, "strides");
2016 std::vector<uint32_t> ksize = ReadMandatoryNodeUint32ListAttribute(nodeDef, "ksize"); // size of pool windows
2018 Pooling2dDescriptor pooling2dDescriptor;
2019 pooling2dDescriptor.m_PoolType = pooltype;
2020 pooling2dDescriptor.m_PaddingMethod = PaddingMethod::Exclude;
2021 pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Floor;
2023 CHECK_DATA_FORMAT(nodeDef, dataFormat, "Pooling2D");
// strides/ksize are indexed per TF layout: NHWC = [N,H,W,C], NCHW = [N,C,H,W].
2025 if (dataFormat == "NHWC")
2027 pooling2dDescriptor.m_StrideX = strides[2];
2028 pooling2dDescriptor.m_StrideY = strides[1];
2029 pooling2dDescriptor.m_PoolWidth = ksize[2];
2030 pooling2dDescriptor.m_PoolHeight = ksize[1];
2031 // Swizzles input to supported memory layout.
2032 inputTensorInfo = armnnUtils::Permuted(inputSlot.GetTensorInfo(), NHWCToArmNN);
2034 else if (dataFormat == "NCHW")
2036 pooling2dDescriptor.m_StrideX = strides[3];
2037 pooling2dDescriptor.m_StrideY = strides[2];
2038 pooling2dDescriptor.m_PoolWidth = ksize[3];
2039 pooling2dDescriptor.m_PoolHeight = ksize[2];
// From here on inputTensorInfo is in NCHW order, so H = dim 2, W = dim 3.
2042 uint32_t inputHeight = inputTensorInfo.GetShape()[2];
2043 uint32_t inputWidth = inputTensorInfo.GetShape()[3];
2045 bool padding = false;
2046 TensorInfo outputInfo;
2048 CHECK_PADDING_TYPE(nodeDef, paddingString);
// TF output-size rules: SAME -> ceil(in / stride);
// VALID -> ceil((in - pool + 1) / stride). Batch and channels pass through.
2050 if (paddingString == "SAME")
2053 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
2054 inputTensorInfo.GetShape()[1],
2055 static_cast<uint32_t>(ceil(
2056 static_cast<float>(inputHeight) /
2057 static_cast<float>(pooling2dDescriptor.m_StrideY))),
2058 static_cast<uint32_t>(ceil(
2059 static_cast<float>(inputWidth) /
2060 static_cast<float>(pooling2dDescriptor.m_StrideX)))
2061 }, DataType::Float32);
2063 else if (paddingString == "VALID")
2066 outputInfo = TensorInfo({ inputTensorInfo.GetShape()[0],
2067 inputTensorInfo.GetShape()[1],
2068 static_cast<uint32_t>(ceil(
2069 static_cast<float>(inputHeight - pooling2dDescriptor.m_PoolHeight + 1) /
2070 static_cast<float>(pooling2dDescriptor.m_StrideY))),
2071 static_cast<uint32_t>(ceil(
2072 static_cast<float>(inputWidth - pooling2dDescriptor.m_PoolWidth + 1) /
2073 static_cast<float>(pooling2dDescriptor.m_StrideX)))
2074 }, DataType::Float32);
// Derive the explicit pad values the descriptor needs (the `padding` flag
// presumably carries SAME vs VALID — set in lines outside this view; confirm
// against the full file).
2077 CalcPadding(inputWidth, pooling2dDescriptor.m_PoolWidth, pooling2dDescriptor.m_StrideX,
2078 pooling2dDescriptor.m_PadLeft, pooling2dDescriptor.m_PadRight, padding);
2079 CalcPadding(inputHeight, pooling2dDescriptor.m_PoolHeight, pooling2dDescriptor.m_StrideY,
2080 pooling2dDescriptor.m_PadTop, pooling2dDescriptor.m_PadBottom, padding);
2083 IConnectableLayer* layer = m_Network->AddPooling2dLayer(pooling2dDescriptor, nodeDef.name().c_str());
2084 if (layer == nullptr)
2086 throw ParseException(
2089 "Failed to add pooling2d layer for %1% %2%")
2091 % CHECK_LOCATION().AsString()));
2094 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
// NHWC: wrap the layer with swizzle/deswizzle permutes; otherwise (NCHW,
// presumably an else branch hidden from this view) connect the input directly.
2096 if (dataFormat == "NHWC")
2098 layer = SwizzleInDeswizzleOut(*m_Network, inputSlot, *layer, nodeDef.name());
2102 inputSlot.Connect(layer->GetInputSlot(0));
2105 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
// Creates an ArmNN Addition layer for a TF "Add" or "BiasAdd" node.
// For BiasAdd the second input must be a 1D bias vector, reshaped for
// broadcast against the 4D input; for plain Add either operand may be 1D.
2108 ParsedTfOperationPtr TfParser::AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd)
2110 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2112 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2113 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2115 const TensorInfo& input0Info = input0Slot->GetTensorInfo();
2116 const TensorInfo& input1Info = input1Slot->GetTensorInfo();
// BiasAdd branch — the `if (isBiasAdd)` guard itself falls outside the
// visible lines; confirm against the full file.
2120 // BiasAdd takes bias as a 1D tensor. We need to add a reshape layer to create a 4D tensor
2121 // with the same data in the correct dimension for broadcast in addition.
2122 if(input1Info.GetNumDimensions() != 1)
2124 throw ParseException(
2127 "Unsupported bias for BiasAdd. It should be a 1D vector. "
2128 "Got %1% dimensions for input %2%. Node %3% %4%")
2129 % input1Info.GetNumDimensions()
2130 % inputs[1].m_IndexedValue->GetNode().name()
2132 % CHECK_LOCATION().AsString()));
2135 const std::string dataFormat = ReadMandatoryNodeStringAttribute(nodeDef, "data_format");
2137 CHECK_DATA_FORMAT(nodeDef, dataFormat, "BiasAdd");
2138 input1Slot = BroadcastForAddandMul(input0Slot, input1Slot, dataFormat == "NHWC", *m_Network, nodeDef);
// Plain Add path (presumably the else of the BiasAdd guard): broadcast
// whichever operand is 1D against the other, assuming NHWC layout.
2142 if (input0Info.GetNumDimensions() == 1)
2144 const bool isNHWC = true;
2145 input0Slot = BroadcastForAddandMul(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
2148 if (input1Info.GetNumDimensions() == 1)
2150 const bool isNHWC = true;
2151 input1Slot = BroadcastForAddandMul(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
2155 IConnectableLayer* const layer = m_Network->AddAdditionLayer(nodeDef.name().c_str());
2157 input0Slot->Connect(layer->GetInputSlot(0));
2158 input1Slot->Connect(layer->GetInputSlot(1));
// Output info follows the non-1D operand; for BiasAdd input0 is always the
// data tensor, so its info is used even when input0 is rank-1.
2160 if (input0Info.GetNumDimensions() == 1 && isBiasAdd == false)
2162 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2166 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
2169 return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
2172 IConnectableLayer* TfParser::AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef)
2174 std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
2176 IConnectableLayer* const layer = m_Network->AddMultiplicationLayer(nodeDef.name().c_str());
2177 IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
2178 IOutputSlot* input1Slot = &inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
2180 auto const input0NumDims = input0Slot->GetTensorInfo().GetNumDimensions();
2181 auto const input1NumDims = input1Slot->GetTensorInfo().GetNumDimensions();
2183 if (input0NumDims < input1NumDims)
2185 const bool isNHWC = true;
2186 input0Slot = BroadcastForAddandMul(input1Slot, input0Slot, isNHWC, *m_Network, nodeDef);
2188 if (input1NumDims < input0NumDims)
2190 const bool isNHWC = true;
2191 input1Slot = BroadcastForAddandMul(input0Slot, input1Slot, isNHWC, *m_Network, nodeDef);
2194 input0Slot->Connect(layer->GetInputSlot(0));
2195 input1Slot->Connect(layer->GetInputSlot(1));
2197 if (input0NumDims < input1NumDims)
2199 layer->GetOutputSlot(0).SetTensorInfo(input1Slot->GetTensorInfo());
2203 layer->GetOutputSlot(0).SetTensorInfo(input0Slot->GetTensorInfo());
// Fuses a TF MatMul node (plus an optional following Add node supplying the
// bias) into one ArmNN FullyConnected layer. Both the weights and, when
// present, the bias must already be parsed constants (ParsedConstTfOperation).
// Returns the new layer; the caller wraps it in its own ParsedTfOperation.
2209 IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& matMulNodeDef,
2210 const tensorflow::NodeDef* addNodeDef, const char* armnnLayerName)
2212 // Finds bias const (if applicable).
2213 ParsedConstTfOperation<float>* biasNode = nullptr;
2214 if (addNodeDef != nullptr)
2216 std::vector<OutputOfParsedTfOperation> addInputs = GetInputParsedTfOperationsChecked(*addNodeDef, 2);
2217 // Finds our inputs.
// The bias may be either operand of the Add node; whichever is constant.
2218 if (HasParsedConstTensor<float>(addInputs[0].m_IndexedValue->GetNode().name()))
2220 biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[0].m_IndexedValue);
2222 else if (HasParsedConstTensor<float>(addInputs[1].m_IndexedValue->GetNode().name()))
2224 biasNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(addInputs[1].m_IndexedValue);
2228 throw ParseException(
2231 "ArmNN only supports fully connected layers with constant bias. "
2232 "Inputs %1% and %2%. AddNode %3%. MatMulNode %4% %5%")
2233 % addInputs[0].m_IndexedValue->GetNode().name()
2234 % addInputs[1].m_IndexedValue->GetNode().name()
2235 % addNodeDef->name()
2236 % matMulNodeDef.name()
2237 % CHECK_LOCATION().AsString()));
2241 // Finds matmul inputs.
// One MatMul operand must be the constant weights; the other is the actual
// data input (remembered with its slot index for wiring below).
2242 ParsedConstTfOperation<float>* weightNode = nullptr;
2243 ParsedTfOperation* inputNode = nullptr;
2244 unsigned int inputIdx = 0;
2245 std::vector<OutputOfParsedTfOperation> mulInputs = GetInputParsedTfOperationsChecked(matMulNodeDef, 2);
2246 if (HasParsedConstTensor<float>(mulInputs[0].m_IndexedValue->GetNode().name()))
2248 weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[0].m_IndexedValue);
2249 inputNode = mulInputs[1].m_IndexedValue;
2250 inputIdx = mulInputs[1].m_Index;
2252 else if (HasParsedConstTensor<float>(mulInputs[1].m_IndexedValue->GetNode().name()))
2254 weightNode = boost::polymorphic_downcast<ParsedConstTfOperation<float>*>(mulInputs[1].m_IndexedValue);
2255 inputNode = mulInputs[0].m_IndexedValue;
2256 inputIdx = mulInputs[0].m_Index;
2260 throw ParseException(
2263 "ArmNN only supports fully connected layers with constant weights. "
2264 "Inputs %1% and %2%. MatMulNode %3% %4%")
2265 % mulInputs[0].m_IndexedValue->GetNode().name()
2266 % mulInputs[1].m_IndexedValue->GetNode().name()
2267 % matMulNodeDef.name()
2268 % CHECK_LOCATION().AsString()));
2271 std::vector<float> weightTensorData;
// 'false' = no swizzle of the constant data; storage backs the ConstTensor.
2273 ConstTensor weights = weightNode->GetConstTensor(false, weightTensorData);
2275 FullyConnectedDescriptor desc;
2276 desc.m_BiasEnabled = addNodeDef != nullptr;
2278 IConnectableLayer* layer = nullptr;
2280 if (addNodeDef != nullptr)
2282 std::vector<float> biasTensorData;
2283 ConstTensor biases = biasNode->GetConstTensor(false, biasTensorData);
// Weights are [inputSize, outputSize]; bias length must equal outputSize.
2285 if (weights.GetShape()[1] != biases.GetShape()[0])
2287 throw ParseException(
2290 "Shape of matmul weights and bias do not match. "
2291 "AddNode %1%. MatMulNode %2% %3%")
2292 % addNodeDef->name()
2293 % matMulNodeDef.name()
2294 % CHECK_LOCATION().AsString()));
2297 layer = m_Network->AddFullyConnectedLayer(desc, weights, biases, armnnLayerName);
2301 layer = m_Network->AddFullyConnectedLayer(desc, weights, armnnLayerName);
2304 BOOST_ASSERT(layer != nullptr);
// NOTE(review): ResolveArmnnOutputSlot is called twice for the same slot;
// caching the reference would avoid the duplicate lookup. Minor.
2306 inputNode->ResolveArmnnOutputSlot(inputIdx).Connect(layer->GetInputSlot(0));
2307 unsigned int batches = inputNode->ResolveArmnnOutputSlot(inputIdx).GetTensorInfo().GetShape()[0];
// Output is [batches, outputSize], Float32.
2310 TensorInfo outputInfo({ batches, weights.GetShape()[1] }, DataType::Float32);
2311 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
// Parses a single graph node: checks its data type, dispatches to the
// registered Parse* member function for its op, records the result, and adds
// an ArmNN output layer if the node was requested as a network output.
2315 void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
2317 // Gets the type of the node (assume float).
2318 tensorflow::DataType type = tensorflow::DT_FLOAT;
// Type comes from the "T" attribute, else "dtype" — the assignments from
// attr (presumably `type = attr.type();`) fall outside the visible lines.
2319 if (nodeDef.attr().count("T") != 0)
2321 auto attr = nodeDef.attr().at("T");
2324 else if (nodeDef.attr().count("dtype") != 0)
2326 auto attr = nodeDef.attr().at("dtype");
// Only Float32 graphs are supported; Const nodes are exempt (their payload
// type is validated by the Const parser).
2330 if (type != tensorflow::DT_FLOAT && nodeDef.op() != "Const")
2332 throw ParseException(
2335 "Currently only FLOAT is supported for tensorflow nodes (apart from Const). "
2336 "Got %1% for Node %2% %3%")
2337 % tensorflow::DataType_Name(type)
2339 % CHECK_LOCATION().AsString()));
// Dispatch on op name via the static parse-function table.
2342 const std::string& operation = nodeDef.op();
2343 auto it = ms_OperationNameToParsingFunctions.find(operation);
2344 if (it != ms_OperationNameToParsingFunctions.end())
2346 auto func = it->second;
2347 ParsedTfOperationPtr parsedTfOperation = (this->*func)(nodeDef, graphDef);
2348 ParsedTfOperation* parsedTfOperationRaw = parsedTfOperation.get();
2350 // Stores the parsed operation so that dependent layers can connect to it.
// Node names must be unique — duplicates would make input resolution ambiguous.
2351 auto it = m_ParsedTfOperations.find(nodeDef.name());
2352 if (it != m_ParsedTfOperations.end())
2354 throw ParseException(boost::str(boost::format("Name %1% used by more than one node") % nodeDef.name()));
2356 m_ParsedTfOperations[nodeDef.name()] = std::move(parsedTfOperation);
2358 // If this node was requested as an output from the network, then adds an ArmNN output layer.
2359 if (std::find(m_RequestedOutputs.begin(), m_RequestedOutputs.end(), nodeDef.name()) !=
2360 m_RequestedOutputs.end())
// Binding id is just the running count of outputs registered so far.
2362 auto outId = ParseOutputId(nodeDef.name());
2363 const LayerBindingId layerId = boost::numeric_cast<LayerBindingId>(m_NetworkOutputsBindingInfo.size());
2364 IOutputSlot& prevSlot = parsedTfOperationRaw->ResolveArmnnOutputSlot(outId.m_Index);
2366 TensorInfo tensorInfo = prevSlot.GetTensorInfo();
2368 IConnectableLayer* outputLayer = m_Network->AddOutputLayer(layerId, nodeDef.name().c_str());
2370 prevSlot.Connect(outputLayer->GetInputSlot(0));
2372 TrackOutputBinding(outputLayer, layerId, tensorInfo);
// Ops without a registered parser are rejected (presumably the else branch of
// the table lookup — the `else` line itself is outside this view).
2377 throw ParseException(
2380 "Unsupported operation %1% in tensorflow::GraphDef %2%")
2382 % CHECK_LOCATION().AsString()));
// Walks the whole GraphDef: indexes nodes by name, topologically sorts the
// subgraph feeding the requested outputs, then parses each node in dependency
// order so every node's inputs are already resolved when it is loaded.
2386 void TfParser::LoadGraphDef(const tensorflow::GraphDef& graphDef)
2388 // Adds all nodes to our map.
2389 m_NodesByName.clear();
2390 m_NetworkInputsBindingInfo.clear();
2391 m_NetworkOutputsBindingInfo.clear();
2393 for (int i = 0; i < graphDef.node_size(); ++i)
2395 const tensorflow::NodeDef& node = graphDef.node(i);
2396 m_NodesByName[node.name()] = &node;
2399 // Finds the output nodes the user requested.
2400 std::vector<const tensorflow::NodeDef*> targetNodes;
2401 for (const std::string& requestedOutputName : m_RequestedOutputs)
2403 auto nodeIt = m_NodesByName.find(requestedOutputName);
2404 if (nodeIt == m_NodesByName.end())
2406 throw ParseException(
2409 "Couldn't find requested output node '%1%' in graph %2%")
2410 % requestedOutputName
2411 % CHECK_LOCATION().AsString()));
2413 targetNodes.push_back(nodeIt->second);
2416 // Sorts them into a linear ordering such that all inputs of a node are before the node itself.
2417 std::vector<const tensorflow::NodeDef*> sortedNodes;
// The sort takes the target nodes plus a lambda yielding each node's input
// nodes (presumably returning nodesOnly and filling sortedNodes — the closing
// lines of the lambda/call fall outside this view; confirm in the full file).
2418 if (!armnnUtils::GraphTopologicalSort<const tensorflow::NodeDef*>(
2420 [this](const tensorflow::NodeDef* node)
2422 auto outputs = GetTfInputNodes(*node);
2423 std::vector<const tensorflow::NodeDef*> nodesOnly;
2424 for (const auto & o : outputs) {
2425 nodesOnly.push_back(o.m_IndexedValue);
// Topological sort fails only on cyclic graphs.
2431 throw ParseException(
2434 "Cycle detected in graph %1%")
2435 % CHECK_LOCATION().AsString()));
2438 // Parses each node in order, knowing that all inputs of a node will be processed before the node itself.
2439 for (const auto& it : sortedNodes)
2441 const tensorflow::NodeDef& currentNode = *it;
2442 LoadNodeDef(currentNode, graphDef);
2446 INetworkPtr TfParser::CreateNetworkFromTextFile(const char* graphFile,
2447 const std::map<std::string, TensorShape>& inputShapes,
2448 const std::vector<std::string>& requestedOutputs)
2450 FILE* fd = fopen(graphFile, "r");
2454 throw FileNotFoundException(
2457 "Graph file %1% failed to open %2%")
2459 % CHECK_LOCATION().AsString()));
2462 // Parses the file into a message.
2463 tensorflow::GraphDef graphDef;
2464 auto input = new google::protobuf::io::FileInputStream(fileno(fd));
2465 bool success = google::protobuf::TextFormat::Parse(input, &graphDef);
2471 throw ParseException(
2474 "Failed to parse graph file %1%")
2475 % CHECK_LOCATION().AsString()));
2478 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
2481 INetworkPtr TfParser::CreateNetworkFromString(const char* protoText,
2482 const std::map<std::string, TensorShape>& inputShapes,
2483 const std::vector<std::string>& requestedOutputs)
2485 // Parses the string into a message.
2486 tensorflow::GraphDef graphDef;
2487 bool success = google::protobuf::TextFormat::ParseFromString(protoText, &graphDef);
2491 throw ParseException(
2494 "Failed to parse graph file %1%")
2495 % CHECK_LOCATION().AsString()));
2498 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
2501 INetworkPtr TfParser::CreateNetworkFromBinaryFile(const char* graphFile,
2502 const std::map<std::string, TensorShape>& inputShapes,
2503 const std::vector<std::string>& requestedOutputs)
2505 FILE* fd = fopen(graphFile, "rb");
2509 throw FileNotFoundException(
2512 "Graph file %1% failed to open %2%")
2514 % CHECK_LOCATION().AsString()));
2517 // Parses the file into a message.
2518 tensorflow::GraphDef graphDef;
2520 google::protobuf::io::FileInputStream inStream(fileno(fd));
2521 google::protobuf::io::CodedInputStream codedStream(&inStream);
2522 codedStream.SetTotalBytesLimit(INT_MAX, INT_MAX);
2523 bool success = graphDef.ParseFromCodedStream(&codedStream);
2528 throw ParseException(
2531 "Failed to parse protobuf file %1% %2%")
2533 % CHECK_LOCATION().AsString()));
2536 return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
// Top-level entry point: builds an INetwork from an in-memory GraphDef.
// Records the caller-supplied input shapes and requested outputs, then walks
// the graph; parser state is reset afterwards so the parser can be reused.
2539 INetworkPtr TfParser::CreateNetworkFromGraphDef(const tensorflow::GraphDef& graphDef,
2540 const std::map<std::string, TensorShape>& inputShapes,
2541 const std::vector<std::string>& requestedOutputs)
2543 m_Network = INetwork::Create();
2545 m_InputShapes = inputShapes;
// At least one requested output is required to anchor the graph traversal.
2546 if (requestedOutputs.size() == 0)
2548 throw ParseException(
2551 "requestedOutputs must have at least one entry %1%")
2552 % CHECK_LOCATION().AsString()));
2554 m_RequestedOutputs = requestedOutputs;
// LoadGraphDef is presumably wrapped in try { ... } with Cleanup() run on
// both the success and the exception path — those lines fall outside this
// view; confirm against the full file.
2558 LoadGraphDef(graphDef);
2560 catch (const ParseException& e)
// std::move is required: m_Network is a member unique-ownership handle being
// transferred to the caller.
2568 return std::move(m_Network);
2571 void TfParser::Cleanup()
2573 // Cleanup, in case we reuse this parser.
2574 m_InputShapes.clear();
2575 m_RequestedOutputs.clear();
2576 m_NodesByName.clear();
2577 m_ParsedTfOperations.clear();
2580 BindingPointInfo TfParser::GetNetworkInputBindingInfo(const std::string& name) const
2582 return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
2585 BindingPointInfo TfParser::GetNetworkOutputBindingInfo(const std::string& name) const
2587 return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
2590 std::pair<LayerBindingId, TensorInfo> TfParser::GetBindingInfo(const std::string& layerName,
2591 const char* bindingPointDesc,
2592 const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
2594 auto it = nameToBindingInfo.find(layerName);
2595 if (it == nameToBindingInfo.end())
2597 throw InvalidArgumentException(
2600 "Unknown %1% '%2%' %3%")
2603 % CHECK_LOCATION().AsString()));
2608 void TfParser::TrackInputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
2610 return TrackBindingPoint(layer, id, tensorInfo, "input", m_NetworkInputsBindingInfo);
2613 void TfParser::TrackOutputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
2615 return TrackBindingPoint(layer, id, tensorInfo, "output", m_NetworkOutputsBindingInfo);
2618 void TfParser::TrackBindingPoint(IConnectableLayer* layer,
2620 const TensorInfo& tensorInfo,
2621 const char* bindingPointDesc,
2622 std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
2624 const std::string layerName = layer->GetName();
2625 auto it = nameToBindingInfo.find(layerName);
2626 if (it == nameToBindingInfo.end())
2628 nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
2632 throw ParseException(
2635 "Id %1% used by more than one %2% layer %3%")
2638 % CHECK_LOCATION().AsString()));
2642 } // namespace armnnTfParser