2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // See LICENSE file in the project root for full license information.
5 #include "CaffeParser.hpp"
7 #include "armnn/Descriptors.hpp"
8 #include "armnn/INetwork.hpp"
9 #include "armnn/Utils.hpp"
10 #include "armnn/Exceptions.hpp"
12 #include "GraphTopologicalSort.hpp"
14 #include <boost/numeric/conversion/cast.hpp>
15 #include <boost/assert.hpp>
16 #include <boost/format.hpp>
17 #include <boost/log/trivial.hpp>
20 #include "caffe/proto/caffe.pb.h"
23 #include <google/protobuf/io/coded_stream.h>
24 #include <google/protobuf/io/zero_copy_stream.h>
25 #include <google/protobuf/io/zero_copy_stream_impl.h>
26 #include <google/protobuf/text_format.h>
27 #include <google/protobuf/stubs/common.h>
28 #include <google/protobuf/stubs/once.h>
29 #include <google/protobuf/io/coded_stream.h>
30 #include <google/protobuf/wire_format_lite_inl.h>
31 #include <google/protobuf/descriptor.h>
32 #include <google/protobuf/generated_message_reflection.h>
33 #include <google/protobuf/reflection_ops.h>
34 #include <google/protobuf/wire_format.h>
41 /// Caffe networks are loaded from protobuf files (binary or text) using the protobuf library and the generated
42 /// code from caffe.pb.h. This gives us a caffe::NetParameter which is an in-memory version of the file.
43 /// This contains a flat list of Caffe 'layers' (e.g. convolution, pooling etc.).
44 /// Each layer has inputs (called "bottoms") and outputs (called "tops"). Data flows from bottom to top.
45 /// The bottoms of a layer refer to the tops of other layers, not their names.
46 /// The names of layers seem to be arbitrary (you could rename a layer and the network wouldn't need any other changes).
48 /// Some layers (e.g. Relu) can be configured so that their top and bottom are both the same. This is called an
49 /// "in-place" layer and is a Caffe runtime feature used to reduce memory usage by modifying tensors in-place.
50 /// This isn't relevant to the parser and so we preprocess these layers to convert them to regular layers, to result
51 /// in a consistent graph structure.
53 namespace armnnCaffeParser
56 using namespace armnn;
57 using namespace caffe;
59 using namespace google::protobuf::io;
61 const std::map<std::string, CaffeParser::OperationParsingFunction> CaffeParser::ms_CaffeLayerNameToParsingFunctions = {
62 { "Input", &CaffeParser::ParseInputLayer },
63 { "Convolution", &CaffeParser::ParseConvLayer },
64 { "Pooling", &CaffeParser::ParsePoolingLayer },
65 { "ReLU", &CaffeParser::ParseReluLayer },
66 { "LRN", &CaffeParser::ParseLRNLayer },
67 { "InnerProduct", &CaffeParser::ParseInnerProductLayer },
68 { "Softmax", &CaffeParser::ParseSoftmaxLayer },
69 { "Eltwise", &CaffeParser::ParseEltwiseLayer },
70 { "Concat", &CaffeParser::ParseConcatLayer },
71 { "BatchNorm", &CaffeParser::ParseBatchNormLayer },
72 { "Scale", &CaffeParser::ParseScaleLayer },
73 { "Split", &CaffeParser::ParseSplitLayer },
74 { "Dropout", &CaffeParser::ParseDropoutLayer},
77 ICaffeParser* ICaffeParser::CreateRaw()
79 return new CaffeParser();
82 ICaffeParserPtr ICaffeParser::Create()
84 return ICaffeParserPtr(CreateRaw(), &ICaffeParser::Destroy);
87 void ICaffeParser::Destroy(ICaffeParser* parser)
92 CaffeParser::CaffeParser()
93 : m_Network(nullptr, nullptr)
98 void GetDataFromBlob(const LayerParameter& layerParam, vector<float>& outData, unsigned int blobIndex)
100 if (blobIndex >= boost::numeric_cast<unsigned int>(layerParam.blobs_size()))
102 throw ParseException(boost::str(boost::format("Expected data blob at index %1% in layer %2% not found")
103 % blobIndex % layerParam.name()));
106 const BlobProto& blob = layerParam.blobs(boost::numeric_cast<int>(blobIndex));
108 if (boost::numeric_cast<size_t>(blob.data_size()) != outData.size())
110 throw ParseException(boost::str(boost::format(
111 "Data blob at index %1% in layer %2% has an unexpected size. Expected %3% elements but got %4% elements")
112 % blobIndex % layerParam.name() % outData.size() % blob.data_size()));
115 for (unsigned int i = 0; i < outData.size(); ++i)
117 outData[i] = blob.data(boost::numeric_cast<int>(i));
/// Returns true if @p value lies within [min, max], both bounds inclusive.
bool IsInRange(unsigned int value, unsigned int min, unsigned int max)
{
    // Simplified from the redundant "cond ? true : false" form.
    return value >= min && value <= max;
}
/// Total size in bytes of the elements held by @p vec (element count times element size).
template <typename T>
size_t SizeOfVectorData(const vector<T>& vec)
{
    return vec.size() * sizeof(T);
}
132 void ValidateNumInputsOutputs(const caffe::LayerParameter& layerParameter,
133 unsigned int numInputs,
134 unsigned int numOutputs)
136 int numInputsActual = layerParameter.bottom_size();
137 if (numInputs != boost::numeric_cast<unsigned int>(numInputsActual))
139 throw ParseException("Loading layer: invalid number of inputs");
142 int numOutputsActual = layerParameter.top_size();
143 if (numOutputs != boost::numeric_cast<unsigned int>(numOutputsActual))
145 throw ParseException("Loading layer: invalid number of outputs");
149 BindingPointInfo CaffeParser::GetNetworkInputBindingInfo(const std::string& name) const
151 return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
154 BindingPointInfo CaffeParser::GetNetworkOutputBindingInfo(const std::string& name) const
156 return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
159 std::pair<armnn::LayerBindingId, armnn::TensorInfo> CaffeParser::GetBindingInfo(const std::string& layerName,
160 const char* bindingPointDesc,
161 const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
163 auto it = nameToBindingInfo.find(layerName);
164 if (it == nameToBindingInfo.end())
166 throw InvalidArgumentException(boost::str(boost::format("Unknown %1% '%2%'") % bindingPointDesc % layerName));
171 TensorInfo CaffeParser::BlobShapeToTensorInfo(const caffe::BlobShape& blobShape) const
173 std::vector<unsigned int> shape;
174 for (int j = 0; j < blobShape.dim_size(); ++j)
176 shape.push_back(static_cast<unsigned int>(blobShape.dim(j)));
179 return TensorInfo(boost::numeric_cast<unsigned int>(shape.size()), shape.data(), DataType::Float32);
182 BlobShape TensorDescToBlobShape(const TensorInfo& desc)
185 for (unsigned int i = 0; i < desc.GetNumDimensions(); ++i)
188 ret.set_dim(boost::numeric_cast<int>(i), desc.GetShape()[i]);
194 vector<const LayerParameter*> CaffeParser::GetInputs(const LayerParameter& layerParam)
196 std::vector<const caffe::LayerParameter*> ret;
197 ret.reserve(boost::numeric_cast<size_t>(layerParam.bottom_size()));
198 for (int j = 0; j < layerParam.bottom_size(); ++j)
200 std::string inputName = layerParam.bottom(j);
201 auto inputIt = m_CaffeLayersByTopName.find(inputName);
202 if (inputIt == m_CaffeLayersByTopName.end())
204 throw ParseException(
205 "Can't find Caffe layer with top called '" + inputName + "', which is listed as an input of '" +
206 layerParam.name() + "'");
208 ret.push_back(inputIt->second);
214 void CaffeParser::ParseInputLayer(const LayerParameter& layerParam)
216 BOOST_ASSERT(layerParam.type() == "Input");
217 ValidateNumInputsOutputs(layerParam, 0, 1);
219 const InputParameter& param = layerParam.input_param();
221 const armnn::LayerBindingId inputId = boost::numeric_cast<armnn::LayerBindingId>(m_NetworkInputsBindingInfo.size());
222 armnn::IConnectableLayer* const inputLayer = m_Network->AddInputLayer(inputId, layerParam.name().c_str());
224 // Decide on the tensor info for this input. This can be specified in the Caffe network but can also
225 // be overriden by user input (m_inputShapes).
226 armnn::TensorInfo inputTensorInfo;
228 const BlobShape* originalShape = param.shape_size() > 0 && param.shape(0).dim_size() > 0 ?
229 ¶m.shape(0) : nullptr;
232 inputTensorInfo = BlobShapeToTensorInfo(*originalShape);
235 auto overrideIt = m_InputShapes.find(layerParam.name());
236 if (overrideIt != m_InputShapes.end())
238 const TensorShape& overrideShape = overrideIt->second;
240 ( originalShape->dim(1) != overrideShape[1]
241 || originalShape->dim(2) != overrideShape[2]
242 || originalShape->dim(3) != overrideShape[3]))
244 throw ParseException("Parsed input shape for '" + layerParam.name() +
245 "' is incompatible with the override provided");
247 inputTensorInfo.SetShape(overrideShape);
249 else if (!originalShape)
251 throw ParseException("No input descriptor given for '" + layerParam.name() +
252 "' and no input shape found in caffe model");
255 TrackInputBinding(inputLayer, inputId, inputTensorInfo);
256 inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
257 SetArmnnOutputSlotForCaffeTop(layerParam.top(0), inputLayer->GetOutputSlot(0));
260 void CaffeParser::ParseConvLayer(const LayerParameter& layerParam)
262 BOOST_ASSERT(layerParam.type() == "Convolution");
263 ValidateNumInputsOutputs(layerParam, 1, 1);
265 ConvolutionParameter convParam = layerParam.convolution_param();
266 BlobShape inputShape = TensorDescToBlobShape(GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo());
268 unsigned int kernelH = 0;
269 unsigned int kernelW = 0;
270 if (convParam.has_kernel_h() && convParam.has_kernel_w())
272 kernelH = convParam.kernel_h();
273 kernelW = convParam.kernel_w();
275 else if (convParam.kernel_size_size() > 0)
277 kernelH = (convParam.kernel_size()).Get(0);
278 kernelW = (convParam.kernel_size()).Get(0);
282 throw ParseException("Loading Convolution Layer: Kernel Size defined Illegally");
285 if (!IsInRange(kernelH, 0, 11) || !IsInRange(kernelW, 0, 11) || (kernelH != kernelW))
287 throw ParseException("Loading Convolution Layer: Kernel has invalid size");
290 unsigned int strideH = 0;
291 unsigned int strideW = 0;
293 if (convParam.has_stride_h() && convParam.has_stride_w())
295 strideH = convParam.stride_h();
296 strideW = convParam.stride_w();
298 else if (convParam.stride_size() > 0)
300 strideH = (convParam.stride()).Get(0);
301 strideW = (convParam.stride()).Get(0);
305 // Caffe stride default is 1
306 strideH = strideW = 1;
309 if (!IsInRange(strideH, 0, 11) || !IsInRange(strideW, 0, 11) || (strideH != strideW))
311 throw ParseException("Loading Convolution Layer: stride has invalid size");
314 unsigned int padH = 0;
315 unsigned int padW = 0;
317 if (convParam.has_pad_h() && convParam.has_pad_w())
319 padH = convParam.pad_h();
320 padW = convParam.pad_w();
322 else if (convParam.pad_size() > 0)
324 padH = (convParam.pad()).Get(0);
325 padW = (convParam.pad()).Get(0);
333 if (!IsInRange(padH, 0, 11) || !IsInRange(padW, 0, 11) || (padH != padW))
335 throw ParseException("Loading Convolution Layer: pad has invalid size");
339 const unsigned int numGroups = convParam.has_group() ? convParam.group() : 1;
340 armnn::IOutputSlot& inputConnection = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
342 vector<string> convLayerNames(numGroups);
343 vector<armnn::IConnectableLayer*> convLayers(numGroups);
344 convLayerNames[0] = layerParam.name();
346 armnn::IConnectableLayer* splitterLayer = nullptr;
349 // This convolution is to be applied to chunks of the input data so add a splitter layer
351 // Redirect the convolution input to the splitter
352 unsigned int splitterDimSizes[4] = {static_cast<unsigned int>(inputShape.dim(0)),
353 static_cast<unsigned int>(inputShape.dim(1)),
354 static_cast<unsigned int>(inputShape.dim(2)),
355 static_cast<unsigned int>(inputShape.dim(3))};
357 // Split dimension 1 of the splitter output shape and conv input shapes
358 // according to the number of groups
359 splitterDimSizes[1] /= numGroups;
360 inputShape.set_dim(1, splitterDimSizes[1]);
362 // This is used to describe how the input is to be split
363 ViewsDescriptor splitterDesc(numGroups);
365 // Create an output node for each group, giving each a unique name
366 for (unsigned int g = 0; g < numGroups; ++g)
368 // Work out the names of the splitter layers child convolutions
370 ss << layerParam.name() << "_" << g;
371 convLayerNames[g] = ss.str();
373 splitterDesc.SetViewOriginCoord(g, 1, splitterDimSizes[1] * g);
375 // Set the size of the views.
376 for (unsigned int dimIdx=0; dimIdx < 4; dimIdx++)
378 splitterDesc.SetViewSize(g, dimIdx, splitterDimSizes[dimIdx]);
382 const std::string splitterLayerName = std::string("splitter_") + layerParam.bottom(0);
384 // Add the splitter layer
385 splitterLayer = m_Network->AddSplitterLayer(splitterDesc,
386 splitterLayerName.c_str());
388 inputConnection.Connect(splitterLayer->GetInputSlot(0));
389 for (unsigned int i = 0; i < splitterLayer->GetNumOutputSlots(); i++)
391 splitterLayer->GetOutputSlot(i).SetTensorInfo(BlobShapeToTensorInfo(inputShape));
395 // Ignored Caffe Parameters
403 // Not Available ArmNN Interface Parameters
404 // * Rounding policy;
406 Convolution2dDescriptor convolution2dDescriptor;
407 convolution2dDescriptor.m_PadLeft = padW;
408 convolution2dDescriptor.m_PadRight = padW;
409 convolution2dDescriptor.m_PadTop = padH;
410 convolution2dDescriptor.m_PadBottom = padH;
411 convolution2dDescriptor.m_StrideX = strideW;
412 convolution2dDescriptor.m_StrideY = strideH;
414 unsigned int numFilters = convParam.num_output();
416 // Populate convolution output tensor descriptor dimensions
417 BlobShape outputShape;
418 outputShape.add_dim(0);
419 outputShape.set_dim(0, inputShape.dim(0));
420 outputShape.add_dim(1);
421 // Ensure that dimension 1 of the convolution output is split according to the number of groups.
422 outputShape.set_dim(1, numFilters / numGroups);
423 outputShape.add_dim(2);
425 2, (static_cast<int>(static_cast<float>(inputShape.dim(2) + 2 * padH - kernelH) /
426 boost::numeric_cast<float>(strideH)) + 1));
427 outputShape.add_dim(3);
429 3, (static_cast<int>(static_cast<float>(inputShape.dim(3) + 2 * padW - kernelW) /
430 boost::numeric_cast<float>(strideW)) + 1));
432 // Load the weight data for ALL groups
433 vector<float> weightData(boost::numeric_cast<size_t>(numGroups * inputShape.dim(1) * outputShape.dim(1) *
435 GetDataFromBlob(layerParam, weightData, 0);
437 const unsigned int weightDimSizes[4] = {
438 static_cast<unsigned int>(outputShape.dim(1)), static_cast<unsigned int>(inputShape.dim(1)), kernelH, kernelW};
440 // Bias data - This defaults to true in Caffe
442 vector<float> biasData;
443 convolution2dDescriptor.m_BiasEnabled = convParam.has_bias_term() ? convParam.bias_term() : true;
444 if (convolution2dDescriptor.m_BiasEnabled)
446 biasData.resize(boost::numeric_cast<size_t>(numGroups * outputShape.dim(1)), 1.f);
447 GetDataFromBlob(layerParam, biasData, 1);
449 const unsigned int biasDimSizes[1] = {static_cast<unsigned int>(outputShape.dim(1))};
450 biasInfo = TensorInfo(1, biasDimSizes, DataType::Float32);
453 const unsigned int numWeightsPerGroup = boost::numeric_cast<unsigned int>(weightData.size()) / numGroups;
454 const unsigned int numBiasesPerGroup = boost::numeric_cast<unsigned int>(biasData.size()) / numGroups;
456 armnn::IConnectableLayer* returnLayer = nullptr;
458 for (unsigned int g = 0; g < numGroups; ++g)
460 // set the slot index, group 0 should be connected to the 0th output of the splitter
461 // group 1 should be connected to the 1st output of the splitter
463 // Pull out the weights for this group from that loaded from the model file earlier
464 ConstTensor weights(TensorInfo(4, weightDimSizes, DataType::Float32),
465 weightData.data() + numWeightsPerGroup * g);
467 IConnectableLayer* convLayer = nullptr;
468 if (convolution2dDescriptor.m_BiasEnabled)
470 // Pull out the biases for this group from that loaded from the model file earlier
471 ConstTensor biases(biasInfo, biasData.data() + numBiasesPerGroup * g);
473 convLayer = m_Network->AddConvolution2dLayer(convolution2dDescriptor,
474 weights, biases, convLayerNames[g].c_str());
478 convLayer = m_Network->AddConvolution2dLayer(convolution2dDescriptor,
479 weights, convLayerNames[g].c_str());
481 convLayers[g] = convLayer;
483 // If we have more than one group then the input to the nth convolution the splitter layer's nth output,
484 // otherwise it's the regular input to this layer.
485 armnn::IOutputSlot& splitterInputConnection = splitterLayer ? splitterLayer->GetOutputSlot(g) : inputConnection;
486 splitterInputConnection.Connect(convLayer->GetInputSlot(0));
487 convLayer->GetOutputSlot(0).SetTensorInfo(BlobShapeToTensorInfo(outputShape));
489 returnLayer = convLayer;
494 // If the convolution was performed in chunks, add a layer to merge the results
496 // The merge input shape matches that of the convolution output
497 unsigned int mergeDimSizes[4] = {static_cast<unsigned int>(outputShape.dim(0)),
498 static_cast<unsigned int>(outputShape.dim(1)),
499 static_cast<unsigned int>(outputShape.dim(2)),
500 static_cast<unsigned int>(outputShape.dim(3))};
502 // This is used to describe how the input is to be merged
503 OriginsDescriptor mergeDesc(numGroups);
505 // Now create an input node for each group, using the name from
506 // the output of the corresponding convolution
507 for (unsigned int g = 0; g < numGroups; ++g)
509 mergeDesc.SetViewOriginCoord(g, 1, mergeDimSizes[1] * g);
512 // Make sure the output from the merge is the correct size to hold the data for all groups
513 mergeDimSizes[1] *= numGroups;
514 outputShape.set_dim(1, mergeDimSizes[1]);
516 // The merge layer just assumes the name of the original convolution
517 // layer so the following layer connection "just works"
518 const string mergeOutputName = layerParam.name();
520 // Finally add the merge layer
521 IConnectableLayer* layer = m_Network->AddMergerLayer(mergeDesc, mergeOutputName.c_str());
523 for (unsigned int g = 0; g < numGroups; ++g)
525 convLayers[g]->GetOutputSlot(0).Connect(layer->GetInputSlot(g));
527 layer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(4, mergeDimSizes, DataType::Float32));
534 throw ParseException("Loading Convolution Layer: invalid return layer");
537 SetArmnnOutputSlotForCaffeTop(layerParam.top(0), returnLayer->GetOutputSlot(0));
540 void CaffeParser::ParsePoolingLayer(const LayerParameter& layerParam)
542 ValidateNumInputsOutputs(layerParam, 1, 1);
544 PoolingParameter param = layerParam.pooling_param();
546 const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
549 unsigned int kernel_h = 0;
550 unsigned int kernel_w = 0;
551 if (param.has_kernel_h() && param.has_kernel_w())
553 kernel_h = param.kernel_h();
554 kernel_w = param.kernel_w();
556 else if (param.kernel_size() > 0)
558 kernel_h = param.kernel_size();
559 kernel_w = param.kernel_size();
561 else if (param.has_global_pooling())
563 kernel_h = inputInfo.GetShape()[2];
564 kernel_w = inputInfo.GetShape()[3];
568 throw ParseException("Loading Pooling Layer: Kernel Size defined Illegally");
571 if (!IsInRange(kernel_h, 0, 11) || !IsInRange(kernel_w, 0, 11) || (kernel_h != kernel_w))
573 throw ParseException(boost::str(
574 boost::format("Loading Pooling Layer: kernel has invalid size: %1% x %2%") % kernel_h % kernel_w));
578 // Default to a valid value for the case of global pooling (where the strides don't have to be explicitly set)
579 unsigned int stride_h = 1;
580 unsigned int stride_w = 1;
581 if (param.has_stride_h() && param.has_stride_w())
583 stride_h = param.stride_h();
584 stride_w = param.stride_w();
586 else if (param.has_stride())
588 stride_h = param.stride();
589 stride_w = param.stride();
591 else if (!param.has_global_pooling())
593 throw ParseException("Loading Pooling Layer: Stride Size defined Illegally");
596 if (!IsInRange(stride_h, 0, 11) || !IsInRange(stride_w, 0, 11) || (stride_h != stride_w))
598 throw ParseException("Loading Pooling Layer: stride has invalid size");
602 unsigned int pad_h = 0;
603 unsigned int pad_w = 0;
604 if (param.has_pad_h() && param.has_pad_w())
606 pad_h = param.pad_h();
607 pad_w = param.pad_w();
609 else if (param.has_pad())
620 if (!IsInRange(pad_h, 0, 11) || !IsInRange(pad_w, 0, 11) || (pad_h != pad_w))
622 throw ParseException("Loading Pooling Layer: pad has invalid size");
625 // Ignored Caffe Parameters
626 // Stochastic Pooling
629 // Populate Weight and Bias Filter Descriptor
630 Pooling2dDescriptor pooling2dDescriptor;
631 if (param.has_pool())
633 PoolingParameter_PoolMethod p = param.pool();
636 case PoolingParameter_PoolMethod_MAX:
638 pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Max;
641 case PoolingParameter_PoolMethod_AVE:
643 pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Average;
646 case PoolingParameter_PoolMethod_STOCHASTIC:
648 throw ParseException("Loading Pooling Layer: Stochastic Pooling Not Supported");
652 throw ParseException("Loading Pooling Layer: Mode Not Supported");
658 throw ParseException("Loading Pooling Layer: No Pooling Method Defined");
661 pooling2dDescriptor.m_PadLeft = pad_w;
662 pooling2dDescriptor.m_PadRight = pad_w;
663 pooling2dDescriptor.m_PadTop = pad_h;
664 pooling2dDescriptor.m_PadBottom = pad_h;
665 pooling2dDescriptor.m_StrideX = stride_w;
666 pooling2dDescriptor.m_StrideY = stride_h;
667 pooling2dDescriptor.m_PoolWidth = kernel_w;
668 pooling2dDescriptor.m_PoolHeight = kernel_h;
670 pooling2dDescriptor.m_OutputShapeRounding = OutputShapeRounding::Ceiling;
671 pooling2dDescriptor.m_PaddingMethod = PaddingMethod::IgnoreValue;
673 armnn::IConnectableLayer* poolingLayer = m_Network->AddPooling2dLayer(pooling2dDescriptor,
674 layerParam.name().c_str());
677 TensorInfo outputInfo(
678 { inputInfo.GetShape()[0],
679 inputInfo.GetShape()[1],
680 static_cast<unsigned int>(ceil(
681 static_cast<float>(inputInfo.GetShape()[2] + 2 * pad_h - kernel_h) /
682 boost::numeric_cast<float>(stride_h))) + 1,
683 static_cast<unsigned int>(ceil(
684 static_cast<float>(inputInfo.GetShape()[3] + 2 * pad_w - kernel_w) /
685 boost::numeric_cast<float>(stride_w))) + 1 },
688 GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(poolingLayer->GetInputSlot(0));
689 poolingLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
690 SetArmnnOutputSlotForCaffeTop(layerParam.top(0), poolingLayer->GetOutputSlot(0));
693 void CaffeParser::ParseReluLayer(const LayerParameter& layerParam)
695 ValidateNumInputsOutputs(layerParam, 1, 1);
697 const string& name = layerParam.name();
698 const ReLUParameter& param = layerParam.relu_param();
700 ActivationDescriptor activationDescriptor;
701 const float negativeSlope = param.negative_slope();
702 if (negativeSlope == 0.0f)
704 activationDescriptor.m_Function = ActivationFunction::ReLu;
708 activationDescriptor.m_Function = ActivationFunction::LeakyReLu;
709 activationDescriptor.m_A = negativeSlope;
712 const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
713 IConnectableLayer* const activationLayer = m_Network->AddActivationLayer(activationDescriptor, name.c_str());
714 GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(activationLayer->GetInputSlot(0));
715 activationLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
716 SetArmnnOutputSlotForCaffeTop(layerParam.top(0), activationLayer->GetOutputSlot(0));
719 void CaffeParser::ParseLRNLayer(const LayerParameter& layerParam)
721 ValidateNumInputsOutputs(layerParam, 1, 1);
723 LRNParameter param = layerParam.lrn_param();
725 const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
727 // Ignored BATCH NORMALIZATION Caffe Parameters
728 // Ignored MVN Caffe Parameters
729 // Ignored LRN Caffe Parameters
732 NormalizationDescriptor normalizationDescriptor;
733 if (param.has_norm_region())
735 LRNParameter_NormRegion n = param.norm_region();
738 case LRNParameter_NormRegion_ACROSS_CHANNELS:
740 normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
743 case LRNParameter_NormRegion_WITHIN_CHANNEL:
745 normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Within;
749 throw ParseException("Loading LRN Layer: Mode Not Supported");
754 // Caffe defaults to normalization across channels
755 normalizationDescriptor.m_NormChannelType = NormalizationAlgorithmChannel::Across;
758 normalizationDescriptor.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
759 if (param.has_local_size())
761 normalizationDescriptor.m_NormSize = param.local_size();
765 throw ParseException("Loading LRN Layer: Local_size not defined");
768 if (param.has_alpha())
770 normalizationDescriptor.m_Alpha = param.alpha();
771 normalizationDescriptor.m_Alpha /= boost::numeric_cast<float>(param.local_size());
775 throw ParseException("Loading LRN Layer: Alpha not defined");
777 if (param.has_beta())
779 normalizationDescriptor.m_Beta = param.beta();
783 throw ParseException("Loading LRN Layer: Beta not defined");
787 normalizationDescriptor.m_K = param.k();
790 normalizationDescriptor.m_K = 1;
792 IConnectableLayer* const normLayer = m_Network->AddNormalizationLayer(normalizationDescriptor,
793 layerParam.name().c_str());
794 GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(normLayer->GetInputSlot(0));
795 normLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
797 SetArmnnOutputSlotForCaffeTop(layerParam.top(0), normLayer->GetOutputSlot(0));
800 void CaffeParser::ParseInnerProductLayer(const LayerParameter& layerParam)
802 InnerProductParameter param = layerParam.inner_product_param();
804 ValidateNumInputsOutputs(layerParam, 1, 1);
806 unsigned int outputSize = param.num_output();
808 // Ignored Caffe Parameters
814 FullyConnectedDescriptor tensorFullyConnectedDescriptor;
816 if (param.has_transpose())
818 // If true assume transposed weights
819 tensorFullyConnectedDescriptor.m_TransposeWeightMatrix = param.transpose();
823 // caffe defaults to transposed
824 tensorFullyConnectedDescriptor.m_TransposeWeightMatrix = true;
827 const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
829 TensorInfo weightInfo;
832 // allow implicit flattening of extra dimensions
833 unsigned int inputSize = inputInfo.GetShape()[1];
834 for (unsigned int i = 2; i < inputInfo.GetNumDimensions(); ++i)
836 inputSize *= inputInfo.GetShape()[i];
839 vector<float> weightData(inputSize * outputSize);
841 GetDataFromBlob(layerParam, weightData, 0);
842 const unsigned int swTD[2] = { outputSize, inputSize };
843 ConstTensor weights(TensorInfo(2, swTD, DataType::Float32), weightData);
845 tensorFullyConnectedDescriptor.m_BiasEnabled = true;
846 // Todo: check whether bias enabled
847 armnn::IConnectableLayer* fullyConnectedLayer = nullptr;
848 if (tensorFullyConnectedDescriptor.m_BiasEnabled)
851 vector<float> biasData(outputSize);
853 GetDataFromBlob(layerParam, biasData, 1);
855 const unsigned int sbTD[1] = { outputSize };
857 ConstTensor biases(TensorInfo(1, sbTD, DataType::Float32), biasData);
859 fullyConnectedLayer = m_Network->AddFullyConnectedLayer(tensorFullyConnectedDescriptor, weights, biases,
860 layerParam.name().c_str());
864 fullyConnectedLayer = m_Network->AddFullyConnectedLayer(tensorFullyConnectedDescriptor, weights,
865 layerParam.name().c_str());
868 TensorInfo outputInfo({ inputInfo.GetShape()[0], outputSize }, DataType::Float32);
869 GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(fullyConnectedLayer->GetInputSlot(0));
870 fullyConnectedLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
871 SetArmnnOutputSlotForCaffeTop(layerParam.top(0), fullyConnectedLayer->GetOutputSlot(0));
874 void CaffeParser::ParseSoftmaxLayer(const LayerParameter& layerParam)
876 ValidateNumInputsOutputs(layerParam, 1, 1);
878 SoftmaxParameter param = layerParam.softmax_param();
880 const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
882 // Ignored Caffe Parameters
886 armnn::SoftmaxDescriptor softmaxDescriptor;
887 armnn::IConnectableLayer* const softmaxLayer = m_Network->AddSoftmaxLayer(
889 layerParam.name().c_str());
890 GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(softmaxLayer->GetInputSlot(0));
891 softmaxLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
892 SetArmnnOutputSlotForCaffeTop(layerParam.top(0), softmaxLayer->GetOutputSlot(0));
895 void CaffeParser::ParseEltwiseLayer(const LayerParameter& layerParam)
897 ValidateNumInputsOutputs(layerParam, 2, 1);
899 const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
901 // Ignored Caffe Parameters
904 EltwiseParameter_EltwiseOp operation = EltwiseParameter_EltwiseOp_SUM; // default to sum as per caffe
906 if (layerParam.has_eltwise_param() && layerParam.eltwise_param().has_operation())
908 operation = layerParam.eltwise_param().operation();
911 armnn::IConnectableLayer* newLayer = nullptr;
914 case EltwiseParameter_EltwiseOp_SUM:
916 newLayer = m_Network->AddAdditionLayer(layerParam.name().c_str());
919 case EltwiseParameter_EltwiseOp_PROD:
921 newLayer = m_Network->AddMultiplicationLayer(layerParam.name().c_str());
926 throw ParseException("Unsupported operation in Eltwise layer");
930 GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(newLayer->GetInputSlot(0));
931 GetArmnnOutputSlotForCaffeTop(layerParam.bottom(1)).Connect(newLayer->GetInputSlot(1));
932 newLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
933 SetArmnnOutputSlotForCaffeTop(layerParam.top(0), newLayer->GetOutputSlot(0));
936 void CaffeParser::ParseConcatLayer(const LayerParameter& layerParam)
938 unsigned int numInputs = static_cast<unsigned int>(layerParam.bottom_size());
939 // we assume concat happens along the channel dimension, which is 1 in (0, 1, 2, 3)
940 unsigned int concatDim = 1;
941 unsigned int numOfDims = 4;
943 OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numInputs), numOfDims);// we only consider 4-D tensor here
944 std::vector<unsigned int>mergeDimSizes(numOfDims, 0u);
946 unsigned int mergeDim = 0;
947 for (unsigned int viewIndex = 0; viewIndex < numInputs; ++viewIndex)
949 const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(
950 layerParam.bottom(boost::numeric_cast<int>(viewIndex))).GetTensorInfo();
951 // Check whether the dimensions of the input tensors are actually 4
952 if (inputInfo.GetNumDimensions()!=4)
954 throw ParseException("The number of dimensions for input tensors of the concatenation op should be 4.");
957 mergeDimSizes[0] = inputInfo.GetShape()[0];
958 mergeDimSizes[1] = inputInfo.GetShape()[1];
959 mergeDimSizes[2] = inputInfo.GetShape()[2];
960 mergeDimSizes[3] = inputInfo.GetShape()[3];
962 for (unsigned int j = 0; j < concatDim; ++j)
964 concatDescriptor.SetViewOriginCoord(viewIndex, j, 0);
967 concatDescriptor.SetViewOriginCoord(viewIndex, concatDim, mergeDim);
968 mergeDim += mergeDimSizes[concatDim];
970 for (unsigned int j = concatDim+1; j < numOfDims; ++j)
972 concatDescriptor.SetViewOriginCoord(viewIndex, j, 0);
975 mergeDimSizes[concatDim] = mergeDim;
977 armnn::IConnectableLayer *concatlayer = m_Network->AddMergerLayer(concatDescriptor, layerParam.name().c_str());
978 for (unsigned int i = 0; i < numInputs; ++i)
980 armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(boost::numeric_cast<int>(i)));
981 outputSlot.Connect(concatlayer->GetInputSlot(i));
984 concatlayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(numOfDims, mergeDimSizes.data(), DataType::Float32));
985 SetArmnnOutputSlotForCaffeTop(layerParam.top(0), concatlayer->GetOutputSlot(0));
988 void CaffeParser::ParseBatchNormLayer(const LayerParameter& layerParam)
990 ValidateNumInputsOutputs(layerParam, 1, 1);
992 const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
994 string name = layerParam.name();
996 BatchNormParameter param = layerParam.batch_norm_param();
997 // If use_global_stats is not explicitly set in the model, assume it to be true (its default value
998 // when the network is in the testing phase).
999 if (param.has_use_global_stats())
1001 if (!param.use_global_stats())
1003 throw ParseException(boost::str(boost::format("Error parsing Batch Norm layer '%1%': "
1004 "Parameter 'use_global_stats' is set to false, which is unsupported (value used for training).")
1009 BatchNormalizationDescriptor desc;
1010 desc.m_Eps = param.eps();
1012 unsigned int channels = inputInfo.GetShape()[1];
1013 unsigned int shape[] = {channels};
1015 vector<float> meanData(channels);
1016 GetDataFromBlob(layerParam, meanData, 0);
1018 vector<float> varianceData(channels);
1019 GetDataFromBlob(layerParam, varianceData, 1);
1021 // read moving average factor and apply scaling (if required)
1022 const BlobProto& blob = layerParam.blobs(boost::numeric_cast<int>(2));
1023 const float movingAverageFactor = blob.data(boost::numeric_cast<int>(0));
1024 if(movingAverageFactor != 0.0f)
1026 const float scaleFactor = 1.0f / movingAverageFactor;
1027 auto scaleFunction = [scaleFactor](float f) -> float { return f * scaleFactor; };
1029 std::transform(varianceData.begin(), varianceData.end(), varianceData.begin(), scaleFunction);
1030 std::transform(meanData.begin(), meanData.end(), meanData.begin(), scaleFunction);
1033 // identity scale operation
1034 vector<float> betaData(channels, 0.0f);
1035 vector<float> gammaData(channels, 1.0f);
1037 ConstTensor mean(TensorInfo(1, shape, armnn::DataType::Float32), meanData);
1038 ConstTensor variance(TensorInfo(1, shape, armnn::DataType::Float32), varianceData);
1039 ConstTensor beta(TensorInfo(1, shape, armnn::DataType::Float32), betaData);
1040 ConstTensor gamma(TensorInfo(1, shape, armnn::DataType::Float32), gammaData);
1042 armnn::IConnectableLayer* const batchNormLayer = m_Network->AddBatchNormalizationLayer(desc,
1043 mean, variance, beta, gamma, name.c_str());
1044 GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(batchNormLayer->GetInputSlot(0));
1045 batchNormLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
1046 SetArmnnOutputSlotForCaffeTop(layerParam.top(0), batchNormLayer->GetOutputSlot(0));
1049 void CaffeParser::ParseScaleLayer(const LayerParameter& layerParam)
1051 // current unoptimal solution: add a batchnormalization layer with 0 mean and 1 variance
1052 ValidateNumInputsOutputs(layerParam, 1, 1);
1054 const TensorInfo& inputInfo = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).GetTensorInfo();
1056 string name = layerParam.name();
1058 ScaleParameter param = layerParam.scale_param();
1059 if (param.axis() != 1)
1061 // Would have to use something other than BatchNormalizationLayer in this case
1062 throw ParseException("Loading Scale Layer: Only axis 1 supported currently");
1065 unsigned int channels = inputInfo.GetShape()[1];
1066 unsigned int shape[] = {channels};
1068 BatchNormalizationDescriptor desc;
1069 desc.m_Eps = 0.0f; // don't need epsilon if variance is 1
1070 vector<float> meanData(channels, 0.0f);
1071 vector<float> varianceData(channels, 1.0f);
1072 vector<float> betaData(channels, 0.0f);
1073 vector<float> gammaData(channels);
1075 GetDataFromBlob(layerParam, gammaData, 0);
1077 if(param.has_bias_term())
1079 GetDataFromBlob(layerParam, betaData, 1);
1082 ConstTensor mean(TensorInfo(1, shape, armnn::DataType::Float32), meanData);
1083 ConstTensor variance(TensorInfo(1, shape, armnn::DataType::Float32), varianceData);
1084 ConstTensor beta(TensorInfo(1, shape, armnn::DataType::Float32), betaData);
1085 ConstTensor gamma(TensorInfo(1, shape, armnn::DataType::Float32), gammaData);
1087 armnn::IConnectableLayer* const batchNormLayer = m_Network->AddBatchNormalizationLayer(desc,
1088 mean, variance, beta, gamma, name.c_str());
1089 GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)).Connect(batchNormLayer->GetInputSlot(0));
1090 batchNormLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
1091 SetArmnnOutputSlotForCaffeTop(layerParam.top(0), batchNormLayer->GetOutputSlot(0));
1094 void CaffeParser::ParseSplitLayer(const caffe::LayerParameter& layerParam)
1096 // Used in caffe to duplicate memory - not necessary in armnn
1097 if (layerParam.bottom_size() != 1)
1099 throw ParseException("Split layer '" + layerParam.name() + "' should have exactly 1 bottom");
1101 armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0));
1102 for (int i = 0; i < layerParam.top_size(); i++)
1104 SetArmnnOutputSlotForCaffeTop(layerParam.top(i), outputSlot);
1108 void CaffeParser::ParseDropoutLayer(const caffe::LayerParameter& layerParam)
1110 // Ignored for inference so patch the single input to its single output
1111 if (layerParam.bottom_size() != 1 || layerParam.top_size() != 1)
1113 throw ParseException("Dropout layer '" + layerParam.name() + "' should have exactly 1 bottom and 1 top");
1115 SetArmnnOutputSlotForCaffeTop(layerParam.top(0), GetArmnnOutputSlotForCaffeTop(layerParam.bottom(0)));
/// Records the binding (id + tensor info) for a network input layer, keyed by the layer's name.
/// Thin forwarding wrapper over TrackBindingPoint targeting m_NetworkInputsBindingInfo.
void CaffeParser::TrackInputBinding(armnn::IConnectableLayer* layer,
    armnn::LayerBindingId id,
    const armnn::TensorInfo& tensorInfo)
{
    return TrackBindingPoint(layer, id, tensorInfo, layer->GetName(), m_NetworkInputsBindingInfo);
}
/// Records the binding (id + tensor info) for a network output layer, keyed by the layer's name.
/// Thin forwarding wrapper over TrackBindingPoint targeting m_NetworkOutputsBindingInfo.
void CaffeParser::TrackOutputBinding(armnn::IConnectableLayer* layer,
    armnn::LayerBindingId id,
    const armnn::TensorInfo& tensorInfo)
{
    return TrackBindingPoint(layer, id, tensorInfo, layer->GetName(), m_NetworkOutputsBindingInfo);
}
1132 void CaffeParser::TrackBindingPoint(armnn::IConnectableLayer* layer,
1133 armnn::LayerBindingId id,
1134 const armnn::TensorInfo& tensorInfo,
1135 const char* bindingPointDesc,
1136 std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
1138 const std::string layerName = layer->GetName();
1139 auto it = nameToBindingInfo.find(layerName);
1140 if (it == nameToBindingInfo.end())
1142 nameToBindingInfo[layerName] = std::make_pair(id, tensorInfo);
1146 throw ParseException(boost::str(
1147 boost::format("Id %1% used by more than one %2% layer") % id % bindingPointDesc));
1151 armnn::IOutputSlot& CaffeParser::GetArmnnOutputSlotForCaffeTop(const std::string& caffeTopName) const
1153 auto it = m_ArmnnOutputSlotForCaffeTop.find(caffeTopName);
1154 if (it != m_ArmnnOutputSlotForCaffeTop.end())
1160 throw ParseException(boost::str(boost::format(
1161 "Could not find armnn output slot for Caffe top '%1%'") % caffeTopName));
1165 void CaffeParser::SetArmnnOutputSlotForCaffeTop(const std::string& caffeTopName, armnn::IOutputSlot& armnnOutputSlot)
1167 auto it = m_ArmnnOutputSlotForCaffeTop.find(caffeTopName);
1168 if (it == m_ArmnnOutputSlotForCaffeTop.end())
1170 m_ArmnnOutputSlotForCaffeTop[caffeTopName] = &armnnOutputSlot;
1174 throw ParseException("Attempting to add duplicate entry for Caffe top '" + caffeTopName + "'");
1178 void CaffeParser::ResolveInPlaceLayers(caffe::NetParameter& netParameter)
1180 // Find layers with the same top
1181 std::map<std::string, std::vector<caffe::LayerParameter*>> layersByTop;
1182 for (int layerIdx = 0; layerIdx < netParameter.layer_size(); ++layerIdx)
1184 caffe::LayerParameter& layer = *netParameter.mutable_layer(layerIdx);
1185 for (int i = 0; i < layer.top_size(); ++i)
1187 layersByTop[layer.top(i)].push_back(&layer);
1191 // For each set of layers with the same top, resolve them to a linear chain rather than in-place layers.
1192 // Note that for 'regular' layers, there will be a single layer in each group and so this will be a no-op.
1193 for (auto layersWithSameTopIt : layersByTop)
1195 const std::string& top = layersWithSameTopIt.first;
1196 const std::vector<caffe::LayerParameter*>& layersWithSameTop = layersWithSameTopIt.second;
1198 // Chain the layers together in the order that they are listed in the prototxt (hopefully this is correct).
1199 // Note that the last layer will not have its top modified so that other layers will continue to reference it.
1200 for (unsigned int layerIdx = 0; layerIdx < layersWithSameTop.size() - 1; ++layerIdx)
1202 caffe::LayerParameter& layer1 = *layersWithSameTop[layerIdx];
1203 caffe::LayerParameter& layer2 = *layersWithSameTop[layerIdx+1];
1204 if (layer1.top_size() != 1)
1206 throw ParseException("Node '" + layer1.name() + "' is an in-place layer but "
1207 "doesn't have exactly one top.");
1209 std::string newTop = layer1.name() + "_top";
1210 layer1.set_top(0, newTop);
1211 if (layer2.bottom_size() != 1 || layer2.bottom(0) != top)
1213 throw ParseException("Node '" + layer2.name() + "' is an in-place layer but "
1214 " doesn't have exactly one bottom, or it doesn't match its top.");
1216 layer2.set_bottom(0, newTop);
1221 void CaffeParser::LoadNetParam(NetParameter& netParameter)
1223 // caffe models sometimes have an implicit input layer.
1224 // in that case, add an explicit one
1225 if (netParameter.input_size() > 0)
1227 LayerParameter* newLayer = netParameter.add_layer();
1229 newLayer->set_type("Input");
1230 newLayer->set_name(netParameter.input(0));
1231 newLayer->add_top(netParameter.input(0));
1233 InputParameter* inputParam = newLayer->mutable_input_param();
1234 BlobShape* shape = inputParam->add_shape();
1236 int dim_size = netParameter.input_dim_size();
1237 for (int i = 0; i < dim_size; ++i)
1239 shape->add_dim(netParameter.input_dim(i));
1243 // Replace in-place layers with regular ones to make the rest of the parsing easier.
1244 ResolveInPlaceLayers(netParameter);
1246 // Create a lookup of Caffe layers by name
1247 for (int i = 0; i < netParameter.layer_size(); ++i)
1249 const caffe::LayerParameter& layer = netParameter.layer(i);
1250 for (int i = 0; i < layer.top_size(); ++i)
1252 m_CaffeLayersByTopName[layer.top(i)] = &layer;
1256 // Find the output layers the user requested
1257 std::vector<const caffe::LayerParameter*> targetLayers;
1258 for (const std::string& requestedOutputName : m_RequestedOutputs)
1260 auto nodeIt = m_CaffeLayersByTopName.find(requestedOutputName);
1261 if (nodeIt == m_CaffeLayersByTopName.end())
1263 throw ParseException("Couldn't find requested output layer '" + requestedOutputName + "' in graph");
1265 targetLayers.push_back(nodeIt->second);
1268 // Sort them into a linear ordering such that all inputs of a node are before the node itself
1269 std::vector<const caffe::LayerParameter*> sortedNodes;
1270 if (!armnnUtils::GraphTopologicalSort<const caffe::LayerParameter*>(
1272 [this](const caffe::LayerParameter* node)
1274 return GetInputs(*node);
1278 throw ParseException("Cycle detected in graph");
1281 // Parse each node in order, knowing that all inputs of a node will be processed before the node itself
1282 for (const caffe::LayerParameter* current : sortedNodes)
1284 auto it = ms_CaffeLayerNameToParsingFunctions.find(current->type());
1285 if (it == ms_CaffeLayerNameToParsingFunctions.end())
1287 throw ParseException("Unsupported layer type '" + current->type() + "'");
1289 auto func = it->second;
1290 (this->*func)(*current);
1293 // Add ArmNN output layers connected to each requested output
1294 for (const std::string& requestedOutput : m_RequestedOutputs)
1296 armnn::IOutputSlot& outputSlot = GetArmnnOutputSlotForCaffeTop(requestedOutput);
1298 const armnn::LayerBindingId outputId = boost::numeric_cast<armnn::LayerBindingId>(
1299 m_NetworkOutputsBindingInfo.size());
1300 armnn::IConnectableLayer* const outputLayer = m_Network->AddOutputLayer(outputId, requestedOutput.c_str());
1301 outputSlot.Connect(outputLayer->GetInputSlot(0));
1303 TrackOutputBinding(outputLayer, outputId, outputLayer->GetInputSlot(0).GetConnection()->GetTensorInfo());
1307 INetworkPtr CaffeParser::CreateNetworkFromTextFile(const char* graphFile,
1308 const std::map<std::string, armnn::TensorShape>& inputShapes,
1309 const std::vector<std::string>& requestedOutputs)
1311 FILE* fd = fopen(graphFile, "r");
1315 std::stringstream error;
1316 error << "Graph file " << graphFile << " failed to open";
1317 throw FileNotFoundException(error.str());
1320 // Parse the file into a message
1321 NetParameter netParam;
1322 auto input = new google::protobuf::io::FileInputStream(fileno(fd));
1323 bool success = google::protobuf::TextFormat::Parse(input, &netParam);
1329 std::stringstream error;
1330 error << "Failed to parse graph file";
1331 throw ParseException(error.str());
1334 return CreateNetworkFromNetParameter(netParam, inputShapes, requestedOutputs);
1337 INetworkPtr CaffeParser::CreateNetworkFromString(const char* protoText,
1338 const std::map<std::string, armnn::TensorShape>& inputShapes,
1339 const std::vector<std::string>& requestedOutputs)
1341 // Parse the string into a message
1342 NetParameter netParam;
1343 bool success = google::protobuf::TextFormat::ParseFromString(protoText, &netParam);
1347 std::stringstream error;
1348 error << "Failed to parse graph string";
1349 throw ParseException(error.str());
1352 return CreateNetworkFromNetParameter(netParam, inputShapes, requestedOutputs);
1355 INetworkPtr CaffeParser::CreateNetworkFromBinaryFile(const char* graphFile,
1356 const std::map<std::string, armnn::TensorShape>& inputShapes,
1357 const std::vector<std::string>& requestedOutputs)
1359 FILE* fd = fopen(graphFile, "rb");
1363 std::stringstream error;
1364 error << "Graph file " << graphFile << " failed to open";
1365 throw FileNotFoundException(error.str());
1368 // Parse the file into a message
1369 NetParameter netParam;
1371 FileInputStream inStream(fileno(fd));
1372 CodedInputStream codedStream(&inStream);
1373 codedStream.SetTotalBytesLimit(INT_MAX, INT_MAX);
1374 bool success = netParam.ParseFromCodedStream(&codedStream);
1379 std::stringstream error;
1380 error << "Failed to parse protobuf file" << graphFile;
1381 throw ParseException(error.str());
1384 return CreateNetworkFromNetParameter(netParam, inputShapes, requestedOutputs);
1387 INetworkPtr CaffeParser::CreateNetworkFromNetParameter(NetParameter& netParam,
1388 const std::map<std::string, armnn::TensorShape>& inputShapes,
1389 const std::vector<std::string>& requestedOutputs)
1391 m_NetworkInputsBindingInfo.clear();
1392 m_NetworkOutputsBindingInfo.clear();
1394 m_Network = INetwork::Create();
1396 m_InputShapes = inputShapes;
1397 if (requestedOutputs.size() == 0)
1399 throw ParseException("requestedOutputs must have at least one entry");
1401 m_RequestedOutputs = requestedOutputs;
1405 LoadNetParam(netParam);
1407 catch (const ParseException& e)
1415 return move(m_Network);
1418 void CaffeParser::Cleanup()
1420 // cleanup, in case we reuse this parser
1421 m_CaffeLayersByTopName.clear();
1422 m_InputShapes.clear();
1423 m_RequestedOutputs.clear();
1424 m_ArmnnOutputSlotForCaffeTop.clear();