{
class ILayerVisitor
{
+protected:
+ ILayerVisitor() {}
+ virtual ~ILayerVisitor() {}
+
public:
/// Function that an InputLayer should call back to when its Accept(ILayerVisitor&) function is invoked.
/// @param layer - pointer to the layer which is calling back to this visit function.
const ConstTensor& weights,
const char* name = nullptr) = 0;
+ /// Function that a Detection PostProcess layer should call back to when its
+ /// Accept(ILayerVisitor&) function is invoked.
+ /// @param layer - pointer to the layer which is calling back to this visit function.
+ /// @param descriptor - Description of the Detection PostProcess layer.
+ /// @param name - Optional name for the layer.
+ virtual void VisitDetectionPostProcessLayer(const IConnectableLayer* layer,
+ const DetectionPostProcessDescriptor& descriptor,
+ const char* name = nullptr) = 0;
+
/// Function that a 2D depthwise convolution layer with biases should call back to when its
/// Accept(ILayerVisitor&) function is invoked.
/// @param layer - pointer to the layer which is calling back to this visit function.
#include <armnn/DescriptorsFwd.hpp>
#include <armnn/TensorFwd.hpp>
#include <armnn/Optional.hpp>
+#include <armnn/ILayerVisitor.hpp>
#include <armnn/Types.hpp>
virtual std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const = 0;
virtual LayerGuid GetGuid() const = 0;
+
+ virtual void Accept(ILayerVisitor& visitor) const = 0;
protected:
/// Objects are not deletable via the handle
~IConnectableLayer() {}
inferredShapes[0]);
}
+/// Calls back to the visitor with this layer, its descriptor and its name.
+void ActivationLayer::Accept(ILayerVisitor& visitor) const
+{
+    const auto& descriptor = GetParameters();
+    const char* layerName  = GetName();
+    visitor.VisitActivationLayer(this, descriptor, layerName);
+}
+
} // namespace armnn
/// Check if the input tensor shape(s) will lead to a valid configuration of @ref ActivationLayer.
void ValidateTensorShapesFromInputs() override;
+ void Accept(ILayerVisitor& visitor) const override;
+
+
protected:
/// Constructor to create an ActivationLayer.
/// @param [in] param ActivationDescriptor to configure the activation operation.
return CloneBase<AdditionLayer>(graph, GetName());
}
+/// Calls back to the visitor with this layer's identity and name.
+void AdditionLayer::Accept(ILayerVisitor& visitor) const
+{
+    const char* layerName = GetName();
+    visitor.VisitAdditionLayer(this, layerName);
+}
+
} // namespace armnn
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
AdditionLayer* Clone(Graph& graph) const override;
+
+ void Accept(ILayerVisitor& visitor) const override;
protected:
/// Constructor to create an AdditionLayer.
return {m_Mean, m_Variance, m_Beta, m_Gamma};
}
+/// Calls back to the visitor with this layer, its descriptor and its name.
+/// NOTE(review): default-constructed ConstTensors are passed in place of the
+/// real mean/variance/beta/gamma tensors — presumably a placeholder until the
+/// constant data is plumbed through; confirm before relying on them.
+void BatchNormalizationLayer::Accept(ILayerVisitor& visitor) const
+{
+    ConstTensor dummy;
+    // Pass GetName() so the visitor receives the layer name, consistent with
+    // every other Accept implementation (the visit functions all take an
+    // optional trailing name parameter).
+    visitor.VisitBatchNormalizationLayer(this, GetParameters(), dummy, dummy, dummy, dummy, GetName());
+}
+
} // namespace armnn
/// will lead to a valid configuration of @ref BatchNormalizationLayer.
void ValidateTensorShapesFromInputs() override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a BatchNormalizationLayer.
/// @param [in] param BatchNormalizationDescriptor to configure the batch normalization operation.
return std::vector<TensorShape>({ TensorShape({ outputBatchSize, channelSize, outputHeight, outputWidth }) });
}
}
+
+/// Calls back to the visitor with this layer, its descriptor and its name.
+void BatchToSpaceNdLayer::Accept(ILayerVisitor& visitor) const
+{
+    const auto& descriptor = GetParameters();
+    const char* layerName  = GetName();
+    visitor.VisitBatchToSpaceNdLayer(this, descriptor, layerName);
+}
+
} // namespace armnn
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a BatchToSpaceNdLayer.
/// @param [in] param BatchToSpaceNdDescriptor to configure the BatchToSpaceNd operation.
outShape);
}
+/// Calls back to the visitor with this layer, its constant tensor and name.
+/// NOTE(review): a default-constructed ConstTensor is passed instead of the
+/// layer's actual m_LayerOutput data — presumably a placeholder; confirm.
+void ConstantLayer::Accept(ILayerVisitor& visitor) const
+{
+    ConstTensor dummy;
+    visitor.VisitConstantLayer(this, dummy, GetName());
+}
+
} // namespace armnn
/// Free up the constant source data stored by the layer.
void ReleaseConstantData() override {};
+ void Accept(ILayerVisitor& visitor) const override;
+
std::unique_ptr<ScopedCpuTensorHandle> m_LayerOutput;
protected:
/// Constructor to create a ConstantLayer.
inferredShapes[0]);
}
+/// ILayerVisitor callback.  This layer type is only inserted by the
+/// optimizer and never appears in a user-constructed input graph, so being
+/// visited indicates a programming error.
+/// The parameter is left unnamed to avoid an unused-parameter warning.
+void ConvertFp16ToFp32Layer::Accept(ILayerVisitor& /*visitor*/) const
+{
+    // these conversion layers are only inserted by the
+    // optimizer and so will never be in an input graph.
+    throw armnn::Exception("ConvertFp16ToFp32Layer should never appear in an input graph");
+}
+
} // namespace armnn
/// will lead to a valid configuration of @ref ConvertFp16ToFp32Layer.
void ValidateTensorShapesFromInputs() override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a ConvertFp16ToFp32Layer.
/// @param [in] name Optional name for the layer.
inferredShapes[0]);
}
+/// ILayerVisitor callback.  This layer type is only inserted by the
+/// optimizer and never appears in a user-constructed input graph, so being
+/// visited indicates a programming error.
+/// The parameter is left unnamed to avoid an unused-parameter warning.
+void ConvertFp32ToFp16Layer::Accept(ILayerVisitor& /*visitor*/) const
+{
+    // These conversion layers are only inserted by the
+    // optimizer and so will never be in an input graph.
+    throw armnn::Exception("ConvertFp32ToFp16Layer should never appear in an input graph");
+}
+
} // namespace armnn
/// will lead to a valid configuration of @ref ConvertFp32ToFp16Layer.
void ValidateTensorShapesFromInputs() override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a ConvertFp32ToFp16Layer.
/// @param [in] name Optional name for the layer.
return {m_Weight, m_Bias};
}
+/// Calls back to the visitor with this layer, its descriptor, weights
+/// (and bias when enabled) and name.
+/// NOTE(review): a default-constructed ConstTensor is passed where the real
+/// weights/bias are expected — presumably a placeholder; confirm.
+void Convolution2dLayer::Accept(ILayerVisitor& visitor) const
+{
+    ConstTensor dummy;
+    // The bias tensor is only reported when the descriptor enables it.
+    if (GetParameters().m_BiasEnabled)
+    {
+        visitor.VisitConvolution2dLayer(this, GetParameters(), dummy, dummy, GetName());
+    }
+    else
+    {
+        visitor.VisitConvolution2dLayer(this, GetParameters(), dummy, GetName());
+    }
+}
+
} // namespace armnn
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a Convolution2dLayer.
/// @param [in] param Convolution2dDescriptor to configure the convolution2d operation.
inferredShapes[0]);
}
+/// ILayerVisitor callback.  Debug layers are never part of a user-built
+/// input graph, so being visited indicates a programming error.
+/// The parameter is left unnamed to avoid an unused-parameter warning.
+void DebugLayer::Accept(ILayerVisitor& /*visitor*/) const
+{
+    // by design debug layers are never in input graphs
+    throw armnn::Exception("DebugLayer should never appear in an input graph");
+}
+
} // namespace armnn
/// will lead to a valid configuration of @ref DebugLayer.
void ValidateTensorShapesFromInputs() override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a DebugLayer.
/// @param [in] param DebugDescriptor to configure the debug layer.
return {m_Weight, m_Bias};
}
+/// Calls back to the visitor with this layer, its descriptor, weights
+/// (and bias when enabled) and name.
+/// NOTE(review): a default-constructed ConstTensor is passed where the real
+/// weights/bias are expected — presumably a placeholder; confirm.
+void DepthwiseConvolution2dLayer::Accept(ILayerVisitor& visitor) const
+{
+    ConstTensor dummy;
+    // The bias tensor is only reported when the descriptor enables it.
+    if (GetParameters().m_BiasEnabled)
+    {
+        visitor.VisitDepthwiseConvolution2dLayer(this, GetParameters(), dummy, dummy, GetName());
+    }
+    else
+    {
+        visitor.VisitDepthwiseConvolution2dLayer(this, GetParameters(), dummy, GetName());
+    }
+}
+
} // namespace armnn
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a DepthwiseConvolution2dLayer.
/// @param [in] param DepthwiseConvolution2dDescriptor to configure the depthwise convolution2d.
VerifyLayerConnections(2, CHECK_LOCATION());
}
+/// Calls back to the visitor with this layer, its descriptor and its name.
+void DetectionPostProcessLayer::Accept(ILayerVisitor& visitor) const
+{
+    const auto& descriptor = GetParameters();
+    const char* layerName  = GetName();
+    visitor.VisitDetectionPostProcessLayer(this, descriptor, layerName);
+}
+
} // namespace armnn
/// will lead to a valid configuration of @ref DetectionPostProcessLayer.
void ValidateTensorShapesFromInputs() override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a DetectionPostProcessLayer.
/// @param [in] param DetectionPostProcessDescriptor to configure the detection postprocess.
return CloneBase<DivisionLayer>(graph, GetName());
}
+/// Calls back to the visitor with this layer's identity and name.
+void DivisionLayer::Accept(ILayerVisitor& visitor) const
+{
+    const char* layerName = GetName();
+    visitor.VisitDivisionLayer(this, layerName);
+}
+
} // namespace armnn
/// @param [in] graph The graph into which this layer is being cloned.
DivisionLayer* Clone(Graph& graph) const override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a DivisionLayer.
/// @param [in] name Optional name for the layer.
return CloneBase<EqualLayer>(graph, GetName());
}
+/// Calls back to the visitor with this layer's identity and name.
+void EqualLayer::Accept(ILayerVisitor& visitor) const
+{
+    const char* layerName = GetName();
+    visitor.VisitEqualLayer(this, layerName);
+}
+
} // namespace armnn
/// @param [in] graph The graph into which this layer is being cloned.
EqualLayer* Clone(Graph& graph) const override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a EqualLayer.
/// @param [in] name Optional name for the layer.
inferredShapes[0]);
}
+/// ILayerVisitor callback.  Fake-quantization layers are not expected in a
+/// user-built input graph, so being visited indicates a programming error.
+/// The parameter is left unnamed to avoid an unused-parameter warning.
+void FakeQuantizationLayer::Accept(ILayerVisitor& /*visitor*/) const
+{
+    throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph");
+}
+
} // namespace armnn
/// will lead to a valid configuration of @ref FakeQuantizationLayer.
void ValidateTensorShapesFromInputs() override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a FakeQuantizationLayer.
/// @param [in] param FakeQuantizationDescriptor to configure the fake quantization operation.
inferredShapes[0]);
}
+/// Calls back to the visitor with this layer's identity and name.
+void FloorLayer::Accept(ILayerVisitor& visitor) const
+{
+    const char* layerName = GetName();
+    visitor.VisitFloorLayer(this, layerName);
+}
+
} // namespace armnn
/// will lead to a valid configuration of @ref FloorLayer.
void ValidateTensorShapesFromInputs() override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a FloorLayer.
/// @param [in] name Optional name for the layer.
return {m_Weight, m_Bias};
}
+/// Calls back to the visitor with this layer, its descriptor, weights
+/// (and bias when enabled) and name.
+/// NOTE(review): a default-constructed ConstTensor is passed where the real
+/// weights/bias are expected — presumably a placeholder; confirm.
+void FullyConnectedLayer::Accept(ILayerVisitor& visitor) const
+{
+    ConstTensor dummy;
+    // The bias tensor is only reported when the descriptor enables it.
+    if (GetParameters().m_BiasEnabled)
+    {
+        visitor.VisitFullyConnectedLayer(this, GetParameters(), dummy, dummy, GetName());
+    }
+    else
+    {
+        visitor.VisitFullyConnectedLayer(this, GetParameters(), dummy, GetName());
+    }
+}
+
} // namespace armnn
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a FullyConnectedLayer.
/// @param [in] param FullyConnectedDescriptor to configure the fully connected operation.
inferredShape);
}
+/// Calls back to the visitor with this layer's identity and name.
+void GatherLayer::Accept(ILayerVisitor& visitor) const
+{
+    const char* layerName = GetName();
+    visitor.VisitGatherLayer(this, layerName);
+}
+
} // namespace armnn
/// will lead to a valid configuration of @ref GatherLayer.
void ValidateTensorShapesFromInputs() override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a GatherLayer.
/// @param [in] name Optional name for the layer.
return CloneBase<GreaterLayer>(graph, GetName());
}
+/// Calls back to the visitor with this layer's identity and name.
+void GreaterLayer::Accept(ILayerVisitor& visitor) const
+{
+    const char* layerName = GetName();
+    visitor.VisitGreaterLayer(this, layerName);
+}
+
} // namespace armnn
/// @param [in] graph The graph into which this layer is being cloned.
GreaterLayer* Clone(Graph& graph) const override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a GreaterLayer.
/// @param [in] name Optional name for the layer.
"InputLayer should already have the TensorInfo set.");
}
+/// Calls back to the visitor with this layer, its binding id and its name.
+void InputLayer::Accept(ILayerVisitor& visitor) const
+{
+    // Redundant `this->` removed for consistency with OutputLayer::Accept.
+    visitor.VisitInputLayer(this, GetBindingId(), GetName());
+}
+
} // namespace armnn
/// will lead to a valid configuration of @ref InputLayer.
void ValidateTensorShapesFromInputs() override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create an InputLayer.
/// @param id The layer binding id number.
inferredShapes[0]);
}
+/// Calls back to the visitor with this layer, its descriptor and its name.
+void L2NormalizationLayer::Accept(ILayerVisitor& visitor) const
+{
+    const auto& descriptor = GetParameters();
+    const char* layerName  = GetName();
+    visitor.VisitL2NormalizationLayer(this, descriptor, layerName);
+}
+
} // namespace armnn
/// will lead to a valid configuration of @ref L2NormalizationLayer.
void ValidateTensorShapesFromInputs() override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a L2NormalizationLayer.
/// @param [in] param L2NormalizationDescriptor to configure the L2 normalization operation.
m_PeepholeParameters.m_CellToOutputWeights};
}
+/// Calls back to the visitor with this layer, its descriptor, input
+/// parameters and name.
+/// NOTE(review): a default-constructed LstmInputParams is passed instead of
+/// the layer's real weight/bias tensors — presumably a placeholder; confirm.
+void LstmLayer::Accept(ILayerVisitor& visitor) const
+{
+    LstmInputParams dummy;
+    visitor.VisitLstmLayer(this, GetParameters(), dummy, GetName());
+}
+
} // namespace armnn
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a LstmLayer.
/// @param [in] param LstmDescriptor to configure the lstm operation.
return CloneBase<MaximumLayer>(graph, GetName());
}
+/// Calls back to the visitor with this layer's identity and name.
+void MaximumLayer::Accept(ILayerVisitor& visitor) const
+{
+    const char* layerName = GetName();
+    visitor.VisitMaximumLayer(this, layerName);
+}
+
} // namespace armnn
/// @param [in] graph The graph into which this layer is being cloned.
MaximumLayer* Clone(Graph& graph) const override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a MaximumLayer.
/// @param [in] name Optional name for the layer.
inferredShape);
}
+/// Calls back to the visitor with this layer, its descriptor and its name.
+void MeanLayer::Accept(ILayerVisitor& visitor) const
+{
+    const auto& descriptor = GetParameters();
+    const char* layerName  = GetName();
+    visitor.VisitMeanLayer(this, descriptor, layerName);
+}
+
} // namespace armnn
/// will lead to a valid configuration of @ref MeanLayer.
void ValidateTensorShapesFromInputs() override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a MeanLayer.
/// @param [in] param MeanDescriptor to configure the mean operation.
inferredShapes[0]);
}
+/// ILayerVisitor callback.  MemCopy layers are not expected in a user-built
+/// input graph, so being visited indicates a programming error.
+/// The parameter is left unnamed to avoid an unused-parameter warning.
+void MemCopyLayer::Accept(ILayerVisitor& /*visitor*/) const
+{
+    throw armnn::Exception("MemCopyLayer should not appear in an input graph");
+}
+
} // namespace armnn
/// will lead to a valid configuration of @ref MemCopyLayer.
void ValidateTensorShapesFromInputs() override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a MemCopyLayer.
/// @param [in] name Optional name for the layer.
inferredShapes[0]);
}
+/// Calls back to the visitor with this layer, its descriptor and its name.
+void MergerLayer::Accept(ILayerVisitor& visitor) const
+{
+    const auto& descriptor = GetParameters();
+    const char* layerName  = GetName();
+    visitor.VisitMergerLayer(this, descriptor, layerName);
+}
+
} // namespace armnn
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a MergerLayer.
/// @param [in] param OriginsDescriptor to configure the merger operation.
return CloneBase<MinimumLayer>(graph, GetName());
}
+/// Calls back to the visitor with this layer's identity and name.
+void MinimumLayer::Accept(ILayerVisitor& visitor) const
+{
+    const char* layerName = GetName();
+    visitor.VisitMinimumLayer(this, layerName);
+}
+
} // namespace armnn
/// @param [in] graph The graph into which this layer is being cloned.
MinimumLayer* Clone(Graph& graph) const override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a MinimumLayer.
/// @param [in] name Optional name for the layer.
return CloneBase<MultiplicationLayer>(graph, GetName());
}
+/// Calls back to the visitor with this layer's identity and name.
+void MultiplicationLayer::Accept(ILayerVisitor& visitor) const
+{
+    const char* layerName = GetName();
+    visitor.VisitMultiplicationLayer(this, layerName);
+}
+
} // namespace armnn
/// @param [in] graph The graph into which this layer is being cloned.
MultiplicationLayer* Clone(Graph& graph) const override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a MultiplicationLayer.
/// @param [in] name Optional name for the layer
inferredShapes[0]);
}
+/// Calls back to the visitor with this layer, its descriptor and its name.
+void NormalizationLayer::Accept(ILayerVisitor& visitor) const
+{
+    const auto& descriptor = GetParameters();
+    const char* layerName  = GetName();
+    visitor.VisitNormalizationLayer(this, descriptor, layerName);
+}
+
} // namespace armnn
/// will lead to a valid configuration of @ref NormalizationLayer.
void ValidateTensorShapesFromInputs() override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a NormalizationLayer.
/// @param [in] param NormalizationDescriptor to configure the normalization operation.
"OutputLayer: Input slot must be connected.");
}
+/// Calls back to the visitor with this layer, its binding id and its name.
+void OutputLayer::Accept(ILayerVisitor& visitor) const
+{
+    const auto bindingId = GetBindingId();
+    visitor.VisitOutputLayer(this, bindingId, GetName());
+}
+
} // namespace armnn
/// will lead to a valid configuration of @ref OutputLayer.
void ValidateTensorShapesFromInputs() override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create an OutputLayer.
/// @param id The layer binding id number.
return;
}
+/// Calls back to the visitor with this layer, its descriptor and its name.
+void PadLayer::Accept(ILayerVisitor& visitor) const
+{
+    const auto& descriptor = GetParameters();
+    const char* layerName  = GetName();
+    visitor.VisitPadLayer(this, descriptor, layerName);
+}
+
} // namespace armnn
\ No newline at end of file
/// will lead to a valid configuration of @ref PadLayer.
void ValidateTensorShapesFromInputs() override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a PadLayer.
/// @param [in] param PadDescriptor to configure the pad operation.
inferredShapes[0]);
}
+/// Calls back to the visitor with this layer, its descriptor and its name.
+void PermuteLayer::Accept(ILayerVisitor& visitor) const
+{
+    const auto& descriptor = GetParameters();
+    const char* layerName  = GetName();
+    visitor.VisitPermuteLayer(this, descriptor, layerName);
+}
+
} // namespace armnn
GetPermutation().IsEqual(boost::polymorphic_downcast<const PermuteLayer*>(&other)->GetPermutation());
}
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a PermuteLayer.
/// @param [in] param PermuteDescriptor to configure the permute operation.
inferredShapes[0]);
}
+/// Calls back to the visitor with this layer, its descriptor and its name.
+void Pooling2dLayer::Accept(ILayerVisitor& visitor) const
+{
+    const auto& descriptor = GetParameters();
+    const char* layerName  = GetName();
+    visitor.VisitPooling2dLayer(this, descriptor, layerName);
+}
+
} // namespace armnn
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a Pooling2dLayer.
/// @param [in] param Pooling2dDescriptor to configure the pooling2d operation.
m_PreCompiledObject = preCompiledObject;
}
+/// ILayerVisitor callback.  Pre-compiled layers are not expected in a
+/// user-built input graph, so being visited indicates a programming error.
+/// The parameter is left unnamed to avoid an unused-parameter warning.
+void PreCompiledLayer::Accept(ILayerVisitor& /*visitor*/) const
+{
+    throw armnn::Exception("PreCompiledLayer should not appear in an input graph");
+}
+
} // namespace armnn
void SetPreCompiledObject(const std::shared_ptr<void>& preCompiledObject);
+ void Accept(ILayerVisitor& visitor) const override;
+
private:
PreCompiledLayer(const PreCompiledLayer& other) = delete;
PreCompiledLayer& operator=(const PreCompiledLayer& other) = delete;
inferredShapes[0]);
}
+/// Calls back to the visitor with this layer, its descriptor and its name.
+void ReshapeLayer::Accept(ILayerVisitor& visitor) const
+{
+    const auto& descriptor = GetParameters();
+    const char* layerName  = GetName();
+    visitor.VisitReshapeLayer(this, descriptor, layerName);
+}
+
} // namespace armnn
m_Param.m_TargetShape == boost::polymorphic_downcast<const ReshapeLayer*>(&other)->m_Param.m_TargetShape;
}
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a ReshapeLayer.
/// @param [in] param ReshapeDescriptor to configure the reshape operation.
inferredShapes[0]);
}
+/// Calls back to the visitor with this layer, its descriptor and its name.
+void ResizeBilinearLayer::Accept(ILayerVisitor& visitor) const
+{
+    const auto& descriptor = GetParameters();
+    const char* layerName  = GetName();
+    visitor.VisitResizeBilinearLayer(this, descriptor, layerName);
+}
+
} // namespace armnn
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a ResizeBilinearLayerLayer.
/// @param [in] param ResizeBilinearDescriptor to configure the resize bilinear operation.
inferredShapes[0]);
}
+/// Calls back to the visitor with this layer's identity and name.
+void RsqrtLayer::Accept(ILayerVisitor& visitor) const
+{
+    const char* layerName = GetName();
+    visitor.VisitRsqrtLayer(this, layerName);
+}
+
} // namespace armnn
\ No newline at end of file
/// will lead to a valid configuration of @ref RsqrtLayer.
void ValidateTensorShapesFromInputs() override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create an RsqrtLayer.
/// @param [in] name Optional name for the layer.
inferredShapes[0]);
}
+/// Calls back to the visitor with this layer, its descriptor and its name.
+void SoftmaxLayer::Accept(ILayerVisitor& visitor) const
+{
+    const auto& descriptor = GetParameters();
+    const char* layerName  = GetName();
+    visitor.VisitSoftmaxLayer(this, descriptor, layerName);
+}
+
} // namespace armnn
/// will lead to a valid configuration of @ref SoftmaxLayer.
void ValidateTensorShapesFromInputs() override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a SoftmaxLayer.
/// @param [in] param SoftmaxDescriptor to configure the softmax operation.
inferredShapes[0]);
}
+/// Calls back to the visitor with this layer, its descriptor and its name.
+void SpaceToBatchNdLayer::Accept(ILayerVisitor& visitor) const
+{
+    const auto& descriptor = GetParameters();
+    const char* layerName  = GetName();
+    visitor.VisitSpaceToBatchNdLayer(this, descriptor, layerName);
+}
+
} // namespace armnn
/// will lead to a valid configuration of @ref SpaceToBatchNdLayer.
void ValidateTensorShapesFromInputs() override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a SpaceToBatchNdLayer.
/// @param [in] param SpaceToBatchNdDescriptor to configure the SpaceToBatchNdLayer operation.
}
}
+/// Calls back to the visitor with this layer, its descriptor and its name.
+void SplitterLayer::Accept(ILayerVisitor& visitor) const
+{
+    const auto& descriptor = GetParameters();
+    const char* layerName  = GetName();
+    visitor.VisitSplitterLayer(this, descriptor, layerName);
+}
+
} // namespace armnn
/// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a SplitterLayer.
/// @param [in] param ViewsDescriptor to configure the splitter operation.
inferredShapes[0]);
}
+/// Calls back to the visitor with this layer, its descriptor and its name.
+void StridedSliceLayer::Accept(ILayerVisitor& visitor) const
+{
+    const auto& descriptor = GetParameters();
+    const char* layerName  = GetName();
+    visitor.VisitStridedSliceLayer(this, descriptor, layerName);
+}
+
} // namespace armnn
/// will lead to a valid configuration of @ref StridedSliceLayer.
void ValidateTensorShapesFromInputs() override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a StridedSliceLayer.
/// @param [in] param StridedSliceDescriptor to configure the strided slice layer.
return CloneBase<SubtractionLayer>(graph, GetName());
}
+/// Calls back to the visitor with this layer's identity and name.
+void SubtractionLayer::Accept(ILayerVisitor& visitor) const
+{
+    const char* layerName = GetName();
+    visitor.VisitSubtractionLayer(this, layerName);
+}
+
} // namespace armnn
/// @param [in] graph The graph into which this layer is being cloned.
SubtractionLayer* Clone(Graph& graph) const override;
+ void Accept(ILayerVisitor& visitor) const override;
+
protected:
/// Constructor to create a SubtractionLayer.
/// @param [in] name Optional name for the layer.