From cdc0149ffe40f14ff4695149d9bdf551f8e07702 Mon Sep 17 00:00:00 2001 From: Teresa Charlin Date: Tue, 9 Jun 2020 18:00:20 +0100 Subject: [PATCH] IVGCVSW-4928 Introduce "ShapeInferenceMethod" Option. Signed-off-by: Teresa Charlin Change-Id: I70ef1a9f3cefa1d4cf9220f0e13131d11e7c6418 --- include/armnn/INetwork.hpp | 8 +++++++- include/armnn/Types.hpp | 16 ++++++++++++++++ src/armnn/Graph.cpp | 4 ++-- src/armnn/Graph.hpp | 2 +- src/armnn/Layer.hpp | 3 ++- src/armnn/layers/AbsLayer.cpp | 4 +++- src/armnn/layers/AbsLayer.hpp | 4 +++- src/armnn/layers/ActivationLayer.cpp | 4 +++- src/armnn/layers/ActivationLayer.hpp | 4 +++- src/armnn/layers/ArgMinMaxLayer.cpp | 4 +++- src/armnn/layers/ArgMinMaxLayer.hpp | 4 +++- src/armnn/layers/BatchNormalizationLayer.cpp | 4 +++- src/armnn/layers/BatchNormalizationLayer.hpp | 4 +++- src/armnn/layers/BatchToSpaceNdLayer.cpp | 4 +++- src/armnn/layers/BatchToSpaceNdLayer.hpp | 4 +++- src/armnn/layers/ComparisonLayer.cpp | 4 +++- src/armnn/layers/ComparisonLayer.hpp | 4 +++- src/armnn/layers/ConcatLayer.cpp | 4 +++- src/armnn/layers/ConcatLayer.hpp | 4 +++- src/armnn/layers/ConstantLayer.cpp | 4 +++- src/armnn/layers/ConstantLayer.hpp | 4 +++- src/armnn/layers/ConvertBf16ToFp32Layer.cpp | 4 +++- src/armnn/layers/ConvertBf16ToFp32Layer.hpp | 4 +++- src/armnn/layers/ConvertFp16ToFp32Layer.cpp | 4 +++- src/armnn/layers/ConvertFp16ToFp32Layer.hpp | 4 +++- src/armnn/layers/ConvertFp32ToBf16Layer.cpp | 4 +++- src/armnn/layers/ConvertFp32ToBf16Layer.hpp | 4 +++- src/armnn/layers/ConvertFp32ToFp16Layer.cpp | 4 +++- src/armnn/layers/ConvertFp32ToFp16Layer.hpp | 4 +++- src/armnn/layers/Convolution2dLayer.cpp | 4 +++- src/armnn/layers/Convolution2dLayer.hpp | 4 +++- src/armnn/layers/DebugLayer.cpp | 4 +++- src/armnn/layers/DebugLayer.hpp | 4 +++- src/armnn/layers/DepthToSpaceLayer.cpp | 4 +++- src/armnn/layers/DepthToSpaceLayer.hpp | 4 +++- src/armnn/layers/DepthwiseConvolution2dLayer.cpp | 4 +++- src/armnn/layers/DepthwiseConvolution2dLayer.hpp | 4 
+++- src/armnn/layers/DequantizeLayer.cpp | 4 +++- src/armnn/layers/DequantizeLayer.hpp | 4 +++- src/armnn/layers/DetectionPostProcessLayer.cpp | 4 +++- src/armnn/layers/DetectionPostProcessLayer.hpp | 4 +++- src/armnn/layers/ElementwiseBaseLayer.cpp | 4 +++- src/armnn/layers/ElementwiseBaseLayer.hpp | 4 +++- src/armnn/layers/ElementwiseUnaryLayer.cpp | 4 +++- src/armnn/layers/ElementwiseUnaryLayer.hpp | 4 +++- src/armnn/layers/FakeQuantizationLayer.cpp | 4 +++- src/armnn/layers/FakeQuantizationLayer.hpp | 4 +++- src/armnn/layers/FillLayer.cpp | 4 +++- src/armnn/layers/FillLayer.hpp | 4 +++- src/armnn/layers/FloorLayer.cpp | 4 +++- src/armnn/layers/FloorLayer.hpp | 4 +++- src/armnn/layers/FullyConnectedLayer.cpp | 4 +++- src/armnn/layers/FullyConnectedLayer.hpp | 4 +++- src/armnn/layers/GatherLayer.cpp | 4 +++- src/armnn/layers/GatherLayer.hpp | 4 +++- src/armnn/layers/InputLayer.cpp | 4 +++- src/armnn/layers/InputLayer.hpp | 4 +++- src/armnn/layers/InstanceNormalizationLayer.cpp | 4 +++- src/armnn/layers/InstanceNormalizationLayer.hpp | 4 +++- src/armnn/layers/L2NormalizationLayer.cpp | 4 +++- src/armnn/layers/L2NormalizationLayer.hpp | 4 +++- src/armnn/layers/LogSoftmaxLayer.cpp | 4 +++- src/armnn/layers/LogSoftmaxLayer.hpp | 4 +++- src/armnn/layers/LstmLayer.cpp | 4 +++- src/armnn/layers/LstmLayer.hpp | 4 +++- src/armnn/layers/MeanLayer.cpp | 4 +++- src/armnn/layers/MeanLayer.hpp | 4 +++- src/armnn/layers/MemCopyLayer.cpp | 4 +++- src/armnn/layers/MemCopyLayer.hpp | 4 +++- src/armnn/layers/MemImportLayer.cpp | 4 +++- src/armnn/layers/MemImportLayer.hpp | 4 +++- src/armnn/layers/MergeLayer.cpp | 4 +++- src/armnn/layers/MergeLayer.hpp | 4 +++- src/armnn/layers/NormalizationLayer.cpp | 4 +++- src/armnn/layers/NormalizationLayer.hpp | 4 +++- src/armnn/layers/OutputLayer.cpp | 4 +++- src/armnn/layers/OutputLayer.hpp | 4 +++- src/armnn/layers/PadLayer.cpp | 4 +++- src/armnn/layers/PadLayer.hpp | 4 +++- src/armnn/layers/PermuteLayer.cpp | 4 +++- 
src/armnn/layers/PermuteLayer.hpp | 4 +++- src/armnn/layers/Pooling2dLayer.cpp | 4 +++- src/armnn/layers/Pooling2dLayer.hpp | 4 +++- src/armnn/layers/PreCompiledLayer.cpp | 4 +++- src/armnn/layers/PreCompiledLayer.hpp | 3 ++- src/armnn/layers/PreluLayer.cpp | 4 +++- src/armnn/layers/PreluLayer.hpp | 4 +++- src/armnn/layers/QLstmLayer.cpp | 4 +++- src/armnn/layers/QLstmLayer.hpp | 4 +++- src/armnn/layers/QuantizeLayer.cpp | 4 +++- src/armnn/layers/QuantizeLayer.hpp | 3 ++- src/armnn/layers/QuantizedLstmLayer.cpp | 4 +++- src/armnn/layers/QuantizedLstmLayer.hpp | 4 +++- src/armnn/layers/ReshapeLayer.cpp | 4 +++- src/armnn/layers/ReshapeLayer.hpp | 4 +++- src/armnn/layers/ResizeLayer.cpp | 4 +++- src/armnn/layers/ResizeLayer.hpp | 4 +++- src/armnn/layers/RsqrtLayer.cpp | 4 +++- src/armnn/layers/RsqrtLayer.hpp | 4 +++- src/armnn/layers/SliceLayer.cpp | 4 +++- src/armnn/layers/SliceLayer.hpp | 4 +++- src/armnn/layers/SoftmaxLayer.cpp | 4 +++- src/armnn/layers/SoftmaxLayer.hpp | 4 +++- src/armnn/layers/SpaceToBatchNdLayer.cpp | 4 +++- src/armnn/layers/SpaceToBatchNdLayer.hpp | 4 +++- src/armnn/layers/SpaceToDepthLayer.cpp | 4 +++- src/armnn/layers/SpaceToDepthLayer.hpp | 4 +++- src/armnn/layers/SplitterLayer.cpp | 4 +++- src/armnn/layers/SplitterLayer.hpp | 4 +++- src/armnn/layers/StackLayer.cpp | 4 +++- src/armnn/layers/StackLayer.hpp | 4 +++- src/armnn/layers/StandInLayer.cpp | 4 +++- src/armnn/layers/StandInLayer.hpp | 4 +++- src/armnn/layers/StridedSliceLayer.cpp | 4 +++- src/armnn/layers/StridedSliceLayer.hpp | 4 +++- src/armnn/layers/SwitchLayer.cpp | 4 +++- src/armnn/layers/SwitchLayer.hpp | 4 +++- src/armnn/layers/TransposeConvolution2dLayer.cpp | 4 +++- src/armnn/layers/TransposeConvolution2dLayer.hpp | 4 +++- src/armnn/layers/TransposeLayer.cpp | 4 +++- src/armnn/layers/TransposeLayer.hpp | 4 +++- 121 files changed, 374 insertions(+), 121 deletions(-) diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp index ade6c52..49cd582 100644 --- 
a/include/armnn/INetwork.hpp +++ b/include/armnn/INetwork.hpp @@ -599,12 +599,15 @@ struct OptimizerOptions : m_ReduceFp32ToFp16(false) , m_Debug(false) , m_ReduceFp32ToBf16(false) + , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly) {} - OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false) + OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false, + ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly) : m_ReduceFp32ToFp16(reduceFp32ToFp16) , m_Debug(debug) , m_ReduceFp32ToBf16(reduceFp32ToBf16) + , m_shapeInferenceMethod(shapeInferenceMethod) { if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16) { @@ -620,6 +623,9 @@ struct OptimizerOptions // Reduce Fp32 data to Bf16 for faster processing bool m_ReduceFp32ToBf16; + + // Infer output size when not available + ShapeInferenceMethod m_shapeInferenceMethod; }; /// Create an optimized version of the network diff --git a/include/armnn/Types.hpp b/include/armnn/Types.hpp index 7c8a533..fb6f134 100644 --- a/include/armnn/Types.hpp +++ b/include/armnn/Types.hpp @@ -143,6 +143,22 @@ enum class OutputShapeRounding Ceiling = 1 }; +/// +/// The ShapeInferenceMethod modifies how the output shapes are treated. +/// When ValidateOnly is selected, the output shapes are inferred from the input parameters of the layer +/// and any mismatch is reported. +/// When InferAndValidate is selected 2 actions must be performed: (1)infer output shape from inputs and (2)validate the +/// shapes as in ValidateOnly. This option has been added to work with tensors whose rank or dimension sizes are not +/// specified explicitly, however this information can be calculated from the inputs. +/// +enum class ShapeInferenceMethod +{ + /// Validate all output shapes + ValidateOnly = 0, + /// Infer missing output shapes and validate all output shapes + InferAndValidate = 1 +}; + /// Each backend should implement an IBackend. 
class IBackend { diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp index bd0cb34..cc33847 100644 --- a/src/armnn/Graph.cpp +++ b/src/armnn/Graph.cpp @@ -489,7 +489,7 @@ void Graph::EraseSubgraphLayers(SubgraphView &subgraph) subgraph.Clear(); } -void Graph::InferTensorInfos() +void Graph::InferTensorInfos(ShapeInferenceMethod shapeInferenceMethod) { for (auto&& layer : TopologicalSort()) { @@ -512,7 +512,7 @@ void Graph::InferTensorInfos() throw LayerValidationException("All inputs must have the TensorInfo set at this point."); } } - layer->ValidateTensorShapesFromInputs(); + layer->ValidateTensorShapesFromInputs(shapeInferenceMethod); } } diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp index ae2d1ee..9673df4 100644 --- a/src/armnn/Graph.hpp +++ b/src/armnn/Graph.hpp @@ -200,7 +200,7 @@ public: void SubstituteSubgraph(SubgraphView& subgraph, IConnectableLayer* substituteLayer); void SubstituteSubgraph(SubgraphView& subgraph, const SubgraphView& substituteSubgraph); - void InferTensorInfos(); + void InferTensorInfos(ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly); void AttachObservable(IGraphObservable* const observable, GraphEvent notifyOnEvent) { m_Views[notifyOnEvent].emplace_back(observable); diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp index 5947523..303de05 100644 --- a/src/armnn/Layer.hpp +++ b/src/armnn/Layer.hpp @@ -277,7 +277,8 @@ public: void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const; - virtual void ValidateTensorShapesFromInputs() = 0; + virtual void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) = 0; std::vector InferOutputShapes(const std::vector& inputShapes) const override; diff --git a/src/armnn/layers/AbsLayer.cpp b/src/armnn/layers/AbsLayer.cpp index 490b03e..6f71415 100644 --- a/src/armnn/layers/AbsLayer.cpp +++ b/src/armnn/layers/AbsLayer.cpp @@ -30,8 +30,10 @@ AbsLayer* 
AbsLayer::Clone(Graph& graph) const return CloneBase(graph, GetName()); } -void AbsLayer::ValidateTensorShapesFromInputs() +void AbsLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/AbsLayer.hpp b/src/armnn/layers/AbsLayer.hpp index 6dc55b4..ab31014 100644 --- a/src/armnn/layers/AbsLayer.hpp +++ b/src/armnn/layers/AbsLayer.hpp @@ -25,7 +25,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref AbsLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/ActivationLayer.cpp b/src/armnn/layers/ActivationLayer.cpp index d310b7e..1aed59b 100644 --- a/src/armnn/layers/ActivationLayer.cpp +++ b/src/armnn/layers/ActivationLayer.cpp @@ -28,8 +28,10 @@ ActivationLayer* ActivationLayer::Clone(Graph& graph) const return CloneBase(graph, m_Param, GetName()); } -void ActivationLayer::ValidateTensorShapesFromInputs() +void ActivationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/ActivationLayer.hpp b/src/armnn/layers/ActivationLayer.hpp index 46845e2..3f0d520 100644 --- a/src/armnn/layers/ActivationLayer.hpp +++ b/src/armnn/layers/ActivationLayer.hpp @@ -23,7 +23,9 @@ public: ActivationLayer* Clone(Graph& graph) const 
override; /// Check if the input tensor shape(s) will lead to a valid configuration of @ref ActivationLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp index a990787..288192f 100644 --- a/src/armnn/layers/ArgMinMaxLayer.cpp +++ b/src/armnn/layers/ArgMinMaxLayer.cpp @@ -69,8 +69,10 @@ std::vector ArgMinMaxLayer::InferOutputShapes(const std::vector({ outputShape }); } -void ArgMinMaxLayer::ValidateTensorShapesFromInputs() +void ArgMinMaxLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/ArgMinMaxLayer.hpp b/src/armnn/layers/ArgMinMaxLayer.hpp index 2d7d223..27cfb20 100644 --- a/src/armnn/layers/ArgMinMaxLayer.hpp +++ b/src/armnn/layers/ArgMinMaxLayer.hpp @@ -31,7 +31,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref ArgMinMaxLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp index 7f61cad..9fcc30c 100644 --- a/src/armnn/layers/BatchNormalizationLayer.cpp +++ b/src/armnn/layers/BatchNormalizationLayer.cpp @@ -48,8 +48,10 @@ BatchNormalizationLayer* BatchNormalizationLayer::Clone(Graph& graph) const return std::move(layer); } -void BatchNormalizationLayer::ValidateTensorShapesFromInputs() +void BatchNormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/BatchNormalizationLayer.hpp b/src/armnn/layers/BatchNormalizationLayer.hpp index 14e6a17..88db81f 100644 --- a/src/armnn/layers/BatchNormalizationLayer.hpp +++ b/src/armnn/layers/BatchNormalizationLayer.hpp @@ -36,7 +36,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref BatchNormalizationLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp index 1da88c6..8341b85 100644 --- a/src/armnn/layers/BatchToSpaceNdLayer.cpp +++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp @@ -41,8 +41,10 @@ BatchToSpaceNdLayer* BatchToSpaceNdLayer::Clone(Graph& graph) const return std::move(layer); } -void BatchToSpaceNdLayer::ValidateTensorShapesFromInputs() +void BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()}); diff --git a/src/armnn/layers/BatchToSpaceNdLayer.hpp b/src/armnn/layers/BatchToSpaceNdLayer.hpp index 5d568cb..b99dc36 100644 --- a/src/armnn/layers/BatchToSpaceNdLayer.hpp +++ b/src/armnn/layers/BatchToSpaceNdLayer.hpp @@ -25,7 +25,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref BatchToSpaceNdLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; /// By default returns inputShapes if the number of inputs are equal to number of outputs, /// otherwise infers the output shapes from given input shapes and layer properties. 
diff --git a/src/armnn/layers/ComparisonLayer.cpp b/src/armnn/layers/ComparisonLayer.cpp index 9108045..483d2e1 100644 --- a/src/armnn/layers/ComparisonLayer.cpp +++ b/src/armnn/layers/ComparisonLayer.cpp @@ -55,8 +55,10 @@ std::vector ComparisonLayer::InferOutputShapes(const std::vector({ TensorShape(numDims, dims.data()) }); } -void ComparisonLayer::ValidateTensorShapesFromInputs() +void ComparisonLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(2, CHECK_LOCATION()); std::vector inferredShapes = InferOutputShapes({ diff --git a/src/armnn/layers/ComparisonLayer.hpp b/src/armnn/layers/ComparisonLayer.hpp index edc66b6..e20bcdf 100644 --- a/src/armnn/layers/ComparisonLayer.hpp +++ b/src/armnn/layers/ComparisonLayer.hpp @@ -32,7 +32,9 @@ public: /// Check if the input tensor shape(s) will lead to a valid configuration /// of @ref ComparisonLayer - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp index b51303b..9a0672b 100644 --- a/src/armnn/layers/ConcatLayer.cpp +++ b/src/armnn/layers/ConcatLayer.cpp @@ -242,8 +242,10 @@ std::vector ConcatLayer::InferOutputShapes(const std::vector({ TensorShape({numDims, extentMax.data()}) }); } -void ConcatLayer::ValidateTensorShapesFromInputs() +void ConcatLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + // Validates Concat layer. 
ConditionalThrowIfNotEqual( "ConcatLayer: Num Inputs must match num views.", diff --git a/src/armnn/layers/ConcatLayer.hpp b/src/armnn/layers/ConcatLayer.hpp index 0d54008..5bb11ba 100644 --- a/src/armnn/layers/ConcatLayer.hpp +++ b/src/armnn/layers/ConcatLayer.hpp @@ -34,7 +34,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref ConcatLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; /// By default returns inputShapes if the number of inputs are equal to number of outputs, /// otherwise infers the output shapes from given input shapes and layer properties. diff --git a/src/armnn/layers/ConstantLayer.cpp b/src/armnn/layers/ConstantLayer.cpp index 136616c..d354acc 100644 --- a/src/armnn/layers/ConstantLayer.cpp +++ b/src/armnn/layers/ConstantLayer.cpp @@ -40,8 +40,10 @@ std::vector ConstantLayer::InferOutputShapes(const std::vector({ inputShapes[0] }); } -void ConstantLayer::ValidateTensorShapesFromInputs() +void ConstantLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + // Get the output shape from the value of the constant layer. TensorShape const& outShape = m_LayerOutput->GetTensorInfo().GetShape(); ConditionalThrowIfNotEqual( diff --git a/src/armnn/layers/ConstantLayer.hpp b/src/armnn/layers/ConstantLayer.hpp index 9525522..23183d2 100644 --- a/src/armnn/layers/ConstantLayer.hpp +++ b/src/armnn/layers/ConstantLayer.hpp @@ -27,7 +27,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref ConstantLayer - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; /// By default returns inputShapes if the number of inputs are equal to number of outputs, /// otherwise infers the output shapes from given input shapes and layer properties. diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp index 30d20b8..b53986a 100644 --- a/src/armnn/layers/ConvertBf16ToFp32Layer.cpp +++ b/src/armnn/layers/ConvertBf16ToFp32Layer.cpp @@ -30,8 +30,10 @@ ConvertBf16ToFp32Layer* ConvertBf16ToFp32Layer::Clone(Graph& graph) const return CloneBase(graph, GetName()); } -void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs() +void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp index b419e5c..136cfed 100644 --- a/src/armnn/layers/ConvertBf16ToFp32Layer.hpp +++ b/src/armnn/layers/ConvertBf16ToFp32Layer.hpp @@ -25,7 +25,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref ConvertBf16ToFp32Layer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp index 08f0e4a..30f9e63 100644 --- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp +++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp @@ -30,8 +30,10 @@ ConvertFp16ToFp32Layer* ConvertFp16ToFp32Layer::Clone(Graph& graph) const return CloneBase(graph, GetName()); } -void ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs() +void ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.hpp b/src/armnn/layers/ConvertFp16ToFp32Layer.hpp index e3b798b..c4ac13b 100644 --- a/src/armnn/layers/ConvertFp16ToFp32Layer.hpp +++ b/src/armnn/layers/ConvertFp16ToFp32Layer.hpp @@ -25,7 +25,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref ConvertFp16ToFp32Layer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp index c9e0962..9f523ae 100644 --- a/src/armnn/layers/ConvertFp32ToBf16Layer.cpp +++ b/src/armnn/layers/ConvertFp32ToBf16Layer.cpp @@ -30,8 +30,10 @@ ConvertFp32ToBf16Layer* ConvertFp32ToBf16Layer::Clone(Graph& graph) const return CloneBase(graph, GetName()); } -void ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs() +void ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/ConvertFp32ToBf16Layer.hpp b/src/armnn/layers/ConvertFp32ToBf16Layer.hpp index 225b033..096dc7e 100644 --- a/src/armnn/layers/ConvertFp32ToBf16Layer.hpp +++ b/src/armnn/layers/ConvertFp32ToBf16Layer.hpp @@ -25,7 +25,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref ConvertFp32ToBf16Layer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp index 95403e9..7ff98ed 100644 --- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp +++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp @@ -29,8 +29,10 @@ ConvertFp32ToFp16Layer* ConvertFp32ToFp16Layer::Clone(Graph& graph) const return CloneBase(graph, GetName()); } -void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs() +void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.hpp b/src/armnn/layers/ConvertFp32ToFp16Layer.hpp index 8bb28f8..c8a5055 100644 --- a/src/armnn/layers/ConvertFp32ToFp16Layer.hpp +++ b/src/armnn/layers/ConvertFp32ToFp16Layer.hpp @@ -24,7 +24,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref ConvertFp32ToFp16Layer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp index d82908a..462d355 100644 --- a/src/armnn/layers/Convolution2dLayer.cpp +++ b/src/armnn/layers/Convolution2dLayer.cpp @@ -112,8 +112,10 @@ std::vector Convolution2dLayer::InferOutputShapes(const std::vector return std::vector({ tensorShape }); } -void Convolution2dLayer::ValidateTensorShapesFromInputs() +void Convolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); // check if we m_Weight data is not nullptr diff --git a/src/armnn/layers/Convolution2dLayer.hpp b/src/armnn/layers/Convolution2dLayer.hpp index bd30826..e88b44d 100644 --- a/src/armnn/layers/Convolution2dLayer.hpp +++ b/src/armnn/layers/Convolution2dLayer.hpp @@ -33,7 +33,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref Convolution2dLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; /// By default returns inputShapes if the number of inputs are equal to number of outputs, /// otherwise infers the output shapes from given input shapes and layer properties. 
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp index 6aaf945..3422de6 100644 --- a/src/armnn/layers/DebugLayer.cpp +++ b/src/armnn/layers/DebugLayer.cpp @@ -34,8 +34,10 @@ DebugLayer* DebugLayer::Clone(Graph& graph) const return CloneBase(graph, GetName()); } -void DebugLayer::ValidateTensorShapesFromInputs() +void DebugLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); std::vector inferredShapes = InferOutputShapes({ diff --git a/src/armnn/layers/DebugLayer.hpp b/src/armnn/layers/DebugLayer.hpp index d50d618..227e056 100644 --- a/src/armnn/layers/DebugLayer.hpp +++ b/src/armnn/layers/DebugLayer.hpp @@ -25,7 +25,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref DebugLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/DepthToSpaceLayer.cpp b/src/armnn/layers/DepthToSpaceLayer.cpp index 2d13271..a334484 100644 --- a/src/armnn/layers/DepthToSpaceLayer.cpp +++ b/src/armnn/layers/DepthToSpaceLayer.cpp @@ -57,8 +57,10 @@ std::vector DepthToSpaceLayer::InferOutputShapes(const std::vector< return std::vector({ outputShape }); } -void DepthToSpaceLayer::ValidateTensorShapesFromInputs() +void DepthToSpaceLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); std::vector inferredShapes = InferOutputShapes({ diff --git a/src/armnn/layers/DepthToSpaceLayer.hpp b/src/armnn/layers/DepthToSpaceLayer.hpp index 53ef6e3..a0ecdcf 100644 --- a/src/armnn/layers/DepthToSpaceLayer.hpp +++ b/src/armnn/layers/DepthToSpaceLayer.hpp @@ -32,7 +32,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref DepthToSpaceLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp index dc6b2c2..7efb307 100644 --- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp +++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp @@ -119,8 +119,10 @@ DepthwiseConvolution2dLayer::InferOutputShapes(const std::vector& i return std::vector{ tensorShape }; } -void DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs() +void DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); // on this level constant data should not be released.. diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp index 67b6da2..7b42a5f 100644 --- a/src/armnn/layers/DepthwiseConvolution2dLayer.hpp +++ b/src/armnn/layers/DepthwiseConvolution2dLayer.hpp @@ -32,7 +32,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref DepthwiseConvolution2dLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; /// By default returns inputShapes if the number of inputs are equal to number of outputs, /// otherwise infers the output shapes from given input shapes and layer properties. 
diff --git a/src/armnn/layers/DequantizeLayer.cpp b/src/armnn/layers/DequantizeLayer.cpp index 5b57279..e0c3d0e 100644 --- a/src/armnn/layers/DequantizeLayer.cpp +++ b/src/armnn/layers/DequantizeLayer.cpp @@ -29,8 +29,10 @@ DequantizeLayer* DequantizeLayer::Clone(Graph& graph) const return CloneBase(graph, GetName()); } -void DequantizeLayer::ValidateTensorShapesFromInputs() +void DequantizeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); std::vector inferredShapes = InferOutputShapes({ diff --git a/src/armnn/layers/DequantizeLayer.hpp b/src/armnn/layers/DequantizeLayer.hpp index c112b60..24c9869 100644 --- a/src/armnn/layers/DequantizeLayer.hpp +++ b/src/armnn/layers/DequantizeLayer.hpp @@ -25,7 +25,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref DequantizeLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp index e8d14d9..2deca32 100644 --- a/src/armnn/layers/DetectionPostProcessLayer.cpp +++ b/src/armnn/layers/DetectionPostProcessLayer.cpp @@ -34,8 +34,10 @@ DetectionPostProcessLayer* DetectionPostProcessLayer::Clone(Graph& graph) const return std::move(layer); } -void DetectionPostProcessLayer::ValidateTensorShapesFromInputs() +void DetectionPostProcessLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(2, CHECK_LOCATION()); // on this level constant data should not be released. 
diff --git a/src/armnn/layers/DetectionPostProcessLayer.hpp b/src/armnn/layers/DetectionPostProcessLayer.hpp index a6eab11..d3c604f 100644 --- a/src/armnn/layers/DetectionPostProcessLayer.hpp +++ b/src/armnn/layers/DetectionPostProcessLayer.hpp @@ -31,7 +31,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref DetectionPostProcessLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/ElementwiseBaseLayer.cpp b/src/armnn/layers/ElementwiseBaseLayer.cpp index 2c1e871..44bbd0b 100644 --- a/src/armnn/layers/ElementwiseBaseLayer.cpp +++ b/src/armnn/layers/ElementwiseBaseLayer.cpp @@ -47,8 +47,10 @@ std::vector ElementwiseBaseLayer::InferOutputShapes(const std::vect return std::vector({ TensorShape(numDims, dims.data()) }); } -void ElementwiseBaseLayer::ValidateTensorShapesFromInputs() +void ElementwiseBaseLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(2, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ diff --git a/src/armnn/layers/ElementwiseBaseLayer.hpp b/src/armnn/layers/ElementwiseBaseLayer.hpp index 4f151b7..1f9888a 100644 --- a/src/armnn/layers/ElementwiseBaseLayer.hpp +++ b/src/armnn/layers/ElementwiseBaseLayer.hpp @@ -18,7 +18,9 @@ class ElementwiseBaseLayer : public Layer public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of the element wise operation. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; /// By default returns inputShapes if the number of inputs are equal to number of outputs, /// otherwise infers the output shapes from given input shapes and layer properties. diff --git a/src/armnn/layers/ElementwiseUnaryLayer.cpp b/src/armnn/layers/ElementwiseUnaryLayer.cpp index c91057c..0908f39 100644 --- a/src/armnn/layers/ElementwiseUnaryLayer.cpp +++ b/src/armnn/layers/ElementwiseUnaryLayer.cpp @@ -40,8 +40,10 @@ std::vector ElementwiseUnaryLayer::InferOutputShapes(const std::vec return std::vector({ input }); } -void ElementwiseUnaryLayer::ValidateTensorShapesFromInputs() +void ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); std::vector inferredShapes = InferOutputShapes({ diff --git a/src/armnn/layers/ElementwiseUnaryLayer.hpp b/src/armnn/layers/ElementwiseUnaryLayer.hpp index 850a814..ae88fcf 100644 --- a/src/armnn/layers/ElementwiseUnaryLayer.hpp +++ b/src/armnn/layers/ElementwiseUnaryLayer.hpp @@ -31,7 +31,9 @@ public: /// Check if the input tensor shape(s) will lead to a valid configuration /// of @ref ElementwiseUnaryLayer - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp index 2b4ad86..7ed6d75 100644 --- a/src/armnn/layers/FakeQuantizationLayer.cpp +++ b/src/armnn/layers/FakeQuantizationLayer.cpp @@ -29,8 +29,10 @@ FakeQuantizationLayer* FakeQuantizationLayer::Clone(Graph& graph) const return CloneBase(graph, m_Param, GetName()); } -void FakeQuantizationLayer::ValidateTensorShapesFromInputs() +void FakeQuantizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/FakeQuantizationLayer.hpp b/src/armnn/layers/FakeQuantizationLayer.hpp index 36c360f..dc22c23 100644 --- a/src/armnn/layers/FakeQuantizationLayer.hpp +++ b/src/armnn/layers/FakeQuantizationLayer.hpp @@ -25,7 +25,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref FakeQuantizationLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/FillLayer.cpp b/src/armnn/layers/FillLayer.cpp index eb9f6af..688486b 100644 --- a/src/armnn/layers/FillLayer.cpp +++ b/src/armnn/layers/FillLayer.cpp @@ -29,8 +29,10 @@ FillLayer* FillLayer::Clone(Graph& graph) const return CloneBase(graph, m_Param, GetName()); } -void FillLayer::ValidateTensorShapesFromInputs() +void FillLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes( { GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/FillLayer.hpp b/src/armnn/layers/FillLayer.hpp index b9a972a..aa12fca 100644 --- a/src/armnn/layers/FillLayer.hpp +++ b/src/armnn/layers/FillLayer.hpp @@ -24,7 +24,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref FillLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/FloorLayer.cpp b/src/armnn/layers/FloorLayer.cpp index fb918f6..9e46ebf 100644 --- a/src/armnn/layers/FloorLayer.cpp +++ b/src/armnn/layers/FloorLayer.cpp @@ -29,8 +29,10 @@ FloorLayer* FloorLayer::Clone(Graph& graph) const return CloneBase(graph, GetName()); } -void FloorLayer::ValidateTensorShapesFromInputs() +void FloorLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/FloorLayer.hpp b/src/armnn/layers/FloorLayer.hpp index e5b30d1..68361d0 100644 --- a/src/armnn/layers/FloorLayer.hpp +++ b/src/armnn/layers/FloorLayer.hpp @@ -25,7 +25,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref FloorLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp index 4bbc9ba..bd947b7 100644 --- a/src/armnn/layers/FullyConnectedLayer.cpp +++ b/src/armnn/layers/FullyConnectedLayer.cpp @@ -61,8 +61,10 @@ std::vector FullyConnectedLayer::InferOutputShapes(const std::vecto return std::vector({ TensorShape({batches, weightShape[dimIdx]})}); } -void FullyConnectedLayer::ValidateTensorShapesFromInputs() +void FullyConnectedLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); // check if we m_Weight data is not nullptr diff --git a/src/armnn/layers/FullyConnectedLayer.hpp b/src/armnn/layers/FullyConnectedLayer.hpp index 7f03cc2..a2d0750 100644 --- a/src/armnn/layers/FullyConnectedLayer.hpp +++ b/src/armnn/layers/FullyConnectedLayer.hpp @@ -32,7 +32,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref FullyConnectedLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; /// By default returns inputShapes if the number of inputs are equal to number of outputs, /// otherwise infers the output shapes from given input shapes and layer properties. 
diff --git a/src/armnn/layers/GatherLayer.cpp b/src/armnn/layers/GatherLayer.cpp index c276d82..a999130 100644 --- a/src/armnn/layers/GatherLayer.cpp +++ b/src/armnn/layers/GatherLayer.cpp @@ -29,8 +29,10 @@ GatherLayer* GatherLayer::Clone(Graph& graph) const return CloneBase(graph, GetName()); } -void GatherLayer::ValidateTensorShapesFromInputs() +void GatherLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(2, CHECK_LOCATION()); const TensorInfo& params = GetInputSlot(0).GetConnection()->GetTensorInfo(); diff --git a/src/armnn/layers/GatherLayer.hpp b/src/armnn/layers/GatherLayer.hpp index 08629d5..598ca44 100644 --- a/src/armnn/layers/GatherLayer.hpp +++ b/src/armnn/layers/GatherLayer.hpp @@ -26,7 +26,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref GatherLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/InputLayer.cpp b/src/armnn/layers/InputLayer.cpp index 84cc43c..42ce153 100644 --- a/src/armnn/layers/InputLayer.cpp +++ b/src/armnn/layers/InputLayer.cpp @@ -28,8 +28,10 @@ InputLayer* InputLayer::Clone(Graph& graph) const return CloneBase(graph, GetBindingId(), GetName()); } -void InputLayer::ValidateTensorShapesFromInputs() +void InputLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + //The input layer should already have it's inputs set during graph building phase in the driver/parser. 
ConditionalThrow(GetOutputSlot(0).IsTensorInfoSet(), "InputLayer should already have the TensorInfo set."); diff --git a/src/armnn/layers/InputLayer.hpp b/src/armnn/layers/InputLayer.hpp index 64138fd..430abcb 100644 --- a/src/armnn/layers/InputLayer.hpp +++ b/src/armnn/layers/InputLayer.hpp @@ -25,7 +25,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref InputLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/InstanceNormalizationLayer.cpp b/src/armnn/layers/InstanceNormalizationLayer.cpp index 25b133a..f79e423 100644 --- a/src/armnn/layers/InstanceNormalizationLayer.cpp +++ b/src/armnn/layers/InstanceNormalizationLayer.cpp @@ -29,8 +29,10 @@ InstanceNormalizationLayer* InstanceNormalizationLayer::Clone(Graph& graph) cons return CloneBase(graph, m_Param, GetName()); } -void InstanceNormalizationLayer::ValidateTensorShapesFromInputs() +void InstanceNormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/InstanceNormalizationLayer.hpp b/src/armnn/layers/InstanceNormalizationLayer.hpp index 2b59b0d..affc028 100644 --- a/src/armnn/layers/InstanceNormalizationLayer.hpp +++ b/src/armnn/layers/InstanceNormalizationLayer.hpp @@ -25,7 +25,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref InstanceNormalizationLayer. 
- void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/L2NormalizationLayer.cpp b/src/armnn/layers/L2NormalizationLayer.cpp index e6d5f06..4a2945e 100644 --- a/src/armnn/layers/L2NormalizationLayer.cpp +++ b/src/armnn/layers/L2NormalizationLayer.cpp @@ -29,8 +29,10 @@ L2NormalizationLayer* L2NormalizationLayer::Clone(Graph& graph) const return CloneBase(graph, m_Param, GetName()); } -void L2NormalizationLayer::ValidateTensorShapesFromInputs() +void L2NormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/L2NormalizationLayer.hpp b/src/armnn/layers/L2NormalizationLayer.hpp index be506b7..1c7e483 100644 --- a/src/armnn/layers/L2NormalizationLayer.hpp +++ b/src/armnn/layers/L2NormalizationLayer.hpp @@ -25,7 +25,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref L2NormalizationLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/LogSoftmaxLayer.cpp b/src/armnn/layers/LogSoftmaxLayer.cpp index 627aa4c..ab05fd2 100644 --- a/src/armnn/layers/LogSoftmaxLayer.cpp +++ b/src/armnn/layers/LogSoftmaxLayer.cpp @@ -29,8 +29,10 @@ LogSoftmaxLayer* LogSoftmaxLayer::Clone(Graph& graph) const return CloneBase(graph, m_Param, GetName()); } -void LogSoftmaxLayer::ValidateTensorShapesFromInputs() +void LogSoftmaxLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/LogSoftmaxLayer.hpp b/src/armnn/layers/LogSoftmaxLayer.hpp index 732e47e..a1907b9 100644 --- a/src/armnn/layers/LogSoftmaxLayer.hpp +++ b/src/armnn/layers/LogSoftmaxLayer.hpp @@ -26,7 +26,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref LogSoftmaxLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp index 653b18a..af708e4 100644 --- a/src/armnn/layers/LstmLayer.cpp +++ b/src/armnn/layers/LstmLayer.cpp @@ -163,8 +163,10 @@ std::vector LstmLayer::InferOutputShapes(const std::vectorGetTensorInfo(); diff --git a/src/armnn/layers/MeanLayer.hpp b/src/armnn/layers/MeanLayer.hpp index b7c5ed3..3aacd59 100644 --- a/src/armnn/layers/MeanLayer.hpp +++ b/src/armnn/layers/MeanLayer.hpp @@ -26,7 +26,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref MeanLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp index e4009de..c087290 100644 --- a/src/armnn/layers/MemCopyLayer.cpp +++ b/src/armnn/layers/MemCopyLayer.cpp @@ -33,8 +33,10 @@ std::unique_ptr MemCopyLayer::CreateWorkload(const IWorkloadFactory& return std::make_unique(descriptor, PrepInfoAndDesc(descriptor)); } -void MemCopyLayer::ValidateTensorShapesFromInputs() +void MemCopyLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/MemCopyLayer.hpp b/src/armnn/layers/MemCopyLayer.hpp index d466d0e..10a9f55 100644 --- a/src/armnn/layers/MemCopyLayer.hpp +++ b/src/armnn/layers/MemCopyLayer.hpp @@ -25,7 +25,9 
@@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref MemCopyLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp index bcccba1..02092f4 100644 --- a/src/armnn/layers/MemImportLayer.cpp +++ b/src/armnn/layers/MemImportLayer.cpp @@ -33,8 +33,10 @@ std::unique_ptr MemImportLayer::CreateWorkload(const IWorkloadFactory return std::make_unique(descriptor, PrepInfoAndDesc(descriptor)); } -void MemImportLayer::ValidateTensorShapesFromInputs() +void MemImportLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/MemImportLayer.hpp b/src/armnn/layers/MemImportLayer.hpp index 452e5e3..9b9c888 100644 --- a/src/armnn/layers/MemImportLayer.hpp +++ b/src/armnn/layers/MemImportLayer.hpp @@ -25,7 +25,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref MemImportLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/MergeLayer.cpp b/src/armnn/layers/MergeLayer.cpp index ad7d8b1..b05eb68 100644 --- a/src/armnn/layers/MergeLayer.cpp +++ b/src/armnn/layers/MergeLayer.cpp @@ -27,8 +27,10 @@ MergeLayer* MergeLayer::Clone(Graph& graph) const return CloneBase(graph, GetName()); } -void MergeLayer::ValidateTensorShapesFromInputs() +void MergeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(2, CHECK_LOCATION()); std::vector inferredShapes = InferOutputShapes({ diff --git a/src/armnn/layers/MergeLayer.hpp b/src/armnn/layers/MergeLayer.hpp index 1452844..3d0cf52 100644 --- a/src/armnn/layers/MergeLayer.hpp +++ b/src/armnn/layers/MergeLayer.hpp @@ -25,7 +25,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref MergeLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; /// Infers the output shapes from given input shapes. /// @param [in] inputShapes The input shapes layer has. 
diff --git a/src/armnn/layers/NormalizationLayer.cpp b/src/armnn/layers/NormalizationLayer.cpp index 44179fd..9011ece 100644 --- a/src/armnn/layers/NormalizationLayer.cpp +++ b/src/armnn/layers/NormalizationLayer.cpp @@ -29,8 +29,10 @@ NormalizationLayer* NormalizationLayer::Clone(Graph& graph) const return CloneBase(graph, m_Param, GetName()); } -void NormalizationLayer::ValidateTensorShapesFromInputs() +void NormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/NormalizationLayer.hpp b/src/armnn/layers/NormalizationLayer.hpp index 8ba3f53..25787a8 100644 --- a/src/armnn/layers/NormalizationLayer.hpp +++ b/src/armnn/layers/NormalizationLayer.hpp @@ -25,7 +25,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref NormalizationLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/OutputLayer.cpp b/src/armnn/layers/OutputLayer.cpp index f00e0a5..5ff9188 100644 --- a/src/armnn/layers/OutputLayer.cpp +++ b/src/armnn/layers/OutputLayer.cpp @@ -29,8 +29,10 @@ OutputLayer* OutputLayer::Clone(Graph& graph) const return CloneBase(graph, GetBindingId(), GetName()); } -void OutputLayer::ValidateTensorShapesFromInputs() +void OutputLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + // Just validates that the input is connected. 
ConditionalThrow(GetInputSlot(0).GetConnection() != nullptr, "OutputLayer: Input slot must be connected."); diff --git a/src/armnn/layers/OutputLayer.hpp b/src/armnn/layers/OutputLayer.hpp index 89bcfd6..26c5a0a 100644 --- a/src/armnn/layers/OutputLayer.hpp +++ b/src/armnn/layers/OutputLayer.hpp @@ -37,7 +37,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref OutputLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/PadLayer.cpp b/src/armnn/layers/PadLayer.cpp index 4fab88e..690318d 100644 --- a/src/armnn/layers/PadLayer.cpp +++ b/src/armnn/layers/PadLayer.cpp @@ -36,8 +36,10 @@ PadLayer* PadLayer::Clone(Graph& graph) const return std::move(layer); } -void PadLayer::ValidateTensorShapesFromInputs() +void PadLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + return; } diff --git a/src/armnn/layers/PadLayer.hpp b/src/armnn/layers/PadLayer.hpp index f3cfb00..a15563d 100644 --- a/src/armnn/layers/PadLayer.hpp +++ b/src/armnn/layers/PadLayer.hpp @@ -26,7 +26,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref PadLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/PermuteLayer.cpp b/src/armnn/layers/PermuteLayer.cpp index e565b48..a585e66 100644 --- a/src/armnn/layers/PermuteLayer.cpp +++ b/src/armnn/layers/PermuteLayer.cpp @@ -40,8 +40,10 @@ std::vector PermuteLayer::InferOutputShapes(const std::vector ({armnnUtils::Permuted(inShape, m_Param.m_DimMappings)}); } -void PermuteLayer::ValidateTensorShapesFromInputs() +void PermuteLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/PermuteLayer.hpp b/src/armnn/layers/PermuteLayer.hpp index f2057d4..9af1d9b 100644 --- a/src/armnn/layers/PermuteLayer.hpp +++ b/src/armnn/layers/PermuteLayer.hpp @@ -27,7 +27,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref PermuteLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; /// By default returns inputShapes if the number of inputs are equal to number of outputs, /// otherwise infers the output shapes from given input shapes and layer properties. 
diff --git a/src/armnn/layers/Pooling2dLayer.cpp b/src/armnn/layers/Pooling2dLayer.cpp index ad2c82f..defed97 100644 --- a/src/armnn/layers/Pooling2dLayer.cpp +++ b/src/armnn/layers/Pooling2dLayer.cpp @@ -100,8 +100,10 @@ std::vector Pooling2dLayer::InferOutputShapes(const std::vector({ tensorShape }); } -void Pooling2dLayer::ValidateTensorShapesFromInputs() +void Pooling2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/Pooling2dLayer.hpp b/src/armnn/layers/Pooling2dLayer.hpp index 2563eb1..2a5703b 100644 --- a/src/armnn/layers/Pooling2dLayer.hpp +++ b/src/armnn/layers/Pooling2dLayer.hpp @@ -25,7 +25,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref Pooling2dLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; /// By default returns inputShapes if the number of inputs are equal to number of outputs, /// otherwise infers the output shapes from given input shapes and layer properties. 
diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp index 3444afc..577e19f 100644 --- a/src/armnn/layers/PreCompiledLayer.cpp +++ b/src/armnn/layers/PreCompiledLayer.cpp @@ -35,8 +35,10 @@ std::unique_ptr PreCompiledLayer::CreateWorkload(const armnn::IWorklo return factory.CreatePreCompiled(descriptor, PrepInfoAndDesc(descriptor)); } -void PreCompiledLayer::ValidateTensorShapesFromInputs() +void PreCompiledLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + // NOTE: since the PreCompiledLayer is an internal layer created from a valid SubgraphView, // we do not need to validate its input shapes } diff --git a/src/armnn/layers/PreCompiledLayer.hpp b/src/armnn/layers/PreCompiledLayer.hpp index 1a87f61..a4b1c78 100644 --- a/src/armnn/layers/PreCompiledLayer.hpp +++ b/src/armnn/layers/PreCompiledLayer.hpp @@ -29,7 +29,8 @@ public: PreCompiledLayer* Clone(Graph &graph) const override; - void ValidateTensorShapesFromInputs() override; + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void SetPreCompiledObject(PreCompiledObjectPtr preCompiledObject); diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp index 6094806..2527cb9 100644 --- a/src/armnn/layers/PreluLayer.cpp +++ b/src/armnn/layers/PreluLayer.cpp @@ -94,8 +94,10 @@ std::vector PreluLayer::InferOutputShapes(const std::vector inferredShapes = InferOutputShapes( diff --git a/src/armnn/layers/PreluLayer.hpp b/src/armnn/layers/PreluLayer.hpp index 2f2704b..6febdf9 100644 --- a/src/armnn/layers/PreluLayer.hpp +++ b/src/armnn/layers/PreluLayer.hpp @@ -32,7 +32,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref PreluLayer. 
- void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp index 7e61548..c5155d7 100644 --- a/src/armnn/layers/QLstmLayer.cpp +++ b/src/armnn/layers/QLstmLayer.cpp @@ -165,8 +165,10 @@ std::vector QLstmLayer::InferOutputShapes(const std::vectorGetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/QuantizeLayer.hpp b/src/armnn/layers/QuantizeLayer.hpp index 32cd53f..a223f59 100644 --- a/src/armnn/layers/QuantizeLayer.hpp +++ b/src/armnn/layers/QuantizeLayer.hpp @@ -21,7 +21,8 @@ public: Layer* Clone(Graph& graph) const override; - void ValidateTensorShapesFromInputs() override; + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp index b56ae3f..e26857e 100644 --- a/src/armnn/layers/QuantizedLstmLayer.cpp +++ b/src/armnn/layers/QuantizedLstmLayer.cpp @@ -91,8 +91,10 @@ std::vector QuantizedLstmLayer::InferOutputShapes(const std::vector return outShapes; } -void QuantizedLstmLayer::ValidateTensorShapesFromInputs() +void QuantizedLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(3, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes( diff --git a/src/armnn/layers/QuantizedLstmLayer.hpp b/src/armnn/layers/QuantizedLstmLayer.hpp index 9e0186f..1353a06 100644 --- a/src/armnn/layers/QuantizedLstmLayer.hpp +++ b/src/armnn/layers/QuantizedLstmLayer.hpp @@ -60,7 +60,9 @@ public: /// Check if 
the input tensor shape(s) /// will lead to a valid configuration of @ref QuantizedLstmLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; /// By default returns inputShapes if the number of inputs are equal to number of outputs, /// otherwise infers the output shapes from given input shapes and layer properties. diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp index b496dbb..0257ca9 100644 --- a/src/armnn/layers/ReshapeLayer.cpp +++ b/src/armnn/layers/ReshapeLayer.cpp @@ -36,8 +36,10 @@ std::vector ReshapeLayer::InferOutputShapes(const std::vector({ m_Param.m_TargetShape }); } -void ReshapeLayer::ValidateTensorShapesFromInputs() +void ReshapeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ }); diff --git a/src/armnn/layers/ReshapeLayer.hpp b/src/armnn/layers/ReshapeLayer.hpp index 5e0e883..4f0300a 100644 --- a/src/armnn/layers/ReshapeLayer.hpp +++ b/src/armnn/layers/ReshapeLayer.hpp @@ -27,7 +27,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref ReshapeLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; /// By default returns inputShapes if the number of inputs are equal to number of outputs, /// otherwise infers the output shapes from given input shapes and layer properties. 
diff --git a/src/armnn/layers/ResizeLayer.cpp b/src/armnn/layers/ResizeLayer.cpp index b16adeb..b07eb9a 100644 --- a/src/armnn/layers/ResizeLayer.cpp +++ b/src/armnn/layers/ResizeLayer.cpp @@ -58,8 +58,10 @@ std::vector ResizeLayer::InferOutputShapes(const std::vector({ tensorShape }); } -void ResizeLayer::ValidateTensorShapesFromInputs() +void ResizeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/ResizeLayer.hpp b/src/armnn/layers/ResizeLayer.hpp index 9ad4910..0adda94 100644 --- a/src/armnn/layers/ResizeLayer.hpp +++ b/src/armnn/layers/ResizeLayer.hpp @@ -25,7 +25,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref ResizeLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; /// By default returns inputShapes if the number of inputs are equal to number of outputs, /// otherwise infers the output shapes from given input shapes and layer properties. 
diff --git a/src/armnn/layers/RsqrtLayer.cpp b/src/armnn/layers/RsqrtLayer.cpp index dfd466d..b3aecb2 100644 --- a/src/armnn/layers/RsqrtLayer.cpp +++ b/src/armnn/layers/RsqrtLayer.cpp @@ -30,8 +30,10 @@ RsqrtLayer* RsqrtLayer::Clone(Graph& graph) const return CloneBase(graph, GetName()); } -void RsqrtLayer::ValidateTensorShapesFromInputs() +void RsqrtLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/RsqrtLayer.hpp b/src/armnn/layers/RsqrtLayer.hpp index 1e51cc0..d4183ef 100644 --- a/src/armnn/layers/RsqrtLayer.hpp +++ b/src/armnn/layers/RsqrtLayer.hpp @@ -25,7 +25,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref RsqrtLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp index d92ed6f..2aa32e3 100644 --- a/src/armnn/layers/SliceLayer.cpp +++ b/src/armnn/layers/SliceLayer.cpp @@ -33,8 +33,10 @@ SliceLayer* SliceLayer::Clone(Graph& graph) const return CloneBase(graph, m_Param, GetName()); } -void SliceLayer::ValidateTensorShapesFromInputs() +void SliceLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/SliceLayer.hpp b/src/armnn/layers/SliceLayer.hpp index abfe472..3d9a7fe 100644 --- a/src/armnn/layers/SliceLayer.hpp +++ b/src/armnn/layers/SliceLayer.hpp @@ -25,7 +25,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref SliceLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; /// By default returns inputShapes if the number of inputs are equal to number of outputs, /// otherwise infers the output shapes from given input shapes and layer properties. 
diff --git a/src/armnn/layers/SoftmaxLayer.cpp b/src/armnn/layers/SoftmaxLayer.cpp index 738347c..56c3792 100644 --- a/src/armnn/layers/SoftmaxLayer.cpp +++ b/src/armnn/layers/SoftmaxLayer.cpp @@ -29,8 +29,10 @@ SoftmaxLayer* SoftmaxLayer::Clone(Graph& graph) const return CloneBase(graph, m_Param, GetName()); } -void SoftmaxLayer::ValidateTensorShapesFromInputs() +void SoftmaxLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/SoftmaxLayer.hpp b/src/armnn/layers/SoftmaxLayer.hpp index 839170e..84aae85 100644 --- a/src/armnn/layers/SoftmaxLayer.hpp +++ b/src/armnn/layers/SoftmaxLayer.hpp @@ -25,7 +25,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref SoftmaxLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp index ce48b5b..fbc3ca7 100644 --- a/src/armnn/layers/SpaceToBatchNdLayer.cpp +++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp @@ -66,8 +66,10 @@ std::vector SpaceToBatchNdLayer::InferOutputShapes(const std::vecto return std::vector({ outputShape }); } -void SpaceToBatchNdLayer::ValidateTensorShapesFromInputs() +void SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); std::vector inferredShapes = InferOutputShapes({ diff --git a/src/armnn/layers/SpaceToBatchNdLayer.hpp b/src/armnn/layers/SpaceToBatchNdLayer.hpp index cb8162f..707017b 100644 --- a/src/armnn/layers/SpaceToBatchNdLayer.hpp +++ b/src/armnn/layers/SpaceToBatchNdLayer.hpp @@ -32,7 +32,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref SpaceToBatchNdLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp index bf65240..5c8e2d4 100644 --- a/src/armnn/layers/SpaceToDepthLayer.cpp +++ b/src/armnn/layers/SpaceToDepthLayer.cpp @@ -59,8 +59,10 @@ std::vector SpaceToDepthLayer::InferOutputShapes(const std::vector< return std::vector({ outputShape }); } -void SpaceToDepthLayer::ValidateTensorShapesFromInputs() +void SpaceToDepthLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); std::vector inferredShapes = InferOutputShapes({ diff --git a/src/armnn/layers/SpaceToDepthLayer.hpp b/src/armnn/layers/SpaceToDepthLayer.hpp index 799c367..ca0d804 100644 --- a/src/armnn/layers/SpaceToDepthLayer.hpp +++ b/src/armnn/layers/SpaceToDepthLayer.hpp @@ -32,7 +32,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref SpaceToDepthLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp index 8ec8121..9455c88 100644 --- a/src/armnn/layers/SplitterLayer.cpp +++ b/src/armnn/layers/SplitterLayer.cpp @@ -139,8 +139,10 @@ std::vector SplitterLayer::InferOutputShapes(const std::vector views; for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++) { diff --git a/src/armnn/layers/SplitterLayer.hpp b/src/armnn/layers/SplitterLayer.hpp index a6c8cbe..39aab90 100644 --- a/src/armnn/layers/SplitterLayer.hpp +++ b/src/armnn/layers/SplitterLayer.hpp @@ -35,7 +35,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref SplitterLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; /// By default returns inputShapes if the number of inputs are equal to number of outputs, /// otherwise infers the output shapes from given input shapes and layer properties. diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp index e034cb4..6e81890 100644 --- a/src/armnn/layers/StackLayer.cpp +++ b/src/armnn/layers/StackLayer.cpp @@ -58,8 +58,10 @@ std::vector StackLayer::InferOutputShapes(const std::vector({ targetShape }); } -void StackLayer::ValidateTensorShapesFromInputs() +void StackLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + // Validates Stack layer. 
ConditionalThrowIfNotEqual( "StackLayer: Num Input Slots must match Num Inputs.", diff --git a/src/armnn/layers/StackLayer.hpp b/src/armnn/layers/StackLayer.hpp index 5ec2e8a..5e937db 100644 --- a/src/armnn/layers/StackLayer.hpp +++ b/src/armnn/layers/StackLayer.hpp @@ -25,7 +25,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref StackLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; /// By default returns inputShapes if the number of inputs are equal to number of outputs, /// otherwise infers the output shapes from given input shapes and layer properties. diff --git a/src/armnn/layers/StandInLayer.cpp b/src/armnn/layers/StandInLayer.cpp index d23d1d0..d79caf6 100644 --- a/src/armnn/layers/StandInLayer.cpp +++ b/src/armnn/layers/StandInLayer.cpp @@ -34,8 +34,10 @@ std::vector StandInLayer::InferOutputShapes(const std::vector StridedSliceLayer::InferOutputShapes( TensorShape(boost::numeric_cast(outputShape.size()), &outputShape[0]) }); } -void StridedSliceLayer::ValidateTensorShapesFromInputs() +void StridedSliceLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()}); diff --git a/src/armnn/layers/StridedSliceLayer.hpp b/src/armnn/layers/StridedSliceLayer.hpp index 0721969..f9ba7e2 100644 --- a/src/armnn/layers/StridedSliceLayer.hpp +++ b/src/armnn/layers/StridedSliceLayer.hpp @@ -31,7 +31,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref StridedSliceLayer. 
- void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/SwitchLayer.cpp b/src/armnn/layers/SwitchLayer.cpp index c4b065a..d408de8 100644 --- a/src/armnn/layers/SwitchLayer.cpp +++ b/src/armnn/layers/SwitchLayer.cpp @@ -27,8 +27,10 @@ SwitchLayer* SwitchLayer::Clone(Graph& graph) const return CloneBase(graph, GetName()); } -void SwitchLayer::ValidateTensorShapesFromInputs() +void SwitchLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(2, CHECK_LOCATION()); ARMNN_ASSERT_MSG(GetNumOutputSlots() == 2, "SwitchLayer: The layer should return 2 outputs."); diff --git a/src/armnn/layers/SwitchLayer.hpp b/src/armnn/layers/SwitchLayer.hpp index 2a6a09d..7022348 100644 --- a/src/armnn/layers/SwitchLayer.hpp +++ b/src/armnn/layers/SwitchLayer.hpp @@ -25,7 +25,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref SwitchLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. 
+ void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; void Accept(ILayerVisitor& visitor) const override; diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp index 2825882..ffe92bb 100644 --- a/src/armnn/layers/TransposeConvolution2dLayer.cpp +++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp @@ -103,8 +103,10 @@ std::vector TransposeConvolution2dLayer::InferOutputShapes( return std::vector({ tensorShape }); } -void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs() +void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null."); diff --git a/src/armnn/layers/TransposeConvolution2dLayer.hpp b/src/armnn/layers/TransposeConvolution2dLayer.hpp index 24c0e49..ecdf7dc 100644 --- a/src/armnn/layers/TransposeConvolution2dLayer.hpp +++ b/src/armnn/layers/TransposeConvolution2dLayer.hpp @@ -32,7 +32,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref TransposeConvolution2dLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; /// Infers the output shapes from given input shapes and layer properties. /// @param [in] inputShapes The input shapes the layer has. 
diff --git a/src/armnn/layers/TransposeLayer.cpp b/src/armnn/layers/TransposeLayer.cpp index c058332..90f8d1a 100644 --- a/src/armnn/layers/TransposeLayer.cpp +++ b/src/armnn/layers/TransposeLayer.cpp @@ -40,8 +40,10 @@ std::vector TransposeLayer::InferOutputShapes(const std::vector ({armnnUtils::TransposeTensorShape(inShape, m_Param.m_DimMappings)}); } -void TransposeLayer::ValidateTensorShapesFromInputs() +void TransposeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) { + IgnoreUnused(shapeInferenceMethod); + VerifyLayerConnections(1, CHECK_LOCATION()); auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() }); diff --git a/src/armnn/layers/TransposeLayer.hpp b/src/armnn/layers/TransposeLayer.hpp index a668ce8..3e94a9f 100644 --- a/src/armnn/layers/TransposeLayer.hpp +++ b/src/armnn/layers/TransposeLayer.hpp @@ -26,7 +26,9 @@ public: /// Check if the input tensor shape(s) /// will lead to a valid configuration of @ref TransposeLayer. - void ValidateTensorShapesFromInputs() override; + /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated. + void ValidateTensorShapesFromInputs( + ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override; /// Infers the output shapes from given input shapes and the permutation vector. /// @param [in] inputShapes The input shapes layer has. -- 2.7.4