namespace armnn
{
+struct BackendOptions;
+using NetworkOptions = std::vector<BackendOptions>;
/// Struct for the users to pass backend specific options
struct BackendOptions
//
#pragma once
+#include <armnn/BackendOptions.hpp>
#include <armnn/Deprecated.hpp>
#include <armnn/DescriptorsFwd.hpp>
#include <armnn/ILayerVisitor.hpp>
#include <armnn/Optional.hpp>
#include <armnn/TensorFwd.hpp>
#include <armnn/Types.hpp>
-#include <armnn/Deprecated.hpp>
#include <memory>
#include <vector>
class INetwork
{
public:
- static INetwork* CreateRaw();
- static INetworkPtr Create();
+ static INetwork* CreateRaw(NetworkOptions networkOptions = {});
+ static INetworkPtr Create(NetworkOptions networkOptions = {});
static void Destroy(INetwork* network);
virtual Status PrintGraph() = 0;
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
subgraph.Clear();
}
-void Graph::InferTensorInfos(ShapeInferenceMethod shapeInferenceMethod)
+void Graph::InferTensorInfos()
{
for (auto&& layer : TopologicalSort())
{
{
throw LayerValidationException("All inputs must have the TensorInfo set at this point.");
}
+
+ if (layer->m_ShapeInferenceMethod == ShapeInferenceMethod::ValidateOnly)
+ {
+ layer->ValidateTensorShapesFromInputs();
+ }
}
- layer->ValidateTensorShapesFromInputs(shapeInferenceMethod);
}
}
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
const Graph& m_Graph;
};
- Graph() : m_LayersInOrder(true) {}
+ Graph(bool shapeInferenceMethod = false)
+ : m_LayersInOrder(true)
+ , m_ShapeInferenceMethod(shapeInferenceMethod ? ShapeInferenceMethod::InferAndValidate :
+ ShapeInferenceMethod::ValidateOnly)
+ {}
Graph(const Graph& other);
void SubstituteSubgraph(SubgraphView& subgraph, IConnectableLayer* substituteLayer);
void SubstituteSubgraph(SubgraphView& subgraph, const SubgraphView& substituteSubgraph);
- void InferTensorInfos(ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly);
+ void InferTensorInfos();
void AttachObservable(IGraphObservable* const observable, GraphEvent notifyOnEvent) {
m_Views[notifyOnEvent].emplace_back(observable);
mutable bool m_LayersInOrder;
std::map<const GraphEvent, std::list<IGraphObservable*>> m_Views;
+ ShapeInferenceMethod m_ShapeInferenceMethod;
};
/// Common base class for layers in the graph.
((LayerEnumOf<LayerT>() == LayerType::Input) || (LayerEnumOf<LayerT>() == LayerType::Output));
LayerT* const layer = new LayerInGraph<LayerT>(*this, std::forward<Args>(args)...);
+ layer->SetShapeInferenceMethod(m_ShapeInferenceMethod);
+
NotifyObservables(GraphEvent::LayerAdded, layer);
return layer;
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "Layer.hpp"
bool OutputSlot::IsTensorInfoSet() const
{
+ if (GetOwningLayer().GetShapeInferenceMethod() == ShapeInferenceMethod::InferAndValidate)
+ {
+ GetOwningLayer().ValidateTensorShapesFromInputs();
+ }
return GetOutputHandler().IsTensorInfoSet();
}
DataLayout layout,
const char* name)
: m_OutputHandlers(numOutputSlots)
+, m_ShapeInferenceMethod(ShapeInferenceMethod::ValidateOnly)
, m_LayerName(name ? name : "")
, m_Type(type)
, m_BackendId()
% GetNameStr()
% location.AsString()));
}
- if(! GetInputSlot(i).GetConnection()->IsTensorInfoSet())
- {
- throw LayerValidationException(
- boost::str(
- boost::format(
- "TensorInfo of Input connection #%1% must be set on connected OutputSlot for "
- "%2% layer %3% %4%")
- % i
- % GetLayerTypeAsCString(this->GetType())
- % GetNameStr()
- % location.AsString()));
- }
}
}
outputShape.AreAllDimensionsSpecified(),
"Unspecified dimension while using ShapeInferenceMethod::ValidateOnly");
}
- else
- {
- if (outputShape.GetDimensionality() == Dimensionality::Specified)
- {
- ConditionalThrow<LayerValidationException>(
- !outputShape.AreAllDimensionsSpecified(),
- "No unspecified dimension while using ShapeInferenceMethod::InferAndValidate");
- }
- }
}
void Layer::SerializeLayerParameters(ParameterStringifyFunction& fn) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
return const_cast<OutputHandler&>(const_cast<const Layer*>(this)->GetOutputHandler(i));
}
+ ShapeInferenceMethod GetShapeInferenceMethod() const { return m_ShapeInferenceMethod; };
+
const std::vector<InputSlot>& GetInputSlots() const { return m_InputSlots; }
const std::vector<OutputSlot>& GetOutputSlots() const { return m_OutputSlots; }
void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const;
- virtual void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) = 0;
+ virtual void ValidateTensorShapesFromInputs() = 0;
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
}
Optional<BackendId> GetBackendHint() const { return m_BackendHint; }
+ void SetShapeInferenceMethod(ShapeInferenceMethod shapeInferenceMethod)
+ {
+ m_ShapeInferenceMethod = shapeInferenceMethod;
+ }
+
protected:
// Graph needs access to the virtual destructor.
friend class Graph;
protected:
std::vector<OutputHandler> m_OutputHandlers;
+ ShapeInferenceMethod m_ShapeInferenceMethod;
private:
const std::string m_LayerName;
LayerGuid m_Guid;
std::list<std::string> m_RelatedLayerNames;
+
};
// A layer user-provided data can be bound to (e.g. inputs, outputs).
namespace armnn
{
-armnn::INetwork* INetwork::CreateRaw()
+armnn::INetwork* INetwork::CreateRaw(NetworkOptions networkOptions)
{
- return new Network();
+ return new Network(networkOptions);
}
-armnn::INetworkPtr INetwork::Create()
+armnn::INetworkPtr INetwork::Create(NetworkOptions networkOptions)
{
- return INetworkPtr(CreateRaw(), &INetwork::Destroy);
+ return INetworkPtr(CreateRaw(networkOptions), &INetwork::Destroy);
}
void INetwork::Destroy(INetwork* network)
return optNet;
}
-
-Network::Network()
-: m_Graph(std::make_unique<Graph>())
+bool Network::GetShapeInferenceMethod()
{
+ if (m_NetworkOptions.size() > 0 && m_NetworkOptions[0].GetBackendId().Get() == "ShapeInferenceMethod")
+ {
+ return m_NetworkOptions[0].GetOption(0).GetValue().AsBool();
+ }
+
+ return false;
}
+Network::Network(NetworkOptions networkOptions)
+: m_NetworkOptions(networkOptions),
+ m_Graph(std::make_unique<Graph>(GetShapeInferenceMethod()))
+{}
Network::~Network()
{
class Network final : public INetwork
{
public:
- Network();
+ Network(NetworkOptions networkOptions = {});
~Network();
const Graph& GetGraph() const { return *m_Graph; }
const Optional<ConstTensor>& biases,
const char* name);
+ bool GetShapeInferenceMethod();
+ NetworkOptions m_NetworkOptions;
+
std::unique_ptr<Graph> m_Graph;
};
return CloneBase<AbsLayer>(graph, GetName());
}
-void AbsLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void AbsLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "AbsLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "AbsLayer");
}
void AbsLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref AbsLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return CloneBase<ActivationLayer>(graph, m_Param, GetName());
}
-void ActivationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ActivationLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ActivationLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ActivationLayer");
}
void ActivationLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s) will lead to a valid configuration of @ref ActivationLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
return std::vector<TensorShape>({ outputShape });
}
-void ArgMinMaxLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ArgMinMaxLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ArgMinMaxLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ArgMinMaxLayer");
}
void ArgMinMaxLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ArgMinMaxLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return std::move(layer);
}
-void BatchNormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void BatchNormalizationLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "BatchNormalizationLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "BatchNormalizationLayer");
}
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref BatchNormalizationLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return std::move(layer);
}
-void BatchToSpaceNdLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void BatchToSpaceNdLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape &outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "BatchToSpaceNdLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "BatchToSpaceNdLayer");
}
std::vector<TensorShape> BatchToSpaceNdLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref BatchToSpaceNdLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
return std::vector<TensorShape>({ TensorShape(numDims, dims.data()) });
}
-void ComparisonLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ComparisonLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(2, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
});
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ComparisonLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ComparisonLayer");
}
void ComparisonLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
/// Check if the input tensor shape(s) will lead to a valid configuration
/// of @ref ComparisonLayer
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return std::vector<TensorShape>({ TensorShape({numDims, extentMax.data()}) });
}
-void ConcatLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ConcatLayer::ValidateTensorShapesFromInputs()
{
// Validates Concat layer.
ConditionalThrowIfNotEqual<LayerValidationException>(
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
std::vector<TensorShape> inputShapes;
for (unsigned int i = 0; i < GetNumInputSlots(); ++i)
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ConcatLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConcatLayer");
}
void ConcatLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ConcatLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
return std::vector<TensorShape>({ inputShapes[0] });
}
-void ConstantLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ConstantLayer::ValidateTensorShapesFromInputs()
{
- IgnoreUnused(shapeInferenceMethod);
// Get the output shape from the value of the constant layer.
TensorShape const& outShape = m_LayerOutput->GetTensorInfo().GetShape();
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ConstantLayer
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
return CloneBase<ConvertBf16ToFp32Layer>(graph, GetName());
}
-void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ConvertBf16ToFp32Layer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ConvertBf16ToFp32Layer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConvertBf16ToFp32Layer");
}
void ConvertBf16ToFp32Layer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ConvertBf16ToFp32Layer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return CloneBase<ConvertFp16ToFp32Layer>(graph, GetName());
}
-void ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ConvertFp16ToFp32Layer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ConvertFp16ToFp32Layer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConvertFp16ToFp32Layer");
}
void ConvertFp16ToFp32Layer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ConvertFp16ToFp32Layer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return CloneBase<ConvertFp32ToBf16Layer>(graph, GetName());
}
-void ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ConvertFp32ToBf16Layer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LayerName");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LayerName");
}
void ConvertFp32ToBf16Layer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ConvertFp32ToBf16Layer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return CloneBase<ConvertFp32ToFp16Layer>(graph, GetName());
}
-void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ConvertFp32ToFp16Layer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LayerName");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LayerName");
}
void ConvertFp32ToFp16Layer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ConvertFp32ToFp16Layer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return std::vector<TensorShape>({ tensorShape });
}
-void Convolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void Convolution2dLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
// check if we m_Weight data is not nullptr
ARMNN_ASSERT_MSG(m_Weight != nullptr, "Convolution2dLayer: Weights data should not be null.");
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "Convolution2dLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Convolution2dLayer");
}
Layer::ConstantTensors Convolution2dLayer::GetConstantTensorsByRef()
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref Convolution2dLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
return CloneBase<DebugLayer>(graph, GetName());
}
-void DebugLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void DebugLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "DebugLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DebugLayer");
}
void DebugLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref DebugLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return std::vector<TensorShape>({ outputShape });
}
-void DepthToSpaceLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void DepthToSpaceLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "DepthToSpaceLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DepthToSpaceLayer");
}
void DepthToSpaceLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref DepthToSpaceLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return std::vector<TensorShape>{ tensorShape };
}
-void DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void DepthwiseConvolution2dLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
// on this level constant data should not be released..
ARMNN_ASSERT_MSG(m_Weight != nullptr, "DepthwiseConvolution2dLayer: Weights data should not be null.");
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "DepthwiseConvolution2dLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DepthwiseConvolution2dLayer");
}
Layer::ConstantTensors DepthwiseConvolution2dLayer::GetConstantTensorsByRef()
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref DepthwiseConvolution2dLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
return CloneBase<DequantizeLayer>(graph, GetName());
}
-void DequantizeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void DequantizeLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "DequantizeLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DequantizeLayer");
}
void DequantizeLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref DequantizeLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return std::move(layer);
}
-void DetectionPostProcessLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void DetectionPostProcessLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(2, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
// on this level constant data should not be released.
ARMNN_ASSERT_MSG(m_Anchors != nullptr, "DetectionPostProcessLayer: Anchors data should not be null.");
const TensorShape& inferredDetectionScores = TensorShape({ 1, detectedBoxes });
const TensorShape& inferredNumberDetections = TensorShape({ 1 });
- ValidateAndCopyShape(outputShape, inferredDetectionBoxes, shapeInferenceMethod, "DetectionPostProcessLayer");
+ ValidateAndCopyShape(outputShape, inferredDetectionBoxes, m_ShapeInferenceMethod, "DetectionPostProcessLayer");
ValidateAndCopyShape(GetOutputSlot(1).GetTensorInfo().GetShape(),
inferredDetectionScores,
- shapeInferenceMethod,
+ m_ShapeInferenceMethod,
"DetectionPostProcessLayer", 1);
ValidateAndCopyShape(GetOutputSlot(2).GetTensorInfo().GetShape(),
inferredDetectionScores,
- shapeInferenceMethod,
+ m_ShapeInferenceMethod,
"DetectionPostProcessLayer", 2);
ValidateAndCopyShape(GetOutputSlot(3).GetTensorInfo().GetShape(),
inferredNumberDetections,
- shapeInferenceMethod,
+ m_ShapeInferenceMethod,
"DetectionPostProcessLayer", 3);
}
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref DetectionPostProcessLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
return std::vector<TensorShape>({ TensorShape(numDims, dims.data()) });
}
-void ElementwiseBaseLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ElementwiseBaseLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(2, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, GetLayerTypeAsCString(GetType()));
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType()));
}
} // namespace armnn
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of the element wise operation.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
return std::vector<TensorShape>({ input });
}
-void ElementwiseUnaryLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ElementwiseUnaryLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, GetLayerTypeAsCString(GetType()));
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType()));
}
void ElementwiseUnaryLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
/// Check if the input tensor shape(s) will lead to a valid configuration
/// of @ref ElementwiseUnaryLayer
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return CloneBase<FakeQuantizationLayer>(graph, m_Param, GetName());
}
-void FakeQuantizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void FakeQuantizationLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "FakeQuantizationLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FakeQuantizationLayer");
}
void FakeQuantizationLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref FakeQuantizationLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return CloneBase<FillLayer>(graph, m_Param, GetName());
}
-void FillLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void FillLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref FillLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return CloneBase<FloorLayer>(graph, GetName());
}
-void FloorLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void FloorLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "FloorLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FloorLayer");
}
void FloorLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref FloorLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return std::vector<TensorShape>({ TensorShape({batches, weightShape[dimIdx]})});
}
-void FullyConnectedLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void FullyConnectedLayer::ValidateTensorShapesFromInputs()
{
- IgnoreUnused(shapeInferenceMethod);
-
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
// check if we m_Weight data is not nullptr
ARMNN_ASSERT_MSG(m_Weight != nullptr, "FullyConnectedLayer: Weights data should not be null.");
ARMNN_ASSERT(inferredShapes.size() == 1);
ARMNN_ASSERT(inferredShapes[0].GetDimensionality() == Dimensionality::Specified);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "FullyConnectedLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FullyConnectedLayer");
}
Layer::ConstantTensors FullyConnectedLayer::GetConstantTensorsByRef()
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref FullyConnectedLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
return CloneBase<GatherLayer>(graph, m_Param, GetName());
}
-void GatherLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void GatherLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(2, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
const TensorInfo& params = GetInputSlot(0).GetConnection()->GetTensorInfo();
const TensorInfo& indices = GetInputSlot(1).GetConnection()->GetTensorInfo();
const TensorShape& inferredShape = TensorShape(outputDim, dimSizes.data());
- ValidateAndCopyShape(outputShape, inferredShape, shapeInferenceMethod, "GatherLayer");
+ ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "GatherLayer");
}
void GatherLayer::Accept(ILayerVisitor& visitor) const
/// Check if the input tensor shape(s).
/// will lead to a valid configuration of @ref GatherLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return CloneBase<InputLayer>(graph, GetBindingId(), GetName());
}
-void InputLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void InputLayer::ValidateTensorShapesFromInputs()
{
- IgnoreUnused(shapeInferenceMethod);
-
//The input layer should already have it's inputs set during graph building phase in the driver/parser.
- ConditionalThrow<LayerValidationException>(GetOutputSlot(0).IsTensorInfoSet(),
+ ConditionalThrow<LayerValidationException>(GetOutputHandler(0).IsTensorInfoSet(),
"InputLayer should already have the TensorInfo set.");
}
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref InputLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return CloneBase<InstanceNormalizationLayer>(graph, m_Param, GetName());
}
-void InstanceNormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void InstanceNormalizationLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "InstanceNormalizationLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "InstanceNormalizationLayer");
}
void InstanceNormalizationLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref InstanceNormalizationLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validate.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return CloneBase<L2NormalizationLayer>(graph, m_Param, GetName());
}
-void L2NormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void L2NormalizationLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "L2NormalizationLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "L2NormalizationLayer");
}
void L2NormalizationLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref L2NormalizationLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
layer->SetBackendId(GetBackendId());
layer->SetGuid(GetGuid());
+ layer->SetShapeInferenceMethod(m_ShapeInferenceMethod);
return layer;
}
return CloneBase<LogSoftmaxLayer>(graph, m_Param, GetName());
}
-void LogSoftmaxLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void LogSoftmaxLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LogSoftmaxLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LogSoftmaxLayer");
}
void LogSoftmaxLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref LogSoftmaxLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return outShapes;
}
-void LstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void LstmLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(3, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes( {
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
"LstmLayer: m_CifgParameters.m_InputGateBias should not be null.");
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LstmLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LstmLayer");
}
else
{
ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
"LstmLayer: m_CifgParameters.m_InputGateBias should not have a value when CIFG is enabled.");
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "LstmLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LstmLayer");
}
if (m_Param.m_ProjectionEnabled)
}
ValidateAndCopyShape(
- GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], shapeInferenceMethod, "LstmLayer", 1);
+ GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], m_ShapeInferenceMethod, "LstmLayer", 1);
ValidateAndCopyShape(
- GetOutputSlot(2).GetTensorInfo().GetShape(), inferredShapes[2], shapeInferenceMethod, "LstmLayer", 2);
+ GetOutputSlot(2).GetTensorInfo().GetShape(), inferredShapes[2], m_ShapeInferenceMethod, "LstmLayer", 2);
ValidateAndCopyShape(
- GetOutputSlot(3).GetTensorInfo().GetShape(), inferredShapes[3], shapeInferenceMethod, "LstmLayer", 3);
+ GetOutputSlot(3).GetTensorInfo().GetShape(), inferredShapes[3], m_ShapeInferenceMethod, "LstmLayer", 3);
if (m_Param.m_LayerNormEnabled)
{
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref LstmLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
return std::move(layer);
}
-void MeanLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void MeanLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
const TensorInfo& input = GetInputSlot(0).GetConnection()->GetTensorInfo();
}
const TensorShape& inferredShape = TensorShape(outputRank, dimSizes.data());
- ValidateAndCopyShape(outputShape, inferredShape, shapeInferenceMethod, "MeanLayer");
+ ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "MeanLayer");
}
void MeanLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref MeanLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return std::make_unique<CopyMemGenericWorkload>(descriptor, PrepInfoAndDesc(descriptor));
}
-void MemCopyLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void MemCopyLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "MemCopyLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MemCopyLayer");
}
void MemCopyLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref MemCopyLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return std::make_unique<ImportMemGenericWorkload>(descriptor, PrepInfoAndDesc(descriptor));
}
-void MemImportLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void MemImportLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "MemImportLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MemImportLayer");
}
void MemImportLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref MemImportLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return CloneBase<MergeLayer>(graph, GetName());
}
-void MergeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void MergeLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(2, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "MergeLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MergeLayer");
}
std::vector<TensorShape> MergeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref MergeLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// Infers the output shapes from given input shapes.
/// @param [in] inputShapes The input shapes layer has.
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
return CloneBase<NormalizationLayer>(graph, m_Param, GetName());
}
-void NormalizationLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void NormalizationLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "NormalizationLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "NormalizationLayer");
}
void NormalizationLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref NormalizationLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return CloneBase<OutputLayer>(graph, GetBindingId(), GetName());
}
-void OutputLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void OutputLayer::ValidateTensorShapesFromInputs()
{
- IgnoreUnused(shapeInferenceMethod);
// Just validates that the input is connected.
ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection() != nullptr,
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref OutputLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return std::move(layer);
}
-void PadLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void PadLayer::ValidateTensorShapesFromInputs()
{
- IgnoreUnused(shapeInferenceMethod);
return;
}
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref PadLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return std::vector<TensorShape> ({armnnUtils::Permuted(inShape, m_Param.m_DimMappings)});
}
-void PermuteLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void PermuteLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "PermuteLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PermuteLayer");
}
void PermuteLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref PermuteLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
return std::vector<TensorShape>({ tensorShape });
}
-void Pooling2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void Pooling2dLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "Pooling2dLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Pooling2dLayer");
}
void Pooling2dLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref Pooling2dLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
return factory.CreatePreCompiled(descriptor, PrepInfoAndDesc(descriptor));
}
-void PreCompiledLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void PreCompiledLayer::ValidateTensorShapesFromInputs()
{
- IgnoreUnused(shapeInferenceMethod);
// NOTE: since the PreCompiledLayer is an internal layer created from a valid SubgraphView,
// we do not need to validate its input shapes
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
PreCompiledLayer* Clone(Graph &graph) const override;
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void SetPreCompiledObject(PreCompiledObjectPtr preCompiledObject);
return { outputShape };
}
-void PreluLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void PreluLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(2, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
std::vector<TensorShape> inferredShapes = InferOutputShapes(
{
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "PreluLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PreluLayer");
}
void PreluLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref PreluLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return outShapes;
}
-void QLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void QLstmLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(3, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes(
{
ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
"QLstmLayer: m_CifgParameters.m_InputGateBias should not be null.");
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "QLstmLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QLstmLayer");
}
else
{
ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
"QLstmLayer: m_CifgParameters.m_InputGateBias should not have a value when CIFG is enabled.");
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "QLstmLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QLstmLayer");
}
if (m_Param.m_ProjectionEnabled)
}
ValidateAndCopyShape(
- GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], shapeInferenceMethod, "QLstmLayer", 1);
+ GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], m_ShapeInferenceMethod, "QLstmLayer", 1);
ValidateAndCopyShape(
- GetOutputSlot(2).GetTensorInfo().GetShape(), inferredShapes[2], shapeInferenceMethod, "QLstmLayer", 2);
+ GetOutputSlot(2).GetTensorInfo().GetShape(), inferredShapes[2], m_ShapeInferenceMethod, "QLstmLayer", 2);
if (m_Param.m_LayerNormEnabled)
{
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref QLstmLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
return clone;
}
-void QuantizeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void QuantizeLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "QuantizeLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QuantizeLayer");
}
void QuantizeLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
Layer* Clone(Graph& graph) const override;
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return outShapes;
}
-void QuantizedLstmLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void QuantizedLstmLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(3, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes(
{
"QuantizedLstmLayer: m_QuantizedLstmParameters.m_OutputGateBias should not be null.");
// Check output TensorShape(s) match inferred shape
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "QuantizedLstmLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QuantizedLstmLayer");
ValidateAndCopyShape(GetOutputSlot(1).GetTensorInfo().GetShape(),
inferredShapes[1],
- shapeInferenceMethod,
+ m_ShapeInferenceMethod,
"QuantizedLstmLayer",
1);
}
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref QuantizedLstmLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
return clone;
}
-void RankLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void RankLayer::ValidateTensorShapesFromInputs()
{
- IgnoreUnused(shapeInferenceMethod);
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
const TensorShape inferredShape = TensorShape(Dimensionality::Scalar);
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
- ValidateAndCopyShape(outputShape, inferredShape, shapeInferenceMethod, "RankLayer");
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
+ ValidateAndCopyShape(outputShape, inferredShape, m_ShapeInferenceMethod, "RankLayer");
}
void RankLayer::Accept(ILayerVisitor& visitor) const
{
Layer* Clone(Graph& graph) const override;
- void ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return std::vector<TensorShape>({ m_Param.m_TargetShape });
}
-void ReshapeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ReshapeLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ReshapeLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ReshapeLayer");
}
void ReshapeLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ReshapeLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
return std::vector<TensorShape>({ tensorShape });
}
-void ResizeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void ResizeLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "ResizeLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ResizeLayer");
}
void ResizeLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref ResizeLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
return CloneBase<RsqrtLayer>(graph, GetName());
}
-void RsqrtLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void RsqrtLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "RsqrtLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "RsqrtLayer");
}
void RsqrtLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref RsqrtLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return CloneBase<SliceLayer>(graph, m_Param, GetName());
}
-void SliceLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void SliceLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SliceLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SliceLayer");
}
std::vector<TensorShape> SliceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref SliceLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
return CloneBase<SoftmaxLayer>(graph, m_Param, GetName());
}
-void SoftmaxLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void SoftmaxLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SoftmaxLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SoftmaxLayer");
}
void SoftmaxLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref SoftmaxLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return std::vector<TensorShape>({ outputShape });
}
-void SpaceToBatchNdLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void SpaceToBatchNdLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SpaceToBatchNdLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SpaceToBatchNdLayer");
}
void SpaceToBatchNdLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref SpaceToBatchNdLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return std::vector<TensorShape>({ outputShape });
}
-void SpaceToDepthLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void SpaceToDepthLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
std::vector<TensorShape> inferredShapes = InferOutputShapes({
GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SpaceToDepthLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SpaceToDepthLayer");
}
void SpaceToDepthLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref SpaceToDepthLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return outShapes;
}
-void SplitterLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void SplitterLayer::ValidateTensorShapesFromInputs()
{
std::for_each(BeginOutputSlots(), EndOutputSlots(), [&](OutputSlot& outputSlot)
{
- VerifyShapeInferenceType(outputSlot.GetTensorInfo().GetShape(), shapeInferenceMethod);
+ VerifyShapeInferenceType(outputSlot.GetTensorInfo().GetShape(), m_ShapeInferenceMethod);
});
std::vector<TensorShape> views;
{
ValidateAndCopyShape(GetOutputSlot(viewIdx).GetTensorInfo().GetShape(),
inferredShapes[viewIdx],
- shapeInferenceMethod,
+ m_ShapeInferenceMethod,
"SplitterLayer",
viewIdx);
}
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref SplitterLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
return std::vector<TensorShape>({ targetShape });
}
-void StackLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void StackLayer::ValidateTensorShapesFromInputs()
{
// Validates Stack layer.
ConditionalThrowIfNotEqual<LayerValidationException>(
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
// Constructs and validates input shapes
std::vector<TensorShape> inputShapes;
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "StackLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "StackLayer");
}
void StackLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref StackLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// By default returns inputShapes if the number of inputs are equal to number of outputs,
/// otherwise infers the output shapes from given input shapes and layer properties.
throw Exception("Stand in layer does not support infering output shapes");
}
-void StandInLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void StandInLayer::ValidateTensorShapesFromInputs()
{
- IgnoreUnused(shapeInferenceMethod);
// Cannot validate this layer since no implementation details can be known by the framework
// so do nothing here.
//
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
/// Check if the input tensor shape(s)
/// Does nothing since cannot validate any properties of this layer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// Empty implementation that throws Exception if called.
/// otherwise infers the output shapes from given input shapes and layer properties.
TensorShape(boost::numeric_cast<unsigned int>(outputShape.size()), &outputShape[0]) });
}
-void StridedSliceLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void StridedSliceLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape()});
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "StridedSliceLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "StridedSliceLayer");
}
void StridedSliceLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref StridedSliceLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
return CloneBase<SwitchLayer>(graph, GetName());
}
-void SwitchLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void SwitchLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(2, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
ARMNN_ASSERT_MSG(GetNumOutputSlots() == 2, "SwitchLayer: The layer should return 2 outputs.");
ARMNN_ASSERT(inferredShapes.size() == 2);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "SwitchLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SwitchLayer");
ValidateAndCopyShape(
- GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], shapeInferenceMethod, "SwitchLayer", 1);
+ GetOutputSlot(1).GetTensorInfo().GetShape(), inferredShapes[1], m_ShapeInferenceMethod, "SwitchLayer", 1);
}
void SwitchLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref SwitchLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
return std::vector<TensorShape>({ tensorShape });
}
-void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null.");
ARMNN_ASSERT(expectedOutputShape.size() == 1);
- ValidateAndCopyShape(outputShape, expectedOutputShape[0], shapeInferenceMethod, "TransposeConvolution2dLayer");
+ ValidateAndCopyShape(outputShape, expectedOutputShape[0], m_ShapeInferenceMethod, "TransposeConvolution2dLayer");
}
Layer::ConstantTensors TransposeConvolution2dLayer::GetConstantTensorsByRef()
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref TransposeConvolution2dLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// Infers the output shapes from given input shapes and layer properties.
/// @param [in] inputShapes The input shapes the layer has.
return std::vector<TensorShape> ({armnnUtils::TransposeTensorShape(inShape, m_Param.m_DimMappings)});
}
-void TransposeLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+void TransposeLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(1, CHECK_LOCATION());
const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
- VerifyShapeInferenceType(outputShape, shapeInferenceMethod);
+ VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
ARMNN_ASSERT(inferredShapes.size() == 1);
- ValidateAndCopyShape(outputShape, inferredShapes[0], shapeInferenceMethod, "TransposeLayer");
+ ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "TransposeLayer");
}
void TransposeLayer::Accept(ILayerVisitor& visitor) const
//
-// Copyright © 2020 Arm Ltd. All rights reserved.
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
/// Check if the input tensor shape(s)
/// will lead to a valid configuration of @ref TransposeLayer.
/// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
- void ValidateTensorShapesFromInputs(
- ShapeInferenceMethod shapeInferenceMethod = ShapeInferenceMethod::ValidateOnly) override;
+ void ValidateTensorShapesFromInputs() override;
/// Infers the output shapes from given input shapes and the permutation vector.
/// @param [in] inputShapes The input shapes layer has.
const unsigned int outputSize = layer->GetNumOutputSlots();
- const auto runTestWithMask = [&](const bool maskPermutations[], ShapeInferenceMethod shapeInferenceMethod)
+ const auto runTestWithMask = [&](const bool maskPermutations[])
{
for (unsigned int i = 0; i < outputSize; ++i)
{
DataType::Float32});
}
- layer->ValidateTensorShapesFromInputs(shapeInferenceMethod);
+ layer->ValidateTensorShapesFromInputs();
for (unsigned int i = 0; i < outputSize; ++i)
{
layer->GetOutputSlot(j).SetTensorInfo({TensorShape(Dimensionality::NotSpecified), DataType::Float32});
}
- BOOST_CHECK_THROW(
- layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::ValidateOnly), LayerValidationException);
+ layer->SetShapeInferenceMethod(ShapeInferenceMethod::ValidateOnly);
- layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::InferAndValidate);
+ BOOST_CHECK_THROW(layer->ValidateTensorShapesFromInputs(), LayerValidationException);
+
+ layer->SetShapeInferenceMethod(ShapeInferenceMethod::InferAndValidate);
+ layer->ValidateTensorShapesFromInputs();
for (unsigned int i = 0; i < outputSize; ++i)
{
}
// Test inference with Dimensionality::Specified and various combinations of dimensions of unknown size
- for (unsigned int i = 0; i <= numDimensions[0]; ++i)
+ for (unsigned int i = 0; i < numDimensions[0]; ++i)
{
- runTestWithMask(maskPermutations[i], ShapeInferenceMethod::InferAndValidate);
+ runTestWithMask(maskPermutations[i]);
}
// maskPermutations[5] equates to all dimensions being known
- runTestWithMask(maskPermutations[5], ShapeInferenceMethod::ValidateOnly);
-
- BOOST_CHECK_THROW(
- runTestWithMask(maskPermutations[5], ShapeInferenceMethod::InferAndValidate), LayerValidationException);
+ runTestWithMask(maskPermutations[5]);
}
template<typename LayerT, typename... Args>
const std::vector<std::initializer_list<unsigned int>> dimensionSizeLists,
Args &&... args)
{
- Graph graph;
+ Graph graph(true);
auto layer = BuildGraph<LayerT>(&graph, inputShapes, std::forward<Args>(args)...);
RunShapeInferenceTest<LayerT>(layer, dimensionSizeLists);
}
+BOOST_AUTO_TEST_CASE(NetworkOptionsTest)
+{
+ BackendOptions ShapeInferenceMethodOption("ShapeInferenceMethod",
+ {
+ { "InferAndValidate", true }
+ });
+
+ INetworkPtr network = INetwork::Create({ShapeInferenceMethodOption});
+ TensorInfo tensorInfo({ 5, 7, 6, 2 }, DataType::Float32);
+
+ auto inputLayer = network->AddInputLayer(1, "inputLayer");
+ inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ ActivationDescriptor descriptor;
+ descriptor.m_Function = ActivationFunction::Abs;
+ auto activationLayer = network->AddActivationLayer(descriptor, "activation");
+
+ inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
+ activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});
+
+ BOOST_CHECK_NO_THROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
+
+ BOOST_CHECK(activationLayer->GetOutputSlot(0).GetTensorInfo() == tensorInfo);
+
+
+ ShapeInferenceMethodOption = BackendOptions("ShapeInferenceMethod",
+ {
+ { "InferAndValidate", false }
+ });
+
+ network = INetwork::Create({ShapeInferenceMethodOption});
+
+ inputLayer = network->AddInputLayer(1, "inputLayer");
+ inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ activationLayer = network->AddActivationLayer(descriptor, "activation");
+
+ inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
+ activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});
+
+ BOOST_CHECK_NO_THROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
+
+ network = INetwork::Create();
+
+ inputLayer = network->AddInputLayer(1, "inputLayer");
+ inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ activationLayer = network->AddActivationLayer(descriptor, "activation");
+
+ inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
+ activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});
+
+ BOOST_CHECK_NO_THROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
+}
+
BOOST_AUTO_TEST_CASE(AbsTest)
{
ActivationDescriptor descriptor;
layer->GetOutputSlot(0).SetTensorInfo({{1, 1, 3, 3}, DataType::Float32});
- layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::ValidateOnly);
+ layer->ValidateTensorShapesFromInputs();
BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == outputShape);
}
layer->GetOutputSlot(0).SetTensorInfo({TensorShape(Dimensionality::NotSpecified), DataType::Float32});
BOOST_CHECK_THROW(
- layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::ValidateOnly), LayerValidationException);
+ layer->ValidateTensorShapesFromInputs(), LayerValidationException);
+
+ layer->SetShapeInferenceMethod(ShapeInferenceMethod::InferAndValidate);
- layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::InferAndValidate);
+ layer->ValidateTensorShapesFromInputs();
BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
layer->GetOutputSlot(0).SetTensorInfo({TensorShape(Dimensionality::Scalar), DataType::Float32});
- layer->ValidateTensorShapesFromInputs(ShapeInferenceMethod::ValidateOnly);
+ layer->ValidateTensorShapesFromInputs();
BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
}