namespace armnn
{
+/// An ActivationDescriptor for the ActivationLayer.
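+/// Example (illustrative sketch): configuring a bounded ReLU that clips its output to the
+/// range [0, 6], using only the members declared below.
+/// @code
+/// armnn::ActivationDescriptor activation;
+/// activation.m_Function = armnn::ActivationFunction::BoundedReLu;
+/// activation.m_A = 6.0f; // upper bound of the clipped range
+/// activation.m_B = 0.0f; // lower bound of the clipped range
+/// @endcode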
struct ActivationDescriptor
{
ActivationDescriptor() : m_Function(ActivationFunction::Sigmoid), m_A(0), m_B(0) {};
+ /// @brief The activation function to use
+ /// (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square).
ActivationFunction m_Function;
+ /// Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH).
float m_A;
+ /// Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
float m_B;
};
+/// A PermuteDescriptor for the PermuteLayer.
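+/// Example (illustrative sketch): a mapping of {0, 3, 1, 2} sends source dimension i to
+/// destination dimension mappings[i], i.e. it rearranges NCHW data into NHWC order. The
+/// initializer-list construction of PermutationVector and the descriptor constructor taking
+/// the mapping are assumed here.
+/// @code
+/// armnn::PermutationVector nchwToNhwc({ 0U, 3U, 1U, 2U });
+/// armnn::PermuteDescriptor permute(nchwToNhwc);
+/// @endcode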
struct PermuteDescriptor
{
PermuteDescriptor()
: m_DimMappings(dimMappings)
{
}
-
+ /// @brief Indicates how to translate tensor elements from a given source into the target destination, when
+ /// source and target potentially have different memory layouts e.g. {0U, 3U, 1U, 2U}.
PermutationVector m_DimMappings;
};
+/// A SoftmaxDescriptor for the SoftmaxLayer.
struct SoftmaxDescriptor
{
SoftmaxDescriptor() : m_Beta(1.0f) {};
-
+ /// Exponentiation value.
float m_Beta;
};
-
+/// @brief An OriginsDescriptor for the MergerLayer.
+/// Descriptor to configure the merging process. Number of views must be equal to the number of inputs, and
+/// their order must match - e.g. first view corresponds to the first input, second view to the second input, etc.
struct OriginsDescriptor
{
OriginsDescriptor();
OriginsDescriptor& operator=(OriginsDescriptor rhs);
+ /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
+ /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
+ /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
+ /// Get the number of views.
uint32_t GetNumViews() const;
+ /// Get the number of dimensions.
uint32_t GetNumDimensions() const;
+ /// Return the view origin at index idx.
const uint32_t* GetViewOrigin(uint32_t idx) const;
+ /// @brief Reorders the viewOrigins in accordance with the indices presented in newOrdering array.
+ /// The number of views must match number of elements in the new ordering array.
void ReorderOrigins(unsigned int* newOrdering, unsigned int numNewOrdering);
+ /// Swap the OriginsDescriptor values first and second.
friend void swap(OriginsDescriptor& first, OriginsDescriptor& second);
+ /// Set the concatenation axis value.
void SetConcatAxis(unsigned int concatAxis);
+ /// Get the concatenation axis value.
unsigned int GetConcatAxis() const;
private:
uint32_t** m_ViewOrigins;
};
+/// @brief A ViewsDescriptor for the SplitterLayer.
+/// Descriptor to configure the splitting process. Number of Views must be equal to the number of outputs, and
+/// their order must match - e.g. first view corresponds to the first output, second view to the second output, etc.
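+/// Example (illustrative sketch): splitting a [1, 4, 6, 6] tensor into two [1, 2, 6, 6] views
+/// along the channel dimension, using the constructor and setters declared below.
+/// @code
+/// armnn::ViewsDescriptor views(2, 4); // two views, four dimensions each
+/// for (uint32_t view = 0; view < 2; ++view)
+/// {
+///     views.SetViewOriginCoord(view, 0, 0);
+///     views.SetViewOriginCoord(view, 1, view * 2); // the second view starts at channel 2
+///     views.SetViewOriginCoord(view, 2, 0);
+///     views.SetViewOriginCoord(view, 3, 0);
+///     views.SetViewSize(view, 0, 1);
+///     views.SetViewSize(view, 1, 2);
+///     views.SetViewSize(view, 2, 6);
+///     views.SetViewSize(view, 3, 6);
+/// }
+/// @endcode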
struct ViewsDescriptor
{
ViewsDescriptor(uint32_t numViews, uint32_t numDimensions = 4);
ViewsDescriptor& operator=(ViewsDescriptor rhs);
+ /// @brief Set the view origin coordinates. The arguments are: view, dimension, value.
+ /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
+ /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value);
+ /// @brief Set the size of the views. The arguments are: view, dimension, value.
+ /// If the view is greater than or equal to GetNumViews(), then the view argument is out of range.
+ /// If the coord is greater than or equal to GetNumDimensions(), then the coord argument is out of range.
Status SetViewSize(uint32_t view, uint32_t coord, uint32_t value);
+ /// Get the number of views.
uint32_t GetNumViews() const;
+ /// Get the number of dimensions.
uint32_t GetNumDimensions() const;
+ /// Get the view origin at index idx.
const uint32_t* GetViewOrigin(uint32_t idx) const;
+ /// Get the view sizes at index idx.
const uint32_t* GetViewSizes(uint32_t idx) const;
+ /// Swap the ViewsDescriptor values first and second.
friend void swap(ViewsDescriptor& first, ViewsDescriptor& second);
private:
OriginsDescriptor m_Origins;
uint32_t** m_ViewSizes;
};
-/// Convenience template to create an OriginsDescriptor to use when creating a Merger layer for performing concatenation
-/// of a number of input tensors
+/// @brief Convenience template to create an OriginsDescriptor to use when creating a MergerLayer for performing
+/// concatenation of a number of input tensors.
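+/// Example (illustrative sketch): concatenating two tensors along the channel dimension; the
+/// initializer-list construction of TensorShape is assumed here.
+/// @code
+/// std::vector<armnn::TensorShape> shapes{ armnn::TensorShape({ 1, 2, 6, 6 }),
+///                                         armnn::TensorShape({ 1, 3, 6, 6 }) };
+/// armnn::OriginsDescriptor merger =
+///     armnn::CreateMergerDescriptorForConcatenation(shapes.begin(), shapes.end(), 1);
+/// @endcode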
template <typename TensorShapeIt>
OriginsDescriptor CreateMergerDescriptorForConcatenation(TensorShapeIt first, TensorShapeIt last,
unsigned int concatenationDimension)
return viewsDescriptor;
}
+/// A Pooling2dDescriptor for the Pooling2dLayer.
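+/// Example (illustrative sketch): a 2x2 max pooling with stride 2 and no padding on NHWC data,
+/// using only the members declared below.
+/// @code
+/// armnn::Pooling2dDescriptor pooling;
+/// pooling.m_PoolType   = armnn::PoolingAlgorithm::Max;
+/// pooling.m_PoolWidth  = 2;
+/// pooling.m_PoolHeight = 2;
+/// pooling.m_StrideX    = 2;
+/// pooling.m_StrideY    = 2;
+/// pooling.m_PadLeft = pooling.m_PadRight = pooling.m_PadTop = pooling.m_PadBottom = 0;
+/// pooling.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
+/// pooling.m_PaddingMethod       = armnn::PaddingMethod::Exclude;
+/// pooling.m_DataLayout          = armnn::DataLayout::NHWC;
+/// @endcode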
struct Pooling2dDescriptor
{
Pooling2dDescriptor()
, m_DataLayout(DataLayout::NCHW)
{};
+ /// The pooling algorithm to use (Max, Average, L2).
PoolingAlgorithm m_PoolType;
+ /// Padding left value in the width dimension.
uint32_t m_PadLeft;
+ /// Padding right value in the width dimension.
uint32_t m_PadRight;
+ /// Padding top value in the height dimension.
uint32_t m_PadTop;
+ /// Padding bottom value in the height dimension.
uint32_t m_PadBottom;
+ /// Pooling width value.
uint32_t m_PoolWidth;
+ /// Pooling height value.
uint32_t m_PoolHeight;
+ /// Stride value when proceeding through input for the width dimension.
uint32_t m_StrideX;
+ /// Stride value when proceeding through input for the height dimension.
uint32_t m_StrideY;
+ /// The rounding method for the output shape. (Floor, Ceiling).
OutputShapeRounding m_OutputShapeRounding;
+ /// The padding method to be used. (Exclude, IgnoreValue).
PaddingMethod m_PaddingMethod;
+ /// The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout;
};
+/// A FullyConnectedDescriptor for the FullyConnectedLayer.
struct FullyConnectedDescriptor
{
FullyConnectedDescriptor()
, m_TransposeWeightMatrix(false)
{};
+ /// Enable/disable bias.
bool m_BiasEnabled;
+ /// Enable/disable transpose weight matrix.
bool m_TransposeWeightMatrix;
};
+/// A Convolution2dDescriptor for the Convolution2dLayer.
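+/// Example (illustrative sketch): one element of padding on each side, unit strides and a bias
+/// on NCHW data; the kernel size itself comes from the layer's weight tensor, not the descriptor.
+/// @code
+/// armnn::Convolution2dDescriptor convolution;
+/// convolution.m_PadLeft = convolution.m_PadRight = convolution.m_PadTop = convolution.m_PadBottom = 1;
+/// convolution.m_StrideX     = 1;
+/// convolution.m_StrideY     = 1;
+/// convolution.m_BiasEnabled = true;
+/// convolution.m_DataLayout  = armnn::DataLayout::NCHW;
+/// @endcode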
struct Convolution2dDescriptor
{
Convolution2dDescriptor()
, m_DataLayout(DataLayout::NCHW)
{};
+ /// Padding left value in the width dimension.
uint32_t m_PadLeft;
+ /// Padding right value in the width dimension.
uint32_t m_PadRight;
+ /// Padding top value in the height dimension.
uint32_t m_PadTop;
+ /// Padding bottom value in the height dimension.
uint32_t m_PadBottom;
+ /// Stride value when proceeding through input for the width dimension.
uint32_t m_StrideX;
+ /// Stride value when proceeding through input for the height dimension.
uint32_t m_StrideY;
+ /// Enable/disable bias.
bool m_BiasEnabled;
+ /// The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout;
};
+/// A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
struct DepthwiseConvolution2dDescriptor
{
DepthwiseConvolution2dDescriptor()
, m_DataLayout(DataLayout::NCHW)
{}
+ /// Padding left value in the width dimension.
uint32_t m_PadLeft;
+ /// Padding right value in the width dimension.
uint32_t m_PadRight;
+ /// Padding top value in the height dimension.
uint32_t m_PadTop;
+ /// Padding bottom value in the height dimension.
uint32_t m_PadBottom;
+ /// Stride value when proceeding through input for the width dimension.
uint32_t m_StrideX;
+ /// Stride value when proceeding through input for the height dimension.
uint32_t m_StrideY;
+ /// Enable/disable bias.
bool m_BiasEnabled;
+ /// The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout;
};
-
+/// A NormalizationDescriptor for the NormalizationLayer.
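+/// Example (illustrative sketch): an across-channel local brightness normalization; the numeric
+/// values are illustrative only.
+/// @code
+/// armnn::NormalizationDescriptor normalization;
+/// normalization.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
+/// normalization.m_NormMethodType  = armnn::NormalizationAlgorithmMethod::LocalBrightness;
+/// normalization.m_NormSize        = 5; // depth radius
+/// normalization.m_Alpha           = 0.0001f;
+/// normalization.m_Beta            = 0.75f;
+/// normalization.m_K               = 2.0f;
+/// normalization.m_DataLayout      = armnn::DataLayout::NCHW;
+/// @endcode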
struct NormalizationDescriptor
{
NormalizationDescriptor()
, m_DataLayout(DataLayout::NCHW)
{}
+ /// Normalization channel algorithm to use (Across, Within).
NormalizationAlgorithmChannel m_NormChannelType;
+ /// Normalization method algorithm to use (LocalBrightness, LocalContrast).
NormalizationAlgorithmMethod m_NormMethodType;
+ /// Depth radius value.
uint32_t m_NormSize;
+ /// Alpha value for the normalization equation.
float m_Alpha;
+ /// Beta value for the normalization equation.
float m_Beta;
+ /// Kappa value used for the across channel normalization equation.
float m_K;
+ /// The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout;
};
+/// A L2NormalizationDescriptor for the L2NormalizationLayer.
struct L2NormalizationDescriptor
{
L2NormalizationDescriptor()
: m_DataLayout(DataLayout::NCHW)
{}
+ /// The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout;
};
+/// A BatchNormalizationDescriptor for the BatchNormalizationLayer.
struct BatchNormalizationDescriptor
{
BatchNormalizationDescriptor()
, m_DataLayout(DataLayout::NCHW)
{}
+ /// Value to add to the variance. Used to avoid dividing by zero.
float m_Eps;
+ /// The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout;
};
+/// A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
struct BatchToSpaceNdDescriptor
{
BatchToSpaceNdDescriptor()
, m_DataLayout(DataLayout::NCHW)
{}
+ /// Block shape values.
std::vector<unsigned int> m_BlockShape;
+ /// The values to crop from the input dimension.
std::vector<std::pair<unsigned int, unsigned int>> m_Crops;
+ /// The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout;
};
+/// A FakeQuantizationDescriptor for the FakeQuantizationLayer.
struct FakeQuantizationDescriptor
{
FakeQuantizationDescriptor()
, m_Max(6.0f)
{}
+ /// Minimum value.
float m_Min;
+ /// Maximum value.
float m_Max;
};
+/// A ResizeBilinearDescriptor for the ResizeBilinearLayer.
struct ResizeBilinearDescriptor
{
ResizeBilinearDescriptor()
, m_DataLayout(DataLayout::NCHW)
{}
+ /// Target width value.
uint32_t m_TargetWidth;
+ /// Target height value.
uint32_t m_TargetHeight;
+ /// The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout;
};
+/// A ReshapeDescriptor for the ReshapeLayer.
struct ReshapeDescriptor
{
ReshapeDescriptor()
: m_TargetShape(shape)
{}
+ /// Target shape value.
TensorShape m_TargetShape;
};
+/// A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
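+/// Example (illustrative sketch): a 2x2 block with one element of padding on every side of the
+/// spatial dimensions, using only the members declared below.
+/// @code
+/// armnn::SpaceToBatchNdDescriptor spaceToBatch;
+/// spaceToBatch.m_BlockShape = { 2, 2 };
+/// spaceToBatch.m_PadList    = { { 1, 1 },   // heightPad{top, bottom}
+///                               { 1, 1 } }; // widthPad{left, right}
+/// spaceToBatch.m_DataLayout = armnn::DataLayout::NHWC;
+/// @endcode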
struct SpaceToBatchNdDescriptor
{
SpaceToBatchNdDescriptor()
, m_DataLayout(DataLayout::NCHW)
{}
+ /// Block shape value.
std::vector<unsigned int> m_BlockShape;
+ /// @brief Specifies the padding values for the input dimension:
+ /// heightPad{top, bottom} widthPad{left, right}.
std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
+ /// The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout;
};
-// temporary descriptor for Lstm
+/// An LstmDescriptor for the LstmLayer.
struct LstmDescriptor
{
LstmDescriptor()
, m_ProjectionEnabled(false)
{}
+ /// @brief The activation function to use.
+ /// 0: None, 1: Relu, 3: Relu6, 4: Tanh, 6: Sigmoid.
uint32_t m_ActivationFunc;
+ /// Clipping threshold value for the cell state.
float m_ClippingThresCell;
+ /// Clipping threshold value for the projection.
float m_ClippingThresProj;
+ /// Enable/disable cifg (coupled input & forget gate).
bool m_CifgEnabled;
+ /// Enable/disable peephole.
bool m_PeepholeEnabled;
+ /// Enable/disable the projection layer.
bool m_ProjectionEnabled;
};
+/// A MeanDescriptor for the MeanLayer.
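+/// Example (illustrative sketch, default construction assumed): averaging over the height and
+/// width dimensions of an NCHW tensor while keeping the reduced dimensions as size 1.
+/// @code
+/// armnn::MeanDescriptor mean;
+/// mean.m_Axis     = { 2, 3 }; // reduce over H and W
+/// mean.m_KeepDims = true;
+/// @endcode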
struct MeanDescriptor
{
MeanDescriptor()
, m_KeepDims(keepDims)
{}
+ /// Values for the dimensions to reduce.
std::vector<unsigned int> m_Axis;
+ /// Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
bool m_KeepDims;
};
+/// A PadDescriptor for the PadLayer.
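+/// Example (illustrative sketch, default construction assumed): padding a 4D NCHW tensor with one
+/// value before and one value after the tensor in the height and width dimensions only.
+/// @code
+/// armnn::PadDescriptor pad;
+/// pad.m_PadList = { { 0, 0 },   // batch
+///                   { 0, 0 },   // channels
+///                   { 1, 1 },   // height
+///                   { 1, 1 } }; // width
+/// @endcode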
struct PadDescriptor
{
PadDescriptor()
: m_PadList(padList)
{}
- // first is number of values to add before the tensor in the dimension,
- // second is the number of values to add after the tensor in the dimension
- // the number of pairs should match the number of dimensions in the input tensor.
+ /// @brief Specifies the padding for input dimension.
+ /// First is the number of values to add before the tensor in the dimension.
+ /// Second is the number of values to add after the tensor in the dimension.
+ /// The number of pairs should match the number of dimensions in the input tensor.
std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
};
+/// A StridedSliceDescriptor for the StridedSliceLayer.
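+/// Example (illustrative sketch, default construction assumed): taking rows 1 and 2 of a 2D
+/// tensor while keeping every column; each mask is a bit field with one bit per dimension.
+/// @code
+/// armnn::StridedSliceDescriptor slice;
+/// slice.m_Begin          = { 1, 0 };
+/// slice.m_End            = { 3, 0 };
+/// slice.m_Stride         = { 1, 1 };
+/// slice.m_BeginMask      = 0;
+/// slice.m_EndMask        = 1 << 1; // ignore m_End for dimension 1 and use its full range
+/// slice.m_ShrinkAxisMask = 0;
+/// @endcode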
struct StridedSliceDescriptor
{
StridedSliceDescriptor(const std::vector<int>& begin,
unsigned int axis,
int startForAxis) const;
+ /// Begin values for the input that will be sliced.
std::vector<int> m_Begin;
+ /// End values for the input that will be sliced.
std::vector<int> m_End;
+ /// Stride values for the input that will be sliced.
std::vector<int> m_Stride;
+ /// @brief Begin mask value. If set, then the begin is disregarded and the fullest
+ /// range is used for the dimension.
int32_t m_BeginMask;
+ /// @brief End mask value. If set, then the end is disregarded and the fullest range
+ /// is used for the dimension.
int32_t m_EndMask;
+ /// Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
int32_t m_ShrinkAxisMask;
+ /// Ellipsis mask value.
int32_t m_EllipsisMask;
+ /// @brief New axis mask value. If set, the begin, end and stride are disregarded and
+ /// a new 1 dimension is inserted to this location of the output tensor.
int32_t m_NewAxisMask;
+ /// The data layout to be used (NCHW, NHWC).
DataLayout m_DataLayout;
};
+/// A DebugDescriptor for the DebugLayer.
struct DebugDescriptor
{
DebugDescriptor()
, m_SlotIndex(index)
{}
+ /// The name of the debug layer.
std::string m_LayerName;
+ /// The slot index of the debug layer.
unsigned int m_SlotIndex;
};
namespace armnn
{
-
+/// This layer represents an activation operation with the specified activation function.
class ActivationLayer : public LayerWithParameters<ActivationDescriptor>
{
public:
+ /// Makes a workload for the Activation type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
ActivationLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s) will lead to a valid configuration of @ref ActivationLayer.
void ValidateTensorShapesFromInputs() override;
protected:
+ /// Constructor to create an ActivationLayer.
+ /// @param [in] param ActivationDescriptor to configure the activation operation.
+ /// @param [in] name Optional name for the layer.
ActivationLayer(const ActivationDescriptor& param, const char* name);
+
+ /// Default destructor
~ActivationLayer() = default;
};
namespace armnn
{
-
+/// This layer represents an addition operation.
class AdditionLayer : public ElementwiseBaseLayer
{
public:
+ /// Makes a workload for the Addition type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
AdditionLayer* Clone(Graph& graph) const override;
protected:
+ /// Constructor to create an AdditionLayer.
+ /// @param [in] name Optional name for the layer.
AdditionLayer(const char* name);
+
+ /// Default destructor
~AdditionLayer() = default;
};
class ScopedCpuTensorHandle;
+/// This layer represents a batch normalization operation.
class BatchNormalizationLayer : public LayerWithParameters<BatchNormalizationDescriptor>
{
public:
+ /// A unique pointer to store Mean values.
std::unique_ptr<ScopedCpuTensorHandle> m_Mean;
+ /// A unique pointer to store Variance values.
std::unique_ptr<ScopedCpuTensorHandle> m_Variance;
+ /// A unique pointer to store Beta values.
std::unique_ptr<ScopedCpuTensorHandle> m_Beta;
+ /// A unique pointer to store Gamma values.
std::unique_ptr<ScopedCpuTensorHandle> m_Gamma;
+ /// Makes a workload for the BatchNormalization type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
BatchNormalizationLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref BatchNormalizationLayer.
void ValidateTensorShapesFromInputs() override;
protected:
+ /// Constructor to create a BatchNormalizationLayer.
+ /// @param [in] param BatchNormalizationDescriptor to configure the batch normalization operation.
+ /// @param [in] name Optional name for the layer.
BatchNormalizationLayer(const BatchNormalizationDescriptor& param, const char* name);
+
+ /// Default destructor
~BatchNormalizationLayer() = default;
+ /// Retrieve the handles to the constant values stored by the layer.
+ /// @return A vector of the constant tensors stored by this layer.
ConstantTensors GetConstantTensorsByRef() override;
};
namespace armnn
{
+/// This layer represents a BatchToSpaceNd operation.
class BatchToSpaceNdLayer : public LayerWithParameters<BatchToSpaceNdDescriptor>
{
public:
+ /// Makes a workload for the BatchToSpaceNd type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
BatchToSpaceNdLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref BatchToSpaceNdLayer.
void ValidateTensorShapesFromInputs() override;
+ /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+ /// otherwise infers the output shapes from given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes layer has.
+ /// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
protected:
+ /// Constructor to create a BatchToSpaceNdLayer.
+ /// @param [in] param BatchToSpaceNdDescriptor to configure the BatchToSpaceNd operation.
+ /// @param [in] name Optional name for the layer.
BatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& param, const char* name);
+
+ /// Default destructor
~BatchToSpaceNdLayer() = default;
};
class ScopedCpuTensorHandle;
+/// A layer that the constant data can be bound to.
class ConstantLayer : public Layer
{
public:
+ /// Makes a workload for the Constant type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
ConstantLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref ConstantLayer.
void ValidateTensorShapesFromInputs() override;
+ /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+ /// otherwise infers the output shapes from given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes layer has.
+ /// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
- // Free up the constant source data
+ /// Free up the constant source data stored by the layer.
void ReleaseConstantData() override {};
std::unique_ptr<ScopedCpuTensorHandle> m_LayerOutput;
protected:
+ /// Constructor to create a ConstantLayer.
+ /// @param [in] name Optional name for the layer.
ConstantLayer(const char* name);
+
+ /// Default destructor
~ConstantLayer() = default;
+ /// Retrieve the handles to the constant values stored by the layer.
ConstantTensors GetConstantTensorsByRef() override { return {m_LayerOutput}; }
};
namespace armnn
{
+/// This layer converts data type Float 16 to Float 32.
class ConvertFp16ToFp32Layer : public Layer
{
public:
+ /// Makes a workload for the ConvertFp16ToFp32 type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
ConvertFp16ToFp32Layer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref ConvertFp16ToFp32Layer.
void ValidateTensorShapesFromInputs() override;
protected:
+ /// Constructor to create a ConvertFp16ToFp32Layer.
+ /// @param [in] name Optional name for the layer.
ConvertFp16ToFp32Layer(const char* name);
+
+ /// Default destructor
~ConvertFp16ToFp32Layer() = default;
};
namespace armnn
{
+/// This layer converts data type Float 32 to Float 16.
class ConvertFp32ToFp16Layer : public Layer
{
public:
+ /// Makes a workload for the ConvertFp32ToFp16 type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
ConvertFp32ToFp16Layer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref ConvertFp32ToFp16Layer.
void ValidateTensorShapesFromInputs() override;
protected:
+ /// Constructor to create a ConvertFp32ToFp16Layer.
+ /// @param [in] name Optional name for the layer.
ConvertFp32ToFp16Layer(const char* name);
+
+ /// Default destructor
~ConvertFp32ToFp16Layer() = default;
};
class ScopedCpuTensorHandle;
+/// This layer represents a convolution 2d operation.
class Convolution2dLayer : public LayerWithParameters<Convolution2dDescriptor>
{
public:
+ /// A unique pointer to store Weight values.
std::unique_ptr<ScopedCpuTensorHandle> m_Weight;
+ /// A unique pointer to store Bias values.
std::unique_ptr<ScopedCpuTensorHandle> m_Bias;
+ /// Makes a workload for the Convolution2d type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
Convolution2dLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref Convolution2dLayer.
void ValidateTensorShapesFromInputs() override;
+ /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+ /// otherwise infers the output shapes from given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes layer has.
+ /// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
protected:
+ /// Constructor to create a Convolution2dLayer.
+ /// @param [in] param Convolution2dDescriptor to configure the convolution2d operation.
+ /// @param [in] name Optional name for the layer.
Convolution2dLayer(const Convolution2dDescriptor& param, const char* name);
+
+ /// Default destructor
~Convolution2dLayer() = default;
+ /// Retrieve the handles to the constant values stored by the layer.
+ /// @return A vector of the constant tensors stored by this layer.
ConstantTensors GetConstantTensorsByRef() override;
};
namespace armnn
{
+/// This layer visualizes the data flowing through the network.
class DebugLayer : public LayerWithParameters<DebugDescriptor>
{
public:
+ /// Makes a workload for the Debug type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
DebugLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref DebugLayer.
void ValidateTensorShapesFromInputs() override;
protected:
+ /// Constructor to create a DebugLayer.
+ /// @param [in] param DebugDescriptor to configure the debug layer.
+ /// @param [in] name Optional name for the layer.
DebugLayer(const DebugDescriptor& param, const char* name);
+
+ /// Default destructor
~DebugLayer() = default;
};
class ScopedCpuTensorHandle;
+/// This layer represents a depthwise convolution 2d operation.
class DepthwiseConvolution2dLayer : public LayerWithParameters<DepthwiseConvolution2dDescriptor>
{
public:
+ /// A unique pointer to store Weight values.
std::unique_ptr<ScopedCpuTensorHandle> m_Weight;
+ /// A unique pointer to store Bias values.
std::unique_ptr<ScopedCpuTensorHandle> m_Bias;
+ /// Makes a workload for the DepthwiseConvolution2d type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
DepthwiseConvolution2dLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref DepthwiseConvolution2dLayer.
void ValidateTensorShapesFromInputs() override;
+ /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+ /// otherwise infers the output shapes from given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes layer has.
+ /// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
protected:
+ /// Constructor to create a DepthwiseConvolution2dLayer.
+ /// @param [in] param DepthwiseConvolution2dDescriptor to configure the depthwise convolution2d.
+ /// @param [in] name Optional name for the layer.
DepthwiseConvolution2dLayer(const DepthwiseConvolution2dDescriptor& param, const char* name);
+
+ /// Default destructor
~DepthwiseConvolution2dLayer() = default;
+ /// Retrieve the handles to the constant values stored by the layer.
+ /// @return A vector of the constant tensors stored by this layer.
ConstantTensors GetConstantTensorsByRef() override;
};
namespace armnn
{
+/// This layer represents a division operation.
class DivisionLayer : public ElementwiseBaseLayer
{
public:
+ /// Makes a workload for the Division type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
DivisionLayer* Clone(Graph& graph) const override;
protected:
+ /// Constructor to create a DivisionLayer.
+ /// @param [in] name Optional name for the layer.
DivisionLayer(const char* name);
+
+ /// Default destructor
~DivisionLayer() = default;
};
namespace armnn
{
-/// NOTE: this is an abstract class, it does not implement:
-/// std::unique_ptr<IWorkload> Layer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const = 0;
-/// Layer* Clone(Graph& graph) const = 0;
+/// NOTE: this is an abstract class to encapsulate the element wise operations; it does not implement:
+/// std::unique_ptr<IWorkload> Layer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const = 0;
+/// Layer* Clone(Graph& graph) const = 0;
class ElementwiseBaseLayer : public Layer
{
public:
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of the element wise operation.
void ValidateTensorShapesFromInputs() override;
+
+ /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+ /// otherwise infers the output shapes from given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes layer has.
+ /// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
protected:
+ /// @param [in] numInputSlots The number of input slots for the layer.
+ /// @param [in] numOutputSlots The number of output slots for the layer.
+ /// @param [in] type The layer type.
+ /// @param [in] name Optional name for the layer.
ElementwiseBaseLayer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char* name);
+
+ /// Default destructor
~ElementwiseBaseLayer() = default;
};
namespace armnn
{
-
+/// This layer represents an equal operation.
class EqualLayer : public ElementwiseBaseLayer
{
public:
+ /// Makes a workload for the Equal type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
EqualLayer* Clone(Graph& graph) const override;
protected:
+ /// Constructor to create an EqualLayer.
+ /// @param [in] name Optional name for the layer.
EqualLayer(const char* name);
+
+ /// Default destructor
~EqualLayer() = default;
};
namespace armnn
{
+/// This layer represents a fake quantization operation.
class FakeQuantizationLayer : public LayerWithParameters<FakeQuantizationDescriptor>
{
public:
+ /// Makes a workload for the FakeQuantization type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
FakeQuantizationLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref FakeQuantizationLayer.
void ValidateTensorShapesFromInputs() override;
protected:
+ /// Constructor to create a FakeQuantizationLayer.
+ /// @param [in] descriptor FakeQuantizationDescriptor to configure the fake quantization operation.
+ /// @param [in] name Optional name for the layer.
FakeQuantizationLayer(const FakeQuantizationDescriptor& descriptor, const char* name);
+
+ /// Default destructor
~FakeQuantizationLayer() = default;
};
namespace armnn
{
+/// This layer represents a floor operation.
class FloorLayer : public Layer
{
public:
+ /// Makes a workload for the Floor type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
FloorLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref FloorLayer.
void ValidateTensorShapesFromInputs() override;
protected:
+ /// Constructor to create a FloorLayer.
+ /// @param [in] name Optional name for the layer.
FloorLayer(const char* name);
+
+ /// Default destructor
~FloorLayer() = default;
};
class ScopedCpuTensorHandle;
+/// This layer represents a fully connected operation.
class FullyConnectedLayer : public LayerWithParameters<FullyConnectedDescriptor>
{
public:
+ /// A unique pointer to store Weight values.
std::unique_ptr<ScopedCpuTensorHandle> m_Weight;
+ /// A unique pointer to store Bias values.
std::unique_ptr<ScopedCpuTensorHandle> m_Bias;
+ /// Makes a workload for the FullyConnected type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
FullyConnectedLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref FullyConnectedLayer.
void ValidateTensorShapesFromInputs() override;
+
+ /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+ /// otherwise infers the output shapes from given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes layer has.
+ /// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
protected:
+ /// Constructor to create a FullyConnectedLayer.
+ /// @param [in] param FullyConnectedDescriptor to configure the fully connected operation.
+ /// @param [in] name Optional name for the layer.
FullyConnectedLayer(const FullyConnectedDescriptor& param, const char* name);
+
+ /// Default destructor
~FullyConnectedLayer() = default;
+ /// Retrieve the handles to the constant values stored by the layer.
+ /// @return A vector of the constant tensors stored by this layer.
ConstantTensors GetConstantTensorsByRef() override;
};
namespace armnn
{
+/// This layer represents a greater operation.
class GreaterLayer : public ElementwiseBaseLayer
{
public:
+ /// Makes a workload for the Greater type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
GreaterLayer* Clone(Graph& graph) const override;
protected:
+ /// Constructor to create a GreaterLayer.
+ /// @param [in] name Optional name for the layer.
GreaterLayer(const char* name);
+
+ /// Default destructor
~GreaterLayer() = default;
};
namespace armnn
{
+/// A layer user-provided data can be bound to (e.g. inputs, outputs).
class InputLayer : public BindableLayer
{
public:
+ /// Makes a workload for the Input type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
InputLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref InputLayer.
void ValidateTensorShapesFromInputs() override;
protected:
+ /// Constructor to create an InputLayer.
+ /// @param [in] id The layer binding id number.
+ /// @param [in] name Optional name for the layer.
InputLayer(LayerBindingId id, const char* name);
+
+ /// Default destructor
~InputLayer() = default;
};
namespace armnn
{
+/// This layer represents an L2 normalization operation.
class L2NormalizationLayer : public LayerWithParameters<L2NormalizationDescriptor>
{
public:
+ /// Makes a workload for the L2Normalization type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
L2NormalizationLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref L2NormalizationLayer.
void ValidateTensorShapesFromInputs() override;
protected:
+ /// Constructor to create an L2NormalizationLayer.
+ /// @param [in] param L2NormalizationDescriptor to configure the L2 normalization operation.
+ /// @param [in] name Optional name for the layer.
L2NormalizationLayer(const L2NormalizationDescriptor& param, const char* name);
+
+ /// Default destructor
~L2NormalizationLayer() = default;
};
struct LstmOptCifgParameters
{
+ /// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
std::unique_ptr<ScopedCpuTensorHandle> m_InputToInputWeights;
+ /// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToInputWeights;
+ /// A unique pointer to represent 1D weights tensor with dimensions [num_units].
std::unique_ptr<ScopedCpuTensorHandle> m_CellToInputWeights;
+ /// A unique pointer to represent 1D weights tensor with dimensions [num_units].
std::unique_ptr<ScopedCpuTensorHandle> m_InputGateBias;
};
struct LstmOptProjectionParameters
{
+ /// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
std::unique_ptr<ScopedCpuTensorHandle> m_ProjectionWeights;
+ /// A unique pointer to represent 1D weights tensor with dimensions [output_size].
std::unique_ptr<ScopedCpuTensorHandle> m_ProjectionBias;
};
struct LstmOptPeepholeParameters
{
+ /// A unique pointer to represent 1D weights tensor with dimensions [num_units].
std::unique_ptr<ScopedCpuTensorHandle> m_CellToForgetWeights;
+ /// A unique pointer to represent 1D weights tensor with dimensions [num_units].
std::unique_ptr<ScopedCpuTensorHandle> m_CellToOutputWeights;
};
struct LstmBasicParameters
{
+ /// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
std::unique_ptr<ScopedCpuTensorHandle> m_InputToForgetWeights;
+ /// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
std::unique_ptr<ScopedCpuTensorHandle> m_InputToCellWeights;
+ /// A unique pointer to represent 2D weights tensor with dimensions [input_size, num_units].
std::unique_ptr<ScopedCpuTensorHandle> m_InputToOutputWeights;
+ /// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToForgetWeights;
+ /// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToCellWeights;
+ /// A unique pointer to represent 2D weights tensor with dimensions [output_size, num_units].
std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToOutputWeights;
+ /// A unique pointer to represent 1D weights tensor with dimensions [num_units].
std::unique_ptr<ScopedCpuTensorHandle> m_ForgetGateBias;
+ /// A unique pointer to represent 1D weights tensor with dimensions [num_units].
std::unique_ptr<ScopedCpuTensorHandle> m_CellBias;
+ /// A unique pointer to represent 1D weights tensor with dimensions [num_units].
std::unique_ptr<ScopedCpuTensorHandle> m_OutputGateBias;
};
+/// This layer represents an LSTM operation.
class LstmLayer : public LayerWithParameters<LstmDescriptor>
{
public:
LstmOptProjectionParameters m_ProjectionParameters;
LstmOptPeepholeParameters m_PeepholeParameters;
+ /// Makes a workload for the LSTM type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
LstmLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref LstmLayer.
void ValidateTensorShapesFromInputs() override;
+
+ /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+ /// otherwise infers the output shapes from given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes layer has.
+ /// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
protected:
+ /// Constructor to create an LstmLayer.
+ /// @param [in] param LstmDescriptor to configure the lstm operation.
+ /// @param [in] name Optional name for the layer.
LstmLayer(const LstmDescriptor& param, const char* name);
+
+ /// Default destructor
~LstmLayer() = default;
+ /// Retrieve the handles to the constant values stored by the layer.
+ /// @return A vector of the constant tensors stored by this layer.
Layer::ConstantTensors GetConstantTensorsByRef() override;
};
namespace armnn
{
+/// This layer represents a maximum operation.
class MaximumLayer : public ElementwiseBaseLayer
{
public:
+ /// Makes a workload for the Maximum type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
MaximumLayer* Clone(Graph& graph) const override;
protected:
+ /// Constructor to create a MaximumLayer.
+ /// @param [in] name Optional name for the layer.
MaximumLayer(const char* name);
+ /// Default destructor
~MaximumLayer() = default;
};
namespace armnn
{
+/// This layer represents a mean operation.
class MeanLayer : public LayerWithParameters<MeanDescriptor>
{
public:
+ /// Makes a workload for the Mean type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
MeanLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref MeanLayer.
void ValidateTensorShapesFromInputs() override;
protected:
+ /// Constructor to create a MeanLayer.
+ /// @param [in] param MeanDescriptor to configure the mean operation.
+ /// @param [in] name Optional name for the layer.
MeanLayer(const MeanDescriptor& param, const char* name);
+
+ /// Default destructor
~MeanLayer() = default;
};
namespace armnn
{
+/// This layer represents a memory copy operation.
class MemCopyLayer : public Layer
{
public:
- virtual std::unique_ptr<IWorkload>
- CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const override;
+ /// Makes a workload for the MemCopy type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
MemCopyLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref MemCopyLayer.
void ValidateTensorShapesFromInputs() override;
protected:
+ /// Constructor to create a MemCopyLayer.
+ /// @param [in] name Optional name for the layer.
MemCopyLayer(const char* name);
+
+ /// Default destructor
~MemCopyLayer() = default;
};
namespace armnn
{
+/// This layer represents a merge operation.
class MergerLayer : public LayerWithParameters<OriginsDescriptor>
{
public:
+ /// Makes a workload for the Merger type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+
+ /// Set the outputs to be appropriate sub tensors of the input if sub tensors are supported,
+ /// otherwise creates tensor handles.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
MergerLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref MergerLayer.
void ValidateTensorShapesFromInputs() override;
+
+ /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+ /// otherwise infers the output shapes from given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes layer has.
+ /// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
protected:
+ /// Constructor to create a MergerLayer.
+ /// @param [in] param OriginsDescriptor to configure the merger operation.
+ /// @param [in] name Optional name for the layer.
MergerLayer(const OriginsDescriptor& param, const char* name);
+
+ /// Default destructor
~MergerLayer() = default;
};
namespace armnn
{
+/// This layer represents a minimum operation.
class MinimumLayer : public ElementwiseBaseLayer
{
public:
+ /// Makes a workload for the Minimum type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
MinimumLayer* Clone(Graph& graph) const override;
protected:
+ /// Constructor to create a MinimumLayer.
+ /// @param [in] name Optional name for the layer.
MinimumLayer(const char* name);
+
+ /// Default destructor
~MinimumLayer() = default;
};
namespace armnn
{
+/// This layer represents a multiplication operation.
class MultiplicationLayer : public ElementwiseBaseLayer
{
public:
+ /// Makes a workload for the Multiplication type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
MultiplicationLayer* Clone(Graph& graph) const override;
protected:
+ /// Constructor to create a MultiplicationLayer.
+ /// @param [in] name Optional name for the layer.
MultiplicationLayer(const char* name);
+
+ /// Default destructor
~MultiplicationLayer() = default;
};
namespace armnn
{
+/// This layer represents a normalization operation.
class NormalizationLayer : public LayerWithParameters<NormalizationDescriptor>
{
public:
+ /// Makes a workload for the Normalization type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
NormalizationLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref NormalizationLayer.
void ValidateTensorShapesFromInputs() override;
protected:
+ /// Constructor to create a NormalizationLayer.
+ /// @param [in] param NormalizationDescriptor to configure the normalization operation.
+ /// @param [in] name Optional name for the layer.
NormalizationLayer(const NormalizationDescriptor& param, const char* name);
+
+ /// Default destructor
~NormalizationLayer() = default;
};
namespace armnn
{
+/// A layer user-provided data can be bound to (e.g. inputs, outputs).
class OutputLayer : public BindableLayer
{
public:
+ /// Returns nullptr for Output type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+
+ /// Set the outputs to be appropriate sub tensors of the input if sub tensors are supported,
+ /// otherwise creates tensor handles by default. Ignores parameters for Output type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) override
{
boost::ignore_unused(graph, factory);
}
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
OutputLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref OutputLayer.
void ValidateTensorShapesFromInputs() override;
protected:
+ /// Constructor to create an OutputLayer.
+ /// @param [in] id The layer binding id number.
+ /// @param [in] name Optional name for the layer.
OutputLayer(LayerBindingId id, const char* name);
+
+ /// Default destructor
~OutputLayer() = default;
};
namespace armnn
{
+/// This layer represents a pad operation.
class PadLayer : public LayerWithParameters<PadDescriptor>
{
public:
+ /// Makes a workload for the Pad type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
PadLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref PadLayer.
void ValidateTensorShapesFromInputs() override;
protected:
+ /// Constructor to create a PadLayer.
+ /// @param [in] param PadDescriptor to configure the pad operation.
+ /// @param [in] name Optional name for the layer.
PadLayer(const PadDescriptor& param, const char* name);
+
+ /// Default destructor
~PadLayer() = default;
};
namespace armnn
{
+/// This layer represents a permutation operation.
class PermuteLayer : public LayerWithParameters<PermuteDescriptor>
{
public:
+ /// Makes a workload for the Permute type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
PermuteLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref PermuteLayer.
void ValidateTensorShapesFromInputs() override;
+
+ /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+ /// otherwise infers the output shapes from given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes layer has.
+ /// @return A vector to the inferred output shape.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ /// @return A permutation vector that represents the memory layout of the tensor elements.
const PermutationVector& GetPermutation() const
{
return m_Param.m_DimMappings;
}
+ /// Indicates if the given layer is the inverse of this one.
+ /// @param other The other layer to be compared with.
+ /// @return True if the other layer is the inverse of this one, false otherwise.
bool IsInverse(const Layer& other) const
{
return (other.GetType() == LayerType::Permute) &&
GetPermutation().IsInverse(boost::polymorphic_downcast<const PermuteLayer*>(&other)->GetPermutation());
}
+ /// Indicates if the given layer is equal to this one.
+ /// @param other The other layer to be compared with.
+ /// @return True if the other layer is equal to this one, false otherwise.
bool IsEqual(const Layer& other) const
{
    return (other.GetType() == LayerType::Permute) &&
        GetPermutation().IsEqual(boost::polymorphic_downcast<const PermuteLayer*>(&other)->GetPermutation());
}
protected:
+ /// Constructor to create a PermuteLayer.
+ /// @param [in] param PermuteDescriptor to configure the permute operation.
+ /// @param [in] name Optional name for the layer.
PermuteLayer(const PermuteDescriptor& param, const char* name);
+
+ /// Default destructor
~PermuteLayer() = default;
};
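// Illustrative sketch (not part of the patch): how PermuteLayer::IsInverse is expected to behave
// for two permutations that undo each other. The initializer-list construction of
// armnn::PermutationVector below is an assumption; see Types.hpp for the exact interface.
#include <armnn/Types.hpp>

void PermutationInverseExample()
{
    // m_DimMappings[i] gives the destination dimension of source dimension i.
    armnn::PermutationVector forward({0U, 3U, 1U, 2U});
    armnn::PermutationVector backward({0U, 2U, 3U, 1U});

    // Applying backward after forward restores the original layout, so a pair of PermuteLayers
    // carrying these vectors would satisfy IsInverse and could be optimised away as a pair.
    bool cancelsOut = forward.IsInverse(backward); // expected: true
    (void)cancelsOut;
}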
namespace armnn
{
+/// This layer represents a pooling 2d operation.
class Pooling2dLayer : public LayerWithParameters<Pooling2dDescriptor>
{
public:
+ /// Makes a workload for the Pooling2d type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
Pooling2dLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref Pooling2dLayer.
void ValidateTensorShapesFromInputs() override;
+
+ /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+ /// otherwise infers the output shapes from the given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes the layer has.
+ /// @return A vector of the inferred output shapes.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
protected:
+ /// Constructor to create a Pooling2dLayer.
+ /// @param [in] param Pooling2dDescriptor to configure the pooling2d operation.
+ /// @param [in] name Optional name for the layer.
Pooling2dLayer(const Pooling2dDescriptor& param, const char* name);
+
+ /// Default destructor
~Pooling2dLayer() = default;
};
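// Illustrative sketch (not part of the patch): the kind of spatial arithmetic InferOutputShapes
// performs for a pooling layer. PooledExtent is a hypothetical helper; it only demonstrates the
// usual floor-rounded formula and is not part of the armnn API.
#include <cstdint>

uint32_t PooledExtent(uint32_t inExtent, uint32_t padLow, uint32_t padHigh,
                      uint32_t poolExtent, uint32_t stride)
{
    // outExtent = 1 + floor((in + padLow + padHigh - pool) / stride)
    return 1u + (inExtent + padLow + padHigh - poolExtent) / stride;
}

// For example, a 2x2 pool with stride 2 and no padding over a 224x224 input gives 112x112:
// PooledExtent(224, 0, 0, 2, 2) == 112.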
namespace armnn
{
+/// This layer represents a reshape operation.
class ReshapeLayer : public LayerWithParameters<ReshapeDescriptor>
{
public:
+ /// Makes a workload for the Reshape type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
- const IWorkloadFactory& factory) const override;
+ const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
ReshapeLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref ReshapeLayer.
void ValidateTensorShapesFromInputs() override;
+
+ /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+ /// otherwise infers the output shapes from the given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes the layer has.
+ /// @return A vector of the inferred output shapes.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ /// Indicates if the layer passed in is equal to this one.
+ /// @param [in] other The other layer to be compared with.
+ /// @return True if the other layer is equal to this one, false otherwise.
bool IsEqual(const Layer& other) const
{
    return (other.GetType() == LayerType::Reshape) &&
        m_Param.m_TargetShape == boost::polymorphic_downcast<const ReshapeLayer*>(&other)->m_Param.m_TargetShape;
}
protected:
+ /// Constructor to create a ReshapeLayer.
+ /// @param [in] desc ReshapeDescriptor to configure the reshape operation.
+ /// @param [in] name Optional name for the layer.
ReshapeLayer(const ReshapeDescriptor& desc, const char* name);
+
+ /// Default destructor
~ReshapeLayer() = default;
};
namespace armnn
{
+/// This layer represents a resize bilinear operation.
class ResizeBilinearLayer : public LayerWithParameters<ResizeBilinearDescriptor>
{
public:
- virtual std::unique_ptr<IWorkload>
- CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const override;
+ /// Makes a workload for the ResizeBilinear type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
ResizeBilinearLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref ResizeBilinearLayer.
void ValidateTensorShapesFromInputs() override;
+
+ /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+ /// otherwise infers the output shapes from the given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes the layer has.
+ /// @return A vector of the inferred output shapes.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
protected:
+ /// Constructor to create a ResizeBilinearLayer.
+ /// @param [in] param ResizeBilinearDescriptor to configure the resize bilinear operation.
+ /// @param [in] name Optional name for the layer.
ResizeBilinearLayer(const ResizeBilinearDescriptor& param, const char* name);
+
+ /// Default destructor
~ResizeBilinearLayer() = default;
};
namespace armnn
{
+/// This layer represents a softmax operation.
class SoftmaxLayer : public LayerWithParameters<SoftmaxDescriptor>
{
public:
+ /// Makes a workload for the Softmax type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
SoftmaxLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref SoftmaxLayer.
void ValidateTensorShapesFromInputs() override;
protected:
+ /// Constructor to create a SoftmaxLayer.
+ /// @param [in] param SoftmaxDescriptor to configure the softmax operation.
+ /// @param [in] name Optional name for the layer.
SoftmaxLayer(const SoftmaxDescriptor& param, const char* name);
+
+ /// Default destructor
~SoftmaxLayer() = default;
};
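// Illustrative sketch (not part of the patch): what the workload created for a SoftmaxLayer
// computes, including the m_Beta scaling from SoftmaxDescriptor. Written as a plain reference
// implementation for clarity, not as the actual backend code.
#include <algorithm>
#include <cmath>
#include <vector>

std::vector<float> ReferenceSoftmax(const std::vector<float>& logits, float beta)
{
    // softmax_i = exp(beta * x_i) / sum_j exp(beta * x_j), computed with the usual
    // max-subtraction trick for numerical stability.
    float maxLogit = logits.empty() ? 0.0f : *std::max_element(logits.begin(), logits.end());
    std::vector<float> result(logits.size());
    float sum = 0.0f;
    for (size_t i = 0; i < logits.size(); ++i)
    {
        result[i] = std::exp(beta * (logits[i] - maxLogit));
        sum += result[i];
    }
    for (float& value : result)
    {
        value /= sum;
    }
    return result;
}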
namespace armnn
{
+/// This layer represents a SpaceToBatchNd operation.
class SpaceToBatchNdLayer : public LayerWithParameters<SpaceToBatchNdDescriptor>
{
public:
+ /// Makes a workload for the SpaceToBatchNd type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
SpaceToBatchNdLayer* Clone(Graph& graph) const override;
+ /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+ /// otherwise infers the output shapes from the given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes the layer has.
+ /// @return A vector of the inferred output shapes.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref SpaceToBatchNdLayer.
void ValidateTensorShapesFromInputs() override;
protected:
+ /// Constructor to create a SpaceToBatchNdLayer.
+ /// @param [in] param SpaceToBatchNdDescriptor to configure the SpaceToBatchNd operation.
+ /// @param [in] name Optional name for the layer.
SpaceToBatchNdLayer(const SpaceToBatchNdDescriptor param, const char* name);
+
+ /// Default destructor
~SpaceToBatchNdLayer() = default;
};
namespace armnn
{
+/// This layer represents a split operation.
class SplitterLayer : public LayerWithParameters<ViewsDescriptor>
{
public:
+ /// Makes a workload for the Splitter type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+
+ /// Set the outputs to be appropriate sub tensors of the input if sub tensors are supported,
+ /// otherwise creates tensor handles.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
virtual void CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
SplitterLayer* Clone(Graph& graph) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref SplitterLayer.
void ValidateTensorShapesFromInputs() override;
+
+ /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+ /// otherwise infers the output shapes from the given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes the layer has.
+ /// @return A vector of the inferred output shapes.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
protected:
+ /// Constructor to create a SplitterLayer.
+ /// @param [in] param ViewsDescriptor to configure the splitter operation.
+ /// @param [in] name Optional name for the layer.
SplitterLayer(const ViewsDescriptor& param, const char* name);
+
+ /// Default destructor
~SplitterLayer() = default;
};
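// Illustrative sketch (not part of the patch): configuring the ViewsDescriptor a SplitterLayer is
// constructed with so that a [1, 4, 16, 16] input is split into two [1, 2, 16, 16] outputs along
// the channel axis. SetViewOriginCoord/SetViewSize are used as documented in Descriptors.hpp;
// the concrete shape values are only an example.
#include <armnn/Descriptors.hpp>
#include <cstdint>

armnn::ViewsDescriptor MakeChannelSplit()
{
    armnn::ViewsDescriptor views(/*numViews=*/2, /*numDimensions=*/4);
    for (uint32_t view = 0; view < 2; ++view)
    {
        // Each view starts at the origin except along the channel dimension (index 1).
        views.SetViewOriginCoord(view, 0, 0);
        views.SetViewOriginCoord(view, 1, view * 2); // channels 0-1, then channels 2-3
        views.SetViewOriginCoord(view, 2, 0);
        views.SetViewOriginCoord(view, 3, 0);
        // Each view covers one batch, two channels and the full 16x16 spatial extent.
        views.SetViewSize(view, 0, 1);
        views.SetViewSize(view, 1, 2);
        views.SetViewSize(view, 2, 16);
        views.SetViewSize(view, 3, 16);
    }
    return views;
}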
namespace armnn
{
+/// This layer represents a strided slice operation.
class StridedSliceLayer : public LayerWithParameters<StridedSliceDescriptor>
{
public:
+ /// Makes a workload for the StridedSlice type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
StridedSliceLayer* Clone(Graph& graph) const override;
+ /// By default returns inputShapes if the number of inputs is equal to the number of outputs,
+ /// otherwise infers the output shapes from the given input shapes and layer properties.
+ /// @param [in] inputShapes The input shapes the layer has.
+ /// @return A vector of the inferred output shapes.
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref StridedSliceLayer.
void ValidateTensorShapesFromInputs() override;
protected:
+ /// Constructor to create a StridedSliceLayer.
+ /// @param [in] param StridedSliceDescriptor to configure the strided slice layer.
+ /// @param [in] name Optional name for the layer.
StridedSliceLayer(const StridedSliceDescriptor& param, const char* name);
+
+ /// Default destructor
~StridedSliceLayer() = default;
};
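// Illustrative sketch (not part of the patch): the kind of descriptor a StridedSliceLayer is
// parameterised with. The m_Begin/m_End/m_Stride member names are assumptions based on the
// StridedSliceDescriptor naming convention; check Descriptors.hpp for the exact interface.
#include <armnn/Descriptors.hpp>

armnn::StridedSliceDescriptor MakeEveryOtherColumnSlice()
{
    // Keep the whole tensor on the first three axes and take every second element of the last
    // axis of a [1, 4, 16, 16] input, yielding a [1, 4, 16, 8] output.
    armnn::StridedSliceDescriptor desc;
    desc.m_Begin  = {0, 0, 0, 0};
    desc.m_End    = {1, 4, 16, 16};
    desc.m_Stride = {1, 1, 1, 2};
    return desc;
}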
namespace armnn
{
+/// This layer represents a subtraction operation.
class SubtractionLayer : public ElementwiseBaseLayer
{
public:
+ /// Makes a workload for the Subtraction type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const override;
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
SubtractionLayer* Clone(Graph& graph) const override;
protected:
+ /// Constructor to create a SubtractionLayer.
+ /// @param [in] name Optional name for the layer.
SubtractionLayer(const char* name);
+
+ /// Default destructor
~SubtractionLayer() = default;
};