src/armnn/layers/ConvertFp32ToFp16Layer.cpp \
src/armnn/layers/DebugLayer.cpp \
src/armnn/layers/DepthwiseConvolution2dLayer.cpp \
+ src/armnn/layers/DetectionPostProcessLayer.cpp \
src/armnn/layers/DivisionLayer.cpp \
src/armnn/layers/ElementwiseBaseLayer.cpp \
src/armnn/layers/EqualLayer.cpp \
src/armnn/layers/DebugLayer.cpp
src/armnn/layers/DepthwiseConvolution2dLayer.hpp
src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+ src/armnn/layers/DetectionPostProcessLayer.hpp
+ src/armnn/layers/DetectionPostProcessLayer.cpp
src/armnn/layers/ElementwiseBaseLayer.hpp
src/armnn/layers/ElementwiseBaseLayer.cpp
src/armnn/layers/EqualLayer.hpp
DataLayout m_DataLayout;
};
+struct DetectionPostProcessDescriptor
+{
+};
+
/// A NormalizationDescriptor for the NormalizationLayer.
struct NormalizationDescriptor
{
struct Convolution2dDescriptor;
struct DebugDescriptor;
struct DepthwiseConvolution2dDescriptor;
+struct DetectionPostProcessDescriptor;
struct FakeQuantizationDescriptor;
struct FullyConnectedDescriptor;
struct L2NormalizationDescriptor;
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ virtual bool IsDetectionPostProcessSupported(
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const DetectionPostProcessDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
virtual bool IsDivisionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
const ConstTensor& biases,
const char* name = nullptr) = 0;
+ /// Adds a Detection PostProcess layer to the network.
+ /// @param descriptor - Description of the Detection PostProcess layer.
+ /// @param name - Optional name for the layer.
+ /// @return - Interface for configuring the layer.
+ virtual IConnectableLayer* AddDetectionPostProcessLayer(
+ const DetectionPostProcessDescriptor& descriptor, const char* name = nullptr) =0;
+
/// Adds a fully connected layer to the network.
/// @param fullyConnectedDescriptor - Description of the fully connected layer.
/// @param weights - Tensor for the weights data.
case LayerType::Convolution2d: return "Convolution2d";
case LayerType::Debug: return "Debug";
case LayerType::DepthwiseConvolution2d: return "DepthwiseConvolution2d";
+ case LayerType::DetectionPostProcess: return "DetectionPostProcess";
case LayerType::Division: return "Division";
case LayerType::Equal: return "Equal";
case LayerType::FakeQuantization: return "FakeQuantization";
Convolution2d,
Debug,
DepthwiseConvolution2d,
+ DetectionPostProcess,
Division,
Equal,
FakeQuantization,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsDepthwiseConvolutionSupported, input, output, descriptor, weights, biases);
}
+bool IsDetectionPostProcessSupported(const BackendId& backend,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const DetectionPostProcessDescriptor& descriptor,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength);
+
bool IsDivisionSupported(const BackendId& backend,
const TensorInfo& input0,
const TensorInfo& input1,
#include "layers/Convolution2dLayer.hpp"
#include "layers/DebugLayer.hpp"
#include "layers/DepthwiseConvolution2dLayer.hpp"
+#include "layers/DetectionPostProcessLayer.hpp"
#include "layers/DivisionLayer.hpp"
#include "layers/EqualLayer.hpp"
#include "layers/FakeQuantizationLayer.hpp"
DECLARE_LAYER(Convolution2d)
DECLARE_LAYER(Debug)
DECLARE_LAYER(DepthwiseConvolution2d)
+DECLARE_LAYER(DetectionPostProcess)
DECLARE_LAYER(Division)
DECLARE_LAYER(Equal)
DECLARE_LAYER(FakeQuantization)
return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, &biases, name);
}
+IConnectableLayer* Network::AddDetectionPostProcessLayer(const armnn::DetectionPostProcessDescriptor& descriptor,
+ const char* name)
+{
+ return m_Graph->AddLayer<DetectionPostProcessLayer>(descriptor, name);
+}
+
IConnectableLayer* Network::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
const char* name)
{
const ConstTensor& biases,
const char* name = nullptr) override;
+ IConnectableLayer* AddDetectionPostProcessLayer(
+ const DetectionPostProcessDescriptor& descriptor,
+ const char* name = nullptr) override;
+
IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
const char* name = nullptr) override;
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "DetectionPostProcessLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+// A DetectionPostProcess layer takes 2 input slots and produces 4 output slots.
+DetectionPostProcessLayer::DetectionPostProcessLayer(const DetectionPostProcessDescriptor& param, const char* name)
+    : LayerWithParameters(2, 4, LayerType::DetectionPostProcess, param, name)
+{
+}
+
+// Builds the queue descriptor (PrepInfoAndDesc copies the layer's parameters
+// and tensor infos into it) and asks the backend factory for a workload.
+std::unique_ptr<IWorkload> DetectionPostProcessLayer::CreateWorkload(const armnn::Graph& graph,
+                                                  const armnn::IWorkloadFactory& factory) const
+{
+    DetectionPostProcessQueueDescriptor descriptor;
+    return factory.CreateDetectionPostProcess(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+// Clones this layer into the given graph, copying its parameters and name.
+DetectionPostProcessLayer* DetectionPostProcessLayer::Clone(Graph& graph) const
+{
+    return CloneBase<DetectionPostProcessLayer>(graph, m_Param, GetName());
+}
+
+// NOTE(review): this only verifies that both input slots are connected; no
+// output-shape inference or validation is performed yet — confirm this is
+// intentional while DetectionPostProcessDescriptor is still empty.
+void DetectionPostProcessLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(2, CHECK_LOCATION());
+}
+
+} // namespace armnn
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents a detection postprocess operator.
+class DetectionPostProcessLayer : public LayerWithParameters<DetectionPostProcessDescriptor>
+{
+public:
+    /// Makes a workload for the DetectionPostProcess type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    // NOTE(review): 'virtual' is redundant here — 'override' already implies it.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    DetectionPostProcessLayer* Clone(Graph& graph) const override;
+
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref DetectionPostProcessLayer.
+    void ValidateTensorShapesFromInputs() override;
+
+protected:
+    /// Constructor to create a DetectionPostProcessLayer.
+    /// @param [in] param DetectionPostProcessDescriptor to configure the detection postprocess.
+    /// @param [in] name Optional name for the layer.
+    DetectionPostProcessLayer(const DetectionPostProcessDescriptor& param, const char* name);
+
+    /// Default destructor
+    // Protected and non-virtual: instances are only deleted through the Layer
+    // hierarchy, never through a DetectionPostProcessLayer pointer directly.
+    ~DetectionPostProcessLayer() = default;
+};
+
+} // namespace armnn
+
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
+bool LayerSupportBase::IsDetectionPostProcessSupported(const armnn::TensorInfo& input0,
+ const armnn::TensorInfo& input1,
+ const armnn::DetectionPostProcessDescriptor& descriptor,
+ armnn::Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
bool LayerSupportBase::IsDivisionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsDetectionPostProcessSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const DetectionPostProcessDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsDivisionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
void Validate(const WorkloadInfo& workloadInfo) const;
};
+struct DetectionPostProcessQueueDescriptor : QueueDescriptorWithParameters<DetectionPostProcessDescriptor>
+{
+ void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
// Normalization layer workload data.
struct NormalizationQueueDescriptor : QueueDescriptorWithParameters<NormalizationDescriptor>
{
reason);
break;
}
+ case LayerType::DetectionPostProcess:
+ {
+ const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
+ auto cLayer = boost::polymorphic_downcast<const DetectionPostProcessLayer*>(&layer);
+ const DetectionPostProcessDescriptor& descriptor = cLayer->GetParameters();
+ result = layerSupportObject->IsDetectionPostProcessSupported(input0,
+ input1,
+ descriptor,
+ reason);
+ break;
+ }
case LayerType::Equal:
{
const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
virtual std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(
const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateDetectionPostProcess(
+ const DetectionPostProcessQueueDescriptor& descriptor, const WorkloadInfo& info) const = 0;
+
virtual std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const = 0;
DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)
+DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess)
+
DECLARE_LAYER_POLICY_1_PARAM(Equal)
DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)
return MakeWorkload<ClDepthwiseConvolutionWorkload>(descriptor, info);
}
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDetectionPostProcess(
+ const armnn::DetectionPostProcessQueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const
+{
+ return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ std::unique_ptr<IWorkload> CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
return std::make_unique<NeonDepthwiseConvolutionWorkload>(descriptor, info);
}
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDetectionPostProcess(
+ const armnn::DetectionPostProcessQueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const
+{
+ return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+}
+
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateNormalization(
const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ std::unique_ptr<IWorkload> CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
RefDepthwiseConvolution2dUint8Workload>(descriptor, info);
}
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDetectionPostProcess(
+ const armnn::DetectionPostProcessQueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const
+{
+ return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateNormalization(
const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ std::unique_ptr<IWorkload> CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;