From ee391d59dbe3305734de4ff7d98c27c8a5252624 Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Thu, 5 Sep 2019 17:50:44 +0100
Subject: [PATCH] IVGCVSW-3722 Add front end support for ArgMinMax

Change-Id: I31c5616bea3097f30cde68442d3222e0b0fe2235
Signed-off-by: Nikhil Raj
---
 Android.mk                                       |  1 +
 CMakeLists.txt                                   |  2 +
 include/armnn/Descriptors.hpp                    |  9 ++++
 include/armnn/DescriptorsFwd.hpp                 |  1 +
 include/armnn/ILayerSupport.hpp                  |  6 +++
 include/armnn/ILayerVisitor.hpp                  |  8 ++++
 include/armnn/INetwork.hpp                       |  7 +++
 include/armnn/LayerVisitorBase.hpp               |  4 ++
 src/armnn/InternalTypes.hpp                      |  1 +
 src/armnn/LayerSupport.cpp                       | 10 +++++
 src/armnn/LayersFwd.hpp                          |  2 +
 src/armnn/Network.cpp                            |  6 +++
 src/armnn/Network.hpp                            |  3 ++
 src/armnn/layers/ArgMinMaxLayer.cpp              | 52 ++++++++++++++++++++++
 src/armnn/layers/ArgMinMaxLayer.hpp              | 44 ++++++++++++++++++
 src/armnnSerializer/Serializer.cpp               |  9 ++++
 src/armnnSerializer/Serializer.hpp               |  4 ++
 src/backends/backendsCommon/LayerSupportBase.cpp |  7 +++
 src/backends/backendsCommon/LayerSupportBase.hpp |  5 +++
 src/backends/backendsCommon/WorkloadData.cpp     | 23 ++++++++++
 src/backends/backendsCommon/WorkloadData.hpp     |  5 +++
 src/backends/backendsCommon/WorkloadDataFwd.hpp  |  1 +
 src/backends/backendsCommon/WorkloadFactory.cpp  | 20 +++++++++
 src/backends/backendsCommon/WorkloadFactory.hpp  |  3 ++
 .../test/IsLayerSupportedTestImpl.hpp            |  2 +
 25 files changed, 235 insertions(+)
 create mode 100644 src/armnn/layers/ArgMinMaxLayer.cpp
 create mode 100644 src/armnn/layers/ArgMinMaxLayer.hpp

diff --git a/Android.mk b/Android.mk
index 21b186e..3640e0c 100644
--- a/Android.mk
+++ b/Android.mk
@@ -115,6 +115,7 @@ LOCAL_SRC_FILES := \
         src/armnn/layers/AbsLayer.cpp \
         src/armnn/layers/ActivationLayer.cpp \
         src/armnn/layers/AdditionLayer.cpp \
+        src/armnn/layers/ArgMinMaxLayer.cpp \
         src/armnn/layers/BatchNormalizationLayer.cpp \
         src/armnn/layers/BatchToSpaceNdLayer.cpp \
         src/armnn/layers/ConcatLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index e4ca1fc..ef87c6d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -235,6 +235,8 @@ list(APPEND armnn_sources
     src/armnn/layers/ActivationLayer.cpp
     src/armnn/layers/AdditionLayer.hpp
     src/armnn/layers/AdditionLayer.cpp
+    src/armnn/layers/ArgMinMaxLayer.hpp
+    src/armnn/layers/ArgMinMaxLayer.cpp
     src/armnn/layers/BatchNormalizationLayer.hpp
     src/armnn/layers/BatchNormalizationLayer.cpp
     src/armnn/layers/BatchToSpaceNdLayer.hpp
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 9630d86..87f4bdb 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -30,6 +30,15 @@ struct ActivationDescriptor
     float m_B;
 };
 
+/// An ArgMinMaxDescriptor for ArgMinMaxLayer
+struct ArgMinMaxDescriptor
+{
+    ArgMinMaxDescriptor()
+        : m_Axis(-1) {}
+
+    int m_Axis;
+};
+
 /// A PermuteDescriptor for the PermuteLayer.
 struct PermuteDescriptor
 {
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index eddf91f..8f81b4f 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -8,6 +8,7 @@ namespace armnn
 {
 struct ActivationDescriptor;
+struct ArgMinMaxDescriptor;
 struct BatchNormalizationDescriptor;
 struct BatchToSpaceNdDescriptor;
 struct Convolution2dDescriptor;
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index c67569b..d168226 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -14,6 +14,7 @@
 #include <functional>
 #include <memory>
 #include <vector>
+#include "ArmNN.hpp"
 
 namespace armnn
 {
@@ -41,6 +42,11 @@ public:
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
+    virtual bool IsArgMinMaxSupported(const TensorInfo& input,
+                                      const TensorInfo& output,
+                                      const ArgMinMaxDescriptor& descriptor,
+                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
     virtual bool IsBatchNormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const TensorInfo& mean,
diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
index a22de87..a504a41 100644
--- a/include/armnn/ILayerVisitor.hpp
+++ b/include/armnn/ILayerVisitor.hpp
@@ -41,6 +41,14 @@ public:
     virtual void VisitAdditionLayer(const IConnectableLayer* layer,
                                     const char* name = nullptr) = 0;
 
+    /// Function that an arg min max layer should call back to when its Accept(ILayerVisitor&) function is invoked.
+    /// @param layer - pointer to the layer which is calling back to this visit function.
+    /// @param argMinMaxDescriptor - ArgMinMaxDescriptor to configure the arg min max operation.
+    /// @param name - Optional name for the layer.
+    virtual void VisitArgMinMaxLayer(const IConnectableLayer* layer,
+                                     const ArgMinMaxDescriptor& argMinMaxDescriptor,
+                                     const char* name = nullptr) = 0;
+
     /// Function that a batch normalization layer should call back to when its Accept(ILayerVisitor&)
     /// function is invoked.
     /// @param layer - pointer to the layer which is calling back to this visit function.
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index ce0fda2..cd1b7a6 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -102,6 +102,13 @@ public:
     /// @return - Interface for configuring the layer.
     virtual IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name = nullptr) = 0;
 
+    /// Adds an ArgMinMax layer to the network.
+    /// @param desc - Parameters for the ArgMinMax operation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    virtual IConnectableLayer* AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
+                                                 const char* name = nullptr) = 0;
+
     /// Adds a concatenation layer to the network.
     /// @param concatDescriptor - ConcatDescriptor (synonym for OriginsDescriptor) to configure the concatenation
     /// process. Number of Views must be equal to the number of inputs, and their order
diff --git a/include/armnn/LayerVisitorBase.hpp b/include/armnn/LayerVisitorBase.hpp
index 363a091..0739b43 100644
--- a/include/armnn/LayerVisitorBase.hpp
+++ b/include/armnn/LayerVisitorBase.hpp
@@ -39,6 +39,10 @@ public:
     void VisitAdditionLayer(const IConnectableLayer*,
                             const char*) override { DefaultPolicy::Apply(__func__); }
 
+    void VisitArgMinMaxLayer(const IConnectableLayer*,
+                             const ArgMinMaxDescriptor&,
+                             const char*) override { DefaultPolicy::Apply(__func__); }
+
     void VisitBatchNormalizationLayer(const IConnectableLayer*,
                                       const BatchNormalizationDescriptor&,
                                       const ConstTensor&,
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index 6141f27..98308f9 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -17,6 +17,7 @@ enum class LayerType
     Abs = FirstLayer,
     Activation,
     Addition,
+    ArgMinMax,
     BatchNormalization,
     BatchToSpaceNd,
     Concat,
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index d730205..f88e4e1 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -91,6 +91,16 @@ bool IsAdditionSupported(const BackendId& backend,
     FORWARD_LAYER_SUPPORT_FUNC(backend, IsAdditionSupported, input0, input1, output);
 }
 
+bool IsArgMinMaxSupported(const BackendId& backend,
+                          const TensorInfo& input,
+                          const TensorInfo& output,
+                          const ArgMinMaxDescriptor& descriptor,
+                          char* reasonIfUnsupported,
+                          size_t reasonIfUnsupportedMaxLength)
+{
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsArgMinMaxSupported, input, output, descriptor);
+}
+
 bool IsBatchNormalizationSupported(const BackendId& backend,
                                    const TensorInfo& input,
                                    const TensorInfo& output,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 94a3b89..6e4cf6a 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -9,6 +9,7 @@
 #include "layers/AbsLayer.hpp"
 #include "layers/ActivationLayer.hpp"
 #include "layers/AdditionLayer.hpp"
+#include "layers/ArgMinMaxLayer.hpp"
 #include "layers/BatchNormalizationLayer.hpp"
 #include "layers/BatchToSpaceNdLayer.hpp"
 #include "layers/ConcatLayer.hpp"
@@ -89,6 +90,7 @@ constexpr LayerType LayerEnumOf(const T* = nullptr);
 DECLARE_LAYER(Abs)
 DECLARE_LAYER(Activation)
 DECLARE_LAYER(Addition)
+DECLARE_LAYER(ArgMinMax)
 DECLARE_LAYER(BatchNormalization)
 DECLARE_LAYER(BatchToSpaceNd)
 DECLARE_LAYER(Concat)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index dc26a1b..6971cb8 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1116,6 +1116,12 @@ IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activ
     return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
 }
 
+IConnectableLayer* Network::AddArgMinMaxLayer(const ArgMinMaxDescriptor& argMinMaxDescriptor,
+                                              const char* name)
+{
+    return m_Graph->AddLayer<ArgMinMaxLayer>(argMinMaxDescriptor, name);
+}
+
 IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
     const char* name)
 {
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 4516c0a..aac875a 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -36,6 +36,9 @@ public:
 
     IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name=nullptr) override;
 
+    IConnectableLayer* AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
+                                         const char* name = nullptr) override;
+
     IConnectableLayer* AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                               const char* name = nullptr) override;
 
diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp
new file mode 100644
index 0000000..aad95eb
--- /dev/null
+++ b/src/armnn/layers/ArgMinMaxLayer.cpp
@@ -0,0 +1,52 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "ArgMinMaxLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+ArgMinMaxLayer::ArgMinMaxLayer(const ArgMinMaxDescriptor& param, const char* name)
+    : LayerWithParameters(1, 1, LayerType::ArgMinMax, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> ArgMinMaxLayer::CreateWorkload(const Graph& graph,
+                                                          const IWorkloadFactory& factory) const
+{
+    ArgMinMaxQueueDescriptor descriptor;
+    return factory.CreateArgMinMax(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+ArgMinMaxLayer* ArgMinMaxLayer::Clone(Graph& graph) const
+{
+    return CloneBase<ArgMinMaxLayer>(graph, m_Param, GetName());
+}
+
+void ArgMinMaxLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
+
+    BOOST_ASSERT(inferredShapes.size() == 1);
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "ArgMinMaxLayer: TensorShape set on OutputSlot does not match the inferred shape.",
+        GetOutputSlot(0).GetTensorInfo().GetShape(),
+        inferredShapes[0]);
+}
+
+void ArgMinMaxLayer::Accept(ILayerVisitor& visitor) const
+{
+    visitor.VisitArgMinMaxLayer(this, GetParameters(), GetName());
+}
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/armnn/layers/ArgMinMaxLayer.hpp b/src/armnn/layers/ArgMinMaxLayer.hpp
new file mode 100644
index 0000000..ca1337f
--- /dev/null
+++ b/src/armnn/layers/ArgMinMaxLayer.hpp
@@ -0,0 +1,44 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents an ArgMinMax operation.
+class ArgMinMaxLayer : public LayerWithParameters<ArgMinMaxDescriptor>
+{
+public:
+    /// Makes a workload for the ArgMinMax type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    ArgMinMaxLayer* Clone(Graph& graph) const override;
+
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref ArgMinMaxLayer.
+    void ValidateTensorShapesFromInputs() override;
+
+    void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+    /// Constructor to create an ArgMinMaxLayer.
+    /// @param [in] param ArgMinMaxDescriptor to configure the ArgMinMax operation.
+    /// @param [in] name Optional name for the layer.
+    ArgMinMaxLayer(const ArgMinMaxDescriptor& param, const char* name);
+
+    /// Default destructor
+    ~ArgMinMaxLayer() = default;
+};
+
+}
\ No newline at end of file
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 56d313f..a8d9c23 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -146,6 +146,15 @@ void SerializerVisitor::VisitAdditionLayer(const armnn::IConnectableLayer* layer
     CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_AdditionLayer);
 }
 
+// Build FlatBuffer for ArgMinMax Layer
+void SerializerVisitor::VisitArgMinMaxLayer(const armnn::IConnectableLayer *layer,
+                                            const armnn::ArgMinMaxDescriptor& descriptor,
+                                            const char *name)
+{
+    // This will be implemented in IVGCVSW-3724
+    throw UnimplementedException("SerializerVisitor::VisitArgMinMaxLayer is not implemented");
+}
+
 // Build FlatBuffer for BatchToSpaceNd Layer
 void SerializerVisitor::VisitBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
                                                  const armnn::BatchToSpaceNdDescriptor& descriptor,
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 7400885..190ed23 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -52,6 +52,10 @@ public:
     void VisitAdditionLayer(const armnn::IConnectableLayer* layer,
                             const char* name = nullptr) override;
 
+    void VisitArgMinMaxLayer(const armnn::IConnectableLayer* layer,
+                             const armnn::ArgMinMaxDescriptor& argMinMaxDescriptor,
+                             const char* name = nullptr) override;
+
     void VisitBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
                                   const armnn::BatchToSpaceNdDescriptor& descriptor,
                                   const char* name = nullptr) override;
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 464ec4e..a8d1ead 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -57,6 +57,13 @@ bool LayerSupportBase::IsAdditionSupported(const TensorInfo& input0,
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsArgMinMaxSupported(const armnn::TensorInfo &input, const armnn::TensorInfo &output,
+                                            const armnn::ArgMinMaxDescriptor& descriptor,
+                                            armnn::Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 bool LayerSupportBase::IsBatchNormalizationSupported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      const TensorInfo& mean,
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 3cf3d4e..25dbdf2 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -27,6 +27,11 @@ public:
                              const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsArgMinMaxSupported(const TensorInfo& input,
+                              const TensorInfo& output,
+                              const ArgMinMaxDescriptor& descriptor,
+                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsBatchNormalizationSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const TensorInfo& mean,
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index fed159b..e7e6d52 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -461,6 +461,29 @@ void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
 }
 
+void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    const std::string descriptorName{"ArgMinMaxQueueDescriptor"};
+
+    ValidateNumInputs(workloadInfo, descriptorName, 1);
+    ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
+    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
+
+    std::vector<DataType> supportedTypes =
+    {
+        DataType::Float16,
+        DataType::Float32,
+        DataType::QuantisedAsymm8,
+        DataType::QuantisedSymm16
+    };
+
+    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
+    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
+    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
+}
+
 void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
 {
     const std::string descriptorName{"SoftmaxQueueDescriptor"};
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index a43c7cc..751d37f 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -132,6 +132,11 @@ struct ActivationQueueDescriptor : QueueDescriptorWithParameters<ActivationDescr
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+struct ArgMinMaxQueueDescriptor : QueueDescriptorWithParameters<ArgMinMaxDescriptor>
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 // Fully connected layer workload data.
 struct FullyConnectedQueueDescriptor : QueueDescriptorWithParameters<FullyConnectedDescriptor>
 {
diff --git a/src/backends/backendsCommon/WorkloadDataFwd.hpp b/src/backends/backendsCommon/WorkloadDataFwd.hpp
index abee316..d4352c7 100644
--- a/src/backends/backendsCommon/WorkloadDataFwd.hpp
+++ b/src/backends/backendsCommon/WorkloadDataFwd.hpp
@@ -14,6 +14,7 @@ struct SoftmaxQueueDescriptor;
 struct SplitterQueueDescriptor;
 struct ConcatQueueDescriptor;
 struct ActivationQueueDescriptor;
+struct ArgMinMaxQueueDescriptor;
 struct FullyConnectedQueueDescriptor;
 struct PermuteQueueDescriptor;
 struct Pooling2dQueueDescriptor;
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 9d081af..17bd98b 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -102,6 +102,20 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                         reason);
             break;
         }
+        case LayerType::ArgMinMax:
+        {
+            auto cLayer = boost::polymorphic_downcast<const ArgMinMaxLayer*>(&layer);
+            const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();
+
+            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+            result = layerSupportObject->IsArgMinMaxSupported(
+                        OverrideDataType(input, dataType),
+                        OverrideDataType(output, dataType),
+                        descriptor,
+                        reason);
+            break;
+        }
         case LayerType::BatchNormalization:
         {
             auto cLayer = boost::polymorphic_downcast<const BatchNormalizationLayer*>(&layer);
@@ -979,6 +993,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueD
     return std::unique_ptr<IWorkload>();
 }
 
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
+                                                             const WorkloadInfo& info) const
+{
+    return std::unique_ptr<IWorkload>();
+}
+
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchNormalization(
     const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index d0164b2..6fd334b 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -58,6 +58,9 @@ public:
     virtual std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor,
                                                       const WorkloadInfo& info) const;
 
+    virtual std::unique_ptr<IWorkload> CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
+                                                       const WorkloadInfo& info) const;
+
     virtual std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const;
 
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index caf5e58..1dc9e97 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -391,6 +391,8 @@ DECLARE_LAYER_POLICY_2_PARAM(Activation)
 
 DECLARE_LAYER_POLICY_1_PARAM(Addition)
 
+DECLARE_LAYER_POLICY_2_PARAM(ArgMinMax)
+
 DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)
 
 DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd)
-- 
2.7.4
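
A minimal usage sketch follows. It is illustrative only and not part of the patch: it shows how the front-end API added above could be exercised, assuming the existing public ArmNN API from armnn/ArmNN.hpp (INetwork::Create, AddInputLayer, AddOutputLayer, IOutputSlot::Connect and SetTensorInfo). The layer names, binding ids and tensor shapes are placeholder choices; the shapes simply satisfy the ArgMinMaxQueueDescriptor::Validate check above, which currently requires input and output to match in shape and data type. Backend workloads and serialization (IVGCVSW-3724) are not part of this change, so the network cannot be optimized or executed against a real backend yet.

// Illustrative sketch only: builds a network graph containing the new ArgMinMax layer.
#include <armnn/ArmNN.hpp>

int main()
{
    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    // m_Axis selects the axis to reduce over; -1 (the descriptor default) means the last axis.
    ArgMinMaxDescriptor descriptor;
    descriptor.m_Axis = -1;

    // Layer names and binding ids below are arbitrary placeholders.
    IConnectableLayer* input     = network->AddInputLayer(0, "input");
    IConnectableLayer* argMinMax = network->AddArgMinMaxLayer(descriptor, "argminmax");
    IConnectableLayer* output    = network->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(argMinMax->GetInputSlot(0));
    argMinMax->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Placeholder shape/type chosen to pass the front-end Validate() added in this patch,
    // which expects input and output to match in both shape and data type.
    TensorInfo info({ 1, 4 }, DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(info);
    argMinMax->GetOutputSlot(0).SetTensorInfo(info);

    return 0;
}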