IVGCVSW-2377 Add no-op factory implementations for all backends for the Greater operation

author     Matteo Martincigh <matteo.martincigh@arm.com>
           Thu, 13 Dec 2018 12:48:25 +0000 (12:48 +0000)
committer  Matteo Martincigh <matteo.martincigh@arm.com>
           Fri, 14 Dec 2018 11:34:52 +0000 (11:34 +0000)

 * Added GreaterQueueDescriptor in WorkloadData.hpp
 * Added CreateGreater function in WorkloadFactory.hpp
 * Added stub implementation of the CreateGreater function in RefWorkloadFactory,
   NeonWorkloadFactory and ClWorkloadFactory
 * Added GreaterLayer stub implementation
 * Renamed ArithmeticBaseLayer to ElementwiseBaseLayer

Change-Id: I7e38c2936de905da921a92ba3f918478169ec7f5
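
For illustration, a minimal sketch of driving the new front-end entry point added by this change. AddInputLayer, AddOutputLayer, Connect and SetTensorInfo are existing armnn calls; the tensor shapes, layer names and binding ids below are assumptions, and with only the no-op workload stubs from this patch the Greater layer cannot yet be executed on any backend.

    // Illustrative sketch only: shapes, names and binding ids are assumptions.
    #include <armnn/ArmNN.hpp>

    int main()
    {
        using namespace armnn;

        INetworkPtr network = INetwork::Create();

        // Two inputs feed the new Greater layer; its single output goes to the network output.
        IConnectableLayer* input0  = network->AddInputLayer(0, "input0");
        IConnectableLayer* input1  = network->AddInputLayer(1, "input1");
        IConnectableLayer* greater = network->AddGreaterLayer("greater");
        IConnectableLayer* output  = network->AddOutputLayer(0, "output");

        input0->GetOutputSlot(0).Connect(greater->GetInputSlot(0));
        input1->GetOutputSlot(0).Connect(greater->GetInputSlot(1));
        greater->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        // GreaterLayer derives from ElementwiseBaseLayer, so its output shape is
        // inferred from the two (broadcastable) input shapes.
        TensorInfo info({ 1, 4 }, DataType::Float32);
        input0->GetOutputSlot(0).SetTensorInfo(info);
        input1->GetOutputSlot(0).SetTensorInfo(info);
        greater->GetOutputSlot(0).SetTensorInfo(info);

        return 0;
    }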

38 files changed:
Android.mk
CMakeLists.txt
include/armnn/ILayerSupport.hpp
include/armnn/INetwork.hpp
include/armnn/LayerSupport.hpp
src/armnn/InternalTypes.cpp
src/armnn/InternalTypes.hpp
src/armnn/LayerSupport.cpp
src/armnn/LayersFwd.hpp
src/armnn/Network.cpp
src/armnn/Network.hpp
src/armnn/layers/AdditionLayer.cpp
src/armnn/layers/AdditionLayer.hpp
src/armnn/layers/DivisionLayer.cpp
src/armnn/layers/DivisionLayer.hpp
src/armnn/layers/ElementwiseBaseLayer.cpp [moved from src/armnn/layers/ArithmeticBaseLayer.cpp with 82% similarity]
src/armnn/layers/ElementwiseBaseLayer.hpp [moved from src/armnn/layers/ArithmeticBaseLayer.hpp with 73% similarity]
src/armnn/layers/GreaterLayer.cpp [new file with mode: 0644]
src/armnn/layers/GreaterLayer.hpp [new file with mode: 0644]
src/armnn/layers/MaximumLayer.cpp
src/armnn/layers/MaximumLayer.hpp
src/armnn/layers/MinimumLayer.cpp
src/armnn/layers/MinimumLayer.hpp
src/armnn/layers/MultiplicationLayer.cpp
src/armnn/layers/MultiplicationLayer.hpp
src/armnn/layers/SubtractionLayer.cpp
src/armnn/layers/SubtractionLayer.hpp
src/backends/backendsCommon/ILayerSupport.cpp
src/backends/backendsCommon/WorkloadData.hpp
src/backends/backendsCommon/WorkloadFactory.cpp
src/backends/backendsCommon/WorkloadFactory.hpp
src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
src/backends/cl/ClWorkloadFactory.cpp
src/backends/cl/ClWorkloadFactory.hpp
src/backends/neon/NeonWorkloadFactory.cpp
src/backends/neon/NeonWorkloadFactory.hpp
src/backends/reference/RefWorkloadFactory.cpp
src/backends/reference/RefWorkloadFactory.hpp

index fdac0dd..e4033d5 100644 (file)
@@ -84,7 +84,6 @@ LOCAL_SRC_FILES := \
         src/armnnUtils/VerificationHelpers.cpp \
         src/armnn/layers/ActivationLayer.cpp \
         src/armnn/layers/AdditionLayer.cpp \
-        src/armnn/layers/ArithmeticBaseLayer.cpp \
         src/armnn/layers/BatchNormalizationLayer.cpp \
         src/armnn/layers/BatchToSpaceNdLayer.cpp \
         src/armnn/layers/ConstantLayer.cpp \
@@ -93,9 +92,11 @@ LOCAL_SRC_FILES := \
         src/armnn/layers/ConvertFp32ToFp16Layer.cpp \
         src/armnn/layers/DebugLayer.cpp \
         src/armnn/layers/DepthwiseConvolution2dLayer.cpp \
+        src/armnn/layers/ElementwiseBaseLayer.cpp \
         src/armnn/layers/FakeQuantizationLayer.cpp \
         src/armnn/layers/FloorLayer.cpp \
         src/armnn/layers/FullyConnectedLayer.cpp \
+        src/armnn/layers/GreaterLayer.cpp \
         src/armnn/layers/InputLayer.cpp \
         src/armnn/layers/L2NormalizationLayer.cpp \
         src/armnn/layers/LstmLayer.cpp \
index 189a1ab..370b94c 100644 (file)
@@ -176,8 +176,6 @@ list(APPEND armnn_sources
     src/armnn/layers/ActivationLayer.cpp
     src/armnn/layers/AdditionLayer.hpp
     src/armnn/layers/AdditionLayer.cpp
-    src/armnn/layers/ArithmeticBaseLayer.hpp
-    src/armnn/layers/ArithmeticBaseLayer.cpp
     src/armnn/layers/BatchNormalizationLayer.hpp
     src/armnn/layers/BatchNormalizationLayer.cpp
     src/armnn/layers/BatchToSpaceNdLayer.hpp
@@ -194,12 +192,16 @@ list(APPEND armnn_sources
     src/armnn/layers/DebugLayer.cpp
     src/armnn/layers/DepthwiseConvolution2dLayer.hpp
     src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+    src/armnn/layers/ElementwiseBaseLayer.hpp
+    src/armnn/layers/ElementwiseBaseLayer.cpp
     src/armnn/layers/FakeQuantizationLayer.hpp
     src/armnn/layers/FakeQuantizationLayer.cpp
     src/armnn/layers/FloorLayer.hpp
     src/armnn/layers/FloorLayer.cpp
     src/armnn/layers/FullyConnectedLayer.hpp
     src/armnn/layers/FullyConnectedLayer.cpp
+    src/armnn/layers/GreaterLayer.cpp
+    src/armnn/layers/GreaterLayer.hpp
     src/armnn/layers/InputLayer.hpp
     src/armnn/layers/InputLayer.cpp
     src/armnn/layers/L2NormalizationLayer.hpp
index 2498aa9..71ad50d 100644 (file)
@@ -208,6 +208,11 @@ public:
                                         const TensorInfo& input1,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+
+    virtual bool IsGreaterSupported(const TensorInfo& input0,
+                                    const TensorInfo& input1,
+                                    const TensorInfo& output,
+                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
 }; // class ILayerSupport
 
 using ILayerSupportSharedPtr = std::shared_ptr<ILayerSupport>;
index 33181ce..7c88cf0 100644 (file)
@@ -328,6 +328,11 @@ public:
     /// @ return - Interface for configuring the layer.
     virtual IConnectableLayer* AddMinimumLayer(const char* name = nullptr) = 0;
 
+    /// Add a Greater layer to the network.
+    /// @param name - Optional name for the layer.
+    /// @ return - Interface for configuring the layer.
+    virtual IConnectableLayer* AddGreaterLayer(const char* name = nullptr) = 0;
+
 protected:
     ~INetwork() {}
 };
index 53efc15..5b96bc6 100644 (file)
@@ -287,4 +287,12 @@ bool IsMinimumSupported(const BackendId& backend,
                         char* reasonIfUnsupported = nullptr,
                         size_t reasonIfUnsupportedMaxLength = 1024);
 
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsGreaterSupported(const BackendId& backend,
+                        const TensorInfo& input0,
+                        const TensorInfo& input1,
+                        const TensorInfo& output,
+                        char* reasonIfUnsupported = nullptr,
+                        size_t reasonIfUnsupportedMaxLength = 1024);
+
 }
index cbf6f8a..845ba9e 100644 (file)
@@ -28,6 +28,7 @@ char const* GetLayerTypeAsCString(LayerType type)
         case LayerType::FakeQuantization: return "FakeQuantization";
         case LayerType::Floor: return "Floor";
         case LayerType::FullyConnected: return "FullyConnected";
+        case LayerType::Greater: return "Greater";
         case LayerType::Input: return "Input";
         case LayerType::L2Normalization: return "L2Normalization";
         case LayerType::Lstm: return "Lstm";
@@ -39,6 +40,7 @@ char const* GetLayerTypeAsCString(LayerType type)
         case LayerType::Multiplication: return "Multiplication";
         case LayerType::Normalization: return "Normalization";
         case LayerType::Output: return "Output";
+        case LayerType::Pad: return "Pad";
         case LayerType::Permute: return "Permute";
         case LayerType::Pooling2d: return "Pooling2d";
         case LayerType::Reshape: return "Reshape";
@@ -48,7 +50,6 @@ char const* GetLayerTypeAsCString(LayerType type)
         case LayerType::Splitter: return "Splitter";
         case LayerType::StridedSlice: return "StridedSlice";
         case LayerType::Subtraction: return "Subtraction";
-        case LayerType::Pad: return "Pad";
         default:
             BOOST_ASSERT_MSG(false, "Unknown layer type");
             return "Unknown";
index 3d4f043..27a1359 100644 (file)
@@ -28,6 +28,7 @@ enum class LayerType
     FakeQuantization,
     Floor,
     FullyConnected,
+    Greater,
     Input,
     L2Normalization,
     Lstm,
index 298b25c..d1161b6 100644 (file)
@@ -445,4 +445,14 @@ bool IsMinimumSupported(const BackendId& backend,
     FORWARD_LAYER_SUPPORT_FUNC(backend, IsMinimumSupported, input0, input1, output);
 }
 
+bool IsGreaterSupported(const BackendId& backend,
+                        const TensorInfo& input0,
+                        const TensorInfo& input1,
+                        const TensorInfo& output,
+                        char* reasonIfUnsupported,
+                        size_t reasonIfUnsupportedMaxLength)
+{
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsGreaterSupported, input0, input1, output);
+}
+
 }
index ebfa5db..39b0b20 100644 (file)
@@ -20,6 +20,7 @@
 #include "layers/FakeQuantizationLayer.hpp"
 #include "layers/FloorLayer.hpp"
 #include "layers/FullyConnectedLayer.hpp"
+#include "layers/GreaterLayer.hpp"
 #include "layers/InputLayer.hpp"
 #include "layers/L2NormalizationLayer.hpp"
 #include "layers/LstmLayer.hpp"
@@ -83,6 +84,7 @@ DECLARE_LAYER(Division)
 DECLARE_LAYER(FakeQuantization)
 DECLARE_LAYER(Floor)
 DECLARE_LAYER(FullyConnected)
+DECLARE_LAYER(Greater)
 DECLARE_LAYER(Input)
 DECLARE_LAYER(L2Normalization)
 DECLARE_LAYER(Lstm)
index ecab504..7a7e180 100644 (file)
@@ -717,6 +717,11 @@ IConnectableLayer* Network::AddStridedSliceLayer(const StridedSliceDescriptor& s
     return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
 }
 
+IConnectableLayer* Network::AddGreaterLayer(const char* name)
+{
+    return m_Graph->AddLayer<GreaterLayer>(name);
+}
+
 OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
     : m_Graph(std::move(graph))
 {
index e65b1d5..31e86ac 100644 (file)
@@ -135,6 +135,8 @@ public:
 
     IConnectableLayer* AddMinimumLayer(const char* name = nullptr) override;
 
+    IConnectableLayer* AddGreaterLayer(const char* name = nullptr) override;
+
 private:
     IConnectableLayer* AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
         const ConstTensor& weights,
index 0ccf398..2762266 100644 (file)
@@ -2,6 +2,7 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
 #include "AdditionLayer.hpp"
 
 #include "LayerCloneBase.hpp"
@@ -14,7 +15,7 @@ namespace armnn
 {
 
 AdditionLayer::AdditionLayer(const char* name)
-    : ArithmeticBaseLayer(2, 1, LayerType::Addition, name)
+    : ElementwiseBaseLayer(2, 1, LayerType::Addition, name)
 {
 }
 
index 211a2ef..9cdf09f 100644 (file)
@@ -2,14 +2,15 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
 #pragma once
 
-#include "ArithmeticBaseLayer.hpp"
+#include "ElementwiseBaseLayer.hpp"
 
 namespace armnn
 {
 
-class AdditionLayer : public ArithmeticBaseLayer
+class AdditionLayer : public ElementwiseBaseLayer
 {
 public:
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
index f667dc9..0316499 100644 (file)
@@ -2,6 +2,7 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
 #include "DivisionLayer.hpp"
 
 #include "LayerCloneBase.hpp"
@@ -14,7 +15,7 @@ namespace armnn
 {
 
 DivisionLayer::DivisionLayer(const char* name)
-    : ArithmeticBaseLayer(2, 1, LayerType::Division, name)
+    : ElementwiseBaseLayer(2, 1, LayerType::Division, name)
 {
 }
 
index eaede45..158f8e8 100644 (file)
@@ -2,14 +2,15 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
 #pragma once
 
-#include "ArithmeticBaseLayer.hpp"
+#include "ElementwiseBaseLayer.hpp"
 
 namespace armnn
 {
 
-class DivisionLayer : public ArithmeticBaseLayer
+class DivisionLayer : public ElementwiseBaseLayer
 {
 public:
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
similarity index 82%
rename from src/armnn/layers/ArithmeticBaseLayer.cpp
rename to src/armnn/layers/ElementwiseBaseLayer.cpp
index 3b89c22..7618141 100644 (file)
@@ -2,7 +2,8 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
-#include "ArithmeticBaseLayer.hpp"
+
+#include "ElementwiseBaseLayer.hpp"
 
 #include "InternalTypes.hpp"
 #include "armnn/Exceptions.hpp"
 namespace armnn
 {
 
-ArithmeticBaseLayer::ArithmeticBaseLayer(unsigned int numInputSlots, unsigned int numOutputSlots,
-                                         LayerType type, const char* name)
+ElementwiseBaseLayer::ElementwiseBaseLayer(unsigned int numInputSlots, unsigned int numOutputSlots,
+                                           LayerType type, const char* name)
     : Layer(numInputSlots, numOutputSlots, type, name)
 {
 }
 
-std::vector<TensorShape> ArithmeticBaseLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+std::vector<TensorShape> ElementwiseBaseLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
     BOOST_ASSERT(inputShapes.size() == 2);
     auto& input0 = inputShapes[0];
@@ -47,7 +48,7 @@ std::vector<TensorShape> ArithmeticBaseLayer::InferOutputShapes(const std::vecto
     return std::vector<TensorShape>({ TensorShape(numDims, dims.data()) });
 }
 
-void ArithmeticBaseLayer::ValidateTensorShapesFromInputs()
+void ElementwiseBaseLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(2, CHECK_LOCATION());
 
similarity index 73%
rename from src/armnn/layers/ArithmeticBaseLayer.hpp
rename to src/armnn/layers/ElementwiseBaseLayer.hpp
index a8605a9..f0821ec 100644 (file)
@@ -2,6 +2,7 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
 #pragma once
 
 #include <Layer.hpp>
@@ -12,15 +13,15 @@ namespace armnn
 /// NOTE: this is an abstract class, it does not implement:
 ///  std::unique_ptr<IWorkload> Layer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const = 0;
 ///  Layer* Clone(Graph& graph) const = 0;
-class ArithmeticBaseLayer : public Layer
+class ElementwiseBaseLayer : public Layer
 {
 public:
     void ValidateTensorShapesFromInputs() override;
     std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
 
 protected:
-    ArithmeticBaseLayer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char* name);
-    ~ArithmeticBaseLayer() = default;
+    ElementwiseBaseLayer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char* name);
+    ~ElementwiseBaseLayer() = default;
 };
 
 } // namespace
diff --git a/src/armnn/layers/GreaterLayer.cpp b/src/armnn/layers/GreaterLayer.cpp
new file mode 100644 (file)
index 0000000..d40c17c
--- /dev/null
@@ -0,0 +1,34 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GreaterLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+GreaterLayer::GreaterLayer(const char* name)
+    : ElementwiseBaseLayer(2, 1, LayerType::Greater, name)
+{
+}
+
+std::unique_ptr<IWorkload> GreaterLayer::CreateWorkload(const Graph& graph,
+                                                        const IWorkloadFactory& factory) const
+{
+    GreaterQueueDescriptor descriptor;
+    return factory.CreateGreater(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+GreaterLayer* GreaterLayer::Clone(Graph& graph) const
+{
+    return CloneBase<GreaterLayer>(graph, GetName());
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/GreaterLayer.hpp b/src/armnn/layers/GreaterLayer.hpp
new file mode 100644 (file)
index 0000000..9297a82
--- /dev/null
@@ -0,0 +1,26 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "ElementwiseBaseLayer.hpp"
+
+namespace armnn
+{
+
+class GreaterLayer : public ElementwiseBaseLayer
+{
+public:
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    GreaterLayer* Clone(Graph& graph) const override;
+
+protected:
+    GreaterLayer(const char* name);
+    ~GreaterLayer() = default;
+};
+
+} //namespace armnn
index 67a2342..c0da8d6 100644 (file)
@@ -14,8 +14,9 @@ namespace armnn
 {
 
 MaximumLayer::MaximumLayer(const char* name)
-: ArithmeticBaseLayer(2, 1, LayerType::Maximum, name)
-{}
+    : ElementwiseBaseLayer(2, 1, LayerType::Maximum, name)
+{
+}
 
 std::unique_ptr<IWorkload> MaximumLayer::CreateWorkload(const Graph& graph,
                                                         const IWorkloadFactory& factory) const
index da4c3ed..18a4ed3 100644 (file)
@@ -2,14 +2,15 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
 #pragma once
 
-#include "ArithmeticBaseLayer.hpp"
+#include "ElementwiseBaseLayer.hpp"
 
 namespace armnn
 {
 
-class MaximumLayer : public ArithmeticBaseLayer
+class MaximumLayer : public ElementwiseBaseLayer
 {
 public:
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
index 11f60eb..8e76041 100644 (file)
@@ -2,6 +2,7 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
 #include "MinimumLayer.hpp"
 
 #include "LayerCloneBase.hpp"
@@ -14,7 +15,7 @@ namespace armnn
 {
 
 MinimumLayer::MinimumLayer(const char* name)
-    : ArithmeticBaseLayer(2, 1, LayerType::Minimum, name)
+    : ElementwiseBaseLayer(2, 1, LayerType::Minimum, name)
 {
 }
 
index cd8a168..4338237 100644 (file)
@@ -5,12 +5,12 @@
 
 #pragma once
 
-#include "ArithmeticBaseLayer.hpp"
+#include "ElementwiseBaseLayer.hpp"
 
 namespace armnn
 {
 
-class MinimumLayer : public ArithmeticBaseLayer
+class MinimumLayer : public ElementwiseBaseLayer
 {
 public:
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
index 2abcf86..9448935 100644 (file)
@@ -2,6 +2,7 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
 #include "MultiplicationLayer.hpp"
 
 #include "LayerCloneBase.hpp"
@@ -14,7 +15,7 @@ namespace armnn
 {
 
 MultiplicationLayer::MultiplicationLayer(const char* name)
-    : ArithmeticBaseLayer(2, 1, LayerType::Multiplication, name)
+    : ElementwiseBaseLayer(2, 1, LayerType::Multiplication, name)
 {
 }
 
index 21b68e3..8a7bfde 100644 (file)
@@ -2,14 +2,15 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
 #pragma once
 
-#include "ArithmeticBaseLayer.hpp"
+#include "ElementwiseBaseLayer.hpp"
 
 namespace armnn
 {
 
-class MultiplicationLayer : public ArithmeticBaseLayer
+class MultiplicationLayer : public ElementwiseBaseLayer
 {
 public:
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
index 2b158ac..18d8661 100644 (file)
@@ -2,6 +2,7 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
 #include "SubtractionLayer.hpp"
 
 #include "LayerCloneBase.hpp"
@@ -14,7 +15,7 @@ namespace armnn
 {
 
 SubtractionLayer::SubtractionLayer(const char* name)
-    : ArithmeticBaseLayer(2, 1, LayerType::Subtraction, name)
+    : ElementwiseBaseLayer(2, 1, LayerType::Subtraction, name)
 {
 }
 
index ac02580..d1bccfe 100644 (file)
@@ -2,14 +2,15 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
 #pragma once
 
-#include "ArithmeticBaseLayer.hpp"
+#include "ElementwiseBaseLayer.hpp"
 
 namespace armnn
 {
 
-class SubtractionLayer : public ArithmeticBaseLayer
+class SubtractionLayer : public ElementwiseBaseLayer
 {
 public:
     virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
index 3f2636c..3718df1 100644 (file)
@@ -320,4 +320,12 @@ bool ILayerSupport::IsSubtractionSupported(const TensorInfo& input0,
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool ILayerSupport::IsGreaterSupported(const TensorInfo& input0,
+                                       const TensorInfo& input1,
+                                       const TensorInfo& output,
+                                       Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 } // namespace armnn
index b4bcfb0..88b0d5e 100644 (file)
@@ -357,6 +357,11 @@ struct MinimumQueueDescriptor : QueueDescriptor
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+struct GreaterQueueDescriptor : QueueDescriptor
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 struct DebugQueueDescriptor : QueueDescriptorWithParameters<DebugDescriptor>
 {
     void Validate(const WorkloadInfo& workloadInfo) const;
index 915d667..d7704ff 100644 (file)
@@ -666,6 +666,17 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                                                             reason);
             break;
         }
+        case LayerType::Greater:
+        {
+            const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+            result = layerSupportObject->IsGreaterSupported(OverrideDataType(input0, dataType),
+                                                            OverrideDataType(input1, dataType),
+                                                            OverrideDataType(output, dataType),
+                                                            reason);
+            break;
+        }
         default:
         {
             BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
index eb24b64..57f6196 100644 (file)
@@ -148,6 +148,9 @@ public:
     virtual std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
                                                           const WorkloadInfo& Info) const = 0;
 
+    virtual std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
+                                                     const WorkloadInfo& info) const = 0;
+
     virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const = 0;
 };
index e267988..dc4f05e 100644 (file)
@@ -342,6 +342,8 @@ DECLARE_LAYER_POLICY_1_PARAM(Floor)
 
 DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)
 
+DECLARE_LAYER_POLICY_1_PARAM(Greater)
+
 DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)
 
 DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)
@@ -516,7 +518,7 @@ template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
 bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
 {
     return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
-};
+}
 
 // Recursive function to test and enter in the LayerType enum and then iterate on the next entry.
 template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
@@ -527,14 +529,14 @@ bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
     return v &&
     IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
         (factory, Tag<NextType(Type)>());
-};
+}
 
 // Helper function to pass through to the test framework.
 template<typename FactoryType, armnn::DataType DataType>
 bool IsLayerSupportedTests(FactoryType *factory)
 {
     return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
-};
+}
 
 template<armnn::LayerType Type>
 bool TestLayerTypeMatches()
@@ -549,20 +551,20 @@ bool TestLayerTypeMatches()
     bool v = Type == layer.m_Layer->GetType();
     BOOST_CHECK_MESSAGE(v, ss.str());
     return v;
-};
+}
 
 template<armnn::LayerType Type>
 bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
 {
     return TestLayerTypeMatches<Type>();
-};
+}
 
 template<armnn::LayerType Type>
 bool LayerTypeMatchesTestImpl(Tag<Type>)
 {
     return TestLayerTypeMatches<Type>() &&
         LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
-};
+}
 
 template<typename FactoryType, typename LayerType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
 bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
@@ -584,6 +586,6 @@ bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
     bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
 
     return result;
-};
+}
 
 } //namespace
index dfe6272..ebd957b 100644 (file)
@@ -332,6 +332,12 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMinimum(const MinimumQueueDe
     return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
+                                                            const WorkloadInfo& info) const
+{
+    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
 {
index e2c80de..70052c4 100644 (file)
@@ -138,6 +138,9 @@ public:
     virtual std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const override;
 
+    virtual std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
+                                                     const WorkloadInfo& info) const override;
+
     virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
 
index aed2d56..d7de17a 100644 (file)
@@ -300,6 +300,12 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateMinimum(const MinimumQueue
     return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
+                                                              const WorkloadInfo& info) const
+{
+    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
 {
index e953894..115dfb0 100644 (file)
@@ -139,6 +139,9 @@ public:
     virtual std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const override;
 
+    virtual std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
+                                                     const WorkloadInfo& info) const override;
+
     virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
 
index ac837d3..b34de86 100644 (file)
@@ -294,6 +294,12 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateStridedSlice(const StridedS
     return MakeWorkload<RefStridedSliceFloat32Workload, RefStridedSliceUint8Workload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
+                                                             const WorkloadInfo& info) const
+{
+    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
 {
index 9ee1fe5..03b349d 100644 (file)
@@ -156,6 +156,9 @@ public:
     virtual std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const override;
 
+    virtual std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
+                                                     const WorkloadInfo& info) const override;
+
     virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info) const override;
 private:
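
For completeness, a sketch of querying the new support check from user code via the deprecated free function declared in LayerSupport.hpp above. The backend id, shapes and data type are assumptions; with only the DefaultLayerSupport stub added in ILayerSupport.cpp and the no-op factories above, backends are expected to report Greater as unsupported and fill in the reason buffer.

    // Illustrative sketch only: backend id, shapes and data type are assumptions.
    #include <armnn/LayerSupport.hpp>
    #include <armnn/BackendId.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    #include <iostream>

    int main()
    {
        using namespace armnn;

        TensorInfo input0({ 2, 2 }, DataType::Float32);
        TensorInfo input1({ 2, 2 }, DataType::Float32);
        TensorInfo output({ 2, 2 }, DataType::Float32);

        char reason[1024] = {};
        bool supported = IsGreaterSupported(BackendId("CpuRef"),
                                            input0, input1, output,
                                            reason, sizeof(reason));

        if (!supported)
        {
            // Expected result while only the stub implementations from this change exist.
            std::cout << "Greater not supported on CpuRef: " << reason << std::endl;
        }

        return 0;
    }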