src/armnnUtils/VerificationHelpers.cpp \
src/armnn/layers/ActivationLayer.cpp \
src/armnn/layers/AdditionLayer.cpp \
- src/armnn/layers/ArithmeticBaseLayer.cpp \
src/armnn/layers/BatchNormalizationLayer.cpp \
src/armnn/layers/BatchToSpaceNdLayer.cpp \
src/armnn/layers/ConstantLayer.cpp \
src/armnn/layers/ConvertFp32ToFp16Layer.cpp \
src/armnn/layers/DebugLayer.cpp \
src/armnn/layers/DepthwiseConvolution2dLayer.cpp \
+ src/armnn/layers/ElementwiseBaseLayer.cpp \
src/armnn/layers/FakeQuantizationLayer.cpp \
src/armnn/layers/FloorLayer.cpp \
src/armnn/layers/FullyConnectedLayer.cpp \
+ src/armnn/layers/GreaterLayer.cpp \
src/armnn/layers/InputLayer.cpp \
src/armnn/layers/L2NormalizationLayer.cpp \
src/armnn/layers/LstmLayer.cpp \
src/armnn/layers/ActivationLayer.cpp
src/armnn/layers/AdditionLayer.hpp
src/armnn/layers/AdditionLayer.cpp
- src/armnn/layers/ArithmeticBaseLayer.hpp
- src/armnn/layers/ArithmeticBaseLayer.cpp
src/armnn/layers/BatchNormalizationLayer.hpp
src/armnn/layers/BatchNormalizationLayer.cpp
src/armnn/layers/BatchToSpaceNdLayer.hpp
src/armnn/layers/DebugLayer.cpp
src/armnn/layers/DepthwiseConvolution2dLayer.hpp
src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+ src/armnn/layers/ElementwiseBaseLayer.hpp
+ src/armnn/layers/ElementwiseBaseLayer.cpp
src/armnn/layers/FakeQuantizationLayer.hpp
src/armnn/layers/FakeQuantizationLayer.cpp
src/armnn/layers/FloorLayer.hpp
src/armnn/layers/FloorLayer.cpp
src/armnn/layers/FullyConnectedLayer.hpp
src/armnn/layers/FullyConnectedLayer.cpp
+    src/armnn/layers/GreaterLayer.hpp
+    src/armnn/layers/GreaterLayer.cpp
src/armnn/layers/InputLayer.hpp
src/armnn/layers/InputLayer.cpp
src/armnn/layers/L2NormalizationLayer.hpp
const TensorInfo& input1,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
+
+    virtual bool IsGreaterSupported(const TensorInfo& input0,
+                                    const TensorInfo& input1,
+                                    const TensorInfo& output,
+                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const;
}; // class ILayerSupport
using ILayerSupportSharedPtr = std::shared_ptr<ILayerSupport>;
/// @ return - Interface for configuring the layer.
virtual IConnectableLayer* AddMinimumLayer(const char* name = nullptr) = 0;
+ /// Add a Greater layer to the network.
+ /// @param name - Optional name for the layer.
+ /// @ return - Interface for configuring the layer.
+ virtual IConnectableLayer* AddGreaterLayer(const char* name = nullptr) = 0;
+
protected:
~INetwork() {}
};
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
+/// Deprecated in favor of IBackend and ILayerSupport interfaces
+bool IsGreaterSupported(const BackendId& backend,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ char* reasonIfUnsupported = nullptr,
+ size_t reasonIfUnsupportedMaxLength = 1024);
+
}
case LayerType::FakeQuantization: return "FakeQuantization";
case LayerType::Floor: return "Floor";
case LayerType::FullyConnected: return "FullyConnected";
+ case LayerType::Greater: return "Greater";
case LayerType::Input: return "Input";
case LayerType::L2Normalization: return "L2Normalization";
case LayerType::Lstm: return "Lstm";
case LayerType::Multiplication: return "Multiplication";
case LayerType::Normalization: return "Normalization";
case LayerType::Output: return "Output";
+ case LayerType::Pad: return "Pad";
case LayerType::Permute: return "Permute";
case LayerType::Pooling2d: return "Pooling2d";
case LayerType::Reshape: return "Reshape";
case LayerType::Splitter: return "Splitter";
case LayerType::StridedSlice: return "StridedSlice";
case LayerType::Subtraction: return "Subtraction";
- case LayerType::Pad: return "Pad";
default:
BOOST_ASSERT_MSG(false, "Unknown layer type");
return "Unknown";
FakeQuantization,
Floor,
FullyConnected,
+ Greater,
Input,
L2Normalization,
Lstm,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsMinimumSupported, input0, input1, output);
}
+bool IsGreaterSupported(const BackendId& backend,
+ const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsGreaterSupported, input0, input1, output);
+}
+
}
#include "layers/FakeQuantizationLayer.hpp"
#include "layers/FloorLayer.hpp"
#include "layers/FullyConnectedLayer.hpp"
+#include "layers/GreaterLayer.hpp"
#include "layers/InputLayer.hpp"
#include "layers/L2NormalizationLayer.hpp"
#include "layers/LstmLayer.hpp"
DECLARE_LAYER(FakeQuantization)
DECLARE_LAYER(Floor)
DECLARE_LAYER(FullyConnected)
+DECLARE_LAYER(Greater)
DECLARE_LAYER(Input)
DECLARE_LAYER(L2Normalization)
DECLARE_LAYER(Lstm)
return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
}
+IConnectableLayer* Network::AddGreaterLayer(const char* name)
+{
+ return m_Graph->AddLayer<GreaterLayer>(name);
+}
+
OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
: m_Graph(std::move(graph))
{
IConnectableLayer* AddMinimumLayer(const char* name = nullptr) override;
+ IConnectableLayer* AddGreaterLayer(const char* name = nullptr) override;
+
private:
IConnectableLayer* AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
const ConstTensor& weights,
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include "AdditionLayer.hpp"
#include "LayerCloneBase.hpp"
{
AdditionLayer::AdditionLayer(const char* name)
- : ArithmeticBaseLayer(2, 1, LayerType::Addition, name)
+ : ElementwiseBaseLayer(2, 1, LayerType::Addition, name)
{
}
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#pragma once
-#include "ArithmeticBaseLayer.hpp"
+#include "ElementwiseBaseLayer.hpp"
namespace armnn
{
-class AdditionLayer : public ArithmeticBaseLayer
+class AdditionLayer : public ElementwiseBaseLayer
{
public:
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include "DivisionLayer.hpp"
#include "LayerCloneBase.hpp"
{
DivisionLayer::DivisionLayer(const char* name)
- : ArithmeticBaseLayer(2, 1, LayerType::Division, name)
+ : ElementwiseBaseLayer(2, 1, LayerType::Division, name)
{
}
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#pragma once
-#include "ArithmeticBaseLayer.hpp"
+#include "ElementwiseBaseLayer.hpp"
namespace armnn
{
-class DivisionLayer : public ArithmeticBaseLayer
+class DivisionLayer : public ElementwiseBaseLayer
{
public:
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include "ArithmeticBaseLayer.hpp"
+
+#include "ElementwiseBaseLayer.hpp"
#include "InternalTypes.hpp"
#include "armnn/Exceptions.hpp"
namespace armnn
{
-ArithmeticBaseLayer::ArithmeticBaseLayer(unsigned int numInputSlots, unsigned int numOutputSlots,
- LayerType type, const char* name)
+ElementwiseBaseLayer::ElementwiseBaseLayer(unsigned int numInputSlots, unsigned int numOutputSlots,
+ LayerType type, const char* name)
: Layer(numInputSlots, numOutputSlots, type, name)
{
}
-std::vector<TensorShape> ArithmeticBaseLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+std::vector<TensorShape> ElementwiseBaseLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
BOOST_ASSERT(inputShapes.size() == 2);
auto& input0 = inputShapes[0];
return std::vector<TensorShape>({ TensorShape(numDims, dims.data()) });
}
-void ArithmeticBaseLayer::ValidateTensorShapesFromInputs()
+void ElementwiseBaseLayer::ValidateTensorShapesFromInputs()
{
VerifyLayerConnections(2, CHECK_LOCATION());
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#pragma once
#include <Layer.hpp>
/// NOTE: this is an abstract class, it does not implement:
/// std::unique_ptr<IWorkload> Layer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const = 0;
/// Layer* Clone(Graph& graph) const = 0;
-class ArithmeticBaseLayer : public Layer
+class ElementwiseBaseLayer : public Layer
{
public:
void ValidateTensorShapesFromInputs() override;
std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
protected:
- ArithmeticBaseLayer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char* name);
- ~ArithmeticBaseLayer() = default;
+ ElementwiseBaseLayer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char* name);
+ ~ElementwiseBaseLayer() = default;
};
} // namespace
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GreaterLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+GreaterLayer::GreaterLayer(const char* name)
+ : ElementwiseBaseLayer(2, 1, LayerType::Greater, name)
+{
+}
+
+std::unique_ptr<IWorkload> GreaterLayer::CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const
+{
+ GreaterQueueDescriptor descriptor;
+ return factory.CreateGreater(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+GreaterLayer* GreaterLayer::Clone(Graph& graph) const
+{
+ return CloneBase<GreaterLayer>(graph, GetName());
+}
+
+} // namespace armnn
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "ElementwiseBaseLayer.hpp"
+
+namespace armnn
+{
+
+class GreaterLayer : public ElementwiseBaseLayer
+{
+public:
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const override;
+
+ GreaterLayer* Clone(Graph& graph) const override;
+
+protected:
+ GreaterLayer(const char* name);
+ ~GreaterLayer() = default;
+};
+
+} //namespace armnn
{
MaximumLayer::MaximumLayer(const char* name)
-: ArithmeticBaseLayer(2, 1, LayerType::Maximum, name)
-{}
+ : ElementwiseBaseLayer(2, 1, LayerType::Maximum, name)
+{
+}
std::unique_ptr<IWorkload> MaximumLayer::CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#pragma once
-#include "ArithmeticBaseLayer.hpp"
+#include "ElementwiseBaseLayer.hpp"
namespace armnn
{
-class MaximumLayer : public ArithmeticBaseLayer
+class MaximumLayer : public ElementwiseBaseLayer
{
public:
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include "MinimumLayer.hpp"
#include "LayerCloneBase.hpp"
{
MinimumLayer::MinimumLayer(const char* name)
- : ArithmeticBaseLayer(2, 1, LayerType::Minimum, name)
+ : ElementwiseBaseLayer(2, 1, LayerType::Minimum, name)
{
}
#pragma once
-#include "ArithmeticBaseLayer.hpp"
+#include "ElementwiseBaseLayer.hpp"
namespace armnn
{
-class MinimumLayer : public ArithmeticBaseLayer
+class MinimumLayer : public ElementwiseBaseLayer
{
public:
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include "MultiplicationLayer.hpp"
#include "LayerCloneBase.hpp"
{
MultiplicationLayer::MultiplicationLayer(const char* name)
- : ArithmeticBaseLayer(2, 1, LayerType::Multiplication, name)
+ : ElementwiseBaseLayer(2, 1, LayerType::Multiplication, name)
{
}
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#pragma once
-#include "ArithmeticBaseLayer.hpp"
+#include "ElementwiseBaseLayer.hpp"
namespace armnn
{
-class MultiplicationLayer : public ArithmeticBaseLayer
+class MultiplicationLayer : public ElementwiseBaseLayer
{
public:
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#include "SubtractionLayer.hpp"
#include "LayerCloneBase.hpp"
{
SubtractionLayer::SubtractionLayer(const char* name)
- : ArithmeticBaseLayer(2, 1, LayerType::Subtraction, name)
+ : ElementwiseBaseLayer(2, 1, LayerType::Subtraction, name)
{
}
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
#pragma once
-#include "ArithmeticBaseLayer.hpp"
+#include "ElementwiseBaseLayer.hpp"
namespace armnn
{
-class SubtractionLayer : public ArithmeticBaseLayer
+class SubtractionLayer : public ElementwiseBaseLayer
{
public:
virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
+bool ILayerSupport::IsGreaterSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
} // namespace armnn
void Validate(const WorkloadInfo& workloadInfo) const;
};
+struct GreaterQueueDescriptor : QueueDescriptor
+{
+ void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
struct DebugQueueDescriptor : QueueDescriptorWithParameters<DebugDescriptor>
{
void Validate(const WorkloadInfo& workloadInfo) const;
reason);
break;
}
+ case LayerType::Greater:
+ {
+ const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+ result = layerSupportObject->IsGreaterSupported(OverrideDataType(input0, dataType),
+ OverrideDataType(input1, dataType),
+ OverrideDataType(output, dataType),
+ reason);
+ break;
+ }
default:
{
BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
virtual std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
const WorkloadInfo& Info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const = 0;
+
virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const = 0;
};
DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)
+DECLARE_LAYER_POLICY_1_PARAM(Greater)
+
DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)
DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)
bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
{
return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
-};
+}
// Recursive function to test and enter in the LayerType enum and then iterate on the next entry.
template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
return v &&
IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
(factory, Tag<NextType(Type)>());
-};
+}
// Helper function to pass through to the test framework.
template<typename FactoryType, armnn::DataType DataType>
bool IsLayerSupportedTests(FactoryType *factory)
{
return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
-};
+}
template<armnn::LayerType Type>
bool TestLayerTypeMatches()
bool v = Type == layer.m_Layer->GetType();
BOOST_CHECK_MESSAGE(v, ss.str());
return v;
-};
+}
template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
{
return TestLayerTypeMatches<Type>();
-};
+}
template<armnn::LayerType Type>
bool LayerTypeMatchesTestImpl(Tag<Type>)
{
return TestLayerTypeMatches<Type>() &&
LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
-};
+}
template<typename FactoryType, typename LayerType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
return result;
-};
+}
} //namespace
return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
}
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
virtual std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ virtual std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
}
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+}
+
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
virtual std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ virtual std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
return MakeWorkload<RefStridedSliceFloat32Workload, RefStridedSliceUint8Workload>(descriptor, info);
}
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
virtual std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ virtual std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
private: