IVGCVSW-1966: Ref implementation for the ILayerSupport interface
author: arovir01 <Aron.Virginas-Tar@arm.com>
Mon, 8 Oct 2018 10:34:28 +0000 (11:34 +0100)
committer: Matthew Bentham <matthew.bentham@arm.com>
Wed, 10 Oct 2018 15:16:58 +0000 (16:16 +0100)
Change-Id: Idd572cae3a131acb11e884e33c0035ca74c95055

src/backends/reference/RefLayerSupport.cpp
src/backends/reference/RefLayerSupport.hpp

index a42efb7..e6b1442 100644 (file)
@@ -17,6 +17,317 @@ using namespace boost;
 namespace armnn
 {
 
+namespace
+{
+
+// Adapts the ILayerSupport interface's Optional<std::string&> out-parameter to
+// the raw std::string* expected by the legacy Is*SupportedRef free functions.
+// Returns nullptr when the caller did not request a failure reason.
+std::string* GetReasonIfUnsupportedPtr(const Optional<std::string&>& reasonIfUnsupported)
+{
+    return reasonIfUnsupported ? &reasonIfUnsupported.value() : nullptr;
+}
+
+} // anonymous namespace
+
+// Delegates to the legacy IsActivationSupportedRef free function.
+bool RefLayerSupport::IsActivationSupported(const TensorInfo& input,
+                                            const TensorInfo& output,
+                                            const ActivationDescriptor& descriptor,
+                                            Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsActivationSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsAdditionSupportedRef free function.
+bool RefLayerSupport::IsAdditionSupported(const TensorInfo& input0,
+                                          const TensorInfo& input1,
+                                          const TensorInfo& output,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsAdditionSupportedRef(input0,
+                                         input1,
+                                         output,
+                                         GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsBatchNormalizationSupportedRef free function.
+bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
+                                                    const TensorInfo& output,
+                                                    const TensorInfo& mean,
+                                                    const TensorInfo& var,
+                                                    const TensorInfo& beta,
+                                                    const TensorInfo& gamma,
+                                                    const BatchNormalizationDescriptor& descriptor,
+                                                    Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsBatchNormalizationSupportedRef(input,
+                                                   output,
+                                                   mean,
+                                                   var,
+                                                   beta,
+                                                   gamma,
+                                                   descriptor,
+                                                   GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsConstantSupportedRef free function.
+bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsConstantSupportedRef(output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsConvertFp16ToFp32SupportedRef free function.
+bool RefLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
+                                                   const TensorInfo& output,
+                                                   Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsConvertFp16ToFp32SupportedRef(input, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsConvertFp32ToFp16SupportedRef free function.
+bool RefLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
+                                                   const TensorInfo& output,
+                                                   Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsConvertFp32ToFp16SupportedRef(input, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsConvolution2dSupportedRef free function.
+// biases is an empty Optional when the convolution has no bias tensor.
+bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
+                                               const TensorInfo& output,
+                                               const Convolution2dDescriptor& descriptor,
+                                               const TensorInfo& weights,
+                                               const Optional<TensorInfo>& biases,
+                                               Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsConvolution2dSupportedRef(input,
+                                              output,
+                                              descriptor,
+                                              weights,
+                                              biases,
+                                              GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsDepthwiseConvolutionSupportedRef free function.
+// biases is an empty Optional when the convolution has no bias tensor.
+bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
+                                                      const TensorInfo& output,
+                                                      const DepthwiseConvolution2dDescriptor& descriptor,
+                                                      const TensorInfo& weights,
+                                                      const Optional<TensorInfo>& biases,
+                                                      Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsDepthwiseConvolutionSupportedRef(input,
+                                                     output,
+                                                     descriptor,
+                                                     weights,
+                                                     biases,
+                                                     GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsDivisionSupportedRef free function.
+bool RefLayerSupport::IsDivisionSupported(const TensorInfo& input0,
+                                          const TensorInfo& input1,
+                                          const TensorInfo& output,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsDivisionSupportedRef(input0, input1, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsFakeQuantizationSupportedRef free function.
+bool RefLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
+                                                  const FakeQuantizationDescriptor& descriptor,
+                                                  Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsFakeQuantizationSupportedRef(input, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsFloorSupportedRef free function.
+bool RefLayerSupport::IsFloorSupported(const TensorInfo& input,
+                                       const TensorInfo& output,
+                                       Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsFloorSupportedRef(input, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsFullyConnectedSupportedRef free function.
+bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
+                                                const TensorInfo& output,
+                                                const TensorInfo& weights,
+                                                const TensorInfo& biases,
+                                                const FullyConnectedDescriptor& descriptor,
+                                                Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsFullyConnectedSupportedRef(input,
+                                               output,
+                                               weights,
+                                               biases,
+                                               descriptor,
+                                               GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsInputSupportedRef free function.
+bool RefLayerSupport::IsInputSupported(const TensorInfo& input,
+                                       Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsInputSupportedRef(input, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsL2NormalizationSupportedRef free function.
+bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
+                                                 const TensorInfo& output,
+                                                 const L2NormalizationDescriptor& descriptor,
+                                                 Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsL2NormalizationSupportedRef(input,
+                                                output,
+                                                descriptor,
+                                                GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsLstmSupportedRef free function. The raw-pointer
+// arguments are the optional LSTM tensors (CIFG, peephole and projection
+// weights/biases) — presumably nullptr when the network omits them; TODO
+// confirm against the ILayerSupport contract.
+bool RefLayerSupport::IsLstmSupported(const TensorInfo& input,
+                                      const TensorInfo& outputStateIn,
+                                      const TensorInfo& cellStateIn,
+                                      const TensorInfo& scratchBuffer,
+                                      const TensorInfo& outputStateOut,
+                                      const TensorInfo& cellStateOut,
+                                      const TensorInfo& output,
+                                      const LstmDescriptor& descriptor,
+                                      const TensorInfo& inputToForgetWeights,
+                                      const TensorInfo& inputToCellWeights,
+                                      const TensorInfo& inputToOutputWeights,
+                                      const TensorInfo& recurrentToForgetWeights,
+                                      const TensorInfo& recurrentToCellWeights,
+                                      const TensorInfo& recurrentToOutputWeights,
+                                      const TensorInfo& forgetGateBias,
+                                      const TensorInfo& cellBias,
+                                      const TensorInfo& outputGateBias,
+                                      const TensorInfo* inputToInputWeights,
+                                      const TensorInfo* recurrentToInputWeights,
+                                      const TensorInfo* cellToInputWeights,
+                                      const TensorInfo* inputGateBias,
+                                      const TensorInfo* projectionWeights,
+                                      const TensorInfo* projectionBias,
+                                      const TensorInfo* cellToForgetWeights,
+                                      const TensorInfo* cellToOutputWeights,
+                                      Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsLstmSupportedRef(input,
+                                     outputStateIn,
+                                     cellStateIn,
+                                     scratchBuffer,
+                                     outputStateOut,
+                                     cellStateOut,
+                                     output,
+                                     descriptor,
+                                     inputToForgetWeights,
+                                     inputToCellWeights,
+                                     inputToOutputWeights,
+                                     recurrentToForgetWeights,
+                                     recurrentToCellWeights,
+                                     recurrentToOutputWeights,
+                                     forgetGateBias,
+                                     cellBias,
+                                     outputGateBias,
+                                     inputToInputWeights,
+                                     recurrentToInputWeights,
+                                     cellToInputWeights,
+                                     inputGateBias,
+                                     projectionWeights,
+                                     projectionBias,
+                                     cellToForgetWeights,
+                                     cellToOutputWeights,
+                                     GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsMeanSupportedRef free function.
+bool RefLayerSupport::IsMeanSupported(const TensorInfo& input,
+                                      const TensorInfo& output,
+                                      const MeanDescriptor& descriptor,
+                                      Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsMeanSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsMergerSupportedRef free function.
+// Note: inputs is taken by value (vector copy), mirroring the interface signature.
+bool RefLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
+                                        const OriginsDescriptor& descriptor,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsMergerSupportedRef(inputs, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsMultiplicationSupportedRef free function.
+bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
+                                                const TensorInfo& input1,
+                                                const TensorInfo& output,
+                                                Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsMultiplicationSupportedRef(input0, input1, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsNormalizationSupportedRef free function.
+bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input,
+                                               const TensorInfo& output,
+                                               const NormalizationDescriptor& descriptor,
+                                               Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsNormalizationSupportedRef(input,
+                                              output,
+                                              descriptor,
+                                              GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsOutputSupportedRef free function.
+bool RefLayerSupport::IsOutputSupported(const TensorInfo& output,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsOutputSupportedRef(output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsPadSupportedRef free function.
+bool RefLayerSupport::IsPadSupported(const TensorInfo& input,
+                                     const TensorInfo& output,
+                                     const PadDescriptor& descriptor,
+                                     Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsPadSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsPermuteSupportedRef free function.
+bool RefLayerSupport::IsPermuteSupported(const TensorInfo& input,
+                                         const TensorInfo& output,
+                                         const PermuteDescriptor& descriptor,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsPermuteSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsPooling2dSupportedRef free function.
+bool RefLayerSupport::IsPooling2dSupported(const TensorInfo& input,
+                                           const TensorInfo& output,
+                                           const Pooling2dDescriptor& descriptor,
+                                           Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsPooling2dSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsReshapeSupportedRef free function.
+bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsReshapeSupportedRef(input, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsResizeBilinearSupportedRef free function.
+bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input,
+                                                Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsResizeBilinearSupportedRef(input, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsSoftmaxSupportedRef free function.
+bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
+                                         const TensorInfo& output,
+                                         const SoftmaxDescriptor& descriptor,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsSoftmaxSupportedRef(input, output, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsSplitterSupportedRef free function.
+bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
+                                          const ViewsDescriptor& descriptor,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsSplitterSupportedRef(input, descriptor, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+// Delegates to the legacy IsSubtractionSupportedRef free function.
+bool RefLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
+                                             const TensorInfo& input1,
+                                             const TensorInfo& output,
+                                             Optional<std::string&> reasonIfUnsupported) const
+{
+    return armnn::IsSubtractionSupportedRef(input0, input1, output, GetReasonIfUnsupportedPtr(reasonIfUnsupported));
+}
+
+//
+// Implementation functions
+//
+// TODO: Functions kept for backward compatibility. Remove once transition to plugable backends is complete!
+
 template<typename Float32Func, typename Uint8Func, typename ... Params>
 bool IsSupportedForDataTypeRef(std::string* reasonIfUnsupported,
                                DataType dataType,
@@ -412,4 +723,4 @@ bool IsPadSupportedRef(const TensorInfo& input,
     return false;
 }
 
-}
+} // namespace armnn
index dcc5dd3..25501fe 100644 (file)
@@ -14,7 +14,162 @@ namespace armnn
 
 class RefLayerSupport : public ILayerSupport
 {
-    // TODO implement
+public:
+    // ILayerSupport implementation for the reference backend. Each override
+    // forwards to the matching Is*SupportedRef free function in
+    // RefLayerSupport.cpp; reasonIfUnsupported, when provided, receives a
+    // human-readable explanation on failure.
+    bool IsActivationSupported(const TensorInfo& input,
+                               const TensorInfo& output,
+                               const ActivationDescriptor& descriptor,
+                               Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsAdditionSupported(const TensorInfo& input0,
+                             const TensorInfo& input1,
+                             const TensorInfo& output,
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsBatchNormalizationSupported(const TensorInfo& input,
+                                       const TensorInfo& output,
+                                       const TensorInfo& mean,
+                                       const TensorInfo& var,
+                                       const TensorInfo& beta,
+                                       const TensorInfo& gamma,
+                                       const BatchNormalizationDescriptor& descriptor,
+                                       Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsConstantSupported(const TensorInfo& output,
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsConvertFp16ToFp32Supported(const TensorInfo& input,
+                                      const TensorInfo& output,
+                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsConvertFp32ToFp16Supported(const TensorInfo& input,
+                                      const TensorInfo& output,
+                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    // biases is an empty Optional when the layer has no bias tensor.
+    bool IsConvolution2dSupported(const TensorInfo& input,
+                                  const TensorInfo& output,
+                                  const Convolution2dDescriptor& descriptor,
+                                  const TensorInfo& weights,
+                                  const Optional<TensorInfo>& biases,
+                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsDepthwiseConvolutionSupported(const TensorInfo& input,
+                                         const TensorInfo& output,
+                                         const DepthwiseConvolution2dDescriptor& descriptor,
+                                         const TensorInfo& weights,
+                                         const Optional<TensorInfo>& biases,
+                                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsDivisionSupported(const TensorInfo& input0,
+                             const TensorInfo& input1,
+                             const TensorInfo& output,
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsFakeQuantizationSupported(const TensorInfo& input,
+                                     const FakeQuantizationDescriptor& descriptor,
+                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsFloorSupported(const TensorInfo& input,
+                          const TensorInfo& output,
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsFullyConnectedSupported(const TensorInfo& input,
+                                   const TensorInfo& output,
+                                   const TensorInfo& weights,
+                                   const TensorInfo& biases,
+                                   const FullyConnectedDescriptor& descriptor,
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsInputSupported(const TensorInfo& input,
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsL2NormalizationSupported(const TensorInfo& input,
+                                    const TensorInfo& output,
+                                    const L2NormalizationDescriptor& descriptor,
+                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    // Raw-pointer arguments are the optional LSTM tensors — presumably nullptr
+    // when the network omits them; TODO confirm against the interface contract.
+    bool IsLstmSupported(const TensorInfo& input,
+                         const TensorInfo& outputStateIn,
+                         const TensorInfo& cellStateIn,
+                         const TensorInfo& scratchBuffer,
+                         const TensorInfo& outputStateOut,
+                         const TensorInfo& cellStateOut,
+                         const TensorInfo& output,
+                         const LstmDescriptor& descriptor,
+                         const TensorInfo& inputToForgetWeights,
+                         const TensorInfo& inputToCellWeights,
+                         const TensorInfo& inputToOutputWeights,
+                         const TensorInfo& recurrentToForgetWeights,
+                         const TensorInfo& recurrentToCellWeights,
+                         const TensorInfo& recurrentToOutputWeights,
+                         const TensorInfo& forgetGateBias,
+                         const TensorInfo& cellBias,
+                         const TensorInfo& outputGateBias,
+                         const TensorInfo* inputToInputWeights,
+                         const TensorInfo* recurrentToInputWeights,
+                         const TensorInfo* cellToInputWeights,
+                         const TensorInfo* inputGateBias,
+                         const TensorInfo* projectionWeights,
+                         const TensorInfo* projectionBias,
+                         const TensorInfo* cellToForgetWeights,
+                         const TensorInfo* cellToOutputWeights,
+                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsMeanSupported(const TensorInfo& input,
+                         const TensorInfo& output,
+                         const MeanDescriptor& descriptor,
+                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsMergerSupported(const std::vector<const TensorInfo*> inputs,
+                           const OriginsDescriptor& descriptor,
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsMultiplicationSupported(const TensorInfo& input0,
+                                   const TensorInfo& input1,
+                                   const TensorInfo& output,
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsNormalizationSupported(const TensorInfo& input,
+                                  const TensorInfo& output,
+                                  const NormalizationDescriptor& descriptor,
+                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsOutputSupported(const TensorInfo& output,
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsPadSupported(const TensorInfo& input,
+                        const TensorInfo& output,
+                        const PadDescriptor& descriptor,
+                        Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsPermuteSupported(const TensorInfo& input,
+                            const TensorInfo& output,
+                            const PermuteDescriptor& descriptor,
+                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsPooling2dSupported(const TensorInfo& input,
+                              const TensorInfo& output,
+                              const Pooling2dDescriptor& descriptor,
+                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsReshapeSupported(const TensorInfo& input,
+                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsResizeBilinearSupported(const TensorInfo& input,
+                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsSoftmaxSupported(const TensorInfo& input,
+                            const TensorInfo& output,
+                            const SoftmaxDescriptor& descriptor,
+                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsSplitterSupported(const TensorInfo& input,
+                             const ViewsDescriptor& descriptor,
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+    bool IsSubtractionSupported(const TensorInfo& input0,
+                                const TensorInfo& input1,
+                                const TensorInfo& output,
+                                Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 };
 
 bool IsActivationSupportedRef(const TensorInfo& input,