IVGCVSW-4262 Use ACL Permute and Reshape Validate function in Neon and CL
author Kevin May <kevin.may@arm.com>
Thu, 12 Dec 2019 17:28:05 +0000 (17:28 +0000)
committer TeresaARM <teresa.charlinreyes@arm.com>
Fri, 24 Jan 2020 16:47:17 +0000 (16:47 +0000)
!android-nn-driver:2642

Signed-off-by: Kevin May <kevin.may@arm.com>
Change-Id: Ibabb73c0ae0df2e530a68398f75c76e6b80c0701

16 files changed:
include/armnn/ILayerSupport.hpp
src/armnn/LayerSupport.cpp
src/backends/backendsCommon/LayerSupportBase.cpp
src/backends/backendsCommon/LayerSupportBase.hpp
src/backends/backendsCommon/WorkloadFactory.cpp
src/backends/cl/ClLayerSupport.cpp
src/backends/cl/ClLayerSupport.hpp
src/backends/cl/workloads/ClPermuteWorkload.cpp
src/backends/cl/workloads/ClReshapeWorkload.cpp
src/backends/cl/workloads/ClReshapeWorkload.hpp
src/backends/neon/NeonLayerSupport.cpp
src/backends/neon/NeonLayerSupport.hpp
src/backends/neon/workloads/NeonReshapeWorkload.cpp
src/backends/neon/workloads/NeonReshapeWorkload.hpp
src/backends/reference/RefLayerSupport.cpp
src/backends/reference/RefLayerSupport.hpp

index 1615d3e..d1bbf99 100644 (file)
@@ -285,6 +285,7 @@ public:
                                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
     virtual bool IsReshapeSupported(const TensorInfo& input,
+                                    const TensorInfo& output,
                                     const ReshapeDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
index 08d91fc..3c244b0 100644 (file)
@@ -530,11 +530,12 @@ bool IsPreluSupported(const BackendId& backend,
 
 bool IsReshapeSupported(const BackendId& backend,
                         const TensorInfo& input,
+                        const TensorInfo& output,
                         const ReshapeDescriptor& descriptor,
                         char* reasonIfUnsupported,
                         size_t reasonIfUnsupportedMaxLength)
 {
-    FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input, descriptor);
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsReshapeSupported, input, output, descriptor);
 }
 
 bool IsResizeSupported(const BackendId& backend,
index b19356f..449b809 100644 (file)
@@ -444,6 +444,7 @@ bool LayerSupportBase::IsQuantizedLstmSupported(const TensorInfo& /*input*/,
 }
 
 bool LayerSupportBase::IsReshapeSupported(const TensorInfo& /*input*/,
+                                          const TensorInfo& /*output*/,
                                           const ReshapeDescriptor& /*descriptor*/,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
index 7a65eb5..459ac03 100644 (file)
@@ -271,6 +271,7 @@ public:
                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsReshapeSupported(const TensorInfo& input,
+                            const TensorInfo& output,
                             const ReshapeDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
index acb73b5..5671761 100644 (file)
@@ -795,7 +795,9 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
         {
             auto cLayer = boost::polymorphic_downcast<const ReshapeLayer*>(&layer);
             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
             result = layerSupportObject->IsReshapeSupported(OverrideDataType(input, dataType),
+                                                            OverrideDataType(output, dataType),
                                                             cLayer->GetParameters(),
                                                             reason);
             break;
index f8cc507..ffe68a3 100644 (file)
@@ -46,6 +46,7 @@
 #include "workloads/ClPermuteWorkload.hpp"
 #include "workloads/ClPooling2dWorkload.hpp"
 #include "workloads/ClPreluWorkload.hpp"
+#include "workloads/ClReshapeWorkload.hpp"
 #include "workloads/ClResizeWorkload.hpp"
 #include "workloads/ClRsqrtWorkload.hpp"
 #include "workloads/ClQuantizedLstmWorkload.hpp"
@@ -670,13 +671,12 @@ bool ClLayerSupport::IsQuantizeSupported(const TensorInfo& input,
 }
 
 bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
+                                        const TensorInfo& output,
                                         const ReshapeDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(input);
     ignore_unused(descriptor);
-    ignore_unused(reasonIfUnsupported);
-    return true;
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
 }
 
 bool ClLayerSupport::IsResizeSupported(const TensorInfo& input,
index 9371717..819d086 100644 (file)
@@ -216,6 +216,7 @@ public:
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsReshapeSupported(const TensorInfo& input,
+                            const TensorInfo& output,
                             const ReshapeDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
index dd495c8..41bce1d 100644 (file)
@@ -23,7 +23,7 @@ arm_compute::Status ClPermuteWorkloadValidate(const TensorInfo& input,
     const armnn::PermutationVector& mappings = descriptor.m_DimMappings;
 
     return arm_compute::CLPermute::validate(&aclInputInfo, &aclOutputInfo,
-                                      armcomputetensorutils::BuildArmComputePermutationVector(mappings));
+                                            armcomputetensorutils::BuildArmComputePermutationVector(mappings));
 }
 
 ClPermuteWorkload::ClPermuteWorkload(const PermuteQueueDescriptor& descriptor,
index db1702a..d752290 100644 (file)
 namespace armnn
 {
 
+arm_compute::Status ClReshapeWorkloadValidate(const TensorInfo& input,
+                                              const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    return arm_compute::CLReshapeLayer::validate(&aclInputInfo, &aclOutputInfo);
+}
+
 ClReshapeWorkload::ClReshapeWorkload(const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info)
     : BaseWorkload<ReshapeQueueDescriptor>(descriptor, info)
 {
index a7b464e..62f5fcc 100644 (file)
@@ -12,6 +12,9 @@
 namespace armnn
 {
 
+arm_compute::Status ClReshapeWorkloadValidate(const TensorInfo& input,
+                                              const TensorInfo& output);
+
 class ClReshapeWorkload : public BaseWorkload<ReshapeQueueDescriptor>
 {
 public:
index 6ca69f4..b8725be 100644 (file)
@@ -47,6 +47,7 @@
 #include "workloads/NeonPreluWorkload.hpp"
 #include "workloads/NeonQuantizeWorkload.hpp"
 #include "workloads/NeonQuantizedLstmWorkload.hpp"
+#include "workloads/NeonReshapeWorkload.hpp"
 #include "workloads/NeonResizeWorkload.hpp"
 #include "workloads/NeonRsqrtWorkload.hpp"
 #include "workloads/NeonSliceWorkload.hpp"
@@ -650,14 +651,15 @@ bool NeonLayerSupport::IsQuantizedLstmSupported(const TensorInfo& input,
 }
 
 bool NeonLayerSupport::IsReshapeSupported(const TensorInfo& input,
+                                          const TensorInfo& output,
                                           const ReshapeDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
     ignore_unused(descriptor);
-    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
-                                      input.GetDataType(),
-                                      &TrueFunc<>,
-                                      &TrueFunc<>);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonReshapeWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output);
 }
 
 bool NeonLayerSupport::IsResizeSupported(const TensorInfo& input,
index f1d87f6..56a70c4 100644 (file)
@@ -222,6 +222,7 @@ public:
                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsReshapeSupported(const TensorInfo& input,
+                            const TensorInfo& output,
                             const ReshapeDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
index 7f2056c..659bb94 100644 (file)
 namespace armnn
 {
 
+arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo& input,
+                                                const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    return arm_compute::NEReshapeLayer::validate(&aclInputInfo, &aclOutputInfo);
+}
+
 NeonReshapeWorkload::NeonReshapeWorkload(const ReshapeQueueDescriptor& descriptor,
                                          const WorkloadInfo& info)
     : BaseWorkload<ReshapeQueueDescriptor>(descriptor, info)
index 2202463..186a02b 100644 (file)
@@ -6,7 +6,10 @@
 #pragma once
 
 #include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <neon/workloads/NeonWorkloadUtils.hpp>
 
+#include <armnn/TypesUtils.hpp>
 #include <arm_compute/runtime/IFunction.h>
 
 #include <memory>
@@ -14,6 +17,8 @@
 namespace armnn
 {
 
+arm_compute::Status NeonReshapeWorkloadValidate(const TensorInfo& input, const TensorInfo& output);
+
 class NeonReshapeWorkload : public BaseWorkload<ReshapeQueueDescriptor>
 {
 public:
index ee6462d..b801f70 100644 (file)
@@ -1451,9 +1451,11 @@ bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input,
 }
 
 bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
+                                         const TensorInfo& output,
                                          const ReshapeDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
+    ignore_unused(output);
     ignore_unused(descriptor);
     // Define supported output types.
     std::array<DataType,5> supportedOutputTypes =
index 123c264..1551a55 100644 (file)
@@ -242,6 +242,7 @@ public:
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
     bool IsReshapeSupported(const TensorInfo& input,
+                            const TensorInfo& output,
                             const ReshapeDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;