IVGCVSW-2771 Fix SubTensor error in vgg16 ExecuteNetwork NEON
author Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Mon, 20 May 2019 14:31:05 +0000 (15:31 +0100)
committer Matteo Martincigh <matteo.martincigh@arm.com>
Thu, 23 May 2019 13:37:29 +0000 (13:37 +0000)
 * Add a check so that the ACL function is called when sub-tensors cannot be
   used (see the example below)
 * Add computation of SplitAxis from SplitterDescriptor
 * Add NeonSplitterWorkload functions
 * Modify IsSplitterSupported to call the ACL validate function when
   sub-tensors cannot be used
 * Also check that quantization parameters match when using sub-tensors
 * Add more unit tests for Splitter in TfParser and TfLiteParser
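
A minimal illustration of the failing case (shapes mirror the new TfParser
test; the descriptor construction below is a hypothetical sketch, not code
from this patch): splitting a [ 1, 2, 2, 3 ] input into three [ 1, 2, 2, 1 ]
views differs from the parent only along the last axis, so NEON cannot carve
the views out as sub-tensors and has to run arm_compute::NESplit instead.

    #include <armnn/Descriptors.hpp>

    // Sketch only: build a descriptor for a 3-way split of a [1,2,2,3]
    // tensor along its last axis. MakeLastAxisSplitDesc is hypothetical.
    armnn::SplitterDescriptor MakeLastAxisSplitDesc()
    {
        armnn::SplitterDescriptor desc(3, 4);       // 3 views, 4 dimensions
        for (unsigned int view = 0; view < 3; ++view)
        {
            for (unsigned int dim = 0; dim < 4; ++dim)
            {
                // Views keep origin 0 everywhere except axis 3, where they
                // start at offsets 0, 1 and 2.
                desc.SetViewOriginCoord(view, dim, dim == 3 ? view : 0);
            }
            desc.SetViewSize(view, 0, 1);
            desc.SetViewSize(view, 1, 2);
            desc.SetViewSize(view, 2, 2);
            desc.SetViewSize(view, 3, 1);           // only axis 3 shrinks: 3 -> 1
        }
        return desc;
    }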

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I31e4c7d055117c83c65b598c4125442173242226

20 files changed:
include/armnn/ILayerSupport.hpp
include/armnn/LayerSupport.hpp
src/armnn/LayerSupport.cpp
src/armnn/layers/SplitterLayer.cpp
src/armnnTfLiteParser/test/Unpack.cpp
src/armnnTfParser/test/Split.cpp
src/backends/aclCommon/ArmComputeUtils.hpp
src/backends/backendsCommon/LayerSupportBase.cpp
src/backends/backendsCommon/LayerSupportBase.hpp
src/backends/backendsCommon/WorkloadFactory.cpp
src/backends/cl/ClLayerSupport.cpp
src/backends/cl/ClLayerSupport.hpp
src/backends/neon/NeonLayerSupport.cpp
src/backends/neon/NeonLayerSupport.hpp
src/backends/neon/backend.mk
src/backends/neon/workloads/CMakeLists.txt
src/backends/neon/workloads/NeonSplitterWorkload.cpp [new file with mode: 0644]
src/backends/neon/workloads/NeonSplitterWorkload.hpp
src/backends/reference/RefLayerSupport.cpp
src/backends/reference/RefLayerSupport.hpp

index c3fb7b0..f41495c 100644 (file)
@@ -9,6 +9,7 @@
 #include <armnn/Optional.hpp>
 
 #include <cctype>
+#include <functional>
 #include <memory>
 #include <vector>
 
@@ -259,10 +260,16 @@ public:
                                            const SpaceToBatchNdDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
+    ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
     virtual bool IsSplitterSupported(const TensorInfo& input,
                                      const ViewsDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
+    virtual bool IsSplitterSupported(const TensorInfo& input,
+                                     const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+                                     const ViewsDescriptor& descriptor,
+                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
     virtual bool IsStridedSliceSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const StridedSliceDescriptor& descriptor,
index e105b67..0ae8705 100644 (file)
@@ -326,9 +326,17 @@ bool IsSpaceToBatchNdSupported(const BackendId& backend,
                                char* reasonIfUnsupported = nullptr,
                                size_t reasonIfUnsupportedMaxLength = 1024);
 
+ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
+bool IsSplitterSupported(const BackendId& backend,
+                         const TensorInfo& input,
+                         const ViewsDescriptor& descriptor,
+                         char* reasonIfUnsupported = nullptr,
+                         size_t reasonIfUnsupportedMaxLength = 1024);
+
 /// Deprecated in favor of IBackend and ILayerSupport interfaces
 bool IsSplitterSupported(const BackendId& backend,
                          const TensorInfo& input,
+                         const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                          const ViewsDescriptor& descriptor,
                          char* reasonIfUnsupported = nullptr,
                          size_t reasonIfUnsupportedMaxLength = 1024);
index 47a0d3e..5867fab 100644 (file)
@@ -538,13 +538,26 @@ bool IsSpaceToBatchNdSupported(const BackendId& backend,
     FORWARD_LAYER_SUPPORT_FUNC(backend, IsSpaceToBatchNdSupported, input, output, descriptor);
 }
 
+ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
 bool IsSplitterSupported(const BackendId& backend,
                          const TensorInfo& input,
                          const ViewsDescriptor& descriptor,
                          char* reasonIfUnsupported,
                          size_t reasonIfUnsupportedMaxLength)
 {
+    ARMNN_NO_DEPRECATE_WARN_BEGIN
     FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, descriptor);
+    ARMNN_NO_DEPRECATE_WARN_END
+}
+
+bool IsSplitterSupported(const BackendId& backend,
+                         const TensorInfo& input,
+                         const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+                         const ViewsDescriptor& descriptor,
+                         char* reasonIfUnsupported,
+                         size_t reasonIfUnsupportedMaxLength)
+{
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsSplitterSupported, input, outputs, descriptor);
 }
 
 bool IsStridedSliceSupported(const BackendId& backend,
index b3a1094..4a6b222 100644 (file)
@@ -36,20 +36,57 @@ void SplitterLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& fa
 {
     //If sub-tensors are supported, then all the "splitter" needs to do is to
     //set the outputs to be appropriate sub-tensors of the input.
-    if (factory.SupportsSubTensors())
+    bool useSubTensors = factory.SupportsSubTensors();
+
+    if (useSubTensors)
     {
         const OutputHandler& outputHandler = GetInputSlots()[0].GetConnectedOutputSlot()->GetOutputHandler();
 
+        const TensorInfo& parentInfo = outputHandler.GetTensorInfo();
+
         ITensorHandle* inputData = outputHandler.GetData();
+
+        std::vector<std::unique_ptr<ITensorHandle>> subTensors;
+
         //Creates the outputs as subtensors of the input.
         for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
         {
-            m_OutputHandlers[i].SetData(factory.CreateSubTensorHandle(*inputData,
-                                                                      m_OutputHandlers[i].GetTensorInfo().GetShape(),
-                                                                      m_Param.GetViewOrigin(i)));
+            const TensorInfo& info = m_OutputHandlers[i].GetTensorInfo();
+
+            auto CreateSubTensor = [&]()
+            {
+                // Make sure quantization parameters are in the same space
+                if (parentInfo.IsTypeSpaceMatch(info))
+                {
+                    return factory.CreateSubTensorHandle(*inputData,
+                                                         info.GetShape(),
+                                                         this->m_Param.GetViewOrigin(i));
+                }
+                return std::unique_ptr<ITensorHandle>();
+            };
+
+            auto subTensor = CreateSubTensor();
+            if (!subTensor)
+            {
+                useSubTensors = false;
+                break; //Failed to create a valid sub-tensor, so stop trying with the rest of the views.
+            }
+            subTensors.push_back(std::move(subTensor));
+        }
+
+        if (useSubTensors)
+        {
+            unsigned int i = 0;
+            for (auto& subTensor : subTensors)
+            {
+                m_OutputHandlers[i].SetData(std::move(subTensor));
+                ++i;
+            }
+
         }
     }
-    else
+
+    if (!useSubTensors)
     {
         for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
         {
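
The IsTypeSpaceMatch guard in the lambda above is what enforces the
quantization point from the commit message: a sub-tensor aliases the parent's
bytes, so the view is only valid when both tensors interpret those bytes the
same way (same data type and, for quantized types, same scale and offset). A
minimal sketch of the failing case, with invented quantization parameters:

    #include <armnn/Tensor.hpp>

    // Same data type, but different quantization spaces.
    armnn::TensorInfo parentInfo(armnn::TensorShape({ 1, 2, 2, 3 }),
                                 armnn::DataType::QuantisedAsymm8, 0.5f, 10);
    armnn::TensorInfo viewInfo(armnn::TensorShape({ 1, 2, 2, 1 }),
                               armnn::DataType::QuantisedAsymm8, 1.0f, 0);
    // parentInfo.IsTypeSpaceMatch(viewInfo) == false: the scales differ, so
    // CreateSubTensor() returns an empty handle and the layer falls back to
    // standalone output tensors.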
index 10e682e..6b3c57b 100644 (file)
@@ -116,4 +116,25 @@ BOOST_FIXTURE_TEST_CASE(UnpackAxisZeroNumIsDefaultNotSpecified, DefaultUnpackAxi
           {"outputTensor4", { 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f }} });
 }
 
+struct DefaultUnpackLastAxisFixture : UnpackFixture
+{
+    DefaultUnpackLastAxisFixture() : UnpackFixture("[ 4, 1, 6 ]", 6, "[ 4, 1 ]", "2", "6") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(UnpackLastAxisNumSix, DefaultUnpackLastAxisFixture)
+{
+    RunTest<2, armnn::DataType::Float32>(
+        0,
+        { {"inputTensor", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f,
+                            7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
+                            13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
+                            19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f } } },
+        { {"outputTensor1", { 1.0f, 7.0f, 13.0f, 19.0f }},
+          {"outputTensor2", { 2.0f, 8.0f, 14.0f, 20.0f }},
+          {"outputTensor3", { 3.0f, 9.0f, 15.0f, 21.0f }},
+          {"outputTensor4", { 4.0f, 10.0f, 16.0f, 22.0f }},
+          {"outputTensor5", { 5.0f, 11.0f, 17.0f, 23.0f }},
+          {"outputTensor6", { 6.0f, 12.0f, 18.0f, 24.0f }} });
+}
+
 BOOST_AUTO_TEST_SUITE_END()
index bf42bf7..10ff04d 100644 (file)
@@ -173,4 +173,222 @@ BOOST_FIXTURE_TEST_CASE(ParseSplit, InputFirstSplitFixture)
                  { "Relu_2", { 0.25, 9.0f, 0.25f, 3.0625f } } });
 }
 
+struct SplitLastDimFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
+{
+    SplitLastDimFixture(bool withDimZero=false) {
+        m_Prototext = R"(
+        node {
+          name: "Placeholder"
+          op: "Placeholder"
+          attr {
+            key: "dtype"
+            value {
+              type: DT_FLOAT
+            }
+          }
+          attr {
+            key: "shape"
+            value {
+              shape {
+                dim {
+                  size: 1
+                }
+                dim {
+                  size: 2
+                }
+                dim {
+                  size: 2
+                }
+                dim {
+                  size: 3
+                }
+              }
+            }
+          }
+        }
+        node {
+          name: "Const"
+          op: "Const"
+          attr {
+            key: "dtype"
+            value {
+              type: DT_INT32
+            }
+          }
+          attr {
+            key: "value"
+            value {
+              tensor {
+                dtype: DT_INT32
+                tensor_shape {
+                }
+                int_val: 3
+              }
+            }
+          }
+        }
+        node {
+          name: "split/split_dim"
+          op: "Const"
+          attr {
+            key: "dtype"
+            value {
+              type: DT_INT32
+            }
+          }
+          attr {
+            key: "value"
+            value {
+              tensor {
+                dtype: DT_INT32
+                tensor_shape {
+                }
+                int_val: 3
+              }
+            }
+          }
+        }
+        node {
+          name: "split"
+          op: "Split"
+          input: "split/split_dim"
+          input: "Placeholder"
+          attr {
+            key: "T"
+            value {
+              type: DT_FLOAT
+            }
+          }
+          attr {
+            key: "num_split"
+            value {
+              i: 3
+            }
+          }
+        }
+        node {
+          name: "sub0/y"
+          op: "Const"
+          attr {
+            key: "dtype"
+            value {
+              type: DT_FLOAT
+            }
+          }
+          attr {
+            key: "value"
+            value {
+              tensor {
+                dtype: DT_FLOAT
+                tensor_shape {
+                }
+                float_val: 3.0
+              }
+            }
+          }
+        }
+        node {
+          name: "sub0"
+          op: "Sub"
+          input: "split"
+          input: "sub0/y"
+          attr {
+            key: "T"
+            value {
+              type: DT_FLOAT
+            }
+          }
+        }
+        node {
+          name: "sub1/y"
+          op: "Const"
+          attr {
+            key: "dtype"
+            value {
+              type: DT_FLOAT
+            }
+          }
+          attr {
+            key: "value"
+            value {
+              tensor {
+                dtype: DT_FLOAT
+                tensor_shape {
+                }
+                float_val: 2.0
+              }
+            }
+          }
+        }
+        node {
+          name: "sub1"
+          op: "Sub"
+          input: "split:1"
+          input: "sub1/y"
+          attr {
+            key: "T"
+            value {
+              type: DT_FLOAT
+            }
+          }
+        }
+        node {
+          name: "sub2/y"
+          op: "Const"
+          attr {
+            key: "dtype"
+            value {
+              type: DT_FLOAT
+            }
+          }
+          attr {
+            key: "value"
+            value {
+              tensor {
+                dtype: DT_FLOAT
+                tensor_shape {
+                }
+                float_val: 1.0
+              }
+            }
+          }
+        }
+        node {
+          name: "sub2"
+          op: "Sub"
+          input: "split:2"
+          input: "sub2/y"
+          attr {
+            key: "T"
+            value {
+              type: DT_FLOAT
+            }
+          }
+        }
+        versions {
+          producer: 27
+        } )";
+
+        Setup( { { "Placeholder", { 1, 2, 2, 3 } } },
+               { "sub0", "sub1", "sub2" });
+    }
+};
+
+BOOST_FIXTURE_TEST_CASE(SplitLastDimTest, SplitLastDimFixture)
+{
+    BOOST_TEST(
+            (m_Parser->GetNetworkOutputBindingInfo("sub0").second.GetShape() == armnn::TensorShape({ 1, 2, 2, 1 })));
+
+    BOOST_TEST(
+            (m_Parser->GetNetworkOutputBindingInfo("sub1").second.GetShape() == armnn::TensorShape({ 1, 2, 2, 1 })));
+
+    BOOST_TEST(
+            (m_Parser->GetNetworkOutputBindingInfo("sub2").second.GetShape() == armnn::TensorShape({ 1, 2, 2, 1 })));
+
+    RunTest<4>({ { "Placeholder", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f } } },
+               { { "sub0", { -2.0f, 1.0f, 4.0f, 7.0f } },
+                 { "sub1", { 0.0f, 3.0f, 6.0f, 9.0f } },
+                 { "sub2", { 2.0f, 5.0f, 8.0f, 11.0f } } });
+}
+
 BOOST_AUTO_TEST_SUITE_END()
index b4673f7..5b8f983 100644 (file)
@@ -9,6 +9,8 @@
 
 #include <arm_compute/core/Types.h>
 
+#include <boost/assert.hpp>
+
 namespace armnn
 {
 
@@ -130,4 +132,23 @@ inline unsigned int ComputeSoftmaxAclAxis(const armnn::TensorInfo& tensor)
     return dim - 1;
 }
 
+inline std::set<unsigned int> ComputeSplitAxis(const armnn::SplitterDescriptor& desc, const TensorShape& input)
+{
+    unsigned int numSplit = desc.GetNumViews();
+    unsigned int numDimensions = desc.GetNumDimensions();
+    std::set<unsigned int> splitAxis;
+
+    for (unsigned int i = 0; i < numSplit; ++i)
+    {
+        for (unsigned int dimIdx = 0; dimIdx < numDimensions; ++dimIdx)
+        {
+            if (desc.GetViewSizes(i)[dimIdx] != input[dimIdx])
+            {
+                splitAxis.insert(dimIdx);
+            }
+        }
+    }
+    return splitAxis;
+}
+
 } // namespace armnn
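
Given the descriptor sketched under the commit message, the helper above
compares each view's size against the input extent per dimension; only axis 3
differs, so the returned set is { 3 }. A minimal check under those assumptions
(MakeLastAxisSplitDesc is the hypothetical helper from that sketch):

    #include <set>

    armnn::SplitterDescriptor desc = MakeLastAxisSplitDesc();
    armnn::TensorShape inputShape({ 1, 2, 2, 3 });
    std::set<unsigned int> axes = armnn::ComputeSplitAxis(desc, inputShape);
    // axes == { 3 }: one split axis, and it is the last dimension -- exactly
    // the case the NEON backend routes to NESplit instead of sub-tensors.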
index 7760c07..9fcb496 100644 (file)
@@ -401,6 +401,14 @@ bool LayerSupportBase::IsSplitterSupported(const TensorInfo& input,
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsSplitterSupported(const TensorInfo& input,
+                                           const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+                                           const ViewsDescriptor& descriptor,
+                                           Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const StridedSliceDescriptor& descriptor,
index 88d5792..7552758 100644 (file)
@@ -247,10 +247,16 @@ public:
                                    const SpaceToBatchNdDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
     bool IsSplitterSupported(const TensorInfo& input,
                              const ViewsDescriptor& descriptor,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsSplitterSupported(const TensorInfo& input,
+                             const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+                             const ViewsDescriptor& descriptor,
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsStridedSliceSupported(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const StridedSliceDescriptor& descriptor,
index 9679c35..0490a94 100644 (file)
@@ -703,7 +703,20 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
         {
             auto cLayer = boost::polymorphic_downcast<const SplitterLayer*>(&layer);
             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+
+            // Get vector of all outputs.
+            auto getTensorInfo = [&dataType](const OutputSlot& slot)
+            {
+                return OverrideDataType(slot.GetTensorInfo(), dataType);
+            };
+            auto beginI = boost::make_transform_iterator(layer.GetOutputSlots().begin(), getTensorInfo);
+            auto endI = boost::make_transform_iterator(layer.GetOutputSlots().end(), getTensorInfo);
+            std::vector<TensorInfo> outputs(beginI, endI);
+
+            const std::vector<std::reference_wrapper<TensorInfo>> outputPtrs(outputs.begin(), outputs.end());
+
             result = layerSupportObject->IsSplitterSupported(OverrideDataType(input, dataType),
+                                                             outputPtrs,
                                                              cLayer->GetParameters(),
                                                              reason);
             break;
index 2ce5179..21d191a 100644 (file)
@@ -607,6 +607,19 @@ bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                     &TrueFunc<>);
 }
 
+bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
+                                         const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+                                         const ViewsDescriptor& descriptor,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(descriptor);
+    ignore_unused(outputs);
+    return IsSupportedForDataTypeCl(reasonIfUnsupported,
+                                    input.GetDataType(),
+                                    &TrueFunc<>,
+                                    &TrueFunc<>);
+}
+
 bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const StridedSliceDescriptor& descriptor,
index b634d46..fca0bfd 100644 (file)
@@ -200,10 +200,16 @@ public:
                                    const SpaceToBatchNdDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
     bool IsSplitterSupported(const TensorInfo& input,
                              const ViewsDescriptor& descriptor,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsSplitterSupported(const TensorInfo& input,
+                             const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+                             const ViewsDescriptor& descriptor,
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsStridedSliceSupported(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const StridedSliceDescriptor& descriptor,
index f4599ff..fd9aac5 100644 (file)
@@ -17,6 +17,7 @@
 #include <boost/core/ignore_unused.hpp>
 
 #if defined(ARMCOMPUTENEON_ENABLED)
+#include <aclCommon/ArmComputeUtils.hpp>
 #include "workloads/NeonAdditionWorkload.hpp"
 #include "workloads/NeonActivationWorkload.hpp"
 #include "workloads/NeonBatchNormalizationWorkload.hpp"
@@ -36,6 +37,7 @@
 #include "workloads/NeonPooling2dWorkload.hpp"
 #include "workloads/NeonResizeBilinearWorkload.hpp"
 #include "workloads/NeonSoftmaxBaseWorkload.hpp"
+#include "workloads/NeonSplitterWorkload.hpp"
 #include "workloads/NeonSubtractionWorkload.hpp"
 #endif
 
@@ -478,6 +480,38 @@ bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                       &TrueFunc<>);
 }
 
+bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
+                                           const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+                                           const ViewsDescriptor& descriptor,
+                                           Optional<std::string&> reasonIfUnsupported) const
+{
+#if defined(ARMCOMPUTENEON_ENABLED)
+    // When splitting along the last dimension, sub-tensors cannot be used:
+    // for inputs with more than two dimensions the width and height of the
+    // sub-tensors would not match the width and height of the parent
+    // tensor.
+    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
+    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
+        *splitAxis.begin() == descriptor.GetNumDimensions() - 1)
+    {
+        FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSplitterWorkloadValidate,
+                                       reasonIfUnsupported,
+                                       input,
+                                       outputs,
+                                       *splitAxis.begin());
+    }
+#endif
+    for (auto output : outputs)
+    {
+        if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not in the same space
+        {
+            SetValueChecked(reasonIfUnsupported, "Neon Splitter: Types and quantization parameters must match.");
+            return false;
+        }
+    }
+    return true;
+}
+
 bool NeonLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
                                               const TensorInfo& input1,
                                               const TensorInfo& output,
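
A sketch of exercising the new overload (the support object, tensor infos and
reason string are illustrative, and MakeLastAxisSplitDesc is the hypothetical
helper from the commit-message sketch):

    #include <neon/NeonLayerSupport.hpp>

    armnn::NeonLayerSupport support;
    armnn::SplitterDescriptor desc = MakeLastAxisSplitDesc();
    armnn::TensorInfo inputInfo(armnn::TensorShape({ 1, 2, 2, 3 }),
                                armnn::DataType::Float32);
    std::vector<armnn::TensorInfo> outputInfos(
        3, armnn::TensorInfo(armnn::TensorShape({ 1, 2, 2, 1 }),
                             armnn::DataType::Float32));
    std::vector<std::reference_wrapper<armnn::TensorInfo>> outputs(
        outputInfos.begin(), outputInfos.end());
    std::string reason;
    bool supported = support.IsSplitterSupported(
        inputInfo, outputs, desc, armnn::Optional<std::string&>(reason));
    // With ARMCOMPUTENEON_ENABLED this last-axis split is validated via
    // NeonSplitterWorkloadValidate; otherwise the call falls through to the
    // type-space check over the outputs.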
index 8312bb9..5e8e0bd 100644 (file)
@@ -158,10 +158,16 @@ public:
                             const SoftmaxDescriptor& descriptor,
                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
     bool IsSplitterSupported(const TensorInfo& input,
                              const ViewsDescriptor& descriptor,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsSplitterSupported(const TensorInfo& input,
+                             const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+                             const ViewsDescriptor& descriptor,
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsSubtractionSupported(const TensorInfo& input0,
                                 const TensorInfo& input1,
                                 const TensorInfo& output,
index 6824879..6931bd7 100644 (file)
@@ -46,6 +46,7 @@ BACKEND_SOURCES := \
         workloads/NeonSoftmaxBaseWorkload.cpp \
         workloads/NeonSoftmaxFloatWorkload.cpp \
         workloads/NeonSoftmaxUint8Workload.cpp \
+        workloads/NeonSplitterWorkload.cpp \
         workloads/NeonSubtractionWorkload.cpp
 
 else
index f1c773d..8b2ad63 100644 (file)
@@ -58,6 +58,7 @@ list(APPEND armnnNeonBackendWorkloads_sources
     NeonSoftmaxFloatWorkload.hpp
     NeonSoftmaxUint8Workload.cpp
     NeonSoftmaxUint8Workload.hpp
+    NeonSplitterWorkload.cpp
     NeonSplitterWorkload.hpp
     NeonSubtractionWorkload.cpp
     NeonSubtractionWorkload.hpp
diff --git a/src/backends/neon/workloads/NeonSplitterWorkload.cpp b/src/backends/neon/workloads/NeonSplitterWorkload.cpp
new file mode 100644 (file)
index 0000000..bf35939
--- /dev/null
@@ -0,0 +1,112 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonSplitterWorkload.hpp"
+
+#include "NeonWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <neon/NeonTensorHandle.hpp>
+
+
+namespace armnn
+{
+
+using namespace armcomputetensorutils;
+
+namespace
+{
+unsigned int CalcAclAxis(unsigned int numDimensions, unsigned int splitAxis)
+{
+    return (numDimensions - splitAxis) - 1;
+}
+
+} //namespace
+
+arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo& input,
+                                                 const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+                                                 unsigned int splitAxis)
+{
+    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
+
+    size_t numOutputs = outputs.size();
+
+    std::vector<arm_compute::TensorInfo> aclOutputs;
+    aclOutputs.reserve(numOutputs);
+
+    std::vector<arm_compute::ITensorInfo*> aclOutputPtr;
+    aclOutputPtr.reserve(numOutputs);
+
+    for (size_t i = 0u; i < outputs.size(); ++i)
+    {
+        aclOutputs.emplace_back(BuildArmComputeTensorInfo(outputs[i]));
+        aclOutputPtr.emplace_back(&aclOutputs.back());
+    }
+
+    unsigned int aclAxis = CalcAclAxis(input.GetNumDimensions(), splitAxis);
+    return arm_compute::NESplit::validate(&aclInputInfo, aclOutputPtr, aclAxis);
+}
+
+NeonSplitterWorkload::NeonSplitterWorkload(const SplitterQueueDescriptor& descriptor, const WorkloadInfo& info)
+    : BaseWorkload<SplitterQueueDescriptor>(descriptor, info)
+{
+    bool allOutputsAreSubtensors = true;
+
+    // Check that all outputs are sub-tensors
+    for (auto output : m_Data.m_Outputs)
+    {
+        if (output && !output->GetParent())
+        {
+            // Non sub-tensor output found, so we need to execute the split function
+            allOutputsAreSubtensors = false;
+            break;
+        }
+    }
+
+    if (allOutputsAreSubtensors)
+    {
+        // Can skip configuring the split function since it's not executed
+        return;
+    }
+
+    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+
+    std::vector<arm_compute::ITensor *> aclOutputs;
+    for (auto output : m_Data.m_Outputs)
+    {
+        arm_compute::ITensor& aclOutput = boost::polymorphic_pointer_downcast<INeonTensorHandle>(output)->GetTensor();
+        aclOutputs.emplace_back(&aclOutput);
+    }
+
+    // Create the layer function
+    m_Layer.reset(new arm_compute::NESplit());
+
+    // Configure input and output tensors
+    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor.m_Parameters, m_Data.m_Inputs[0]->GetShape());
+    if (splitAxis.size() != 1)
+    {
+        throw InvalidArgumentException("Cannot derive split axis from SplitterDescriptor");
+    }
+
+    unsigned int aclAxis = CalcAclAxis(descriptor.m_Parameters.GetNumDimensions(), *splitAxis.begin());
+    m_Layer->configure(&input, aclOutputs, aclAxis);
+
+    // Prepare
+    m_Layer->prepare();
+}
+
+void NeonSplitterWorkload::Execute() const
+{
+    if (m_Layer)
+    {
+        ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSplitterWorkload_Execute");
+        m_Layer->run();
+    }
+}
+
+} //namespace armnn
+
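
One subtlety worth spelling out: ArmNN numbers tensor dimensions from the
outermost (slowest-varying) one, while Compute Library numbers them from the
innermost, so CalcAclAxis above reverses the index before handing it to
NESplit. A worked check of the conversion:

    // CalcAclAxis(numDimensions, splitAxis) == (numDimensions - splitAxis) - 1
    // 4-D tensor, split on ArmNN axis 3 (innermost): (4 - 3) - 1 == 0
    // 4-D tensor, split on ArmNN axis 0 (outermost): (4 - 0) - 1 == 3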
index 2a7ee19..f902566 100644 (file)
@@ -7,18 +7,26 @@
 
 #include <backendsCommon/Workload.hpp>
 
+#include <arm_compute/runtime/NEON/NEFunctions.h>
+
+#include <functional>
+
 namespace armnn
 {
 
+arm_compute::Status NeonSplitterWorkloadValidate(const TensorInfo& input,
+                                                 const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+                                                 unsigned int splitAxis);
+
 class NeonSplitterWorkload : public BaseWorkload<SplitterQueueDescriptor>
 {
 public:
-    using BaseWorkload<SplitterQueueDescriptor>::BaseWorkload;
+    NeonSplitterWorkload(const SplitterQueueDescriptor& descriptor, const WorkloadInfo& info);
+
+    void Execute() const override;
 
-    virtual void Execute() const override
-    {
-        // With subtensors, splitter is a no-op.
-    }
+private:
+    mutable std::unique_ptr<arm_compute::NESplit> m_Layer;
 };
 
 } //namespace armnn
index 7beff72..6ad6816 100644 (file)
@@ -998,6 +998,19 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                      &TrueFunc<>);
 }
 
+bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
+                                          const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+                                          const ViewsDescriptor& descriptor,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(descriptor);
+    ignore_unused(outputs);
+    return IsSupportedForDataTypeRef(reasonIfUnsupported,
+                                     input.GetDataType(),
+                                     &TrueFunc<>,
+                                     &TrueFunc<>);
+}
+
 bool RefLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const StridedSliceDescriptor& descriptor,
index a4ae01e..944061d 100644 (file)
@@ -236,10 +236,16 @@ public:
                                    const SpaceToBatchNdDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    ARMNN_DEPRECATED_MSG("Use IsSplitterSupported with outputs instead")
     bool IsSplitterSupported(const TensorInfo& input,
                              const ViewsDescriptor& descriptor,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsSplitterSupported(const TensorInfo& input,
+                             const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+                             const ViewsDescriptor& descriptor,
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsStridedSliceSupported(const TensorInfo& input,
                                  const TensorInfo& output,
                                  const StridedSliceDescriptor& descriptor,