IVGCVSW-3878 Add reference workload for SLICE
author    Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
          Tue, 17 Sep 2019 16:27:04 +0000 (17:27 +0100)
committer Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
          Wed, 18 Sep 2019 10:09:02 +0000 (11:09 +0100)
* Added reference workload implementation and layer tests
  for all supported tensor dimensions (1d, 2d, 3d, 4d)
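
For reference, SLICE copies a contiguous sub-tensor: for each dimension i the
output takes m_Size[i] elements starting at offset m_Begin[i]. A minimal
standalone sketch of that semantics (editor's illustration, not part of this
patch), using the 2d layer-test data added below (input shape { 3, 5 },
begin { 1, 2 }, size { 2, 3 }):

    #include <cstddef>
    #include <vector>

    // Row-major 2d slice mirroring the values used in Slice2dTest.
    std::vector<float> Slice2d(const std::vector<float>& input, std::size_t dim1,
                               std::size_t begin0, std::size_t begin1,
                               std::size_t size0,  std::size_t size1)
    {
        std::vector<float> output;
        output.reserve(size0 * size1);
        for (std::size_t i = begin0; i < begin0 + size0; ++i)
        {
            for (std::size_t j = begin1; j < begin1 + size1; ++j)
            {
                output.push_back(input[i * dim1 + j]);
            }
        }
        return output; // for the test data: { 7, 8, 9, 12, 13, 14 }
    }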

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I40eb300828933e9183027281105d1a7e597d1569

18 files changed:
src/backends/backendsCommon/WorkloadData.cpp
src/backends/backendsCommon/common.mk
src/backends/backendsCommon/test/CMakeLists.txt
src/backends/backendsCommon/test/LayerTests.hpp
src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp [new file with mode: 0644]
src/backends/backendsCommon/test/layerTests/SliceTestImpl.hpp [new file with mode: 0644]
src/backends/reference/RefLayerSupport.cpp
src/backends/reference/RefLayerSupport.hpp
src/backends/reference/RefWorkloadFactory.cpp
src/backends/reference/RefWorkloadFactory.hpp
src/backends/reference/backend.mk
src/backends/reference/test/RefLayerTests.cpp
src/backends/reference/workloads/CMakeLists.txt
src/backends/reference/workloads/RefSliceWorkload.cpp [new file with mode: 0644]
src/backends/reference/workloads/RefSliceWorkload.hpp [new file with mode: 0644]
src/backends/reference/workloads/RefWorkloads.hpp
src/backends/reference/workloads/Slice.cpp [new file with mode: 0644]
src/backends/reference/workloads/Slice.hpp [new file with mode: 0644]

diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 2fa0c92..136c196 100644
@@ -2631,7 +2631,7 @@ void SliceQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     const TensorShape& inputShape  = inputTensorInfo.GetShape();
     for(unsigned int i = 0u; i < rank; ++i)
     {
-        if (m_Parameters.m_Begin[i] + m_Parameters.m_Size[i] >= inputShape[i])
+        if (m_Parameters.m_Begin[i] + m_Parameters.m_Size[i] > inputShape[i])
         {
             throw InvalidArgumentException(descriptorName + ": Sum of begin offset and size for dimension " +
                 std::to_string(i) + " exceeds input size.");
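
The relaxed check above accepts a slice that extends exactly to the end of a
dimension (begin + size == inputShape[i]), which the old >= comparison wrongly
rejected. A small sketch (editor's illustration) with the values from the new
Slice1dTest (input shape { 5 }, begin 2, size 3):

    // Covers elements 2, 3 and 4 of a 5-element dimension - a valid slice.
    unsigned int begin = 2u, size = 3u, dim = 5u;
    bool rejectedByNewCheck = (begin + size >  dim); // false: accepted
    bool rejectedByOldCheck = (begin + size >= dim); // true:  previously rejected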
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 14feb34..0943a83 100644
@@ -62,6 +62,7 @@ COMMON_TEST_SOURCES := \
     test/layerTests/Pooling2dTestImpl.cpp \
     test/layerTests/ReshapeTestImpl.cpp \
     test/layerTests/RsqrtTestImpl.cpp \
+    test/layerTests/SliceTestImpl.cpp \
     test/layerTests/QuantizeTestImpl.cpp \
     test/layerTests/SoftmaxTestImpl.cpp \
     test/layerTests/SpaceToBatchNdTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index e3fa431..ef293d4 100644
@@ -106,6 +106,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources
     layerTests/ResizeTestImpl.hpp
     layerTests/RsqrtTestImpl.cpp
     layerTests/RsqrtTestImpl.hpp
+    layerTests/SliceTestImpl.cpp
+    layerTests/SliceTestImpl.hpp
     layerTests/SoftmaxTestImpl.cpp
     layerTests/SoftmaxTestImpl.hpp
     layerTests/SpaceToBatchNdTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index bf2ef6a..f48ae43 100644
@@ -40,6 +40,7 @@
 #include <backendsCommon/test/layerTests/ReshapeTestImpl.hpp>
 #include <backendsCommon/test/layerTests/ResizeTestImpl.hpp>
 #include <backendsCommon/test/layerTests/RsqrtTestImpl.hpp>
+#include <backendsCommon/test/layerTests/SliceTestImpl.hpp>
 #include <backendsCommon/test/layerTests/SoftmaxTestImpl.hpp>
 #include <backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.hpp>
 #include <backendsCommon/test/layerTests/SpaceToDepthTestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
new file mode 100644
index 0000000..f0479c8
--- /dev/null
@@ -0,0 +1,291 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SliceTestImpl.hpp"
+
+#include <ResolveType.hpp>
+
+#include <armnn/ArmNN.hpp>
+
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+namespace
+{
+
+template<typename T, std::size_t NumDims>
+LayerTestResult<T, NumDims> SliceTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    armnn::TensorInfo& inputInfo,
+    armnn::TensorInfo& outputInfo,
+    std::vector<float>& inputData,
+    std::vector<float>& expectedOutputData,
+    armnn::SliceQueueDescriptor descriptor,
+    const float qScale = 1.0f,
+    const int qOffset = 0)
+{
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputInfo.SetQuantizationScale(qScale);
+        inputInfo.SetQuantizationOffset(qOffset);
+
+        outputInfo.SetQuantizationScale(qScale);
+        outputInfo.SetQuantizationOffset(qOffset);
+    }
+
+    boost::multi_array<T, NumDims> input =
+        MakeTensor<T, NumDims>(inputInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+
+    LayerTestResult<T, NumDims> result(outputInfo);
+    result.outputExpected =
+        MakeTensor<T, NumDims>(outputInfo, QuantizedVector<T>(qScale, qOffset, expectedOutputData));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
+
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSlice(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), input.data());
+
+    ExecuteWorkload(*workload, memoryManager);
+
+    CopyDataFromITensorHandle(result.output.data(), outputHandle.get());
+
+    return result;
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> Slice4dTest(armnn::IWorkloadFactory& workloadFactory,
+                                  const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    armnn::TensorShape inputShape  = { 3, 2, 3, 5 };
+    armnn::TensorShape outputShape = { 2, 1, 2, 3 };
+
+    armnn::SliceQueueDescriptor desc;
+    desc.m_Parameters.m_Begin  = { 1, 0, 1, 2 };
+    desc.m_Parameters.m_Size   = { 2, 1, 2, 3 };
+
+    armnn::TensorInfo inputInfo(inputShape, ArmnnType);
+    armnn::TensorInfo outputInfo(outputShape, ArmnnType);
+
+    std::vector<float> input =
+    {
+         0.f,  1.f,  2.f,  3.f,  4.f,
+         5.f,  6.f,  7.f,  8.f,  9.f,
+        10.f, 11.f, 12.f, 13.f, 14.f,
+
+        15.f, 16.f, 17.f, 18.f, 19.f,
+        20.f, 21.f, 22.f, 23.f, 24.f,
+        25.f, 26.f, 27.f, 28.f, 29.f,
+
+
+        30.f, 31.f, 32.f, 33.f, 34.f,
+        35.f, 36.f, 37.f, 38.f, 39.f,
+        40.f, 41.f, 42.f, 43.f, 44.f,
+
+        45.f, 46.f, 47.f, 48.f, 49.f,
+        50.f, 51.f, 52.f, 53.f, 54.f,
+        55.f, 56.f, 57.f, 58.f, 59.f,
+
+
+        60.f, 61.f, 62.f, 63.f, 64.f,
+        65.f, 66.f, 67.f, 68.f, 69.f,
+        70.f, 71.f, 72.f, 73.f, 74.f,
+
+        75.f, 76.f, 77.f, 78.f, 79.f,
+        80.f, 81.f, 82.f, 83.f, 84.f,
+        85.f, 86.f, 87.f, 88.f, 89.f
+    };
+
+    std::vector<float> expectedOutput =
+    {
+        37.f, 38.f, 39.f,
+        42.f, 43.f, 44.f,
+
+
+        67.f, 68.f, 69.f,
+        72.f, 73.f, 74.f
+    };
+
+    return SliceTestImpl<T, 4>(workloadFactory, memoryManager, inputInfo, outputInfo, input, expectedOutput, desc);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> Slice3dTest(armnn::IWorkloadFactory& workloadFactory,
+                                  const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    armnn::TensorShape inputShape  = { 2, 3, 5 };
+    armnn::TensorShape outputShape = { 1, 2, 3 };
+
+    armnn::SliceQueueDescriptor desc;
+    desc.m_Parameters.m_Begin  = { 0, 1, 2 };
+    desc.m_Parameters.m_Size   = { 1, 2, 3 };
+
+    armnn::TensorInfo inputInfo(inputShape, ArmnnType);
+    armnn::TensorInfo outputInfo(outputShape, ArmnnType);
+
+    std::vector<float> input =
+    {
+         0.f,  1.f,  2.f,  3.f,  4.f,
+         5.f,  6.f,  7.f,  8.f,  9.f,
+        10.f, 11.f, 12.f, 13.f, 14.f,
+
+        15.f, 16.f, 17.f, 18.f, 19.f,
+        20.f, 21.f, 22.f, 23.f, 24.f,
+        25.f, 26.f, 27.f, 28.f, 29.f,
+    };
+
+    std::vector<float> expectedOutput =
+    {
+         7.f,  8.f,  9.f,
+        12.f, 13.f, 14.f
+    };
+
+    return SliceTestImpl<T, 3>(workloadFactory, memoryManager, inputInfo, outputInfo, input, expectedOutput, desc);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> Slice2dTest(armnn::IWorkloadFactory& workloadFactory,
+                                  const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    armnn::TensorShape inputShape  = { 3, 5 };
+    armnn::TensorShape outputShape = { 2, 3 };
+
+    armnn::SliceQueueDescriptor desc;
+    desc.m_Parameters.m_Begin  = { 1, 2 };
+    desc.m_Parameters.m_Size   = { 2, 3 };
+
+    armnn::TensorInfo inputInfo(inputShape, ArmnnType);
+    armnn::TensorInfo outputInfo(outputShape, ArmnnType);
+
+    std::vector<float> input =
+    {
+         0.f,  1.f,  2.f,  3.f,  4.f,
+         5.f,  6.f,  7.f,  8.f,  9.f,
+        10.f, 11.f, 12.f, 13.f, 14.f
+    };
+
+    std::vector<float> expectedOutput =
+    {
+         7.f,  8.f,  9.f,
+        12.f, 13.f, 14.f
+    };
+
+    return SliceTestImpl<T, 2>(workloadFactory, memoryManager, inputInfo, outputInfo, input, expectedOutput, desc);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 1> Slice1dTest(armnn::IWorkloadFactory& workloadFactory,
+                                  const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    armnn::TensorShape inputShape  = { 5 };
+    armnn::TensorShape outputShape = { 3 };
+
+    armnn::SliceQueueDescriptor desc;
+    desc.m_Parameters.m_Begin  = { 2 };
+    desc.m_Parameters.m_Size   = { 3 };
+
+    armnn::TensorInfo inputInfo(inputShape, ArmnnType);
+    armnn::TensorInfo outputInfo(outputShape, ArmnnType);
+
+    std::vector<float> input =
+    {
+         0.f,  1.f,  2.f,  3.f,  4.f
+    };
+
+    std::vector<float> expectedOutput =
+    {
+         2.f,  3.f,  4.f
+    };
+
+    return SliceTestImpl<T, 1>(workloadFactory, memoryManager, inputInfo, outputInfo, input, expectedOutput, desc);
+}
+
+} // anonymous namespace
+
+// Float32 tests
+LayerTestResult<float, 4> Slice4dFloat32Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Slice4dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
+}
+
+LayerTestResult<float, 3> Slice3dFloat32Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Slice3dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
+}
+
+LayerTestResult<float, 2> Slice2dFloat32Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Slice2dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
+}
+
+LayerTestResult<float, 1> Slice1dFloat32Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Slice1dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
+}
+
+// Uint8 tests
+LayerTestResult<uint8_t, 4> Slice4dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Slice4dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+}
+
+LayerTestResult<uint8_t, 3> Slice3dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Slice3dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+}
+
+LayerTestResult<uint8_t, 2> Slice2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Slice2dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+}
+
+LayerTestResult<uint8_t, 1> Slice1dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Slice1dTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
+}
+
+// Int16 tests
+LayerTestResult<int16_t, 4> Slice4dInt16Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Slice4dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+}
+
+LayerTestResult<int16_t, 3> Slice3dInt16Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Slice3dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+}
+
+LayerTestResult<int16_t, 2> Slice2dInt16Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Slice2dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+}
+
+LayerTestResult<int16_t, 1> Slice1dInt16Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Slice1dTest<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager);
+}
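
Note on the quantized variants: SliceTestImpl quantizes both the input and the
expected output with the same (scale, offset) before comparison, so the float
reference data above is reused unchanged for the Uint8 and Int16 tests. A hedged
sketch of the per-element mapping QuantizedVector is assumed to apply (with the
defaults qScale = 1.0f, qOffset = 0, the values 0.f..89.f map straight to 0..89):

    #include <cmath>
    #include <cstdint>

    // Assumed affine quantization used by the test helper (clamping omitted).
    int32_t QuantizeValue(float value, float scale, int32_t offset)
    {
        return static_cast<int32_t>(std::round(value / scale)) + offset;
    }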
diff --git a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.hpp
new file mode 100644
index 0000000..59458ed
--- /dev/null
@@ -0,0 +1,50 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+// Float32 tests
+LayerTestResult<float, 4> Slice4dFloat32Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 3> Slice3dFloat32Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 2> Slice2dFloat32Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 1> Slice1dFloat32Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+// Uint8 tests
+LayerTestResult<uint8_t, 4> Slice4dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 3> Slice3dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 2> Slice2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 1> Slice1dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+// Int16 tests
+LayerTestResult<int16_t, 4> Slice4dInt16Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 3> Slice3dInt16Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 2> Slice2dInt16Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<int16_t, 1> Slice1dInt16Test(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 228f8a8..572f617 100644
@@ -1374,6 +1374,33 @@ bool RefLayerSupport::IsRsqrtSupported(const TensorInfo& input,
     return supported;
 }
 
+bool RefLayerSupport::IsSliceSupported(const TensorInfo& input,
+                                       const TensorInfo& output,
+                                       const SliceDescriptor& descriptor,
+                                       Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(descriptor);
+    bool supported = true;
+
+    std::array<DataType, 3> supportedTypes =
+    {
+        DataType::Float32,
+        DataType::QuantisedAsymm8,
+        DataType::QuantisedSymm16
+    };
+
+    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+                                  "Reference Slice: input type not supported");
+
+    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+                                  "Reference Slice: output type not supported");
+
+    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+                                  "Reference Slice: input and output types are mismatched");
+
+    return supported;
+}
+
 bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const SoftmaxDescriptor& descriptor,
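
A minimal sketch (editor's illustration, not part of the patch) of how a caller
could query the new support check; the shapes and descriptor values are borrowed
from the 3d layer test:

    armnn::RefLayerSupport layerSupport;

    armnn::TensorInfo input({ 2, 3, 5 }, armnn::DataType::Float32);
    armnn::TensorInfo output({ 1, 2, 3 }, armnn::DataType::Float32);

    armnn::SliceDescriptor descriptor;
    descriptor.m_Begin = { 0, 1, 2 };
    descriptor.m_Size  = { 1, 2, 3 };

    std::string reason;
    bool supported = layerSupport.IsSliceSupported(
        input, output, descriptor, armnn::Optional<std::string&>(reason));
    // supported == true here; an unsupported type (e.g. a Float16 input) would
    // return false with "Reference Slice: input type not supported" in reason.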
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 26c60dc..8200058 100644
@@ -219,6 +219,11 @@ public:
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsSliceSupported(const TensorInfo& input,
+                          const TensorInfo& output,
+                          const SliceDescriptor& descriptor,
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsSoftmaxSupported(const TensorInfo& input,
                             const TensorInfo& output,
                             const SoftmaxDescriptor& descriptor,
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 52dffcc..055c8da 100644
@@ -457,4 +457,10 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateAbs(const AbsQueueDescripto
     return std::make_unique<RefAbsWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
+                                                           const WorkloadInfo& info) const
+{
+    return std::make_unique<RefSliceWorkload>(descriptor, info);
+}
+
 } // namespace armnn
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 5851528..2c40053 100644
@@ -212,6 +212,9 @@ public:
     std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
                                          const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info) const override;
+
 private:
 
     template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index b1f0a03..b2ec748 100644
@@ -69,6 +69,7 @@ BACKEND_SOURCES := \
         workloads/RefResizeBilinearWorkload.cpp \
         workloads/RefResizeWorkload.cpp \
         workloads/RefRsqrtWorkload.cpp \
+        workloads/RefSliceWorkload.cpp \
         workloads/RefSoftmaxWorkload.cpp \
         workloads/RefSpaceToBatchNdWorkload.cpp \
         workloads/RefSpaceToDepthWorkload.cpp \
@@ -78,6 +79,7 @@ BACKEND_SOURCES := \
         workloads/RefTransposeConvolution2dWorkload.cpp \
         workloads/Resize.cpp \
         workloads/Rsqrt.cpp \
+        workloads/Slice.cpp \
         workloads/SpaceToBatchNd.cpp \
         workloads/SpaceToDepth.cpp \
         workloads/Stack.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index eb56dde..afeb8a4 100644
@@ -1252,6 +1252,22 @@ ARMNN_AUTO_TEST_CASE(PreluFloat16, PreluTest<DataType::Float16>)
 ARMNN_AUTO_TEST_CASE(PreluUint8,   PreluTest<DataType::QuantisedAsymm8>)
 ARMNN_AUTO_TEST_CASE(PreluInt16,   PreluTest<DataType::QuantisedSymm16>)
 
+// Slice
+ARMNN_AUTO_TEST_CASE(Slice4dFloat32, Slice4dFloat32Test)
+ARMNN_AUTO_TEST_CASE(Slice3dFloat32, Slice3dFloat32Test)
+ARMNN_AUTO_TEST_CASE(Slice2dFloat32, Slice2dFloat32Test)
+ARMNN_AUTO_TEST_CASE(Slice1dFloat32, Slice1dFloat32Test)
+
+ARMNN_AUTO_TEST_CASE(Slice4dUint8, Slice4dUint8Test)
+ARMNN_AUTO_TEST_CASE(Slice3dUint8, Slice3dUint8Test)
+ARMNN_AUTO_TEST_CASE(Slice2dUint8, Slice2dUint8Test)
+ARMNN_AUTO_TEST_CASE(Slice1dUint8, Slice1dUint8Test)
+
+ARMNN_AUTO_TEST_CASE(Slice4dInt16, Slice4dInt16Test)
+ARMNN_AUTO_TEST_CASE(Slice3dInt16, Slice3dInt16Test)
+ARMNN_AUTO_TEST_CASE(Slice2dInt16, Slice2dInt16Test)
+ARMNN_AUTO_TEST_CASE(Slice1dInt16, Slice1dInt16Test)
+
 // TransposeConvolution2d
 ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloatNchw,
                      SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 23d6024..3077095 100644
@@ -107,6 +107,8 @@ list(APPEND armnnRefBackendWorkloads_sources
     RefResizeWorkload.hpp
     RefRsqrtWorkload.cpp
     RefRsqrtWorkload.hpp
+    RefSliceWorkload.cpp
+    RefSliceWorkload.hpp
     RefSoftmaxWorkload.cpp
     RefSoftmaxWorkload.hpp
     RefSpaceToBatchNdWorkload.cpp
@@ -127,6 +129,8 @@ list(APPEND armnnRefBackendWorkloads_sources
     Resize.hpp
     Rsqrt.cpp
     Rsqrt.hpp
+    Slice.cpp
+    Slice.hpp
     Softmax.cpp
     Softmax.hpp
     SpaceToBatchNd.hpp
diff --git a/src/backends/reference/workloads/RefSliceWorkload.cpp b/src/backends/reference/workloads/RefSliceWorkload.cpp
new file mode 100644
index 0000000..2e44845
--- /dev/null
@@ -0,0 +1,29 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefSliceWorkload.hpp"
+
+#include "RefWorkloadUtils.hpp"
+#include "Slice.hpp"
+
+#include <Profiling.hpp>
+
+namespace armnn
+{
+
+void RefSliceWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSliceWorkload_Execute");
+
+    const TensorInfo& inputInfo  = GetTensorInfo(m_Data.m_Inputs[0]);
+
+    Slice(inputInfo,
+          m_Data.m_Parameters,
+          m_Data.m_Inputs[0]->Map(),
+          m_Data.m_Outputs[0]->Map(),
+          GetDataTypeSize(inputInfo.GetDataType()));
+}
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefSliceWorkload.hpp b/src/backends/reference/workloads/RefSliceWorkload.hpp
new file mode 100644
index 0000000..006c7b7
--- /dev/null
@@ -0,0 +1,22 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+class RefSliceWorkload : public BaseWorkload<SliceQueueDescriptor>
+{
+public:
+    using BaseWorkload<SliceQueueDescriptor>::BaseWorkload;
+
+    virtual void Execute() const override;
+};
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 1ec349e..959226a 100644
 #include "RefPadWorkload.hpp"
 #include "RefPreluWorkload.hpp"
 #include "RefQuantizeWorkload.hpp"
+#include "RefReshapeWorkload.hpp"
 #include "RefResizeBilinearWorkload.hpp"
 #include "RefResizeWorkload.hpp"
 #include "RefRsqrtWorkload.hpp"
-#include "RefReshapeWorkload.hpp"
+#include "RefSliceWorkload.hpp"
 #include "RefSplitterWorkload.hpp"
 #include "RefSoftmaxWorkload.hpp"
 #include "RefSpaceToBatchNdWorkload.hpp"
@@ -56,4 +57,4 @@
 #include "Resize.hpp"
 #include "Softmax.hpp"
 #include "Splitter.hpp"
-#include "TensorBufferArrayView.hpp"
\ No newline at end of file
+#include "TensorBufferArrayView.hpp"
diff --git a/src/backends/reference/workloads/Slice.cpp b/src/backends/reference/workloads/Slice.cpp
new file mode 100644
index 0000000..c7ca3b1
--- /dev/null
@@ -0,0 +1,95 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Slice.hpp"
+
+#include <boost/assert.hpp>
+#include <boost/core/ignore_unused.hpp>
+#include <boost/numeric/conversion/cast.hpp>
+
+namespace armnn
+{
+
+void Slice(const TensorInfo& inputInfo,
+           const SliceDescriptor& descriptor,
+           const void* inputData,
+           void* outputData,
+           unsigned int dataTypeSize)
+{
+    const TensorShape& inputShape = inputInfo.GetShape();
+    const unsigned int numDims    = inputShape.GetNumDimensions();
+
+    BOOST_ASSERT(descriptor.m_Begin.size() == numDims);
+    BOOST_ASSERT(descriptor.m_Size.size()  == numDims);
+
+    constexpr unsigned int maxNumDims = 4;
+    BOOST_ASSERT(numDims <= maxNumDims);
+
+    std::vector<unsigned int> paddedInput(4);
+    std::vector<unsigned int> paddedBegin(4);
+    std::vector<unsigned int> paddedSize (4);
+
+    const unsigned int numPaddingDims = maxNumDims - numDims;
+    for (unsigned int i = 0u; i < maxNumDims; ++i)
+    {
+        if (i < numPaddingDims)
+        {
+            paddedInput[i] = 1u;
+            paddedBegin[i] = 0u;
+            paddedSize[i]  = 1u;
+        }
+        else
+        {
+            const unsigned int j = i - numPaddingDims;
+            paddedInput[i] = inputShape[j];
+            paddedBegin[i] = descriptor.m_Begin[j];
+            paddedSize[i]  = descriptor.m_Size[j];
+        }
+    }
+
+    unsigned int dim0 = paddedInput[0];
+    unsigned int dim1 = paddedInput[1];
+    unsigned int dim2 = paddedInput[2];
+    unsigned int dim3 = paddedInput[3];
+
+    unsigned int begin0 = paddedBegin[0];
+    unsigned int begin1 = paddedBegin[1];
+    unsigned int begin2 = paddedBegin[2];
+    unsigned int begin3 = paddedBegin[3];
+
+    unsigned int size0  = paddedSize[0];
+    unsigned int size1  = paddedSize[1];
+    unsigned int size2  = paddedSize[2];
+    unsigned int size3  = paddedSize[3];
+
+    BOOST_ASSERT(begin0 + size0 <= dim0);
+    BOOST_ASSERT(begin1 + size1 <= dim1);
+    BOOST_ASSERT(begin2 + size2 <= dim2);
+    BOOST_ASSERT(begin3 + size3 <= dim3);
+
+    const unsigned char* input = reinterpret_cast<const unsigned char*>(inputData);
+    unsigned char* output      = reinterpret_cast<unsigned char*>(outputData);
+
+    boost::ignore_unused(dim0);
+    for (unsigned int idx0 = begin0; idx0 < begin0 + size0; ++idx0)
+    {
+        for (unsigned int idx1 = begin1; idx1 < begin1 + size1; ++idx1)
+        {
+            for (unsigned int idx2 = begin2; idx2 < begin2 + size2; ++idx2)
+            {
+                for (unsigned int idx3 = begin3; idx3 < begin3 + size3; ++idx3)
+                {
+                    const unsigned int inputOffset =
+                        (((idx0 * dim1 + idx1) * dim2 + idx2) * dim3 + idx3) * dataTypeSize;
+
+                    ::memcpy(output, input + inputOffset, dataTypeSize);
+                    output += dataTypeSize;
+                }
+            }
+        }
+    }
+}
+
+} // namespace armnn
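
As a worked example (editor's illustration) of the rank padding and offset
arithmetic above, take the 3d case from Slice3dTest: input shape { 2, 3, 5 },
begin { 0, 1, 2 }, size { 1, 2, 3 }, Float32 elements (4 bytes each):

    // Rank 3 is left-padded to rank 4 with leading 1s:
    // paddedInput = { 1, 2, 3, 5 }, paddedBegin = { 0, 0, 1, 2 }, paddedSize = { 1, 1, 2, 3 }
    unsigned int dim1 = 2u, dim2 = 3u, dim3 = 5u;
    unsigned int dataTypeSize = 4u;

    // Byte offset of the first copied element, idx = { 0, 0, 1, 2 }:
    unsigned int inputOffset = (((0u * dim1 + 0u) * dim2 + 1u) * dim3 + 2u) * dataTypeSize;
    // = 7 * 4 = 28, i.e. element 7.f - the first value in the expected output.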
diff --git a/src/backends/reference/workloads/Slice.hpp b/src/backends/reference/workloads/Slice.hpp
new file mode 100644
index 0000000..823f16c
--- /dev/null
@@ -0,0 +1,21 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "BaseIterator.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+namespace armnn
+{
+
+void Slice(const TensorInfo& inputInfo,
+           const SliceDescriptor& descriptor,
+           const void* inputData,
+           void* outputData,
+           unsigned int dataTypeSize);
+
+} // namespace armnn