From: Teresa Charlin Date: Mon, 1 Jul 2019 12:51:07 +0000 (+0100) Subject: IVGCVSW-3365 Add reference workload support for ResizeLayer X-Git-Tag: submit/tizen/20200316.035456~488 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=970f43b078eba91c66fb64eadbc9803661ffcda8;p=platform%2Fupstream%2Farmnn.git IVGCVSW-3365 Add reference workload support for ResizeLayer Signed-off-by: Teresa Charlin Change-Id: Id551690065dca0686ce597d1f0c14fd73163481e --- diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp index 1d0be5d..e7915dd 100644 --- a/src/backends/backendsCommon/WorkloadData.cpp +++ b/src/backends/backendsCommon/WorkloadData.cpp @@ -915,12 +915,12 @@ void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "output"); std::vector supportedTypes = - { - DataType::Float16, - DataType::Float32, - DataType::QuantisedAsymm8, - DataType::QuantisedSymm16 - }; + { + DataType::Float16, + DataType::Float32, + DataType::QuantisedAsymm8, + DataType::QuantisedSymm16 + }; ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], supportedTypes, @@ -931,29 +931,72 @@ void ResizeBilinearQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c "ResizeBilinearQueueDescriptor"); // Resizes bilinear only changes width and height: batch and channel count must match. + const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0]; + const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0]; + if (inputBatchSize != outputBatchSize) { - const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0]; - const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0]; - if (inputBatchSize != outputBatchSize) - { - throw InvalidArgumentException( - boost::str(boost::format("ResizeBilinearQueueDescriptor: Input batch size (%1%) " - "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize)); - } + throw InvalidArgumentException( + boost::str(boost::format("ResizeBilinearQueueDescriptor: Input batch size (%1%) " + "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize)); } + DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout); + const unsigned int inputChannelCount = + workloadInfo.m_InputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()]; + const unsigned int outputChannelCount = + workloadInfo.m_OutputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()]; + if (inputChannelCount != outputChannelCount) { - DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout); - const unsigned int inputChannelCount = + throw InvalidArgumentException( + boost::str(boost::format("ResizeBilinearQueueDescriptor: Input channel count (%1%) " + "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount)); + } +} + +void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const +{ + ValidateNumInputs(workloadInfo, "ResizeQueueDescriptor", 1); + ValidateNumOutputs(workloadInfo, "ResizeQueueDescriptor", 1); + + ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "ResizeQueueDescriptor", 4, "input"); + ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeQueueDescriptor", 4, "output"); + + std::vector supportedTypes = + { + DataType::Float16, + DataType::Float32, + DataType::QuantisedAsymm8, + 
DataType::QuantisedSymm16 + }; + + ValidateDataTypes(workloadInfo.m_InputTensorInfos[0], + supportedTypes, + "ResizeQueueDescriptor"); + + ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0], + {workloadInfo.m_InputTensorInfos[0].GetDataType()}, + "ResizeQueueDescriptor"); + + // Resizes only changes width and height: batch and channel count must match. + const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0]; + const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0]; + if (inputBatchSize != outputBatchSize) + { + throw InvalidArgumentException( + boost::str(boost::format("ResizeQueueDescriptor: Input batch size (%1%) " + "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize)); + } + + DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout); + const unsigned int inputChannelCount = workloadInfo.m_InputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()]; - const unsigned int outputChannelCount = + const unsigned int outputChannelCount = workloadInfo.m_OutputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()]; - if (inputChannelCount != outputChannelCount) - { - throw InvalidArgumentException( - boost::str(boost::format("ResizeBilinearQueueDescriptor: Input channel count (%1%) " - "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount)); - } + if (inputChannelCount != outputChannelCount) + { + throw InvalidArgumentException( + boost::str(boost::format("ResizeQueueDescriptor: Input channel count (%1%) " + "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount)); } } diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp index b225e4d..405ccff 100644 --- a/src/backends/backendsCommon/test/LayerTests.hpp +++ b/src/backends/backendsCommon/test/LayerTests.hpp @@ -873,7 +873,7 @@ LayerTestResult TanhInt16Test( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager); -/// Tests that the output should be identical to the input when the output dimensions match the input ones. +// Tests that the output should be identical to the input when the output dimensions match the input ones. template> LayerTestResult ResizeBilinearNopTest( armnn::IWorkloadFactory& workloadFactory, @@ -909,6 +909,42 @@ LayerTestResult ResizeBilinearMagTest( const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::DataLayout dataLayout); +// Tests that the output should be identical to the input when the output dimensions match the input ones. +template> +LayerTestResult ResizeNearestNeighborNopTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::DataLayout dataLayout); + +// Tests the behaviour of the resize NearestNeighbor operation when rescaling a 2x2 image into a 1x1 image. +template> +LayerTestResult SimpleResizeNearestNeighborTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::DataLayout dataLayout); + +// Tests the resize NearestNeighbor for minification of a square input matrix (also: input dimensions are a +// multiple of output dimensions). 
+template> +LayerTestResult ResizeNearestNeighborSqMinTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::DataLayout dataLayout); + +// Tests the resize NearestNeighbor for minification (output dimensions smaller than input dimensions). +template> +LayerTestResult ResizeNearestNeighborMinTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::DataLayout dataLayout); + +// Tests the resize NearestNeighbor for magnification (output dimensions bigger than input dimensions). +template> +LayerTestResult ResizeNearestNeighborMagTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::DataLayout dataLayout); + template> LayerTestResult Rsqrt2dTestCommon( armnn::IWorkloadFactory& workloadFactory, @@ -2927,6 +2963,486 @@ LayerTestResult ResizeBilinearMagTest( return result; } + +template +LayerTestResult ResizeNearestNeighborNopTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::DataLayout dataLayout) +{ + armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType() + ? armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType) + : armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType); + armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType() + ? armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType) + : armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType); + if (armnn::IsQuantizedType()) + { + inputTensorInfo.SetQuantizationScale(1.5f); + inputTensorInfo.SetQuantizationOffset(-3); + outputTensorInfo.SetQuantizationScale(1.5f); + outputTensorInfo.SetQuantizationOffset(-3); + } + + std::vector inputData = armnn::IsQuantizedType() + ? 
std::initializer_list + { + 1, 2, 3, 4, + 2, 3, 4, 5, + 3, 4, 5, 6, + 4, 5, 6, 7 + } + : std::initializer_list + { + 1.0f, 2.0f, 3.0f, 4.0f, + 2.0f, 3.0f, 4.0f, 5.0f, + 3.0f, 4.0f, 5.0f, 6.0f, + 4.0f, 5.0f, 6.0f, 7.0f, + + 1.0f, 2.0f, 3.0f, 4.0f, + 2.0f, 3.0f, 4.0f, 5.0f, + 3.0f, 4.0f, 5.0f, 6.0f, + 4.0f, 5.0f, 6.0f, 7.0f + }; + + const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; + if (dataLayout == armnn::DataLayout::NHWC) + { + std::vector tmp(inputData.size()); + armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float)); + inputData = tmp; + } + + auto input = MakeTensor(inputTensorInfo, QuantizedVector(inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset(), + inputData)); + + LayerTestResult result(outputTensorInfo); + result.outputExpected = input; + + std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::ResizeQueueDescriptor descriptor; + descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor; + descriptor.m_Parameters.m_DataLayout = dataLayout; + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr workload = workloadFactory.CreateResize(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->PostAllocationConfigure(); + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + return result; +} + +template +LayerTestResult SimpleResizeNearestNeighborTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::DataLayout dataLayout) +{ + armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType() + ? armnnUtils::GetTensorInfo(1, 1, 2, 2, dataLayout, ArmnnType) + : armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType); + armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType() + ? armnnUtils::GetTensorInfo(1, 1, 1, 1, dataLayout, ArmnnType) + : armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, ArmnnType); + + if (armnn::IsQuantizedType()) + { + inputTensorInfo.SetQuantizationScale(0.1567f); + inputTensorInfo.SetQuantizationOffset(1); + outputTensorInfo.SetQuantizationScale(0.1567f); + outputTensorInfo.SetQuantizationOffset(1); + } + + std::vector inputData = armnn::IsQuantizedType() + ? std::initializer_list + { + 1, 255, + 200, 250 + } + : std::initializer_list + { + 1.0f, 255.0f, + 200.0f, 250.0f, + + 250.0f, 200.0f, + 250.0f, 1.0f + }; + + // The 'resize' operation projects the top-left corner of output texels into the input image, + // then figures out the interpolants and weights. Note this is different to projecting the centre of the + // output texel. Thus, for a input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as + // its single element, the value that was at position (0,0) of the input matrix (rather than an average, + // which we would expect if projecting the centre). + + std::vector outputData = armnn::IsQuantizedType() + ? 
std::initializer_list + { + 1 + } + : std::initializer_list + { + 1.0f, + + 250.0f + }; + + const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; + if (dataLayout == armnn::DataLayout::NHWC) + { + std::vector tmp(inputData.size()); + armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float)); + inputData = tmp; + + std::vector tmp1(outputData.size()); + armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float)); + outputData = tmp1; + } + + auto input = MakeTensor(inputTensorInfo, QuantizedVector(inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset(), + inputData)); + + LayerTestResult result(outputTensorInfo); + result.outputExpected = MakeTensor(outputTensorInfo, + QuantizedVector(outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset(), + outputData)); + + std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::ResizeQueueDescriptor descriptor; + descriptor.m_Parameters.m_DataLayout = dataLayout; + descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor; + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr workload = workloadFactory.CreateResize(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->PostAllocationConfigure(); + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + return result; +} + +template +LayerTestResult ResizeNearestNeighborSqMinTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::DataLayout dataLayout) +{ + armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType() + ? armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType) + : armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType); + armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType() + ? armnnUtils::GetTensorInfo(1, 1, 2, 2, dataLayout, ArmnnType) + : armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType); + + if (armnn::IsQuantizedType()) + { + inputTensorInfo.SetQuantizationScale(3.141592f); + inputTensorInfo.SetQuantizationOffset(3); + outputTensorInfo.SetQuantizationScale(3.141592f); + outputTensorInfo.SetQuantizationOffset(3); + } + + std::vector inputData = armnn::IsQuantizedType() + ? std::initializer_list + { + 1, 2, 3, 4, + 2, 3, 4, 5, + 3, 4, 5, 6, + 4, 5, 6, 7 + } + : std::initializer_list + { + 1.0f, 2.0f, 3.0f, 4.0f, + 2.0f, 3.0f, 4.0f, 5.0f, + 3.0f, 4.0f, 5.0f, 6.0f, + 4.0f, 5.0f, 6.0f, 7.0f, + + 7.0f, 6.0f, 5.0f, 4.0f, + 6.0f, 5.0f, 4.0f, 3.0f, + 5.0f, 4.0f, 3.0f, 2.0f, + 4.0f, 3.0f, 2.0f, 1.0f + }; + + std::vector outputData = armnn::IsQuantizedType() + ? 
std::initializer_list + { + 1, 3, + 3, 5 + } + : std::initializer_list + { + 1.0f, 3.0f, + 3.0f, 5.0f, + + 7.0f, 5.0f, + 5.0f, 3.0f + }; + + const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; + if (dataLayout == armnn::DataLayout::NHWC) + { + std::vector tmp(inputData.size()); + armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float)); + inputData = tmp; + + std::vector tmp1(outputData.size()); + armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float)); + outputData = tmp1; + } + + auto input = MakeTensor(inputTensorInfo, QuantizedVector(inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset(), + inputData)); + + LayerTestResult result(outputTensorInfo); + result.outputExpected = MakeTensor(outputTensorInfo, + QuantizedVector(outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset(), + outputData)); + + std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::ResizeQueueDescriptor descriptor; + descriptor.m_Parameters.m_DataLayout = dataLayout; + descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor; + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr workload = workloadFactory.CreateResize(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->PostAllocationConfigure(); + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + return result; +} + +template +LayerTestResult ResizeNearestNeighborMinTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::DataLayout dataLayout) +{ + armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType() + ? armnnUtils::GetTensorInfo(1, 1, 2, 3, dataLayout, ArmnnType) + : armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, ArmnnType); + armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType() + ? armnnUtils::GetTensorInfo(1, 1, 1, 2, dataLayout, ArmnnType) + : armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, ArmnnType); + + if (armnn::IsQuantizedType()) + { + inputTensorInfo.SetQuantizationScale(1.5f); + inputTensorInfo.SetQuantizationOffset(-1); + outputTensorInfo.SetQuantizationScale(1.5f); + outputTensorInfo.SetQuantizationOffset(-1); + } + + std::vector inputData = armnn::IsQuantizedType() + ? std::initializer_list + { + 3.0f, 4.5f, 6.0f, // 1, 2, 3, : Expected quantised values + 9.0f, 13.5f, 21.0f // 5, 8, 13 + } + : std::initializer_list + { + 1.0f, 2.0f, 3.0f, 5.0f, 8.0f, + 13.0f, 21.0f, 34.0f, 55.0f, 89.0f, + 144.0f, 233.0f, 377.0f, 610.0f, 987.0f, + + 987.0f, 610.0f, 377.0f, 233.0f, 144.0f, + 89.0f, 55.0f, 34.0f, 21.0f, 13.0f, + 8.0f, 5.0f, 3.0f, 2.0f, 1.0f + }; + + std::vector outputData = armnn::IsQuantizedType() + ? 
std::initializer_list + { + 3.0f, 4.5f // 1, 3 + } + : std::initializer_list + { + 1.f, 2.f, 5.f, + 13.f, 21.f, 55.f, + + 987.f, 610.f, 233.f, + 89.f, 55.f, 21.f + }; + + const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; + if (dataLayout == armnn::DataLayout::NHWC) + { + std::vector tmp(inputData.size()); + armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float)); + inputData = tmp; + + std::vector tmp1(outputData.size()); + armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float)); + outputData = tmp1; + } + + auto input = MakeTensor(inputTensorInfo, QuantizedVector(inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset(), + inputData)); + + LayerTestResult result(outputTensorInfo); + result.outputExpected = MakeTensor(outputTensorInfo, + QuantizedVector(outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset(), + outputData)); + + std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::ResizeQueueDescriptor descriptor; + descriptor.m_Parameters.m_DataLayout = dataLayout; + descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor; + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr workload = workloadFactory.CreateResize(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->PostAllocationConfigure(); + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + return result; +} + +template +LayerTestResult ResizeNearestNeighborMagTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::DataLayout dataLayout) +{ + armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType() + ? armnnUtils::GetTensorInfo(1, 1, 3, 2, dataLayout, ArmnnType) + : armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, ArmnnType); + armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType() + ? armnnUtils::GetTensorInfo(1, 1, 3, 5, dataLayout, ArmnnType) + : armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, ArmnnType); + + if (armnn::IsQuantizedType()) + { + inputTensorInfo.SetQuantizationScale(0.010765f); + inputTensorInfo.SetQuantizationOffset(7); + outputTensorInfo.SetQuantizationScale(0.010132f); + outputTensorInfo.SetQuantizationOffset(-18); + } + + std::vector inputData = armnn::IsQuantizedType() + ? std::initializer_list + { + 0.183005f, 2.379065f, // 24, 228, : Expected quantised values + 1.05497f, 1.302565f, // 105, 128, + 2.400595f, 0.68896f // 230, 71 + } + : std::initializer_list + { + 1.0f, 2.0f, + 13.0f, 21.0f, + 144.0f, 233.0f, + + 233.0f, 144.0f, + 21.0f, 13.0f, + 2.0f, 1.0f + }; + std::vector outputData = armnn::IsQuantizedType() + ? 
std::initializer_list + { + 0.183005f, 0.183005f, 0.183005f, 2.379065f, 2.379065f, + 1.05497f, 1.05497f, 1.05497f, 1.302565f, 1.302565f, + 2.400595f, 2.400595f, 2.400595f, 0.68896f, 0.68896f + } + : std::initializer_list + { + 1.f, 1.f, 1.f, 2.f, 2.f, + 13.f, 13.f, 13.f, 21.f, 21.f, + 144.f, 144.f, 144.f, 233.f, 233.f, + + 233.f, 233.f, 233.f, 144.f, 144.f, + 21.f, 21.f, 21.f, 13.f, 13.f, + 2.f, 2.f, 2.f, 1.f, 1.f + }; + + const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 }; + if (dataLayout == armnn::DataLayout::NHWC) + { + std::vector tmp(inputData.size()); + armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float)); + inputData = tmp; + + std::vector tmp1(outputData.size()); + armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float)); + outputData = tmp1; + } + + auto input = MakeTensor(inputTensorInfo, QuantizedVector(inputTensorInfo.GetQuantizationScale(), + inputTensorInfo.GetQuantizationOffset(), + inputData)); + + LayerTestResult result(outputTensorInfo); + result.outputExpected = MakeTensor(outputTensorInfo, + QuantizedVector(outputTensorInfo.GetQuantizationScale(), + outputTensorInfo.GetQuantizationOffset(), + outputData)); + + std::unique_ptr inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo); + std::unique_ptr outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo); + + armnn::ResizeQueueDescriptor descriptor; + descriptor.m_Parameters.m_DataLayout = dataLayout; + descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor; + armnn::WorkloadInfo info; + AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get()); + AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get()); + + std::unique_ptr workload = workloadFactory.CreateResize(descriptor, info); + + inputHandle->Allocate(); + outputHandle->Allocate(); + CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]); + + workload->PostAllocationConfigure(); + workload->Execute(); + + CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get()); + return result; +} + + template LayerTestResult MeanTestHelper( armnn::IWorkloadFactory& workloadFactory, diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp index 429993a..b563bad 100644 --- a/src/backends/reference/RefLayerSupport.cpp +++ b/src/backends/reference/RefLayerSupport.cpp @@ -1239,11 +1239,11 @@ bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input, { bool supported = true; std::array supportedTypes = - { - DataType::Float32, - DataType::QuantisedAsymm8, - DataType::QuantisedSymm16 - }; + { + DataType::Float32, + DataType::QuantisedAsymm8, + DataType::QuantisedSymm16 + }; supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported, "Reference ResizeBilinear: input type not supported"); @@ -1257,6 +1257,31 @@ bool RefLayerSupport::IsResizeBilinearSupported(const TensorInfo& input, return supported; } +bool RefLayerSupport::IsResizeSupported(const TensorInfo& input, + const TensorInfo& output, + const ResizeDescriptor& descriptor, + Optional reasonIfUnsupported) const +{ + bool supported = true; + std::array supportedTypes = + { + DataType::Float32, + DataType::QuantisedAsymm8, + DataType::QuantisedSymm16 + }; + + supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported, + "Reference Resize: input type not supported"); + + supported &= CheckSupportRule(TypeAnyOf(output, 
supportedTypes), reasonIfUnsupported, + "Reference Resize: output type not supported"); + + supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported, + "Reference Resize: input and output types not matching"); + + return supported; +} + bool RefLayerSupport::IsRsqrtSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported) const diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp index 9c397fe..22b007b 100644 --- a/src/backends/reference/RefLayerSupport.hpp +++ b/src/backends/reference/RefLayerSupport.hpp @@ -222,6 +222,11 @@ public: const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; + bool IsResizeSupported(const TensorInfo& input, + const TensorInfo& output, + const ResizeDescriptor& descriptor, + Optional reasonIfUnsupported = EmptyOptional()) const override; + bool IsRsqrtSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const override; diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp index d906f93..95a4419 100644 --- a/src/backends/reference/RefWorkloadFactory.cpp +++ b/src/backends/reference/RefWorkloadFactory.cpp @@ -239,6 +239,16 @@ std::unique_ptr RefWorkloadFactory::CreateMemCopy(const MemCop return std::make_unique(descriptor, info); } +std::unique_ptr RefWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor, + const WorkloadInfo& info) const +{ + if (IsFloat16(info)) + { + return MakeWorkload(descriptor, info); + } + return std::make_unique(descriptor, info); +} + std::unique_ptr RefWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info) const { diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp index 44cb079..1a40259 100644 --- a/src/backends/reference/RefWorkloadFactory.hpp +++ b/src/backends/reference/RefWorkloadFactory.hpp @@ -106,6 +106,9 @@ public: std::unique_ptr CreateMemCopy(const MemCopyQueueDescriptor& descriptor, const WorkloadInfo& info) const override; + std::unique_ptr CreateResize(const ResizeQueueDescriptor& descriptor, + const WorkloadInfo& info) const override; + std::unique_ptr CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info) const override; diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk index 6fb17b5..7995654 100644 --- a/src/backends/reference/backend.mk +++ b/src/backends/reference/backend.mk @@ -54,6 +54,7 @@ BACKEND_SOURCES := \ workloads/RefQuantizeWorkload.cpp \ workloads/RefReshapeWorkload.cpp \ workloads/RefResizeBilinearWorkload.cpp \ + workloads/RefResizeWorkload.cpp \ workloads/RefRsqrtWorkload.cpp \ workloads/RefSoftmaxWorkload.cpp \ workloads/RefSpaceToBatchNdWorkload.cpp \ @@ -61,7 +62,7 @@ BACKEND_SOURCES := \ workloads/RefStridedSliceWorkload.cpp \ workloads/RefSplitterWorkload.cpp \ workloads/RefTransposeConvolution2dWorkload.cpp \ - workloads/ResizeBilinear.cpp \ + workloads/Resize.cpp \ workloads/Rsqrt.cpp \ workloads/SpaceToBatchNd.cpp \ workloads/SpaceToDepth.cpp \ diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp index 80d5319..7797f17 100644 --- a/src/backends/reference/test/RefLayerTests.cpp +++ b/src/backends/reference/test/RefLayerTests.cpp @@ -607,6 +607,100 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint16Nhwc, 
                     ResizeBilinearNopTest,
                     armnn::DataLayout::NHWC)
+// Resize NearestNeighbor - NCHW
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighbor,
+                     SimpleResizeNearestNeighborTest,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8,
+                     SimpleResizeNearestNeighborTest,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint16,
+                     SimpleResizeNearestNeighborTest,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNop,
+                     ResizeNearestNeighborNopTest,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8,
+                     ResizeNearestNeighborNopTest,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint16,
+                     ResizeNearestNeighborNopTest,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMin,
+                     ResizeNearestNeighborSqMinTest,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8,
+                     ResizeNearestNeighborSqMinTest,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint16,
+                     ResizeNearestNeighborSqMinTest,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMin,
+                     ResizeNearestNeighborMinTest,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8,
+                     ResizeNearestNeighborMinTest,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16,
+                     ResizeNearestNeighborMinTest,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag,
+                     ResizeNearestNeighborMagTest,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
+                     ResizeNearestNeighborMagTest,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint16,
+                     ResizeNearestNeighborMagTest,
+                     armnn::DataLayout::NCHW)
+
+// Resize NearestNeighbor - NHWC
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopNhwc,
+                     ResizeNearestNeighborNopTest,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc,
+                     ResizeNearestNeighborNopTest,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint16Nhwc,
+                     ResizeNearestNeighborNopTest,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborNhwc,
+                     SimpleResizeNearestNeighborTest,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc,
+                     SimpleResizeNearestNeighborTest,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint16Nhwc,
+                     SimpleResizeNearestNeighborTest,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinNhwc,
+                     ResizeNearestNeighborSqMinTest,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc,
+                     ResizeNearestNeighborSqMinTest,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint16Nhwc,
+                     ResizeNearestNeighborSqMinTest,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinNhwc,
+                     ResizeNearestNeighborMinTest,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc,
+                     ResizeNearestNeighborMinTest,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16Nhwc,
+                     ResizeNearestNeighborMinTest,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc,
+                     ResizeNearestNeighborMagTest,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
+                     ResizeNearestNeighborMagTest,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint16Nhwc,
+                     ResizeNearestNeighborMagTest,
+
armnn::DataLayout::NHWC) + // Fake Quantization ARMNN_AUTO_TEST_CASE(FakeQuantization, FakeQuantizationTest) diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt index 9be245b..3c0af01 100644 --- a/src/backends/reference/workloads/CMakeLists.txt +++ b/src/backends/reference/workloads/CMakeLists.txt @@ -96,6 +96,8 @@ list(APPEND armnnRefBackendWorkloads_sources RefReshapeWorkload.hpp RefResizeBilinearWorkload.cpp RefResizeBilinearWorkload.hpp + RefResizeWorkload.cpp + RefResizeWorkload.hpp RefRsqrtWorkload.cpp RefRsqrtWorkload.hpp RefSoftmaxWorkload.cpp @@ -112,8 +114,8 @@ list(APPEND armnnRefBackendWorkloads_sources RefTransposeConvolution2dWorkload.hpp RefWorkloads.hpp RefWorkloadUtils.hpp - ResizeBilinear.cpp - ResizeBilinear.hpp + Resize.cpp + Resize.hpp Rsqrt.cpp Rsqrt.hpp Softmax.cpp diff --git a/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp b/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp index 03fcec2..fc27c0f 100644 --- a/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp +++ b/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp @@ -6,7 +6,7 @@ #include "RefResizeBilinearWorkload.hpp" #include "RefWorkloadUtils.hpp" -#include "ResizeBilinear.hpp" +#include "Resize.hpp" #include "BaseIterator.hpp" #include "Profiling.hpp" @@ -29,7 +29,7 @@ void RefResizeBilinearWorkload::Execute() const std::unique_ptr> encoderPtr = MakeEncoder(outputInfo, m_Data.m_Outputs[0]->Map()); Encoder &encoder = *encoderPtr; - ResizeBilinear(decoder, inputInfo, encoder, outputInfo, m_Data.m_Parameters.m_DataLayout); + Resize(decoder, inputInfo, encoder, outputInfo, m_Data.m_Parameters.m_DataLayout, armnn::ResizeMethod::Bilinear); } } //namespace armnn diff --git a/src/backends/reference/workloads/RefResizeWorkload.cpp b/src/backends/reference/workloads/RefResizeWorkload.cpp new file mode 100644 index 0000000..26225f8 --- /dev/null +++ b/src/backends/reference/workloads/RefResizeWorkload.cpp @@ -0,0 +1,35 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "RefResizeWorkload.hpp" + +#include "RefWorkloadUtils.hpp" +#include "Resize.hpp" +#include "BaseIterator.hpp" +#include "Profiling.hpp" + +#include "BaseIterator.hpp" +#include "Decoders.hpp" +#include "Encoders.hpp" + +namespace armnn +{ + +void RefResizeWorkload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefResizeWorkload_Execute"); + + const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]); + const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]); + + std::unique_ptr> decoderPtr = MakeDecoder(inputInfo, m_Data.m_Inputs[0]->Map()); + Decoder &decoder = *decoderPtr; + std::unique_ptr> encoderPtr = MakeEncoder(outputInfo, m_Data.m_Outputs[0]->Map()); + Encoder &encoder = *encoderPtr; + + Resize(decoder, inputInfo, encoder, outputInfo, m_Data.m_Parameters.m_DataLayout, m_Data.m_Parameters.m_Method); +} + +} //namespace armnn diff --git a/src/backends/reference/workloads/RefResizeWorkload.hpp b/src/backends/reference/workloads/RefResizeWorkload.hpp new file mode 100644 index 0000000..1ddfcdf --- /dev/null +++ b/src/backends/reference/workloads/RefResizeWorkload.hpp @@ -0,0 +1,21 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#pragma once + +#include +#include + +namespace armnn +{ + +class RefResizeWorkload : public BaseWorkload +{ +public: + using BaseWorkload::BaseWorkload; + virtual void Execute() const override; +}; + +} //namespace armnn diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp index 3a094c8..4bdf05d 100644 --- a/src/backends/reference/workloads/RefWorkloads.hpp +++ b/src/backends/reference/workloads/RefWorkloads.hpp @@ -40,6 +40,7 @@ #include "RefPreluWorkload.hpp" #include "RefQuantizeWorkload.hpp" #include "RefResizeBilinearWorkload.hpp" +#include "RefResizeWorkload.hpp" #include "RefRsqrtWorkload.hpp" #include "RefReshapeWorkload.hpp" #include "RefSplitterWorkload.hpp" @@ -49,7 +50,7 @@ #include "RefSpaceToDepthWorkload.hpp" #include "RefTransposeConvolution2dWorkload.hpp" #include "RefWorkloadUtils.hpp" -#include "ResizeBilinear.hpp" +#include "Resize.hpp" #include "Softmax.hpp" #include "Splitter.hpp" #include "TensorBufferArrayView.hpp" \ No newline at end of file diff --git a/src/backends/reference/workloads/ResizeBilinear.cpp b/src/backends/reference/workloads/Resize.cpp similarity index 58% rename from src/backends/reference/workloads/ResizeBilinear.cpp rename to src/backends/reference/workloads/Resize.cpp index 70a0514..0e0bdd7 100644 --- a/src/backends/reference/workloads/ResizeBilinear.cpp +++ b/src/backends/reference/workloads/Resize.cpp @@ -3,7 +3,7 @@ // SPDX-License-Identifier: MIT // -#include "ResizeBilinear.hpp" +#include "Resize.hpp" #include "TensorBufferArrayView.hpp" @@ -25,13 +25,14 @@ inline float Lerp(float a, float b, float w) return w * b + (1.f - w) * a; } -} +}// anonymous namespace -void ResizeBilinear(Decoder& in, - const TensorInfo& inputInfo, - Encoder& out, - const TensorInfo& outputInfo, - DataLayoutIndexed dataLayout) +void Resize(Decoder& in, + const TensorInfo& inputInfo, + Encoder& out, + const TensorInfo& outputInfo, + DataLayoutIndexed dataLayout, + armnn::ResizeMethod resizeMethod) { // We follow the definition of TensorFlow and AndroidNN: the top-left corner of a texel in the output // image is projected into the input image to figure out the interpolants and weights. Note that this @@ -83,22 +84,43 @@ void ResizeBilinear(Decoder& in, const unsigned int x1 = std::min(x0 + 1, inputWidth - 1u); const unsigned int y1 = std::min(y0 + 1, inputHeight - 1u); - // Interpolation - in[dataLayout.GetIndex(inputShape, n, c, y0, x0)]; - float input1 = in.Get(); - in[dataLayout.GetIndex(inputShape, n, c, y0, x1)]; - float input2 = in.Get(); - in[dataLayout.GetIndex(inputShape, n, c, y1, x0)]; - float input3 = in.Get(); - in[dataLayout.GetIndex(inputShape, n, c, y1, x1)]; - float input4 = in.Get(); - - const float ly0 = Lerp(input1, input2, xw); // lerp along row y0. - const float ly1 = Lerp(input3, input4, xw); // lerp along row y1. - const float l = Lerp(ly0, ly1, yw); - + float interpolatedValue; + switch (resizeMethod) + { + case armnn::ResizeMethod::Bilinear: + { + in[dataLayout.GetIndex(inputShape, n, c, y0, x0)]; + float input1 = in.Get(); + in[dataLayout.GetIndex(inputShape, n, c, y0, x1)]; + float input2 = in.Get(); + in[dataLayout.GetIndex(inputShape, n, c, y1, x0)]; + float input3 = in.Get(); + in[dataLayout.GetIndex(inputShape, n, c, y1, x1)]; + float input4 = in.Get(); + + const float ly0 = Lerp(input1, input2, xw); // lerp along row y0. + const float ly1 = Lerp(input3, input4, xw); // lerp along row y1. 
+                    interpolatedValue = Lerp(ly0, ly1, yw);
+                    break;
+                }
+                case armnn::ResizeMethod::NearestNeighbor:
+                default:
+                {
+                    auto distance0 = std::sqrt(pow(fix - boost::numeric_cast<float>(x0), 2) +
+                                               pow(fiy - boost::numeric_cast<float>(y0), 2));
+                    auto distance1 = std::sqrt(pow(fix - boost::numeric_cast<float>(x1), 2) +
+                                               pow(fiy - boost::numeric_cast<float>(y1), 2));
+
+                    unsigned int xNearest = distance0 <= distance1? x0 : x1;
+                    unsigned int yNearest = distance0 <= distance1? y0 : y1;
+
+                    in[dataLayout.GetIndex(inputShape, n, c, yNearest, xNearest)];
+                    interpolatedValue = in.Get();
+                    break;
+                }
+            }
             out[dataLayout.GetIndex(outputShape, n, c, y, x)];
-            out.Set(l);
+            out.Set(interpolatedValue);
         }
     }
 }
diff --git a/src/backends/reference/workloads/Resize.hpp b/src/backends/reference/workloads/Resize.hpp
new file mode 100644
index 0000000..8bd8999
--- /dev/null
+++ b/src/backends/reference/workloads/Resize.hpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "BaseIterator.hpp"
+#include
+
+#include
+
+namespace armnn
+{
+
+void Resize(Decoder<float>&   in,
+            const TensorInfo& inputInfo,
+            Encoder<float>&   out,
+            const TensorInfo& outputInfo,
+            armnnUtils::DataLayoutIndexed dataLayout = DataLayout::NCHW,
+            ResizeMethod resizeMethod = ResizeMethod::NearestNeighbor);
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/ResizeBilinear.hpp b/src/backends/reference/workloads/ResizeBilinear.hpp
deleted file mode 100644
index ad2e487..0000000
--- a/src/backends/reference/workloads/ResizeBilinear.hpp
+++ /dev/null
@@ -1,22 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "BaseIterator.hpp"
-#include
-
-#include
-
-namespace armnn
-{
-
-void ResizeBilinear(Decoder<float>&   in,
-                    const TensorInfo& inputInfo,
-                    Encoder<float>&   out,
-                    const TensorInfo& outputInfo,
-                    armnnUtils::DataLayoutIndexed dataLayout = DataLayout::NCHW);
-
-} //namespace armnn
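
For reference, below is a minimal standalone sketch of the nearest-neighbour selection that the new Resize() path above performs. It assumes a plain contiguous float buffer for a single batch and channel rather than the armnn Decoder/Encoder iterators, and the helper name ResizeNearestNeighbor and the main() example are illustrative only, not part of the patch.

// Standalone sketch (assumption: single-channel float buffer, NCHW-style row-major layout).
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Resizes a single-channel float image from (inH x inW) to (outH x outW).
std::vector<float> ResizeNearestNeighbor(const std::vector<float>& input,
                                         unsigned int inH, unsigned int inW,
                                         unsigned int outH, unsigned int outW)
{
    std::vector<float> output(outH * outW);

    // Same convention as the patch: the top-left corner of each output texel is
    // projected back into the input image using scale = input size / output size.
    const float scaleY = static_cast<float>(inH) / static_cast<float>(outH);
    const float scaleX = static_cast<float>(inW) / static_cast<float>(outW);

    for (unsigned int y = 0; y < outH; ++y)
    {
        const float fiy = static_cast<float>(y) * scaleY;
        const unsigned int y0 = static_cast<unsigned int>(std::floor(fiy));
        const unsigned int y1 = std::min(y0 + 1, inH - 1u);

        for (unsigned int x = 0; x < outW; ++x)
        {
            const float fix = static_cast<float>(x) * scaleX;
            const unsigned int x0 = static_cast<unsigned int>(std::floor(fix));
            const unsigned int x1 = std::min(x0 + 1, inW - 1u);

            // Pick whichever of the two candidate corners lies closer to the projected
            // point, mirroring the distance comparison in the NearestNeighbor case above.
            const float d0 = std::hypot(fix - static_cast<float>(x0), fiy - static_cast<float>(y0));
            const float d1 = std::hypot(fix - static_cast<float>(x1), fiy - static_cast<float>(y1));
            const unsigned int xNearest = (d0 <= d1) ? x0 : x1;
            const unsigned int yNearest = (d0 <= d1) ? y0 : y1;

            output[y * outW + x] = input[yNearest * inW + xNearest];
        }
    }
    return output;
}

int main()
{
    // 2x2 -> 1x1, matching SimpleResizeNearestNeighborTest: the single output element
    // is the value at input position (0,0).
    const std::vector<float> in = { 1.0f,   255.0f,
                                    200.0f, 250.0f };
    const std::vector<float> out = ResizeNearestNeighbor(in, 2, 2, 1, 1);
    std::printf("%f\n", out[0]); // prints 1.000000
}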