From: David Monahan <david.monahan@arm.com>
Date: Wed, 19 Jun 2019 10:47:21 +0000 (+0100)
Subject: IVGCVSW-3235 Add scalar to use as padding value in Reference Pad
X-Git-Tag: submit/tizen/20200316.035456~506
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=34757810f8b734f5f59485a542b56934ad4cc5f0;p=platform%2Fupstream%2Farmnn.git

IVGCVSW-3235 Add scalar to use as padding value in Reference Pad

Signed-off-by: David Monahan <david.monahan@arm.com>
Change-Id: If050f318fcb7626bbfae1b8737a1d232a4a5a915
---

diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 2fda8c1..0655d42 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -606,11 +606,11 @@ struct MeanDescriptor
 /// A PadDescriptor for the PadLayer.
 struct PadDescriptor
 {
-    PadDescriptor()
+    PadDescriptor() : m_padValue(0)
     {}
 
-    PadDescriptor(const std::vector<std::pair<unsigned int, unsigned int>>& padList)
-    : m_PadList(padList)
+    PadDescriptor(const std::vector<std::pair<unsigned int, unsigned int>>& padList, const float& padValue = 0)
+    : m_PadList(padList), m_padValue(padValue)
     {}
 
     /// @brief Specifies the padding for input dimension.
@@ -618,6 +618,9 @@ struct PadDescriptor
     /// Second is the number of values to add after the tensor in the dimension.
     /// The number of pairs should match the number of dimensions in the input tensor.
     std::vector<std::pair<unsigned int, unsigned int>> m_PadList;
+
+    /// Optional value to use for padding, defaults to 0
+    float m_padValue;
 };
 
 /// A StridedSliceDescriptor for the StridedSliceLayer.
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp index 452ef8e..5372606 100644 --- a/src/armnnDeserializer/Deserializer.cpp +++ b/src/armnnDeserializer/Deserializer.cpp @@ -1390,6 +1390,7 @@ void Deserializer::ParsePad(GraphPtr graph, unsigned int layerIndex) auto flatBufferDescriptor = graph->layers()->Get(layerIndex)->layer_as_PadLayer()->descriptor(); auto flatBufferPadList = flatBufferDescriptor->padList(); + float padValue = flatBufferDescriptor->padValue(); if (flatBufferPadList->Length() % 2 != 0) { @@ -1404,7 +1405,7 @@ void Deserializer::ParsePad(GraphPtr graph, unsigned int layerIndex) padList.emplace_back(flatBufferPadList->Get(i), flatBufferPadList->Get(i+1)); } - armnn::PadDescriptor descriptor(padList); + armnn::PadDescriptor descriptor(padList, padValue); auto layerName = GetLayerName(graph, layerIndex); IConnectableLayer* layer = m_Network->AddPadLayer(descriptor, layerName.c_str()); diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs index 83275ca..7969d10 100644 --- a/src/armnnSerializer/ArmnnSchema.fbs +++ b/src/armnnSerializer/ArmnnSchema.fbs @@ -409,6 +409,7 @@ table PadLayer { table PadDescriptor { padList:[uint]; + padValue:float = 0; } table RsqrtLayer { diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp index 126247b..208262b 100644 --- a/src/armnnSerializer/Serializer.cpp +++ b/src/armnnSerializer/Serializer.cpp @@ -577,7 +577,8 @@ void SerializerVisitor::VisitPadLayer(const armnn::IConnectableLayer* layer, } auto flatBufferPadDesc = serializer::CreatePadDescriptor(m_flatBufferBuilder, - m_flatBufferBuilder.CreateVector(padList)); + m_flatBufferBuilder.CreateVector(padList), + padDescriptor.m_padValue); auto flatBufferPadLayer = serializer::CreatePadLayer(m_flatBufferBuilder, flatBufferBaseLayer, diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp index 8b1594e..294adec 100644 
--- a/src/armnnSerializer/test/SerializerTests.cpp +++ b/src/armnnSerializer/test/SerializerTests.cpp @@ -1712,6 +1712,7 @@ private: void VerifyDescriptor(const armnn::PadDescriptor& descriptor) { BOOST_TEST(descriptor.m_PadList == m_Descriptor.m_PadList); + BOOST_TEST(descriptor.m_padValue == m_Descriptor.m_padValue); } armnn::PadDescriptor m_Descriptor; diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp index d9ae546..c9a5731 100644 --- a/src/backends/backendsCommon/test/LayerTests.cpp +++ b/src/backends/backendsCommon/test/LayerTests.cpp @@ -5869,13 +5869,14 @@ LayerTestResult Pad2dTestCommon( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, float qScale, - int32_t qOffset) + int32_t qOffset, + const float customPaddingValue = 0) { const armnn::TensorShape inputShape{ 3, 3 }; const armnn::TensorShape outputShape{ 7, 7 }; - const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); + const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset); + const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset); std::vector inputValues( QuantizedVector(qScale, qOffset, @@ -5886,17 +5887,38 @@ LayerTestResult Pad2dTestCommon( 3, 2, 4 })); - std::vector expectedOutputValues( - QuantizedVector(qScale, qOffset, + const T padValue = ConvertToDataType(customPaddingValue, inputTensorInfo); + + std::vector expectedOutputValues; + if (padValue == 0) { - 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, - 0, 0, 4, 8, 6, 0, 0, - 0, 0, 7, 4, 4, 0, 0, - 0, 0, 3, 2, 4, 0, 0, - 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0 - })); + expectedOutputValues = ( + QuantizedVector(qScale, qOffset, + { + 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + 0, 0, 4, 8, 6, 0, 0, + 0, 0, 7, 4, 4, 0, 0, + 0, 0, 3, 2, 4, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0 + })); 
+ } + else + { + expectedOutputValues = ( + QuantizedVector(qScale, qOffset, + { + 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, + 1, 1, 4, 8, 6, 1, 1, + 1, 1, 7, 4, 4, 1, 1, + 1, 1, 3, 2, 4, 1, 1, + 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1 + })); + } + auto inputTensor = MakeTensor(inputTensorInfo, std::vector(inputValues)); @@ -5943,8 +5965,8 @@ LayerTestResult Pad3dTestCommon( const armnn::TensorShape inputShape{ 2, 2, 2 }; const armnn::TensorShape outputShape{ 3, 5, 6 }; - const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); + const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset); + const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset); std::vector inputValues( QuantizedVector(qScale,qOffset, @@ -6028,8 +6050,8 @@ LayerTestResult Pad4dTestCommon( const armnn::TensorShape inputShape{ 2, 2, 3, 2 }; const armnn::TensorShape outputShape{ 4, 5, 7, 4 }; - const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType); - const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType); + const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset); + const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset); std::vector inputValues( QuantizedVector(qScale,qOffset, @@ -6263,6 +6285,13 @@ LayerTestResult PadUint82dTest( return Pad2dTestCommon(workloadFactory, memoryManager, 1.0f, 0); } +LayerTestResult PadUint82dCustomPaddingTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) +{ + return Pad2dTestCommon(workloadFactory, memoryManager, 1.0f, 0); +} + LayerTestResult PadUint83dTest( armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager) @@ -6284,6 +6313,13 @@ LayerTestResult PadFloat322dTest( return Pad2dTestCommon(workloadFactory, memoryManager, 0.0f, 0); } 
+LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+}
+
 LayerTestResult<float, 3> PadFloat323dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 25ccfa0..be16819 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -1382,6 +1382,10 @@ LayerTestResult<uint8_t, 2> PadUint82dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
+LayerTestResult<uint8_t, 2> PadUint82dCustomPaddingTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
 LayerTestResult<uint8_t, 3> PadUint83dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -1394,6 +1398,10 @@ LayerTestResult<float, 2> PadFloat322dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
+LayerTestResult<float, 2> PadFloat322dCustomPaddingTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
 LayerTestResult<float, 3> PadFloat323dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -2022,6 +2030,18 @@ std::vector<T> ConvertToDataType(const std::vector<float>& input,
     return output;
 }
 
+// Utility method to convert a single value to the correct type
+template<typename T>
+T ConvertToDataType(const float& value,
+                    const armnn::TensorInfo& tensorInfo)
+{
+    std::vector<T> output(1);
+    std::unique_ptr<armnn::Encoder<float>> pEncoder = armnn::MakeEncoder<float>(tensorInfo, output.data());
+    armnn::Encoder<float>& rEncoder = *pEncoder;
+ rEncoder.Set(value); + return output[0]; +} + template LayerTestResult Rsqrt2dTestCommon( armnn::IWorkloadFactory& workloadFactory, diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp index cb9ee4b..9cb8d13 100644 --- a/src/backends/reference/test/RefLayerTests.cpp +++ b/src/backends/reference/test/RefLayerTests.cpp @@ -645,10 +645,12 @@ ARMNN_AUTO_TEST_CASE(L2NormalizationNonDefaultEpsilon, L2NormalizationNonDefault // Pad ARMNN_AUTO_TEST_CASE(PadFloat322d, PadFloat322dTest) +ARMNN_AUTO_TEST_CASE(PadFloat322dCustomPadding, PadFloat322dCustomPaddingTest) ARMNN_AUTO_TEST_CASE(PadFloat323d, PadFloat323dTest) ARMNN_AUTO_TEST_CASE(PadFloat324d, PadFloat324dTest) ARMNN_AUTO_TEST_CASE(PadUint82d, PadUint82dTest) +ARMNN_AUTO_TEST_CASE(PadUint82dCustomPadding, PadUint82dCustomPaddingTest) ARMNN_AUTO_TEST_CASE(PadUint83d, PadUint83dTest) ARMNN_AUTO_TEST_CASE(PadUint84d, PadUint84dTest) diff --git a/src/backends/reference/workloads/Pad.cpp b/src/backends/reference/workloads/Pad.cpp index 7a928a1..1e58124 100644 --- a/src/backends/reference/workloads/Pad.cpp +++ b/src/backends/reference/workloads/Pad.cpp @@ -5,8 +5,10 @@ #include "Pad.hpp" #include "backendsCommon/WorkloadData.hpp" -#include #include "TensorBufferArrayView.hpp" +#include "Encoders.hpp" + +#include #include #include #include @@ -15,12 +17,25 @@ namespace armnn { + +template +T ConvertToDataType(const float& value, + const armnn::TensorInfo& tensorInfo) +{ + std::vector output(1); + std::unique_ptr> pEncoder = armnn::MakeEncoder(tensorInfo, output.data()); + armnn::Encoder& rEncoder = *pEncoder; + rEncoder.Set(value); + return output[0]; +} + template void Pad(const TensorInfo& inputInfo, const TensorInfo& outputInfo, - std::vector> m_PadList, + std::vector> m_padList, const T* inputData, - T* outData) + T* outData, + const float padValue) { unsigned int numOutputElements = outputInfo.GetNumElements(); @@ -45,9 +60,11 @@ void Pad(const TensorInfo& 
inputInfo, unsigned int outputHeight = 0; unsigned int outputWidth = 0; + T convertedPadValue = ConvertToDataType(padValue, inputInfo); + for (unsigned int i = 0; i < numOutputElements; ++i) { - outData[i] = 0; + outData[i] = convertedPadValue; } switch(numInputDimensions) { @@ -58,7 +75,7 @@ void Pad(const TensorInfo& inputInfo, for (unsigned int w = 0; w < inputWidth ; w++) { - outData[w+std::get<0>(m_PadList[0])] = inputData[w]; + outData[w+std::get<0>(m_padList[0])] = inputData[w]; } break; @@ -74,8 +91,8 @@ void Pad(const TensorInfo& inputInfo, { for (unsigned int w = 0; w < inputWidth ; w++) { - outData[(h+std::get<0>(m_PadList[0]))*outputWidth - + (w+std::get<0>(m_PadList[1]))] = inputData[h * inputWidth + w]; + outData[(h+std::get<0>(m_padList[0]))*outputWidth + + (w+std::get<0>(m_padList[1]))] = inputData[h * inputWidth + w]; } } @@ -96,9 +113,9 @@ void Pad(const TensorInfo& inputInfo, { for (unsigned int w = 0; w < inputWidth ; w++) { - outData[(c+std::get<0>(m_PadList[0]))*outputHeight*outputWidth - + (h+std::get<0>(m_PadList[1]))*outputWidth - + (w+std::get<0>(m_PadList[2]))] = inputData[c * inputHeight * inputWidth + outData[(c+std::get<0>(m_padList[0]))*outputHeight*outputWidth + + (h+std::get<0>(m_padList[1]))*outputWidth + + (w+std::get<0>(m_padList[2]))] = inputData[c * inputHeight * inputWidth + h * inputWidth + w]; } @@ -125,10 +142,10 @@ void Pad(const TensorInfo& inputInfo, { for (unsigned int w = 0; w < inputWidth ; w++) { - outData[(b+std::get<0>(m_PadList[0])) * outputChannels * outputHeight * outputWidth - + (c+std::get<0>(m_PadList[1])) * outputHeight * outputWidth - + (h+std::get<0>(m_PadList[2])) * outputWidth - + (w+std::get<0>(m_PadList[3]))] = inputData[b * inputChannels * inputHeight + outData[(b+std::get<0>(m_padList[0])) * outputChannels * outputHeight * outputWidth + + (c+std::get<0>(m_padList[1])) * outputHeight * outputWidth + + (h+std::get<0>(m_padList[2])) * outputWidth + + (w+std::get<0>(m_padList[3]))] = inputData[b * 
inputChannels * inputHeight * inputWidth + c * inputHeight * inputWidth + h * inputWidth @@ -150,11 +167,13 @@ template void Pad(const TensorInfo& inputInfo, const TensorInfo& outputInfo, std::vector> m_PadList, const float* inputData, - float* outData); + float* outData, + const float padValue); template void Pad(const TensorInfo& inputInfo, const TensorInfo& outputInfo, std::vector> m_PadList, const uint8_t* inputData, - uint8_t* outData); + uint8_t* outData, + const float padValue); } //namespace armnn \ No newline at end of file diff --git a/src/backends/reference/workloads/Pad.hpp b/src/backends/reference/workloads/Pad.hpp index 42318d6..4297185 100644 --- a/src/backends/reference/workloads/Pad.hpp +++ b/src/backends/reference/workloads/Pad.hpp @@ -15,7 +15,8 @@ namespace armnn template void Pad(const TensorInfo& inputInfo, const TensorInfo& outputInfo, - std::vector> m_PadList, + std::vector> m_padList, const T* inputData, - T* outData); + T* outData, + const float padValue); } //namespace armnn diff --git a/src/backends/reference/workloads/RefPadWorkload.cpp b/src/backends/reference/workloads/RefPadWorkload.cpp index 16032e7..8cb9d88 100644 --- a/src/backends/reference/workloads/RefPadWorkload.cpp +++ b/src/backends/reference/workloads/RefPadWorkload.cpp @@ -30,8 +30,7 @@ void RefPadWorkload::Execute() const const T* inputData = GetInputTensorData(0, m_Data); T* outputData = GetOutputTensorData(0, m_Data); - - Pad(inputInfo, outputInfo, m_Data.m_Parameters.m_PadList, inputData, outputData); + Pad(inputInfo, outputInfo, m_Data.m_Parameters.m_PadList, inputData, outputData, m_Data.m_Parameters.m_padValue); } template class RefPadWorkload;