TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
CHECK_VALID_SIZE(outputs.size(), 1);
+ armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
+
armnn::TensorInfo padTensorInfo = ToTensorInfo(inputs[1]);
BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[1]->buffer);

// Decode the flattened (before, after) padding pairs from the pad-list buffer.
std::vector<unsigned int> padBuffer(padTensorInfo.GetNumElements());
::memcpy(padBuffer.data(), bufferPtr->data.data(), padTensorInfo.GetSizeInBytes());
size_t step = 2;
armnn::PadDescriptor desc;
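+ // For quantized tensors, pad with the zero point so the padded values dequantize to 0.0f.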
+ if (inputTensorInfo.IsQuantized())
+ {
+ desc.m_PadValue = static_cast<float>(inputTensorInfo.GetQuantizationOffset());
+ }
for (unsigned int i = 0; i < padTensorInfo.GetNumElements() / step; ++i)
{
desc.m_PadList.emplace_back(padBuffer[i * step], padBuffer[i * step + 1]);
}
struct PadFixture : public ParserFlatbuffersFixture
{
- explicit PadFixture(const std::string & inputShape,
- const std::string & outputShape,
- const std::string & padListShape,
- const std::string & padListData)
+ explicit PadFixture(const std::string& inputShape,
+ const std::string& outputShape,
+ const std::string& padListShape,
+ const std::string& padListData,
+ const std::string& dataType = "FLOAT32",
+ const std::string& scale = "1.0",
+ const std::string& offset = "0")
{
m_JsonString = R"(
{
"tensors": [
{
"shape": )" + inputShape + R"(,
- "type": "FLOAT32",
+ "type": )" + dataType + R"(,
"buffer": 0,
"name": "inputTensor",
"quantization": {
"min": [ 0.0 ],
"max": [ 255.0 ],
- "scale": [ 1.0 ],
- "zero_point": [ 0 ],
+ "scale": [ )" + scale + R"( ],
+ "zero_point": [ )" + offset + R"( ],
}
},
{
"shape": )" + outputShape + R"(,
- "type": "FLOAT32",
+ "type": )" + dataType + R"(,
"buffer": 1,
"name": "outputTensor",
"quantization": {
"min": [ 0.0 ],
"max": [ 255.0 ],
- "scale": [ 1.0 ],
- "zero_point": [ 0 ],
+ "scale": [ )" + scale + R"( ],
+ "zero_point": [ )" + offset + R"( ],
}
},
{
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f }}});
}
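+
+// The pad-list bytes decode to little-endian int32 pairs {1, 1} and {2, 2}:
+// one row of padding before and after, two columns before and after.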
+struct Uint8PadFixture : public PadFixture
+{
+ Uint8PadFixture() : PadFixture("[ 2, 3 ]", "[ 4, 7 ]", "[ 2, 2 ]",
+ "[ 1,0,0,0, 1,0,0,0, 2,0,0,0, 2,0,0,0 ]",
+ "UINT8", "-2.0", "3") {}
+};
+
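+// The padded border must be filled with the zero point (3), which dequantizes to 0.0f.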
+BOOST_FIXTURE_TEST_CASE(ParsePadUint8, Uint8PadFixture)
+{
+ RunTest<2, armnn::DataType::QAsymmU8>
+ (0,
+ {{ "inputTensor", { 1, 2, 3, 4, 5, 6 }}},
+ {{ "outputTensor", { 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 1, 2, 3, 3, 3,
+ 3, 3, 4, 5, 6, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3 }}});
+}
+
+struct Int8PadFixture : public PadFixture
+{
+ Int8PadFixture() : PadFixture("[ 2, 3 ]", "[ 4, 7 ]", "[ 2, 2 ]",
+ "[ 1,0,0,0, 1,0,0,0, 2,0,0,0, 2,0,0,0 ]",
+ "INT8", "-2.0", "3") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParsePadInt8, Int8PadFixture)
+{
+ RunTest<2, armnn::DataType::QAsymmS8>
+ (0,
+ {{ "inputTensor", { 1, -2, 3, 4, 5, -6 }}},
+ {{ "outputTensor", { 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 1, -2, 3, 3, 3,
+ 3, 3, 4, 5, -6, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3 }}});
+}
+
BOOST_AUTO_TEST_SUITE_END()
return result;
}
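+
+// Runs Pad on a 3x3 quantized tensor, producing a 7x7 output whose border is
+// expected to hold the raw quantized padding value.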
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> PadQAsymmTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ float qScale,
+ int32_t qOffset,
+ const float customPaddingValue)
+{
+ IgnoreUnused(memoryManager);
+ const armnn::TensorShape inputShape{ 3, 3 };
+ const armnn::TensorShape outputShape{ 7, 7 };
+
+ const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+ const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
+
+ std::vector<T> inputValues =
+ {
+ // Height (3) x Width (3)
+ 4, 8, 6,
+ 7, 4, 4,
+ 3, 2, 4
+ };
+
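+ // The expected border value is the custom padding value cast directly to the quantized type.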
+ T p = static_cast<T>(customPaddingValue);
+ std::vector<T> expectedOutputValues =
+ {
+ p, p, p, p, p, p, p,
+ p, p, p, p, p, p, p,
+ p, p, 4, 8, 6, p, p,
+ p, p, 7, 4, 4, p, p,
+ p, p, 3, 2, 4, p, p,
+ p, p, p, p, p, p, p,
+ p, p, p, p, p, p, p
+ };
+
+ auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
+
+ LayerTestResult<T, 2> result(outputTensorInfo);
+ result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::PadQueueDescriptor descriptor;
+
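+ // Pad two elements before and after each of the two dimensions: 3x3 -> 7x7.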
+ std::vector<std::pair<unsigned int, unsigned int>> padList;
+ padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+ padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+
+ descriptor.m_Parameters.m_PadList = padList;
+ descriptor.m_Parameters.m_PadValue = customPaddingValue;
+ armnn::WorkloadInfo info;
+
+ AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
+
+ workload->PostAllocationConfigure();
+ workload->Execute();
+
+ CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
+
+ return result;
+}
+
//
// Explicit template specializations
//
float qScale,
int32_t qOffset);
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 2>
+PadQAsymmTestCommon<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ float qScale,
+ int32_t qOffset,
+ const float customPaddingValue);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
+PadQAsymmTestCommon<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ float qScale,
+ int32_t qOffset,
+ const float customPaddingValue);
+
//
// Implementation functions
//
{
return Pad4dTestCommon<armnn::DataType::QSymmS8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}
+
+LayerTestResult<int8_t, 2> PadInt82dAsymmTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadQAsymmTestCommon<armnn::DataType::QAsymmS8>(
+ workloadFactory, memoryManager, tensorHandleFactory, 2.0f, 2);
+}
+
+LayerTestResult<int8_t, 2> PadInt82dCustomPaddingAsymmTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return PadQAsymmTestCommon<armnn::DataType::QAsymmS8>(
+ workloadFactory, memoryManager, tensorHandleFactory, 2.0f, 3, 1.0f);
+}
float qScale,
int32_t qOffset);
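+
+// Common 2D Pad test for the asymmetrically quantized types; customPaddingValue
+// is interpreted as a raw quantized value (default 0).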
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> PadQAsymmTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ float qScale,
+ int32_t qOffset,
+ const float customPaddingValue = 0.0f);
+
LayerTestResult<uint8_t, 2> PadUint82dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
LayerTestResult<int8_t, 4> PadInt84dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int8_t, 2> PadInt82dAsymmTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+LayerTestResult<int8_t, 2> PadInt82dCustomPaddingAsymmTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory);
\ No newline at end of file
ARMNN_AUTO_TEST_CASE_WITH_THF(PadUint82dCustomPadding, PadUint82dCustomPaddingTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(PadUint83d, PadUint83dTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(PadUint84d, PadUint84dTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadQAsymmU8, PadQAsymmTestCommon<DataType::QAsymmU8>, -2.0f, 3, 0.0f)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadQAsymmU8CustomPadding, PadQAsymmTestCommon<DataType::QAsymmU8>, -2.0f, 3, 2.0f)
ARMNN_AUTO_TEST_CASE_WITH_THF(Pad2dQSymm16, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 0.0f)
ARMNN_AUTO_TEST_CASE_WITH_THF(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 1.0f)
ARMNN_AUTO_TEST_CASE_WITH_THF(PadInt82dCustomPadding, PadInt82dCustomPaddingTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(PadInt83d, PadInt83dTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(PadInt84d, PadInt84dTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadQAsymmS8, PadQAsymmTestCommon<DataType::QAsymmS8>, -2.0f, 3, 0.0f)
+ARMNN_AUTO_TEST_CASE_WITH_THF(PadQAsymmS8CustomPadding, PadQAsymmTestCommon<DataType::QAsymmS8>, -2.0f, 3, 2.0f)
// Constant
ARMNN_AUTO_TEST_CASE_WITH_THF(Constant, ConstantTest)