From c81855f6ca52eb025a303b95eee7a12a5e2f9557 Mon Sep 17 00:00:00 2001
From: Derek Lamberti <derek.lamberti@arm.com>
Date: Thu, 13 Jun 2019 17:34:19 +0100
Subject: [PATCH] IVGCVSW-3278 Cl and Neon TensorHandles inherit from common
 base interface

Change-Id: Ia68da09d8f0fb0a04af9cb61062d7edaa5f1b887
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
---
 src/backends/aclCommon/ArmComputeTensorHandle.hpp  | 23 ++++++
 src/backends/aclCommon/CMakeLists.txt              |  1 +
 src/backends/cl/ClTensorHandle.hpp                 |  3 +-
 src/backends/neon/NeonTensorHandle.hpp             | 16 ++---
 src/backends/neon/NeonWorkloadFactory.cpp          |  2 +-
 src/backends/neon/test/NeonCreateWorkloadTests.cpp | 84 +++++++++++-----------
 .../neon/workloads/NeonActivationWorkload.cpp      |  4 +-
 .../neon/workloads/NeonAdditionWorkload.cpp        |  6 +-
 .../workloads/NeonBatchNormalizationWorkload.cpp   |  4 +-
 src/backends/neon/workloads/NeonConcatWorkload.cpp |  4 +-
 .../neon/workloads/NeonConvolution2dWorkload.cpp   |  4 +-
 .../workloads/NeonDepthwiseConvolutionWorkload.cpp |  4 +-
 .../neon/workloads/NeonDequantizeWorkload.cpp      |  4 +-
 .../neon/workloads/NeonFloorFloatWorkload.cpp      |  4 +-
 .../neon/workloads/NeonFullyConnectedWorkload.cpp  |  4 +-
 .../neon/workloads/NeonGreaterWorkload.cpp         |  6 +-
 .../workloads/NeonL2NormalizationFloatWorkload.cpp |  4 +-
 .../neon/workloads/NeonLstmFloatWorkload.cpp       | 12 ++--
 .../neon/workloads/NeonMaximumWorkload.cpp         |  6 +-
 src/backends/neon/workloads/NeonMeanWorkload.cpp   |  4 +-
 .../neon/workloads/NeonMinimumWorkload.cpp         |  6 +-
 .../neon/workloads/NeonMultiplicationWorkload.cpp  |  6 +-
 .../workloads/NeonNormalizationFloatWorkload.cpp   |  4 +-
 src/backends/neon/workloads/NeonPadWorkload.cpp    |  4 +-
 .../neon/workloads/NeonPermuteWorkload.cpp         |  4 +-
 .../neon/workloads/NeonPooling2dWorkload.cpp       |  4 +-
 .../neon/workloads/NeonQuantizeWorkload.cpp        |  4 +-
 .../neon/workloads/NeonReshapeWorkload.cpp         |  4 +-
 .../neon/workloads/NeonResizeBilinearWorkload.cpp  |  4 +-
 .../neon/workloads/NeonSoftmaxFloatWorkload.cpp    |  4 +-
 .../neon/workloads/NeonSoftmaxUint8Workload.cpp    |  4 +-
 .../neon/workloads/NeonSplitterWorkload.cpp        |  4 +-
 .../neon/workloads/NeonSubtractionWorkload.cpp     |  6 +-
 33 files changed, 137 insertions(+), 120 deletions(-)
 create mode 100644 src/backends/aclCommon/ArmComputeTensorHandle.hpp
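
Below is a minimal sketch (not part of the patch) of what the common base
buys us: both the Cl and Neon handle types now implement
armnn::IAclTensorHandle, so backend-agnostic code can reach the underlying
arm_compute::ITensor through one interface instead of branching per backend.
The LogTensorShape helper is hypothetical and exists only to illustrate the
idea.

    // Illustrative sketch only -- not part of this patch.
    #include <aclCommon/ArmComputeTensorHandle.hpp> // armnn::IAclTensorHandle

    #include <arm_compute/core/ITensor.h>
    #include <arm_compute/core/ITensorInfo.h>

    #include <iostream>

    namespace armnn
    {

    // Hypothetical helper: accepts Cl and Neon tensor handles alike,
    // because both now derive from IAclTensorHandle.
    void LogTensorShape(IAclTensorHandle& handle)
    {
        const arm_compute::ITensorInfo* info = handle.GetTensor().info();
        for (size_t i = 0; i < info->num_dimensions(); ++i)
        {
            std::cout << info->dimension(i)
                      << ((i + 1 < info->num_dimensions()) ? "x" : "\n");
        }
    }

    } // namespace armnn
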
diff --git a/src/backends/aclCommon/ArmComputeTensorHandle.hpp b/src/backends/aclCommon/ArmComputeTensorHandle.hpp
new file mode 100644
index 0000000..30710ba
--- /dev/null
+++ b/src/backends/aclCommon/ArmComputeTensorHandle.hpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/ITensorHandle.hpp>
+#include <arm_compute/runtime/IMemoryGroup.h>
+
+namespace armnn
+{
+
+class IAclTensorHandle : public ITensorHandle
+{
+public:
+    virtual arm_compute::ITensor& GetTensor() = 0;
+    virtual arm_compute::ITensor const& GetTensor() const = 0;
+    virtual arm_compute::DataType GetDataType() const = 0;
+    virtual void SetMemoryGroup(const std::shared_ptr<arm_compute::IMemoryGroup>& memoryGroup) = 0;
+};
+
+} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/aclCommon/CMakeLists.txt b/src/backends/aclCommon/CMakeLists.txt
index 933d7ab..d518c5e 100644
--- a/src/backends/aclCommon/CMakeLists.txt
+++ b/src/backends/aclCommon/CMakeLists.txt
@@ -4,6 +4,7 @@
 #
 
 list(APPEND armnnAclCommon_sources
+    ArmComputeTensorHandle.hpp
     ArmComputeTensorUtils.hpp
     ArmComputeTensorUtils.cpp
     ArmComputeUtils.hpp
diff --git a/src/backends/cl/ClTensorHandle.hpp b/src/backends/cl/ClTensorHandle.hpp
index 59a6bee..c0773a4 100644
--- a/src/backends/cl/ClTensorHandle.hpp
+++ b/src/backends/cl/ClTensorHandle.hpp
@@ -5,6 +5,7 @@
 #pragma once
 
 #include <backendsCommon/ITensorHandle.hpp>
+#include <aclCommon/ArmComputeTensorHandle.hpp>
 
 #include <arm_compute/runtime/CL/CLTensor.h>
 #include <arm_compute/runtime/CL/CLSubTensor.h>
@@ -22,7 +23,7 @@
 namespace armnn
 {
 
-class IClTensorHandle : public ITensorHandle
+class IClTensorHandle : public IAclTensorHandle
 {
 public:
     virtual arm_compute::ICLTensor& GetTensor() = 0;
diff --git a/src/backends/neon/NeonTensorHandle.hpp b/src/backends/neon/NeonTensorHandle.hpp
index b972043..3bbba78 100644
--- a/src/backends/neon/NeonTensorHandle.hpp
+++ b/src/backends/neon/NeonTensorHandle.hpp
@@ -5,6 +5,7 @@
 #pragma once
 
 #include <backendsCommon/ITensorHandle.hpp>
+#include <aclCommon/ArmComputeTensorHandle.hpp>
 
 #include <arm_compute/runtime/MemoryGroup.h>
 #include <arm_compute/runtime/IMemoryGroup.h>
@@ -19,16 +20,7 @@
 namespace armnn
 {
 
-class INeonTensorHandle : public ITensorHandle
-{
-public:
-    virtual arm_compute::ITensor& GetTensor() = 0;
-    virtual arm_compute::ITensor const& GetTensor() const = 0;
-    virtual arm_compute::DataType GetDataType() const = 0;
-    virtual void SetMemoryGroup(const std::shared_ptr<arm_compute::IMemoryGroup>& memoryGroup) = 0;
-};
-
-class NeonTensorHandle : public INeonTensorHandle
+class NeonTensorHandle : public IAclTensorHandle
 {
 public:
     NeonTensorHandle(const TensorInfo& tensorInfo)
@@ -131,10 +123,10 @@ private:
     std::shared_ptr<arm_compute::MemoryGroup> m_MemoryGroup;
 };
 
-class NeonSubTensorHandle : public INeonTensorHandle
+class NeonSubTensorHandle : public IAclTensorHandle
 {
 public:
-    NeonSubTensorHandle(INeonTensorHandle* parent,
+    NeonSubTensorHandle(IAclTensorHandle* parent,
                         const arm_compute::TensorShape& shape,
                         const arm_compute::Coordinates& coords)
     : m_Tensor(&parent->GetTensor(), shape, coords)
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index f44ce41..d784a48 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -68,7 +68,7 @@ std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateSubTensorHandle(ITenso
     }
 
     return std::make_unique<NeonSubTensorHandle>(
-        boost::polymorphic_downcast<INeonTensorHandle*>(&parent), shape, coords);
+        boost::polymorphic_downcast<IAclTensorHandle*>(&parent), shape, coords);
 }
 
 std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index 523588c..320ea69 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -19,7 +19,7 @@ BOOST_AUTO_TEST_SUITE(CreateWorkloadNeon)
 namespace
 {
 
-bool TestNeonTensorHandleInfo(armnn::INeonTensorHandle* handle, const armnn::TensorInfo& expectedInfo)
+bool TestNeonTensorHandleInfo(armnn::IAclTensorHandle* handle, const armnn::TensorInfo& expectedInfo)
 {
     using namespace armnn::armcomputetensorutils;
@@ -65,8 +65,8 @@ static void NeonCreateActivationWorkloadTest()
     // Checks that inputs/outputs are as we expect them (see definition of CreateActivationWorkloadTest).
     ActivationQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle  = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({1, 1}, DataType)));
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 1}, DataType)));
 }
@@ -96,9 +96,9 @@ static void NeonCreateElementwiseWorkloadTest()
     auto workload = CreateElementwiseWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(factory, graph);
 
     DescriptorType queueDescriptor = workload->GetData();
-    auto inputHandle1 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto inputHandle2 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[1]);
-    auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle1 = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto inputHandle2 = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
+    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({2, 3}, DataType)));
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle2, TensorInfo({2, 3}, DataType)));
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3}, DataType)));
@@ -186,8 +186,8 @@ static void NeonCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
     // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
     BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle  = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 4, 4} : TensorShape{2, 4, 4, 3};
     TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 4, 4} : TensorShape{2, 4, 4, 3};
@@ -232,8 +232,8 @@ static void NeonCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayo
     // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
     Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle  = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
 }
@@ -272,8 +272,8 @@ static void NeonCreateDepthWiseConvolutionWorkloadTest(DataLayout dataLayout)
 
     // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
     DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle  = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
                                                                : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
@@ -307,8 +307,8 @@ static void NeonCreateFullyConnectedWorkloadTest()
     // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
     FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle  = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({3, 1, 4, 5}, DataType)));
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({3, 7}, DataType)));
 }
@@ -336,8 +336,8 @@ static void NeonCreateNormalizationWorkloadTest(DataLayout dataLayout)
     // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
     NormalizationQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle  = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 5, 5, 1} : TensorShape{3, 1, 5, 5};
     TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 5, 5, 1} : TensorShape{3, 1, 5, 5};
@@ -383,8 +383,8 @@ static void NeonCreatePooling2dWorkloadTest(DataLayout dataLayout = DataLayout::
     // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
     Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle  = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
 }
@@ -427,8 +427,8 @@ static void NeonCreateReshapeWorkloadTest()
     // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
     ReshapeQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle  = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType)));
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 4}, DataType)));
 }
@@ -461,8 +461,8 @@ static void NeonCreateSoftmaxWorkloadTest()
     // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
     SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle  = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType)));
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({4, 1}, DataType)));
 }
@@ -489,16 +489,16 @@ BOOST_AUTO_TEST_CASE(CreateSplitterWorkload)
     // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
     SplitterQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto inputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({5, 7, 7}, DataType::Float32)));
 
-    auto outputHandle0 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto outputHandle0 = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle0, TensorInfo({1, 7, 7}, DataType::Float32)));
 
-    auto outputHandle1 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[1]);
+    auto outputHandle1 = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle1, TensorInfo({2, 7, 7}, DataType::Float32)));
 
-    auto outputHandle2 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[2]);
+    auto outputHandle2 = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[2]);
     BOOST_TEST(TestNeonTensorHandleInfo(outputHandle2, TensorInfo({2, 7, 7}, DataType::Float32)));
 }
 
@@ -522,10 +522,10 @@ BOOST_AUTO_TEST_CASE(CreateSplitterConcat)
     auto wlConcat = std::move(workloads.second);
 
     //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
-    armnn::INeonTensorHandle* sOut0 = dynamic_cast<armnn::INeonTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
-    armnn::INeonTensorHandle* sOut1 = dynamic_cast<armnn::INeonTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
-    armnn::INeonTensorHandle* mIn0 = dynamic_cast<armnn::INeonTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
-    armnn::INeonTensorHandle* mIn1 = dynamic_cast<armnn::INeonTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
+    armnn::IAclTensorHandle* sOut0 = dynamic_cast<armnn::IAclTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
+    armnn::IAclTensorHandle* sOut1 = dynamic_cast<armnn::IAclTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
+    armnn::IAclTensorHandle* mIn0 = dynamic_cast<armnn::IAclTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
+    armnn::IAclTensorHandle* mIn1 = dynamic_cast<armnn::IAclTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
 
     BOOST_TEST(sOut0);
     BOOST_TEST(sOut1);
@@ -556,12 +556,12 @@ BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs)
         NeonActivationWorkload, DataType::Float32>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1,
                                                    wlActiv1_0, wlActiv1_1);
 
-    armnn::INeonTensorHandle* sOut0 = dynamic_cast<armnn::INeonTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
-    armnn::INeonTensorHandle* sOut1 = dynamic_cast<armnn::INeonTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
-    armnn::INeonTensorHandle* activ0_0Im = dynamic_cast<armnn::INeonTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
-    armnn::INeonTensorHandle* activ0_1Im = dynamic_cast<armnn::INeonTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
-    armnn::INeonTensorHandle* activ1_0Im = dynamic_cast<armnn::INeonTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
-    armnn::INeonTensorHandle* activ1_1Im = dynamic_cast<armnn::INeonTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);
+    armnn::IAclTensorHandle* sOut0 = dynamic_cast<armnn::IAclTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
+    armnn::IAclTensorHandle* sOut1 = dynamic_cast<armnn::IAclTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
+    armnn::IAclTensorHandle* activ0_0Im = dynamic_cast<armnn::IAclTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
+    armnn::IAclTensorHandle* activ0_1Im = dynamic_cast<armnn::IAclTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
+    armnn::IAclTensorHandle* activ1_0Im = dynamic_cast<armnn::IAclTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
+    armnn::IAclTensorHandle* activ1_1Im = dynamic_cast<armnn::IAclTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);
 
 
     BOOST_TEST(sOut0);
@@ -581,7 +581,7 @@ BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsNeon)
 {
     NeonWorkloadFactory factory =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
-    CreateMemCopyWorkloads<INeonTensorHandle>(factory);
+    CreateMemCopyWorkloads<IAclTensorHandle>(factory);
 }
 
 template <typename L2NormalizationWorkloadType, typename armnn::DataType DataType>
@@ -596,8 +596,8 @@ static void NeonCreateL2NormalizationWorkloadTest(DataLayout dataLayout)
 
     // Checks that inputs/outputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
     L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle  = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle  = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ?
         TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };
@@ -641,9 +641,9 @@ static void NeonCreateConcatWorkloadTest(std::initializer_list<unsigned int> out
     auto workload = CreateConcatWorkloadTest<NeonConcatWorkload, DataType>(factory, graph, outputShape, concatAxis);
 
     ConcatQueueDescriptor queueDescriptor = workload->GetData();
-    auto inputHandle0 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    auto inputHandle1 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[1]);
-    auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
+    auto inputHandle0 = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto inputHandle1 = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
+    auto outputHandle = boost::polymorphic_downcast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle0, TensorInfo({ 2, 3, 2, 5 }, DataType)));
     BOOST_TEST(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({ 2, 3, 2, 5 }, DataType)));
diff --git a/src/backends/neon/workloads/NeonActivationWorkload.cpp b/src/backends/neon/workloads/NeonActivationWorkload.cpp
index 7715e5f..916d674 100644
--- a/src/backends/neon/workloads/NeonActivationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonActivationWorkload.cpp
@@ -36,8 +36,8 @@ NeonActivationWorkload::NeonActivationWorkload(const ActivationQueueDescriptor&
     const arm_compute::ActivationLayerInfo activationLayerInfo =
         ConvertActivationDescriptorToAclActivationLayerInfo(m_Data.m_Parameters);
 
-    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     auto layer = std::make_unique<arm_compute::NEActivationLayer>();
     layer->configure(&input, &output, activationLayerInfo);
diff --git a/src/backends/neon/workloads/NeonAdditionWorkload.cpp b/src/backends/neon/workloads/NeonAdditionWorkload.cpp
index fa53781..a025c0b 100644
--- a/src/backends/neon/workloads/NeonAdditionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonAdditionWorkload.cpp
@@ -35,9 +35,9 @@ NeonAdditionWorkload::NeonAdditionWorkload(const AdditionQueueDescriptor& descri
 {
     m_Data.ValidateInputsOutputs("NeonAdditionWorkload", 2, 1);
 
-    arm_compute::ITensor& input1 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& input2 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& input2 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     auto layer = std::make_unique<arm_compute::NEArithmeticAddition>();
     layer->configure(&input1, &input2, &output, arm_compute::ConvertPolicy::SATURATE);
diff --git a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
index fc80f41..cd931e3 100644
--- a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
@@ -53,8 +53,8 @@ NeonBatchNormalizationWorkload::NeonBatchNormalizationWorkload(
 {
     m_Data.ValidateInputsOutputs("NeonBatchNormalizationWorkload", 1, 1);
 
-    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonConcatWorkload.cpp b/src/backends/neon/workloads/NeonConcatWorkload.cpp
index 8ea535b..4a9f687 100644
--- a/src/backends/neon/workloads/NeonConcatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConcatWorkload.cpp
@@ -73,10 +73,10 @@ const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info)
     std::vector<arm_compute::ITensor *> aclInputs;
     for (auto input : m_Data.m_Inputs)
     {
-        arm_compute::ITensor& aclInput = boost::polymorphic_pointer_downcast<INeonTensorHandle>(input)->GetTensor();
+        arm_compute::ITensor& aclInput = boost::polymorphic_pointer_downcast<IAclTensorHandle>(input)->GetTensor();
         aclInputs.emplace_back(&aclInput);
     }
-    arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<INeonTensorHandle>(
+    arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<IAclTensorHandle>(
         m_Data.m_Outputs[0])->GetTensor();
 
     // Create the layer function
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index 1080f32..a8137a2 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -60,8 +60,8 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload(
     // todo: check tensor shapes match.
 
-    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index d15b485..a685b8a 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -107,8 +107,8 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
 
     m_Data.ValidateInputsOutputs("NeonDepthwiseConvolutionWorkload", 1, 1);
 
-    INeonTensorHandle* inputTensorHandle  = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0]);
-    INeonTensorHandle* outputTensorHandle = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[0]);
+    IAclTensorHandle* inputTensorHandle  = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0]);
+    IAclTensorHandle* outputTensorHandle = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0]);
     arm_compute::ITensor& input  = inputTensorHandle->GetTensor();
     arm_compute::ITensor& output = outputTensorHandle->GetTensor();
 
diff --git a/src/backends/neon/workloads/NeonDequantizeWorkload.cpp b/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
index 9840b48..aa454c9 100644
--- a/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
@@ -30,8 +30,8 @@ NeonDequantizeWorkload::NeonDequantizeWorkload(const DequantizeQueueDescriptor&
 {
     m_Data.ValidateInputsOutputs("NeonDequantizeWorkload", 1, 1);
 
-    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     m_Layer.reset(new arm_compute::NEDequantizationLayer());
     m_Layer->configure(&input, &output);
diff --git a/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp b/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp
index f024fef..5b4e909 100644
--- a/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp
@@ -19,8 +19,8 @@ NeonFloorFloatWorkload::NeonFloorFloatWorkload(const FloorQueueDescriptor& descr
 {
     m_Data.ValidateInputsOutputs("NeonFloorFloatWorkload", 1, 1);
 
-    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     auto layer = std::make_unique<arm_compute::NEFloor>();
     layer->configure(&input, &output);
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
index 7395270..56e5552 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
@@ -51,8 +51,8 @@ NeonFullyConnectedWorkload::NeonFullyConnectedWorkload(const FullyConnectedQueue
 {
     m_Data.ValidateInputsOutputs("NeonFullyConnectedWorkload", 1, 1);
 
-    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     m_WeightsTensor = std::make_unique<arm_compute::Tensor>();
     BuildArmComputeTensor(*m_WeightsTensor, m_Data.m_Weight->GetTensorInfo());
diff --git a/src/backends/neon/workloads/NeonGreaterWorkload.cpp b/src/backends/neon/workloads/NeonGreaterWorkload.cpp
index ece52d6..6239626 100644
--- a/src/backends/neon/workloads/NeonGreaterWorkload.cpp
+++ b/src/backends/neon/workloads/NeonGreaterWorkload.cpp
@@ -29,9 +29,9 @@ NeonGreaterWorkload<T>::NeonGreaterWorkload(const GreaterQueueDescriptor& descri
 {
     m_Data.ValidateInputsOutputs("NeonGreaterWorkload", 2, 1);
 
-    arm_compute::ITensor& input0 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& input1 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input0 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     m_GreaterLayer.configure(&input0, &input1, &output);
 }
diff --git a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
index efc21ed..e6249b3 100644
--- a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
@@ -33,8 +33,8 @@ NeonL2NormalizationFloatWorkload::NeonL2NormalizationFloatWorkload(const L2Norma
 {
     m_Data.ValidateInputsOutputs("NeonL2NormalizationFloatWorkload", 1, 1);
 
-    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp b/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
index 1ab269f..c7f5f09 100644
--- a/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
@@ -97,13 +97,13 @@ NeonLstmFloatWorkload::NeonLstmFloatWorkload(const LstmQueueDescriptor &descript
         lstm_param.set_peephole_params(m_CellToForgetWeightsTensor.get(), m_CellToOutputWeightsTensor.get());
     }
 
-    const arm_compute::ITensor& input = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    const arm_compute::ITensor& output_state_in = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
-    const arm_compute::ITensor& cell_state_in = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
+    const arm_compute::ITensor& input = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    const arm_compute::ITensor& output_state_in = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    const arm_compute::ITensor& cell_state_in = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
 
-    arm_compute::ITensor& output_state_out = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[1])->GetTensor();
-    arm_compute::ITensor& cell_state_out = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[2])->GetTensor();
-    arm_compute::ITensor& output = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[3])->GetTensor();
+    arm_compute::ITensor& output_state_out = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[1])->GetTensor();
+    arm_compute::ITensor& cell_state_out = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[2])->GetTensor();
+    arm_compute::ITensor& output = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[3])->GetTensor();
 
     // Get the batch_size and the num_units from the cellStateIn dimensions
     const TensorInfo& inputTensorInfo = info.m_InputTensorInfos[2];
diff --git a/src/backends/neon/workloads/NeonMaximumWorkload.cpp b/src/backends/neon/workloads/NeonMaximumWorkload.cpp
index 9994af9..c433d81 100644
--- a/src/backends/neon/workloads/NeonMaximumWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMaximumWorkload.cpp
@@ -29,9 +29,9 @@ NeonMaximumWorkload::NeonMaximumWorkload(const MaximumQueueDescriptor& descripto
 {
     m_Data.ValidateInputsOutputs("NeonMaximumWorkload", 2, 1);
 
-    arm_compute::ITensor& input0 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& input1 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input0 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     m_MaxLayer.configure(&input0, &input1, &output);
 }
diff --git a/src/backends/neon/workloads/NeonMeanWorkload.cpp b/src/backends/neon/workloads/NeonMeanWorkload.cpp
index d736e42..bb0870d 100644
--- a/src/backends/neon/workloads/NeonMeanWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMeanWorkload.cpp
@@ -34,8 +34,8 @@ NeonMeanWorkload::NeonMeanWorkload(const MeanQueueDescriptor& descriptor, const
 {
     m_Data.ValidateInputsOutputs("NeonMeanWorkload", 1, 1);
 
-    arm_compute::ITensor& input = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     arm_compute::Coordinates coords = BuildArmComputeReductionCoordinates(input.info()->num_dimensions(),
                                                                           info.m_InputTensorInfos[0].GetNumDimensions(),
diff --git a/src/backends/neon/workloads/NeonMinimumWorkload.cpp b/src/backends/neon/workloads/NeonMinimumWorkload.cpp
index 9b97bf5..2867a80 100644
--- a/src/backends/neon/workloads/NeonMinimumWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMinimumWorkload.cpp
@@ -29,9 +29,9 @@ NeonMinimumWorkload::NeonMinimumWorkload(const MinimumQueueDescriptor& descripto
 {
     m_Data.ValidateInputsOutputs("NeonMinimumWorkload", 2, 1);
 
-    arm_compute::ITensor& input0 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& input1 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input0 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     m_MinLayer.configure(&input0, &input1, &output);
 }
diff --git a/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp b/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp
index 6398b65..66fbedf 100644
--- a/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp
@@ -37,9 +37,9 @@ NeonMultiplicationWorkload::NeonMultiplicationWorkload(const MultiplicationQueue
 {
     m_Data.ValidateInputsOutputs("NeonMultiplicationWorkload", 2, 1);
 
-    arm_compute::ITensor& input1 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& input2 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& input2 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     // At the time of writing, configure() will fail if a rounding policy other than TO_ZERO is supplied to it,
     // when providing a scale of 1.0 for F32 tensors, even though the provided rounding policy appears to be
diff --git a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
index 92c0396..8cb4ec9 100644
--- a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
@@ -77,8 +77,8 @@ NeonNormalizationFloatWorkload::NeonNormalizationFloatWorkload(const Normalizati
         throw InvalidArgumentException("Normalization requires input and output tensors to have equal dimensionality.");
     }
 
-    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
     arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonPadWorkload.cpp b/src/backends/neon/workloads/NeonPadWorkload.cpp
index 79645f1..32b15e9 100644
--- a/src/backends/neon/workloads/NeonPadWorkload.cpp
+++ b/src/backends/neon/workloads/NeonPadWorkload.cpp
@@ -21,8 +21,8 @@ NeonPadWorkload::NeonPadWorkload(const PadQueueDescriptor& descriptor, const Wor
 {
     m_Data.ValidateInputsOutputs("NeonPadWorkload", 1, 1);
 
-    arm_compute::ITensor& input = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     std::vector<std::pair<unsigned int, unsigned int>> reversed_PadList(descriptor.m_Parameters.m_PadList.size());
 
diff --git a/src/backends/neon/workloads/NeonPermuteWorkload.cpp b/src/backends/neon/workloads/NeonPermuteWorkload.cpp
index d2e62ee..a5ecbcb 100644
--- a/src/backends/neon/workloads/NeonPermuteWorkload.cpp
+++ b/src/backends/neon/workloads/NeonPermuteWorkload.cpp
@@ -32,8 +32,8 @@ NeonPermuteWorkload::NeonPermuteWorkload(const PermuteQueueDescriptor& descripto
 
     m_Data.ValidateInputsOutputs(GetName(), 1, 1);
 
-    const arm_compute::ITensor& input = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    const arm_compute::ITensor& input = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
     const armnn::PermutationVector& mappings = m_Data.m_Parameters.m_DimMappings;
 
     // Run the layer.
diff --git a/src/backends/neon/workloads/NeonPooling2dWorkload.cpp b/src/backends/neon/workloads/NeonPooling2dWorkload.cpp
index 75bceb1..9934c29 100644
--- a/src/backends/neon/workloads/NeonPooling2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonPooling2dWorkload.cpp
@@ -37,8 +37,8 @@ NeonPooling2dWorkload::NeonPooling2dWorkload(
 {
     m_Data.ValidateInputsOutputs("NeonPooling2dWorkload", 1, 1);
 
-    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonQuantizeWorkload.cpp b/src/backends/neon/workloads/NeonQuantizeWorkload.cpp
index ef24a7f..cb8393b 100644
--- a/src/backends/neon/workloads/NeonQuantizeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonQuantizeWorkload.cpp
@@ -28,9 +28,9 @@ NeonQuantizeWorkload::NeonQuantizeWorkload(const QuantizeQueueDescriptor& descri
                                            const WorkloadInfo& workloadInfo)
     : BaseWorkload<QuantizeQueueDescriptor>(descriptor, workloadInfo)
 {
-    arm_compute::ITensor& input = boost::polymorphic_pointer_downcast<INeonTensorHandle>(
+    arm_compute::ITensor& input = boost::polymorphic_pointer_downcast<IAclTensorHandle>(
         m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<INeonTensorHandle>(
+    arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<IAclTensorHandle>(
         m_Data.m_Outputs[0])->GetTensor();
 
     m_Layer.reset(new arm_compute::NEQuantizationLayer());
diff --git a/src/backends/neon/workloads/NeonReshapeWorkload.cpp b/src/backends/neon/workloads/NeonReshapeWorkload.cpp
index 40fbef6..7f2056c 100644
--- a/src/backends/neon/workloads/NeonReshapeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonReshapeWorkload.cpp
@@ -20,8 +20,8 @@ NeonReshapeWorkload::NeonReshapeWorkload(const ReshapeQueueDescriptor& descripto
 {
     m_Data.ValidateInputsOutputs("NeonReshapeWorkload", 1, 1);
 
-    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     auto layer = std::make_unique<arm_compute::NEReshapeLayer>();
     layer->configure(&input, &output);
diff --git a/src/backends/neon/workloads/NeonResizeBilinearWorkload.cpp b/src/backends/neon/workloads/NeonResizeBilinearWorkload.cpp
index 37f97bf..6943d87 100644
--- a/src/backends/neon/workloads/NeonResizeBilinearWorkload.cpp
+++ b/src/backends/neon/workloads/NeonResizeBilinearWorkload.cpp
@@ -35,8 +35,8 @@ NeonResizeBilinearWorkload::NeonResizeBilinearWorkload(const ResizeBilinearQueue
 {
     m_Data.ValidateInputsOutputs("NeonResizeBilinearWorkload", 1, 1);
 
-    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
index afc6135..19c50db 100644
--- a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
@@ -20,8 +20,8 @@ NeonSoftmaxFloatWorkload::NeonSoftmaxFloatWorkload(const SoftmaxQueueDescriptor&
     m_Data.ValidateInputsOutputs("NeonSoftmaxFloatWorkload", 1, 1);
 
     // The ArmCompute softmax layer uses 2D input/output tensors, so flatten the first three dimensions.
-    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
     auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
diff --git a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
index 7f295d6..7b2d290 100644
--- a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
@@ -20,8 +20,8 @@ NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor&
 {
     m_Data.ValidateInputsOutputs("NeonSoftmaxUint8Workload", 1, 1);
 
-    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     const auto outputQuantization = output.info()->quantization_info();
 
diff --git a/src/backends/neon/workloads/NeonSplitterWorkload.cpp b/src/backends/neon/workloads/NeonSplitterWorkload.cpp
index bf35939..5b56146 100644
--- a/src/backends/neon/workloads/NeonSplitterWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSplitterWorkload.cpp
@@ -73,12 +73,12 @@ NeonSplitterWorkload::NeonSplitterWorkload(const SplitterQueueDescriptor& descri
         return;
     }
 
-    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
 
     std::vector<arm_compute::ITensor *> aclOutputs;
     for (auto output : m_Data.m_Outputs)
     {
-        arm_compute::ITensor& aclOutput = boost::polymorphic_pointer_downcast<INeonTensorHandle>(output)->GetTensor();
+        arm_compute::ITensor& aclOutput = boost::polymorphic_pointer_downcast<IAclTensorHandle>(output)->GetTensor();
         aclOutputs.emplace_back(&aclOutput);
     }
 
diff --git a/src/backends/neon/workloads/NeonSubtractionWorkload.cpp b/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
index 39176aa..f4b4707 100644
--- a/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
@@ -34,9 +34,9 @@ NeonSubtractionWorkload::NeonSubtractionWorkload(const SubtractionQueueDescripto
 {
     m_Data.ValidateInputsOutputs("NeonSubtractionWorkload", 2, 1);
 
-    arm_compute::ITensor& input1 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& input2 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& input2 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     auto layer = std::make_unique<arm_compute::NEArithmeticSubtraction>();
     layer->configure(&input1, &input2, &output, arm_compute::ConvertPolicy::SATURATE);
-- 
2.7.4