From 0790dcea1056298d63f97dec904c8ade5d21f439 Mon Sep 17 00:00:00 2001 From: Derek Lamberti Date: Mon, 15 Apr 2019 18:37:35 +0100 Subject: [PATCH] IVGCVSW-2957 MergerLayer subtensor optimization now backend agnostic + Update clframework pin + Cl and Neon Merger workloads updated to use MemoryLayout agnostic API + Workloads only use sub-tensor optimization if ALL input tensors are sub-tensors + Refactor LayerSupportCommon code to be a bit more succinct Change-Id: Ib61ad4ccbd767e924dff07e61022e0cda4069828 Signed-off-by: Derek Lamberti --- include/armnn/Tensor.hpp | 6 ++- scripts/get_compute_library.sh | 2 +- src/armnn/LayerSupportCommon.hpp | 49 +++++++------------ src/armnn/Tensor.cpp | 14 ++++++ src/armnn/layers/MergerLayer.cpp | 56 ++++++++++++++++++---- src/backends/cl/ClLayerSupport.cpp | 31 +++++++++--- src/backends/cl/ClWorkloadFactory.cpp | 6 +++ src/backends/cl/workloads/ClMergerWorkload.cpp | 51 +++++++++++++------- src/backends/cl/workloads/ClMergerWorkload.hpp | 3 +- src/backends/neon/NeonLayerSupport.cpp | 38 ++++++++++----- src/backends/neon/NeonWorkloadFactory.cpp | 6 +++ src/backends/neon/workloads/NeonMergerWorkload.cpp | 52 ++++++++++++-------- src/backends/neon/workloads/NeonMergerWorkload.hpp | 7 ++- 13 files changed, 218 insertions(+), 103 deletions(-) diff --git a/include/armnn/Tensor.hpp b/include/armnn/Tensor.hpp index 503c161..160ccca 100644 --- a/include/armnn/Tensor.hpp +++ b/include/armnn/Tensor.hpp @@ -80,7 +80,11 @@ public: int32_t GetQuantizationOffset() const { return m_Quantization.m_Offset; } void SetQuantizationScale(float scale) { m_Quantization.m_Scale = scale; } void SetQuantizationOffset(int32_t offset) { m_Quantization.m_Offset = offset; } - bool IsQuantized() const { return m_DataType == DataType::QuantisedAsymm8; } + bool IsQuantized() const { return m_DataType == DataType::QuantisedAsymm8 || + m_DataType == DataType::QuantisedSymm16; } + + /// Check that the types are the same and, if quantize, that the quantization parameters are the same. + bool IsTypeSpaceMatch(const TensorInfo& other) const; unsigned int GetNumBytes() const; diff --git a/scripts/get_compute_library.sh b/scripts/get_compute_library.sh index f3d1a8c..8a35bd3 100755 --- a/scripts/get_compute_library.sh +++ b/scripts/get_compute_library.sh @@ -10,7 +10,7 @@ CMD=$( basename $0 ) # DEFAULT_CLFRAMEWORKREVISION="branches/arm_compute_19_02" # Release 19.02 # # For pinning to a revision use this: -DEFAULT_CLFRAMEWORKREVISION="a4bba9c594c4022c9f85192bb8fd3593ad1a8d3c" # COMPMID-1995: Fix 32-bit NEDepthwiseConvolution errors. +DEFAULT_CLFRAMEWORKREVISION="9e4824c909b14dbaf7106e9527b0ffa22ef09bdc" usage() { echo "Usage: $CMD (Use the default clframework SHA)" diff --git a/src/armnn/LayerSupportCommon.hpp b/src/armnn/LayerSupportCommon.hpp index 70b5f18..3e2a124 100644 --- a/src/armnn/LayerSupportCommon.hpp +++ b/src/armnn/LayerSupportCommon.hpp @@ -12,6 +12,15 @@ namespace armnn { +template +void SetValueChecked(Optional optionalRef, V&& val) +{ + if (optionalRef) + { + optionalRef.value() = val; + } +} + template bool IsSupportedForDataTypeGeneric(Optional reasonIfUnsupported, @@ -55,80 +64,56 @@ bool FalseFunc(Optional reasonIfUnsupported, Params&&... params) template bool FalseFuncF16(Optional reasonIfUnsupported, Params&&... 
params) { - if (reasonIfUnsupported) - { - reasonIfUnsupported.value() = "Layer is not supported with float16 data type"; - } + SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type"); return false; } template bool FalseFuncF32(Optional reasonIfUnsupported, Params&&... params) { - if (reasonIfUnsupported) - { - reasonIfUnsupported.value() = "Layer is not supported with float32 data type"; - } + SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type"); return false; } template bool FalseFuncU8(Optional reasonIfUnsupported, Params&&... params) { - if (reasonIfUnsupported) - { - reasonIfUnsupported.value() = "Layer is not supported with 8-bit data type"; - } + SetValueChecked(reasonIfUnsupported, "Layer is not supported with 8-bit data type"); return false; } template bool FalseFuncI32(Optional reasonIfUnsupported, Params&&... params) { - if (reasonIfUnsupported) - { - reasonIfUnsupported.value() = "Layer is not supported with int32 data type"; - } + SetValueChecked(reasonIfUnsupported, "Layer is not supported with int32 data type"); return false; } template bool FalseInputFuncF32(Optional reasonIfUnsupported, Params&&... params) { - if (reasonIfUnsupported) - { - reasonIfUnsupported.value() = "Layer is not supported with float32 data type input"; - } + SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type input"); return false; } template bool FalseInputFuncF16(Optional reasonIfUnsupported, Params&&... params) { - if (reasonIfUnsupported) - { - reasonIfUnsupported.value() = "Layer is not supported with float16 data type input"; - } + SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type input"); return false; } template bool FalseOutputFuncF32(Optional reasonIfUnsupported, Params&&... params) { - if (reasonIfUnsupported) - { - reasonIfUnsupported.value() = "Layer is not supported with float32 data type output"; - } + SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type output"); return false; } template bool FalseOutputFuncF16(Optional reasonIfUnsupported, Params&&... 
params) { - if (reasonIfUnsupported) - { - reasonIfUnsupported.value() = "Layer is not supported with float16 data type output"; - } + SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type output"); return false; } diff --git a/src/armnn/Tensor.cpp b/src/armnn/Tensor.cpp index da19e5b..614abc7 100644 --- a/src/armnn/Tensor.cpp +++ b/src/armnn/Tensor.cpp @@ -188,6 +188,20 @@ unsigned int TensorInfo::GetNumBytes() const return GetDataTypeSize(m_DataType) * GetNumElements(); } +bool TensorInfo::IsTypeSpaceMatch(const TensorInfo& other) const +{ + bool match = true; + + match &= m_DataType == other.m_DataType; + + if (IsQuantized()) + { + match &= GetQuantizationScale() == other.GetQuantizationScale() && + GetQuantizationOffset() == other.GetQuantizationOffset(); + } + return match; +} + // --- // --- BaseTensor // --- diff --git a/src/armnn/layers/MergerLayer.cpp b/src/armnn/layers/MergerLayer.cpp index f87f349..c674f64 100644 --- a/src/armnn/layers/MergerLayer.cpp +++ b/src/armnn/layers/MergerLayer.cpp @@ -36,14 +36,12 @@ std::unique_ptr MergerLayer::CreateWorkload(const Graph& graph, const void MergerLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& factory) { - //If sub tensors are supported than the merger + //If sub tensors are supported then the merger //just needs to make sure that the outputs of the prev layer //are made subtensors of the output of the merger layer. m_OutputHandlers[0].CreateTensorHandles(factory); - unsigned int innerAxis = m_Param.GetNumDimensions() - m_Param.GetConcatAxis(); - - if (factory.SupportsSubTensors() && innerAxis != 1) + if (factory.SupportsSubTensors()) { std::queue m_MergerLayers; @@ -52,23 +50,65 @@ void MergerLayer::CreateTensorHandles(Graph& graph, const IWorkloadFactory& fact { MergerLayer* currentLayer = m_MergerLayers.front(); ITensorHandle* parentTensor = currentLayer->GetOutputHandler(0).GetData(); - + const TensorInfo& parentInfo = currentLayer->GetOutputHandler(0).GetTensorInfo(); m_MergerLayers.pop(); const unsigned int numInputSlots = currentLayer->GetNumInputSlots(); + + // First go through all the input slots and verify that we can sub-tensor all the inputs. + std::vector> subTensors(0); + subTensors.reserve(numInputSlots); for (unsigned int i = 0; i < numInputSlots; ++i) { OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot(); + const TensorInfo& info = slot->GetTensorInfo(); + + auto CreateSubTensor = [&]() + { + // Make sure quantization parameters are in the same space + if (parentInfo.IsTypeSpaceMatch(info)) + { + return factory.CreateSubTensorHandle(*parentTensor, + info.GetShape(), + currentLayer->m_Param.GetViewOrigin(i)); + } + return std::unique_ptr(); + }; + + auto subTensor = CreateSubTensor(); + if (!subTensor) + { + break; //Failed to create a valid sub-tensor, so stop trying with the rest of the inputs. + } + else + { + subTensors.push_back(std::move(subTensor)); // store the valid sub-tensor. + } + } + + // Ensure that ALL inputs can be substituted with valid sub-tensors + if (subTensors.size() < numInputSlots) + { + continue; // Don't optimize this Merge layer with sub-tensors + } + + // Substitute input tensors with sub-tensors by replacing the output tensors on the connected layers. 
+ unsigned int i=0; + for (auto& subTensor : subTensors) + { + OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot(); OutputHandler& outputHandler = slot->GetOutputHandler(); - outputHandler.SetData(factory.CreateSubTensorHandle(*parentTensor, - outputHandler.GetTensorInfo().GetShape(), - currentLayer->m_Param.GetViewOrigin(i))); + + BOOST_ASSERT_MSG(subTensor, "MergerLayer: Expected a valid sub-tensor for substitution."); + outputHandler.SetData(std::move(subTensor)); Layer& inputLayer = slot->GetOwningLayer(); if (inputLayer.GetType() == LayerType::Merger) { + // Continue with the substitution if the connected inputs are also merger layers m_MergerLayers.push(boost::polymorphic_downcast(&inputLayer)); } + ++i; } } } diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp index cfc0f11..a5c5f2b 100644 --- a/src/backends/cl/ClLayerSupport.cpp +++ b/src/backends/cl/ClLayerSupport.cpp @@ -416,7 +416,14 @@ bool ClLayerSupport::IsMergerSupported(const std::vector inpu const OriginsDescriptor& descriptor, Optional reasonIfUnsupported) const { - if(descriptor.GetNumDimensions() - descriptor.GetConcatAxis() == 1) + if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis()) + { + SetValueChecked(reasonIfUnsupported, "Cl Merger: Concat axis > Number of dimensions."); + return false; + } + + unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1; + if(concatInnerAxis < 3) // Width, height, or channels { FORWARD_WORKLOAD_VALIDATE_FUNC(ClMergerWorkloadValidate, reasonIfUnsupported, @@ -424,12 +431,24 @@ bool ClLayerSupport::IsMergerSupported(const std::vector inpu output, descriptor); } - else + else if (concatInnerAxis == 3) + { + // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use + // sub-tensors for this then we can't support it. Here is where we check that the sub-tensors will work. + for (auto& input : inputs) + { + if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space + { + SetValueChecked(reasonIfUnsupported, "Cl Merger: Types and quantization parameters must match."); + return false; + } + } + return true; // Sub-tensors support concat along batch + } + else // > 4 dimensions not supported. 
{ - return IsSupportedForDataTypeCl(reasonIfUnsupported, - inputs[0]->GetDataType(), - &TrueFunc<>, - &TrueFunc<>); + SetValueChecked(reasonIfUnsupported, "Cl Merger: Maximum of 4 dimensions supported."); + return false; } } diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp index d41a7e5..e4097a1 100644 --- a/src/backends/cl/ClWorkloadFactory.cpp +++ b/src/backends/cl/ClWorkloadFactory.cpp @@ -113,6 +113,12 @@ std::unique_ptr ClWorkloadFactory::CreateSubTensorHandle(ITensorH coords.set(i, boost::numeric_cast(subTensorOrigin[revertedIndex])); } + const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape()); + if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape)) + { + return nullptr; + } + return std::make_unique( boost::polymorphic_downcast(&parent), shape, coords); } diff --git a/src/backends/cl/workloads/ClMergerWorkload.cpp b/src/backends/cl/workloads/ClMergerWorkload.cpp index e06d8c5..610acb9 100644 --- a/src/backends/cl/workloads/ClMergerWorkload.cpp +++ b/src/backends/cl/workloads/ClMergerWorkload.cpp @@ -9,16 +9,25 @@ #include #include +#include + #include namespace armnn { using namespace armcomputetensorutils; +namespace +{ +size_t CalcAxis(const MergerDescriptor& desc) +{ + return (desc.GetNumDimensions() - desc.GetConcatAxis()) - 1; +} +} //namespace + arm_compute::Status ClMergerWorkloadValidate(const std::vector& inputs, const TensorInfo& output, const MergerDescriptor& descriptor) - { std::vector aclInputs; for (const TensorInfo* input : inputs) @@ -27,59 +36,65 @@ arm_compute::Status ClMergerWorkloadValidate(const std::vector aclInputPtrs; for (arm_compute::ITensorInfo& input : aclInputs) { aclInputPtrs.emplace_back(&input); } + size_t aclAxis = CalcAxis(descriptor); return arm_compute::CLConcatenateLayer::validate(aclInputPtrs, &aclOutputInfo, aclAxis); - } ClMergerWorkload::ClMergerWorkload(const MergerQueueDescriptor& descriptor, const WorkloadInfo& info) : BaseWorkload(descriptor, info) { - m_Execute = true; + bool allInputsAreSubtensors = true; - unsigned int innerAxisOrder = descriptor.m_Parameters.GetNumDimensions() - descriptor.m_Parameters.GetConcatAxis(); + // Check that all inputs are sub-tensors + for (auto input : descriptor.m_Inputs) + { + if (!input->GetParent()) + { + // Non sub-tensor input found so we need to execute the merger function + allInputsAreSubtensors = false; + break; + } + } - if (innerAxisOrder != 1) + if (allInputsAreSubtensors) { - m_Execute = false; + // Can skip configuring the merger function since it's not executed return; } std::vector aclInputs; - arm_compute::DataLayout aclDataLayout = ConvertDataLayout(armnn::DataLayout::NCHW); for (auto input : m_Data.m_Inputs) { arm_compute::ICLTensor& aclInput = boost::polymorphic_pointer_downcast(input)->GetTensor(); - aclInput.info()->set_data_layout(aclDataLayout); aclInputs.emplace_back(&aclInput); } arm_compute::ICLTensor& output = boost::polymorphic_pointer_downcast( m_Data.m_Outputs[0])->GetTensor(); - output.info()->set_data_layout(aclDataLayout); - - arm_compute::DataLayoutDimension aclAxis = arm_compute::DataLayoutDimension::WIDTH; - m_Layer.configure(aclInputs, &output, aclAxis); + // Create the layer function + m_Layer.reset(new arm_compute::CLConcatenateLayer()); - m_Layer.prepare(); + // Configure input and output tensors + size_t aclAxis = CalcAxis(descriptor.m_Parameters); + m_Layer->configure(aclInputs, &output, aclAxis); + // Prepare + 
m_Layer->prepare(); } void ClMergerWorkload::Execute() const { - if (m_Execute) + if (m_Layer) { ARMNN_SCOPED_PROFILING_EVENT_CL("ClMergerWorkload_Execute"); - m_Layer.run(); + m_Layer->run(); } - } } //namespace armnn \ No newline at end of file diff --git a/src/backends/cl/workloads/ClMergerWorkload.hpp b/src/backends/cl/workloads/ClMergerWorkload.hpp index 8189a1b..1c2f823 100644 --- a/src/backends/cl/workloads/ClMergerWorkload.hpp +++ b/src/backends/cl/workloads/ClMergerWorkload.hpp @@ -24,8 +24,7 @@ public: void Execute() const override; private: - mutable arm_compute::CLConcatenateLayer m_Layer; - bool m_Execute; + mutable std::unique_ptr m_Layer; }; } //namespace armnn diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp index 46a7e6f..898660c 100644 --- a/src/backends/neon/NeonLayerSupport.cpp +++ b/src/backends/neon/NeonLayerSupport.cpp @@ -52,10 +52,7 @@ bool IsNeonBackendSupported(Optional reasonIfUnsupported) #if defined(ARMCOMPUTENEON_ENABLED) return true; #else - if (reasonIfUnsupported) - { - reasonIfUnsupported.value() = "The armnn library has been built without NEON support"; - } + SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support"); return false; #endif } @@ -304,7 +301,14 @@ bool NeonLayerSupport::IsMergerSupported(const std::vector in const OriginsDescriptor& descriptor, Optional reasonIfUnsupported) const { - if(descriptor.GetNumDimensions() - descriptor.GetConcatAxis() == 1) + if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis()) + { + SetValueChecked(reasonIfUnsupported, "Neon Merger: Concat axis > Number of dimensions."); + return false; + } + + unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1; + if(concatInnerAxis < 3) // Width, height, or channels { FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMergerWorkloadValidate, reasonIfUnsupported, @@ -312,13 +316,23 @@ bool NeonLayerSupport::IsMergerSupported(const std::vector in output, descriptor); } - else - { - return IsSupportedForDataTypeNeon(reasonIfUnsupported, - inputs[0]->GetDataType(), - &TrueFunc<>, - &TrueFunc<>); - } + else if (concatInnerAxis == 3) + { + for (auto& input : inputs) + { + if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the types are not same space + { + SetValueChecked(reasonIfUnsupported, "Neon Merger: Types and quantization parameters must match."); + return false; + } + } + return true; // Sub-tensors support concat along batch + } + else // > 4 dimensions not supported. 
+ { + SetValueChecked(reasonIfUnsupported, "Neon Merger: Maximum of 4 dimensions supported."); + return false; + } } bool NeonLayerSupport::IsMinimumSupported(const TensorInfo& input0, diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp index 101e59d..8db5f9a 100644 --- a/src/backends/neon/NeonWorkloadFactory.cpp +++ b/src/backends/neon/NeonWorkloadFactory.cpp @@ -61,6 +61,12 @@ std::unique_ptr NeonWorkloadFactory::CreateSubTensorHandle(ITenso coords.set(i, boost::numeric_cast(subTensorOrigin[revertedIndex])); } + const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape()); + if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape)) + { + return nullptr; + } + return std::make_unique( boost::polymorphic_downcast(&parent), shape, coords); } diff --git a/src/backends/neon/workloads/NeonMergerWorkload.cpp b/src/backends/neon/workloads/NeonMergerWorkload.cpp index be096b4..64d4d93 100644 --- a/src/backends/neon/workloads/NeonMergerWorkload.cpp +++ b/src/backends/neon/workloads/NeonMergerWorkload.cpp @@ -11,12 +11,20 @@ #include #include -#include + namespace armnn { using namespace armcomputetensorutils; +namespace +{ +size_t CalcAxis(const armnn::MergerDescriptor& desc) +{ + return (desc.GetNumDimensions() - desc.GetConcatAxis()) - 1; +} +} //namespace + arm_compute::Status NeonMergerWorkloadValidate(const std::vector& inputs, const TensorInfo& output, const MergerDescriptor& descriptor) @@ -25,60 +33,66 @@ arm_compute::Status NeonMergerWorkloadValidate(const std::vector aclInputs; for (const TensorInfo* input : inputs) { - arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(*input, armnn::DataLayout::NCHW); - aclInputs.emplace_back(aclInputInfo); + arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(*input, armnn::DataLayout::NCHW); + aclInputs.emplace_back(aclInputInfo); } const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output); - arm_compute::DataLayoutDimension aclAxis = arm_compute::DataLayoutDimension::WIDTH; - std::vector aclInputPtrs; for (arm_compute::ITensorInfo& input : aclInputs) { aclInputPtrs.emplace_back(&input); } + size_t aclAxis = CalcAxis(descriptor); return arm_compute::NEConcatenateLayer::validate(aclInputPtrs, &aclOutputInfo, aclAxis); - } NeonMergerWorkload::NeonMergerWorkload( const MergerQueueDescriptor& descriptor, const WorkloadInfo& info) : BaseWorkload(descriptor, info) { - m_Execute = true; + bool allInputsAreSubtensors = true; - unsigned int innerAxisOrder = descriptor.m_Parameters.GetNumDimensions() - descriptor.m_Parameters.GetConcatAxis(); + // Check that all inputs are sub-tensors + for (auto input : descriptor.m_Inputs) + { + if (!input->GetParent()) + { + // Non sub-tensor input found so we need to execute the merger function + allInputsAreSubtensors = false; + break; + } + } - if (innerAxisOrder != 1) + if (allInputsAreSubtensors) { - m_Execute = false; + // Can skip configuring the merger function since it's not executed return; } std::vector aclInputs; - arm_compute::DataLayout aclDataLayout = ConvertDataLayout(armnn::DataLayout::NCHW); for (auto input : m_Data.m_Inputs) { arm_compute::ITensor& aclInput = boost::polymorphic_pointer_downcast(input)->GetTensor(); - aclInput.info()->set_data_layout(aclDataLayout); aclInputs.emplace_back(&aclInput); } arm_compute::ITensor& output = boost::polymorphic_pointer_downcast( - m_Data.m_Outputs[0])->GetTensor(); - 
output.info()->set_data_layout(aclDataLayout); + m_Data.m_Outputs[0])->GetTensor(); - arm_compute::DataLayoutDimension aclAxis = arm_compute::DataLayoutDimension::WIDTH; + // Create the layer function + m_Layer.reset(new arm_compute::NEConcatenateLayer()); - auto layer = std::make_unique(); - layer->configure(aclInputs, &output, aclAxis); - m_Layer.reset(layer.release()); + // Configure input and output tensors + size_t aclAxis = CalcAxis(descriptor.m_Parameters); + m_Layer->configure(aclInputs, &output, aclAxis); + // Prepare m_Layer->prepare(); } void NeonMergerWorkload::Execute() const { - if (m_Execute) + if (m_Layer) { ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonMergerWorkload_Execute"); m_Layer->run(); diff --git a/src/backends/neon/workloads/NeonMergerWorkload.hpp b/src/backends/neon/workloads/NeonMergerWorkload.hpp index 3432c62..1dd9309 100644 --- a/src/backends/neon/workloads/NeonMergerWorkload.hpp +++ b/src/backends/neon/workloads/NeonMergerWorkload.hpp @@ -9,7 +9,8 @@ #include #include -# +#include + #include namespace armnn @@ -27,9 +28,7 @@ public: void Execute() const override; private: - std::unique_ptr m_Layer; - bool m_Execute; - + std::unique_ptr m_Layer; }; } //namespace armnn -- 2.7.4
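
The central behavioural change in this patch is that the Cl/Neon Merger workloads now skip running the concatenation function entirely when every input handle is already a sub-tensor of the output, and that sub-tensors are only created when the input and parent tensors agree on type and quantization space (the new TensorInfo::IsTypeSpaceMatch). The sketch below is illustrative only: it models those two checks with minimal hypothetical stand-in types (TensorInfo, TensorHandle, NeedsConcatFunction), not the real ArmNN classes.

```cpp
// Illustrative sketch, not the actual ArmNN implementation.
#include <vector>

struct TensorInfo                      // hypothetical stand-in for armnn::TensorInfo
{
    int   dataType;
    bool  isQuantized;
    float quantScale;
    int   quantOffset;

    // Mirrors the IsTypeSpaceMatch idea from the patch: same data type and, for
    // quantized types, identical quantization parameters. Only then may a
    // sub-tensor alias the parent tensor's memory directly.
    bool IsTypeSpaceMatch(const TensorInfo& other) const
    {
        bool match = dataType == other.dataType;
        if (isQuantized)
        {
            match = match && quantScale == other.quantScale
                          && quantOffset == other.quantOffset;
        }
        return match;
    }
};

struct TensorHandle                    // hypothetical stand-in for ITensorHandle
{
    TensorHandle* parent = nullptr;    // non-null when this handle is a sub-tensor
    TensorHandle* GetParent() const { return parent; }
};

// Mirrors the constructor logic in the updated Cl/Neon Merger workloads: if every
// input is already a sub-tensor of the output, the inputs are written straight into
// the output's memory, so the concatenation function is never configured or run.
bool NeedsConcatFunction(const std::vector<TensorHandle*>& inputs)
{
    for (const TensorHandle* input : inputs)
    {
        if (input->GetParent() == nullptr)
        {
            return true;   // at least one plain tensor -> must run the concat layer
        }
    }
    return false;          // all inputs are sub-tensors -> skip the layer entirely
}
```

This also explains why MergerLayer::CreateTensorHandles first collects candidate sub-tensors for all input slots and only substitutes them if every input produced a valid one: a partial substitution would leave the workload with a mix of sub-tensor and plain inputs, which would still require the concat function to run.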
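
The other recurring piece is the CalcAxis helper added to both workloads, which replaces the old DataLayoutDimension::WIDTH hard-coding. ArmNN's OriginsDescriptor counts the concat axis from the outermost dimension, while the Compute Library concatenation functions take an axis counted from the innermost dimension, so the patch maps one to the other. The following standalone sketch (with a hypothetical ConcatDescriptor struct standing in for armnn::OriginsDescriptor) shows that mapping and how the layer-support checks interpret it.

```cpp
// Illustrative sketch, assuming a 4D NCHW tensor; not the actual ArmNN descriptor class.
#include <cstddef>
#include <iostream>

struct ConcatDescriptor                // hypothetical, minimal stand-in
{
    unsigned int numDimensions;        // e.g. 4 for an NCHW tensor
    unsigned int concatAxis;           // ArmNN axis, counted from the outermost dimension
};

// ArmNN counts axes from the outermost dimension (N=0, C=1, H=2, W=3 for NCHW);
// the Compute Library expects the axis counted from the innermost dimension
// (W=0, H=1, C=2, N=3). This mirrors the CalcAxis helper in the patch.
std::size_t CalcAclAxis(const ConcatDescriptor& desc)
{
    return (desc.numDimensions - desc.concatAxis) - 1;
}

int main()
{
    for (unsigned int axis = 0; axis < 4; ++axis)
    {
        ConcatDescriptor desc{4, axis};
        std::size_t aclAxis = CalcAclAxis(desc);
        // aclAxis < 3  -> concat over W/H/C, handled by the CL/NE ConcatenateLayer;
        // aclAxis == 3 -> concat over the batch dimension, which the backends only
        //                 support via the sub-tensor optimisation (no layer is run).
        std::cout << "ArmNN axis " << axis << " -> ACL axis " << aclAxis << '\n';
    }
    return 0;
}
```

Under this reading, the new IsMergerSupported logic in ClLayerSupport and NeonLayerSupport falls out naturally: inner axes are validated against the backend concatenate layer, the batch axis is accepted only when the type spaces match (so sub-tensors can be used), and more than four dimensions is rejected.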