IVGCVSW-4483 Removes boost::polymorphic_pointer_downcast
author Jan Eilers <jan.eilers@arm.com>
Fri, 10 Apr 2020 12:00:44 +0000 (13:00 +0100)
committer Jan Eilers <jan.eilers@arm.com>
Tue, 14 Apr 2020 08:24:26 +0000 (09:24 +0100)
 * Replaced boost::polymorphic_pointer_downcast with armnn::PolymorphicPointerDowncast (see the illustrative sketch below)
 * Replaced or removed the corresponding boost includes

Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: I0ef934a3804cf05e4c38dec6c4ec49c76111a302
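
Note: the replacement helper comes from armnn/utility/PolymorphicDowncast.hpp. As a rough illustration only (not the actual armnn source; the example namespace and the stub types are hypothetical), the sketch below shows the kind of checked pointer downcast this change relies on: a dynamic_cast verification in debug builds that degrades to a static cast, for both raw pointers and std::shared_ptr.

    // Illustrative sketch, not the armnn implementation.
    #include <cassert>
    #include <memory>

    namespace example // hypothetical namespace for illustration
    {

    // Raw-pointer overload: debug-checked downcast, plain static_cast in release.
    template <typename Dest, typename Source>
    Dest* PolymorphicPointerDowncast(Source* value)
    {
        assert(dynamic_cast<Dest*>(value) == value); // fires if the object is not really a Dest
        return static_cast<Dest*>(value);
    }

    // std::shared_ptr overload, mirroring boost::polymorphic_pointer_downcast's
    // support for smart pointers.
    template <typename Dest, typename Source>
    std::shared_ptr<Dest> PolymorphicPointerDowncast(const std::shared_ptr<Source>& value)
    {
        assert(std::dynamic_pointer_cast<Dest>(value) == value);
        return std::static_pointer_cast<Dest>(value);
    }

    } // namespace example

    // Minimal usage demo with stand-in types (hypothetical names).
    struct IMemoryManagerStub { virtual ~IMemoryManagerStub() = default; };
    struct ClMemoryManagerStub : IMemoryManagerStub {};

    int main()
    {
        std::shared_ptr<IMemoryManagerStub> base = std::make_shared<ClMemoryManagerStub>();
        std::shared_ptr<ClMemoryManagerStub> derived =
            example::PolymorphicPointerDowncast<ClMemoryManagerStub>(base);
        assert(derived != nullptr);
        return 0;
    }

This mirrors the two usage patterns in the diff: downcasting shared IMemoryManager pointers (e.g. to ClMemoryManager or NeonMemoryManager) and downcasting raw tensor-handle pointers before calling GetTensor().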

23 files changed:
src/backends/cl/ClBackend.cpp
src/backends/cl/ClContextControl.cpp
src/backends/cl/ClTensorHandle.hpp
src/backends/cl/test/ClWorkloadFactoryHelper.hpp
src/backends/cl/workloads/ClConcatWorkload.cpp
src/backends/cl/workloads/ClDepthToSpaceWorkload.cpp
src/backends/cl/workloads/ClDequantizeWorkload.cpp
src/backends/cl/workloads/ClSpaceToBatchNdWorkload.cpp
src/backends/cl/workloads/ClSplitterWorkload.cpp
src/backends/cl/workloads/ClStackWorkload.cpp
src/backends/neon/NeonBackend.cpp
src/backends/neon/NeonTensorHandle.hpp
src/backends/neon/NeonWorkloadFactory.cpp
src/backends/neon/test/NeonWorkloadFactoryHelper.hpp
src/backends/neon/workloads/NeonBatchToSpaceNdWorkload.cpp
src/backends/neon/workloads/NeonComparisonWorkload.cpp
src/backends/neon/workloads/NeonConcatWorkload.cpp
src/backends/neon/workloads/NeonDepthToSpaceWorkload.cpp
src/backends/neon/workloads/NeonQuantizeWorkload.cpp
src/backends/neon/workloads/NeonSpaceToBatchNdWorkload.cpp
src/backends/neon/workloads/NeonSplitterWorkload.cpp
src/backends/neon/workloads/NeonStackWorkload.cpp
src/backends/reference/RefBackend.cpp

index f662754..0a898ec 100644 (file)
 #include <armnn/backends/IBackendContext.hpp>
 #include <armnn/backends/IMemoryManager.hpp>
 
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
 #include <Optimizer.hpp>
 
 #include <arm_compute/runtime/CL/CLBufferAllocator.h>
 
-#include <boost/polymorphic_pointer_cast.hpp>
-
 namespace armnn
 {
 
@@ -41,7 +41,7 @@ IBackendInternal::IWorkloadFactoryPtr ClBackend::CreateWorkloadFactory(
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) const
 {
     return std::make_unique<ClWorkloadFactory>(
-        boost::polymorphic_pointer_downcast<ClMemoryManager>(memoryManager));
+        PolymorphicPointerDowncast<ClMemoryManager>(memoryManager));
 }
 
 IBackendInternal::IWorkloadFactoryPtr ClBackend::CreateWorkloadFactory(
@@ -52,7 +52,7 @@ IBackendInternal::IWorkloadFactoryPtr ClBackend::CreateWorkloadFactory(
     registry.RegisterMemoryManager(memoryManager);
 
     return std::make_unique<ClWorkloadFactory>(
-            boost::polymorphic_pointer_downcast<ClMemoryManager>(memoryManager));
+            PolymorphicPointerDowncast<ClMemoryManager>(memoryManager));
 }
 
 std::vector<ITensorHandleFactory::FactoryId> ClBackend::GetHandleFactoryPreferences() const
index dbcccce..40357d5 100644 (file)
@@ -16,7 +16,6 @@
 #include <arm_compute/runtime/CL/CLScheduler.h>
 
 #include <boost/format.hpp>
-#include <boost/polymorphic_cast.hpp>
 
 namespace cl
 {
index 1830d18..0481307 100644 (file)
@@ -9,6 +9,8 @@
 
 #include <Half.hpp>
 
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
 #include <arm_compute/runtime/CL/CLTensor.h>
 #include <arm_compute/runtime/CL/CLSubTensor.h>
 #include <arm_compute/runtime/IMemoryGroup.h>
@@ -16,8 +18,6 @@
 #include <arm_compute/core/TensorShape.h>
 #include <arm_compute/core/Coordinates.h>
 
-#include <boost/polymorphic_pointer_cast.hpp>
-
 namespace armnn
 {
 
@@ -71,7 +71,7 @@ public:
 
     virtual void SetMemoryGroup(const std::shared_ptr<arm_compute::IMemoryGroup>& memoryGroup) override
     {
-        m_MemoryGroup = boost::polymorphic_pointer_downcast<arm_compute::MemoryGroup>(memoryGroup);
+        m_MemoryGroup = PolymorphicPointerDowncast<arm_compute::MemoryGroup>(memoryGroup);
     }
 
     TensorShape GetStrides() const override
index 1dfba75..6ea2f11 100644 (file)
@@ -7,13 +7,12 @@
 
 #include <armnn/backends/IBackendInternal.hpp>
 #include <armnn/backends/IMemoryManager.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/test/WorkloadFactoryHelper.hpp>
 
 #include <cl/ClBackend.hpp>
 #include <cl/ClWorkloadFactory.hpp>
 
-#include <boost/polymorphic_pointer_cast.hpp>
-
 namespace
 {
 
@@ -29,7 +28,7 @@ struct WorkloadFactoryHelper<armnn::ClWorkloadFactory>
     static armnn::ClWorkloadFactory GetFactory(
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
     {
-        return armnn::ClWorkloadFactory(boost::polymorphic_pointer_downcast<armnn::ClMemoryManager>(memoryManager));
+        return armnn::ClWorkloadFactory(armnn::PolymorphicPointerDowncast<armnn::ClMemoryManager>(memoryManager));
     }
 };
 
index 5370466..e0aebd3 100644 (file)
@@ -5,6 +5,7 @@
 #include "ClConcatWorkload.hpp"
 #include "ClWorkloadUtils.hpp"
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <cl/ClTensorHandle.hpp>
 #include <cl/ClLayerSupport.hpp>
@@ -12,8 +13,6 @@
 #include <arm_compute/core/Types.h>
 #include <arm_compute/runtime/CL/functions/CLConcatenateLayer.h>
 
-#include <boost/polymorphic_pointer_cast.hpp>
-
 namespace armnn
 {
 using namespace armcomputetensorutils;
@@ -72,11 +71,12 @@ ClConcatWorkload::ClConcatWorkload(const ConcatQueueDescriptor& descriptor, cons
     std::vector<arm_compute::ICLTensor *> aclInputs;
     for (auto input : m_Data.m_Inputs)
     {
-        arm_compute::ICLTensor& aclInput  = boost::polymorphic_pointer_downcast<IClTensorHandle>(input)->GetTensor();
+        arm_compute::ICLTensor& aclInput  = armnn::PolymorphicPointerDowncast<IClTensorHandle>(input)->GetTensor();
         aclInputs.emplace_back(&aclInput);
     }
-    arm_compute::ICLTensor& output = boost::polymorphic_pointer_downcast<IClTensorHandle>(
-                                                                         m_Data.m_Outputs[0])->GetTensor();
+
+    arm_compute::ICLTensor& output =
+            armnn::PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
 
     // Create the layer function
     auto layer = std::make_unique<arm_compute::CLConcatenateLayer>();
index 800a984..04885b1 100644 (file)
@@ -8,11 +8,11 @@
 #include "ClWorkloadUtils.hpp"
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 
 #include <cl/ClTensorHandle.hpp>
 
 #include <boost/numeric/conversion/cast.hpp>
-#include <boost/polymorphic_pointer_cast.hpp>
 
 namespace armnn
 {
@@ -45,13 +45,13 @@ ClDepthToSpaceWorkload::ClDepthToSpaceWorkload(const DepthToSpaceQueueDescriptor
     arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
 
     arm_compute::ICLTensor& input =
-        boost::polymorphic_pointer_downcast<IClTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
+        PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
     input.info()->set_data_layout(aclDataLayout);
 
     int32_t blockSize = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize);
 
     arm_compute::ICLTensor& output =
-        boost::polymorphic_pointer_downcast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
+        PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
     output.info()->set_data_layout(aclDataLayout);
 
     m_Layer.configure(&input, &output, blockSize);
index eca795d..eb63900 100644 (file)
@@ -7,6 +7,7 @@
 #include "ClWorkloadUtils.hpp"
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 
 #include <arm_compute/core/Types.h>
@@ -14,8 +15,6 @@
 #include <cl/ClLayerSupport.hpp>
 #include <cl/ClTensorHandle.hpp>
 
-#include <boost/polymorphic_pointer_cast.hpp>
-
 namespace armnn
 {
 using namespace armcomputetensorutils;
@@ -34,10 +33,10 @@ ClDequantizeWorkload::ClDequantizeWorkload(const DequantizeQueueDescriptor& desc
 {
     m_Data.ValidateInputsOutputs("ClDequantizeWorkload", 1, 1);
 
-    arm_compute::ICLTensor& input = boost::polymorphic_pointer_downcast<IClTensorHandle>(
+    arm_compute::ICLTensor& input = armnn::PolymorphicPointerDowncast<IClTensorHandle>(
             m_Data.m_Inputs[0])->GetTensor();
 
-    arm_compute::ICLTensor& output = boost::polymorphic_pointer_downcast<IClTensorHandle>(
+    arm_compute::ICLTensor& output = armnn::PolymorphicPointerDowncast<IClTensorHandle>(
             m_Data.m_Outputs[0])->GetTensor();
 
     m_Layer.reset(new arm_compute::CLDequantizationLayer());
index 64da92c..b87658b 100644 (file)
@@ -9,13 +9,12 @@
 
 #include <aclCommon/ArmComputeUtils.hpp>
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <cl/ClLayerSupport.hpp>
 #include <cl/ClTensorHandle.hpp>
 #include <cl/ClLayerSupport.hpp>
 
-#include <boost/polymorphic_pointer_cast.hpp>
-
 namespace armnn
 {
 using namespace armcomputetensorutils;
@@ -51,9 +50,9 @@ ClSpaceToBatchNdWorkload::ClSpaceToBatchNdWorkload(
     m_Data.ValidateInputsOutputs("ClSpaceToBatchNdWorkload", 1, 1);
 
     arm_compute::ICLTensor& input  =
-        boost::polymorphic_pointer_downcast<IClTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
+        armnn::PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& output =
-        boost::polymorphic_pointer_downcast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
+        armnn::PolymorphicPointerDowncast<IClTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
 
     // ArmNN blockShape is [H, W] Cl asks for W, H
     int32_t blockHeight = boost::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[0]);
index 296e0a3..045fbb7 100644 (file)
@@ -10,6 +10,7 @@
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 #include <aclCommon/ArmComputeUtils.hpp>
 #include <arm_compute/runtime/CL/functions/CLSplit.h>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <cl/ClTensorHandle.hpp>
 
@@ -74,13 +75,13 @@ ClSplitterWorkload::ClSplitterWorkload(const SplitterQueueDescriptor& descriptor
         return;
     }
 
-    arm_compute::ICLTensor& input = boost::polymorphic_pointer_downcast<IClTensorHandle>(
+    arm_compute::ICLTensor& input = armnn::PolymorphicPointerDowncast<IClTensorHandle>(
             m_Data.m_Inputs[0])->GetTensor();
 
     std::vector<arm_compute::ICLTensor *> aclOutputs;
     for (auto output : m_Data.m_Outputs)
     {
-        arm_compute::ICLTensor& aclOutput  = boost::polymorphic_pointer_downcast<IClTensorHandle>(output)->GetTensor();
+        arm_compute::ICLTensor& aclOutput  = armnn::PolymorphicPointerDowncast<IClTensorHandle>(output)->GetTensor();
         aclOutputs.emplace_back(&aclOutput);
     }
 
index 3ba698e..e434f98 100644 (file)
@@ -5,6 +5,7 @@
 #include "ClStackWorkload.hpp"
 #include "ClWorkloadUtils.hpp"
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <cl/ClTensorHandle.hpp>
 #include <cl/ClLayerSupport.hpp>
@@ -12,7 +13,6 @@
 #include <arm_compute/core/Types.h>
 
 #include <boost/numeric/conversion/cast.hpp>
-#include <boost/polymorphic_pointer_cast.hpp>
 
 namespace armnn
 {
@@ -51,10 +51,10 @@ ClStackWorkload::ClStackWorkload(const StackQueueDescriptor& descriptor, const W
     std::vector<arm_compute::ICLTensor*> aclInputs;
     for (auto input : m_Data.m_Inputs)
     {
-        arm_compute::ICLTensor& aclInput = boost::polymorphic_pointer_downcast<IClTensorHandle>(input)->GetTensor();
+        arm_compute::ICLTensor& aclInput = armnn::PolymorphicPointerDowncast<IClTensorHandle>(input)->GetTensor();
         aclInputs.emplace_back(&aclInput);
     }
-    arm_compute::ICLTensor& output = boost::polymorphic_pointer_downcast<IClTensorHandle>(
+    arm_compute::ICLTensor& output = armnn::PolymorphicPointerDowncast<IClTensorHandle>(
                                                                          m_Data.m_Outputs[0])->GetTensor();
 
     m_Layer.reset(new arm_compute::CLStackLayer());
index 4201ba8..841ed27 100644 (file)
 #include <armnn/backends/IBackendContext.hpp>
 #include <armnn/backends/IMemoryManager.hpp>
 
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
 #include <Optimizer.hpp>
 
 #include <arm_compute/runtime/Allocator.h>
 
 #include <boost/cast.hpp>
-#include <boost/polymorphic_pointer_cast.hpp>
 
 namespace armnn
 {
@@ -42,7 +43,7 @@ IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory(
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) const
 {
     return std::make_unique<NeonWorkloadFactory>(
-        boost::polymorphic_pointer_downcast<NeonMemoryManager>(memoryManager));
+        PolymorphicPointerDowncast<NeonMemoryManager>(memoryManager));
 }
 
 IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory(
@@ -53,7 +54,7 @@ IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory(
 
     tensorHandleFactoryRegistry.RegisterMemoryManager(memoryManager);
     return std::make_unique<NeonWorkloadFactory>(
-        boost::polymorphic_pointer_downcast<NeonMemoryManager>(memoryManager));
+        PolymorphicPointerDowncast<NeonMemoryManager>(memoryManager));
 }
 
 IBackendInternal::IBackendContextPtr NeonBackend::CreateBackendContext(const IRuntime::CreationOptions&) const
index f251034..4cc610c 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <aclCommon/ArmComputeTensorHandle.hpp>
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 
 #include <arm_compute/runtime/MemoryGroup.h>
 #include <arm_compute/runtime/IMemoryGroup.h>
@@ -19,8 +20,6 @@
 #include <arm_compute/core/TensorShape.h>
 #include <arm_compute/core/Coordinates.h>
 
-#include <boost/polymorphic_pointer_cast.hpp>
-
 namespace armnn
 {
 
@@ -77,7 +76,7 @@ public:
 
     virtual void SetMemoryGroup(const std::shared_ptr<arm_compute::IMemoryGroup>& memoryGroup) override
     {
-        m_MemoryGroup = boost::polymorphic_pointer_downcast<arm_compute::MemoryGroup>(memoryGroup);
+        m_MemoryGroup = PolymorphicPointerDowncast<arm_compute::MemoryGroup>(memoryGroup);
     }
 
     virtual const void* Map(bool /* blocking = true */) const override
index b3104b9..b7609ee 100644 (file)
@@ -21,8 +21,6 @@
 #include <neon/workloads/NeonWorkloadUtils.hpp>
 #include <neon/workloads/NeonWorkloads.hpp>
 
-#include <boost/polymorphic_cast.hpp>
-
 namespace armnn
 {
 
index 708d231..8d92ddf 100644 (file)
@@ -7,13 +7,12 @@
 
 #include <armnn/backends/IBackendInternal.hpp>
 #include <armnn/backends/IMemoryManager.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/test/WorkloadFactoryHelper.hpp>
 
 #include <neon/NeonBackend.hpp>
 #include <neon/NeonWorkloadFactory.hpp>
 
-#include <boost/polymorphic_pointer_cast.hpp>
-
 namespace
 {
 
@@ -30,7 +29,7 @@ struct WorkloadFactoryHelper<armnn::NeonWorkloadFactory>
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
     {
         return armnn::NeonWorkloadFactory(
-            boost::polymorphic_pointer_downcast<armnn::NeonMemoryManager>(memoryManager));
+            armnn::PolymorphicPointerDowncast<armnn::NeonMemoryManager>(memoryManager));
     }
 };
 
index a6e7aa4..d2f5387 100644 (file)
@@ -6,6 +6,8 @@
 #include "NeonBatchToSpaceNdWorkload.hpp"
 
 #include "NeonWorkloadUtils.hpp"
+
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <ResolveType.hpp>
 
 namespace armnn
@@ -38,9 +40,9 @@ NeonBatchToSpaceNdWorkload::NeonBatchToSpaceNdWorkload(const BatchToSpaceNdQueue
     m_Data.ValidateInputsOutputs("NeonBatchToSpaceNdWorkload", 1, 1);
 
     arm_compute::ITensor& input  =
-            boost::polymorphic_pointer_downcast<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
+            armnn::PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output =
-            boost::polymorphic_pointer_downcast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
+            armnn::PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
 
     arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
index 0edb332..6e1f208 100644 (file)
@@ -6,6 +6,7 @@
 #include "NeonComparisonWorkload.hpp"
 #include <aclCommon/ArmComputeUtils.hpp>
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 
 namespace armnn
@@ -35,9 +36,9 @@ NeonComparisonWorkload::NeonComparisonWorkload(const ComparisonQueueDescriptor&
 {
     m_Data.ValidateInputsOutputs("NeonComparisonWorkload", 2, 1);
 
-    arm_compute::ITensor& input0 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+    arm_compute::ITensor& input0 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
     const arm_compute::ComparisonOperation comparisonOperation = ConvertComparisonOperationToAcl(m_Data.m_Parameters);
 
index 4a9f687..65678aa 100644 (file)
@@ -8,11 +8,10 @@
 #include "NeonWorkloadUtils.hpp"
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <neon/NeonTensorHandle.hpp>
 
-
-
 namespace armnn
 {
 using namespace armcomputetensorutils;
@@ -73,10 +72,10 @@ const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info)
     std::vector<arm_compute::ITensor *> aclInputs;
     for (auto input : m_Data.m_Inputs)
     {
-        arm_compute::ITensor& aclInput  = boost::polymorphic_pointer_downcast<IAclTensorHandle>(input)->GetTensor();
+        arm_compute::ITensor& aclInput  = armnn::PolymorphicPointerDowncast<IAclTensorHandle>(input)->GetTensor();
         aclInputs.emplace_back(&aclInput);
     }
-    arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<IAclTensorHandle>(
+    arm_compute::ITensor& output = armnn::PolymorphicPointerDowncast<IAclTensorHandle>(
         m_Data.m_Outputs[0])->GetTensor();
 
     // Create the layer function
index b30dfcd..12e7d20 100644 (file)
@@ -8,9 +8,9 @@
 #include "NeonWorkloadUtils.hpp"
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 
 #include <boost/numeric/conversion/cast.hpp>
-#include <boost/polymorphic_pointer_cast.hpp>
 
 namespace armnn
 {
@@ -39,13 +39,13 @@ NeonDepthToSpaceWorkload::NeonDepthToSpaceWorkload(const DepthToSpaceQueueDescri
     arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
 
     arm_compute::ITensor& input =
-        boost::polymorphic_pointer_downcast<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
+            PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
     input.info()->set_data_layout(aclDataLayout);
 
     int32_t blockSize = boost::numeric_cast<int32_t>(desc.m_Parameters.m_BlockSize);
 
     arm_compute::ITensor& output =
-        boost::polymorphic_pointer_downcast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
+            PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
     output.info()->set_data_layout(aclDataLayout);
 
     m_Layer.configure(&input, &output, blockSize);
index 4f3ea2c..14fbdf3 100644 (file)
@@ -8,10 +8,9 @@
 
 #include <neon/NeonTensorHandle.hpp>
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <arm_compute/core/Types.h>
 
-#include <boost/polymorphic_pointer_cast.hpp>
-
 namespace armnn
 {
 using namespace armcomputetensorutils;
@@ -30,9 +29,9 @@ NeonQuantizeWorkload::NeonQuantizeWorkload(const QuantizeQueueDescriptor& descri
 {
     m_Data.ValidateInputsOutputs("NeonQuantizeWorkload", 1, 1);
 
-    arm_compute::ITensor& input = boost::polymorphic_pointer_downcast<IAclTensorHandle>(
+    arm_compute::ITensor& input = PolymorphicPointerDowncast<IAclTensorHandle>(
                                                                       m_Data.m_Inputs[0])->GetTensor();
-    arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<IAclTensorHandle>(
+    arm_compute::ITensor& output = PolymorphicPointerDowncast<IAclTensorHandle>(
                                                                        m_Data.m_Outputs[0])->GetTensor();
 
     m_Layer.reset(new arm_compute::NEQuantizationLayer());
index 199e926..d68ab4c 100644 (file)
@@ -6,6 +6,8 @@
 #include "NeonSpaceToBatchNdWorkload.hpp"
 
 #include "NeonWorkloadUtils.hpp"
+
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <ResolveType.hpp>
 
 namespace armnn
@@ -44,9 +46,9 @@ NeonSpaceToBatchNdWorkload::NeonSpaceToBatchNdWorkload(const SpaceToBatchNdQueue
     m_Data.ValidateInputsOutputs("NESpaceToBatchNdWorkload", 1, 1);
 
     arm_compute::ITensor& input  =
-            boost::polymorphic_pointer_downcast<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
+            PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output =
-            boost::polymorphic_pointer_downcast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
+            PolymorphicPointerDowncast<IAclTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
 
     // ArmNN blockShape is [H, W] Cl asks for W, H
     int32_t blockHeight = boost::numeric_cast<int32_t>(m_Data.m_Parameters.m_BlockShape[0]);
index 19fa7c6..de6f137 100644 (file)
@@ -80,7 +80,7 @@ NeonSplitterWorkload::NeonSplitterWorkload(const SplitterQueueDescriptor& descri
     std::vector<arm_compute::ITensor *> aclOutputs;
     for (auto output : m_Data.m_Outputs)
     {
-        arm_compute::ITensor& aclOutput  = boost::polymorphic_pointer_downcast<IAclTensorHandle>(output)->GetTensor();
+        arm_compute::ITensor& aclOutput  = PolymorphicPointerDowncast<IAclTensorHandle>(output)->GetTensor();
         aclOutputs.emplace_back(&aclOutput);
     }
 
index b214943..a3ba8d8 100644 (file)
@@ -6,11 +6,11 @@
 #include "NeonWorkloadUtils.hpp"
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <neon/NeonTensorHandle.hpp>
 
 #include <boost/numeric/conversion/cast.hpp>
-#include <boost/polymorphic_pointer_cast.hpp>
 
 namespace armnn
 {
@@ -53,10 +53,10 @@ NeonStackWorkload::NeonStackWorkload(const StackQueueDescriptor& descriptor, con
     std::vector<arm_compute::ITensor*> aclInputs;
     for (auto input : m_Data.m_Inputs)
     {
-        arm_compute::ITensor& aclInput = boost::polymorphic_pointer_downcast<IAclTensorHandle>(input)->GetTensor();
+        arm_compute::ITensor& aclInput = PolymorphicPointerDowncast<IAclTensorHandle>(input)->GetTensor();
         aclInputs.emplace_back(&aclInput);
     }
-    arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<IAclTensorHandle>(
+    arm_compute::ITensor& output = PolymorphicPointerDowncast<IAclTensorHandle>(
         m_Data.m_Outputs[0])->GetTensor();
 
     m_Layer.reset(new arm_compute::NEStackLayer());
index 584ce78..590fde3 100644 (file)
 #include "RefTensorHandleFactory.hpp"
 
 #include <armnn/BackendRegistry.hpp>
-
 #include <armnn/backends/IBackendContext.hpp>
 #include <armnn/backends/IMemoryManager.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
 
 #include <Optimizer.hpp>
 
-#include <boost/polymorphic_pointer_cast.hpp>
-
 namespace armnn
 {
 
@@ -30,7 +28,7 @@ const BackendId& RefBackend::GetIdStatic()
 IBackendInternal::IWorkloadFactoryPtr RefBackend::CreateWorkloadFactory(
     const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) const
 {
-    return std::make_unique<RefWorkloadFactory>(boost::polymorphic_pointer_downcast<RefMemoryManager>(memoryManager));
+    return std::make_unique<RefWorkloadFactory>(PolymorphicPointerDowncast<RefMemoryManager>(memoryManager));
 }
 
 IBackendInternal::IWorkloadFactoryPtr RefBackend::CreateWorkloadFactory(
@@ -40,7 +38,7 @@ IBackendInternal::IWorkloadFactoryPtr RefBackend::CreateWorkloadFactory(
 
     tensorHandleFactoryRegistry.RegisterMemoryManager(memoryManager);
 
-    return std::make_unique<RefWorkloadFactory>(boost::polymorphic_pointer_downcast<RefMemoryManager>(memoryManager));
+    return std::make_unique<RefWorkloadFactory>(PolymorphicPointerDowncast<RefMemoryManager>(memoryManager));
 }
 
 IBackendInternal::IBackendContextPtr RefBackend::CreateBackendContext(const IRuntime::CreationOptions&) const