IVGCVSW-4246 Clean build of backends with -Wextra
author Derek Lamberti <derek.lamberti@arm.com>
Tue, 10 Dec 2019 22:07:09 +0000 (22:07 +0000)
committer Derek Lamberti <derek.lamberti@arm.com>
Thu, 2 Jan 2020 15:16:28 +0000 (15:16 +0000)
Change-Id: I9e8d5576b3ec04c871785d5f2f9545bf1136e59b
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
26 files changed:
include/armnn/backends/IBackendInternal.hpp
include/armnn/backends/ITensorHandle.hpp
src/backends/backendsCommon/IBackendInternal.cpp
src/backends/backendsCommon/LayerSupportBase.cpp
src/backends/backendsCommon/LayerSupportRules.hpp
src/backends/backendsCommon/MakeWorkloadHelper.hpp
src/backends/backendsCommon/Workload.hpp
src/backends/backendsCommon/WorkloadData.cpp
src/backends/backendsCommon/WorkloadFactory.cpp
src/backends/backendsCommon/WorkloadFactoryBase.hpp
src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
src/backends/backendsCommon/test/MockBackend.cpp
src/backends/backendsCommon/test/MockBackend.hpp
src/backends/backendsCommon/test/TestDynamicBackend.cpp
src/backends/cl/ClLayerSupport.cpp
src/backends/cl/ClWorkloadFactory.cpp
src/backends/cl/OpenClTimer.cpp
src/backends/neon/NeonLayerSupport.cpp
src/backends/reference/RefLayerSupport.cpp
src/backends/reference/RefWorkloadFactory.cpp
src/backends/reference/test/RefWorkloadFactoryHelper.hpp
src/backends/reference/workloads/ArgMinMax.cpp
src/backends/reference/workloads/Dequantize.cpp
src/backends/reference/workloads/DetectionPostProcess.cpp
src/backends/reference/workloads/Gather.cpp
src/backends/reference/workloads/Pooling2d.cpp
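
The change is mechanical: under -Wextra, GCC and Clang enable -Wunused-parameter, which flags every named parameter that a stub or default implementation never touches. The patch silences these warnings with two idioms: commenting out the parameter name where the signature alone documents the intent, and boost::ignore_unused() where the names should stay live. A minimal sketch of both idioms (ExampleBase and its members are hypothetical, not types from the patch):

    #include <boost/core/ignore_unused.hpp>

    struct ExampleBase
    {
        virtual ~ExampleBase() = default;

        // Idiom 1: comment out the parameter names. The signature is
        // unchanged and still self-documenting, but the parameters are
        // formally unnamed, so -Wunused-parameter no longer fires.
        virtual bool IsSupported(int /*input*/, int /*output*/) { return false; }

        // Idiom 2: keep the names and mark them used explicitly, e.g.
        // when Doxygen \param tags must keep matching the declaration.
        virtual bool Import(void* memory, int source)
        {
            boost::ignore_unused(memory, source);
            return false;
        }
    };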

diff --git a/include/armnn/backends/IBackendInternal.hpp b/include/armnn/backends/IBackendInternal.hpp
index 3296d81..3533ace 100644
--- a/include/armnn/backends/IBackendInternal.hpp
+++ b/include/armnn/backends/IBackendInternal.hpp
@@ -127,7 +127,7 @@ public:
     /// (Optional) Register TensorHandleFactories
     /// Either this method or CreateMemoryManager() and
     /// IWorkloadFactory::CreateTensor()/IWorkloadFactory::CreateSubtensor() methods must be implemented.
-    virtual void RegisterTensorHandleFactories(class TensorHandleFactoryRegistry& registry) {}
+    virtual void RegisterTensorHandleFactories(class TensorHandleFactoryRegistry& /*registry*/) {}
 
     /// Returns the version of the Backend API
     static constexpr BackendVersion GetApiVersion() { return BackendVersion(1, 0); }
diff --git a/include/armnn/backends/ITensorHandle.hpp b/include/armnn/backends/ITensorHandle.hpp
index e1b80b8..6ef0e32 100644
--- a/include/armnn/backends/ITensorHandle.hpp
+++ b/include/armnn/backends/ITensorHandle.hpp
@@ -6,6 +6,8 @@
 
 #include <armnn/MemorySources.hpp>
 
+#include <boost/core/ignore_unused.hpp>
+
 namespace armnn
 {
 
@@ -71,7 +73,11 @@ public:
     /// \param memory base address of the memory being imported.
     /// \param source source of the allocation for the memory being imported.
     /// \return true on success or false on failure
-    virtual bool Import(void* memory, MemorySource source) { return false; };
+    virtual bool Import(void* memory, MemorySource source)
+    {
+        boost::ignore_unused(memory, source);
+        return false;
+    };
 };
 
 }
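
Note that Import() keeps its parameter names and uses boost::ignore_unused() rather than the comment idiom, plausibly because the \param documentation above the declaration refers to the parameters by name. On a C++17 toolchain the standard [[maybe_unused]] attribute would achieve the same without the Boost dependency; a sketch of that alternative (an assumption about toolchain support, not what this patch does):

    // C++17 alternative (sketch only):
    virtual bool Import([[maybe_unused]] void* memory,
                        [[maybe_unused]] MemorySource source)
    {
        return false;
    }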
diff --git a/src/backends/backendsCommon/IBackendInternal.cpp b/src/backends/backendsCommon/IBackendInternal.cpp
index d1422d1..ad09730 100644
--- a/src/backends/backendsCommon/IBackendInternal.cpp
+++ b/src/backends/backendsCommon/IBackendInternal.cpp
@@ -10,7 +10,7 @@ namespace armnn
 
 ARMNN_NO_DEPRECATE_WARN_BEGIN
 IBackendInternal::ISubGraphConverterPtr IBackendInternal::CreateSubGraphConverter(
-    const std::shared_ptr<SubGraph>& subGraph) const
+    const std::shared_ptr<SubGraph>& /*subGraph*/) const
 {
     return ISubGraphConverterPtr{};
 }
@@ -20,7 +20,7 @@ IBackendInternal::Optimizations IBackendInternal::GetOptimizations() const
     return Optimizations{};
 }
 
-IBackendInternal::SubGraphUniquePtr IBackendInternal::OptimizeSubGraph(const SubGraph& subGraph,
+IBackendInternal::SubGraphUniquePtr IBackendInternal::OptimizeSubGraph(const SubGraph& /*subGraph*/,
                                                                        bool& optimizationAttempted) const
 {
     optimizationAttempted = false;
@@ -34,7 +34,7 @@ IMemoryManagerUniquePtr IBackendInternal::CreateMemoryManager() const
 }
 
 IBackendInternal::IWorkloadFactoryPtr IBackendInternal::CreateWorkloadFactory(
-    class TensorHandleFactoryRegistry& tensorHandleFactoryRegistry) const
+    class TensorHandleFactoryRegistry& /*tensorHandleFactoryRegistry*/) const
 {
     return IWorkloadFactoryPtr{};
 }
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 00f1d02..8332774 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -34,295 +34,292 @@ bool DefaultLayerSupport(const char* func,
 namespace armnn
 {
 
-bool LayerSupportBase::IsAbsSupported(const TensorInfo &input,
-                                      const TensorInfo &output,
+bool LayerSupportBase::IsAbsSupported(const TensorInfo& /*input*/,
+                                      const TensorInfo& /*output*/,
                                       Optional<std::string &> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsActivationSupported(const TensorInfo& input,
-                                             const TensorInfo& output,
-                                             const ActivationDescriptor& descriptor,
+bool LayerSupportBase::IsActivationSupported(const TensorInfo& /*input*/,
+                                             const TensorInfo& /*output*/,
+                                             const ActivationDescriptor& /*descriptor*/,
                                              Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsAdditionSupported(const TensorInfo& input0,
-                                           const TensorInfo& input1,
-                                           const TensorInfo& output,
+bool LayerSupportBase::IsAdditionSupported(const TensorInfo& /*input0*/,
+                                           const TensorInfo& /*input1*/,
+                                           const TensorInfo& /*output*/,
                                            Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsArgMinMaxSupported(const armnn::TensorInfo &input, const armnn::TensorInfo &output,
-                                            const armnn::ArgMinMaxDescriptor& descriptor,
+bool LayerSupportBase::IsArgMinMaxSupported(const armnn::TensorInfo &/*input*/,
+                                            const armnn::TensorInfo &/*output*/,
+                                            const armnn::ArgMinMaxDescriptor& /*descriptor*/,
                                             armnn::Optional<std::string &> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsBatchNormalizationSupported(const TensorInfo& input,
-                                                     const TensorInfo& output,
-                                                     const TensorInfo& mean,
-                                                     const TensorInfo& var,
-                                                     const TensorInfo& beta,
-                                                     const TensorInfo& gamma,
-                                                     const BatchNormalizationDescriptor& descriptor,
+bool LayerSupportBase::IsBatchNormalizationSupported(const TensorInfo& /*input*/,
+                                                     const TensorInfo& /*output*/,
+                                                     const TensorInfo& /*mean*/,
+                                                     const TensorInfo& /*var*/,
+                                                     const TensorInfo& /*beta*/,
+                                                     const TensorInfo& /*gamma*/,
+                                                     const BatchNormalizationDescriptor& /*descriptor*/,
                                                      Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsBatchToSpaceNdSupported(const TensorInfo& input,
-                                                 const TensorInfo& output,
-                                                 const BatchToSpaceNdDescriptor& descriptor,
+bool LayerSupportBase::IsBatchToSpaceNdSupported(const TensorInfo& /*input*/,
+                                                 const TensorInfo& /*output*/,
+                                                 const BatchToSpaceNdDescriptor& /*descriptor*/,
                                                  Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsComparisonSupported(const TensorInfo& input0,
-                                             const TensorInfo& input1,
-                                             const TensorInfo& output,
-                                             const ComparisonDescriptor& descriptor,
+bool LayerSupportBase::IsComparisonSupported(const TensorInfo& /*input0*/,
+                                             const TensorInfo& /*input1*/,
+                                             const TensorInfo& /*output*/,
+                                             const ComparisonDescriptor& /*descriptor*/,
                                              Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
-                                         const TensorInfo& output,
-                                         const OriginsDescriptor& descriptor,
+bool LayerSupportBase::IsConcatSupported(const std::vector<const TensorInfo*> /*inputs*/,
+                                         const TensorInfo& /*output*/,
+                                         const OriginsDescriptor& /*descriptor*/,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsConstantSupported(const TensorInfo& output,
+bool LayerSupportBase::IsConstantSupported(const TensorInfo& /*output*/,
                                            Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsConvertFp16ToFp32Supported(const TensorInfo& input,
-                                                    const TensorInfo& output,
+bool LayerSupportBase::IsConvertFp16ToFp32Supported(const TensorInfo& /*input*/,
+                                                    const TensorInfo& /*output*/,
                                                     Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsConvertFp32ToFp16Supported(const TensorInfo& input,
-                                                    const TensorInfo& output,
+bool LayerSupportBase::IsConvertFp32ToFp16Supported(const TensorInfo& /*input*/,
+                                                    const TensorInfo& /*output*/,
                                                     Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsConvolution2dSupported(const TensorInfo& input,
-                                                const TensorInfo& output,
-                                                const Convolution2dDescriptor& descriptor,
-                                                const TensorInfo& weights,
-                                                const Optional<TensorInfo>& biases,
+bool LayerSupportBase::IsConvolution2dSupported(const TensorInfo& /*input*/,
+                                                const TensorInfo& /*output*/,
+                                                const Convolution2dDescriptor& /*descriptor*/,
+                                                const TensorInfo& /*weights*/,
+                                                const Optional<TensorInfo>& /*biases*/,
                                                 Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsDebugSupported(const TensorInfo& input,
-                                        const TensorInfo& output,
+bool LayerSupportBase::IsDebugSupported(const TensorInfo& /*input*/,
+                                        const TensorInfo& /*output*/,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsDepthToSpaceSupported(const TensorInfo& input,
-                                               const TensorInfo& output,
-                                               const DepthToSpaceDescriptor& descriptor,
+bool LayerSupportBase::IsDepthToSpaceSupported(const TensorInfo& /*input*/,
+                                               const TensorInfo& /*output*/,
+                                               const DepthToSpaceDescriptor& /*descriptor*/,
                                                Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsDepthwiseConvolutionSupported(const TensorInfo& input,
-                                                       const TensorInfo& output,
-                                                       const DepthwiseConvolution2dDescriptor& descriptor,
-                                                       const TensorInfo& weights,
-                                                       const Optional<TensorInfo>& biases,
+bool LayerSupportBase::IsDepthwiseConvolutionSupported(const TensorInfo& /*input*/,
+                                                       const TensorInfo& /*output*/,
+                                                       const DepthwiseConvolution2dDescriptor& /*descriptor*/,
+                                                       const TensorInfo& /*weights*/,
+                                                       const Optional<TensorInfo>& /*biases*/,
                                                        Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsDequantizeSupported(const TensorInfo& input,
-                                             const TensorInfo& output,
+bool LayerSupportBase::IsDequantizeSupported(const TensorInfo& /*input*/,
+                                             const TensorInfo& /*output*/,
                                              Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
-                                                       const TensorInfo& scores,
-                                                       const TensorInfo& anchors,
-                                                       const TensorInfo& detectionBoxes,
-                                                       const TensorInfo& detectionClasses,
-                                                       const TensorInfo& detectionScores,
-                                                       const TensorInfo& numDetections,
-                                                       const DetectionPostProcessDescriptor& descriptor,
+bool LayerSupportBase::IsDetectionPostProcessSupported(const TensorInfo& /*boxEncodings*/,
+                                                       const TensorInfo& /*scores*/,
+                                                       const TensorInfo& /*anchors*/,
+                                                       const TensorInfo& /*detectionBoxes*/,
+                                                       const TensorInfo& /*detectionClasses*/,
+                                                       const TensorInfo& /*detectionScores*/,
+                                                       const TensorInfo& /*numDetections*/,
+                                                       const DetectionPostProcessDescriptor& /*descriptor*/,
                                                        Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& input,
-                                                              const TensorInfo& output,
-                                                              const DepthwiseConvolution2dDescriptor& descriptor,
-                                                              const TensorInfo& weights,
-                                                              const Optional<TensorInfo>& biases,
+bool LayerSupportBase::IsDilatedDepthwiseConvolutionSupported(const TensorInfo& /*input*/,
+                                                              const TensorInfo& /*output*/,
+                                                              const DepthwiseConvolution2dDescriptor& /*descriptor*/,
+                                                              const TensorInfo& /*weights*/,
+                                                              const Optional<TensorInfo>& /*biases*/,
                                                               Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsDivisionSupported(const TensorInfo& input0,
-                                           const TensorInfo& input1,
-                                           const TensorInfo& output,
+bool LayerSupportBase::IsDivisionSupported(const TensorInfo& /*input0*/,
+                                           const TensorInfo& /*input1*/,
+                                           const TensorInfo& /*output*/,
                                            Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsEqualSupported(const armnn::TensorInfo& input0,
-                                        const armnn::TensorInfo& input1,
-                                        const armnn::TensorInfo& output,
+bool LayerSupportBase::IsEqualSupported(const armnn::TensorInfo& /*input0*/,
+                                        const armnn::TensorInfo& /*input1*/,
+                                        const armnn::TensorInfo& /*output*/,
                                         armnn::Optional<std::string &> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsFakeQuantizationSupported(const TensorInfo& input,
-                                                   const FakeQuantizationDescriptor& descriptor,
+bool LayerSupportBase::IsFakeQuantizationSupported(const TensorInfo& /*input*/,
+                                                   const FakeQuantizationDescriptor& /*descriptor*/,
                                                    Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsFloorSupported(const TensorInfo& input,
-                                        const TensorInfo& output,
+bool LayerSupportBase::IsFloorSupported(const TensorInfo& /*input*/,
+                                        const TensorInfo& /*output*/,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsFullyConnectedSupported(const TensorInfo& input,
-                                                 const TensorInfo& output,
-                                                 const TensorInfo& weights,
-                                                 const TensorInfo& biases,
-                                                 const FullyConnectedDescriptor& descriptor,
+bool LayerSupportBase::IsFullyConnectedSupported(const TensorInfo& /*input*/,
+                                                 const TensorInfo& /*output*/,
+                                                 const TensorInfo& /*weights*/,
+                                                 const TensorInfo& /*biases*/,
+                                                 const FullyConnectedDescriptor& /*descriptor*/,
                                                  Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsGatherSupported(const armnn::TensorInfo& input0,
-                                         const armnn::TensorInfo& input1,
-                                         const armnn::TensorInfo& output,
+bool LayerSupportBase::IsGatherSupported(const armnn::TensorInfo& /*input0*/,
+                                         const armnn::TensorInfo& /*input1*/,
+                                         const armnn::TensorInfo& /*output*/,
                                          armnn::Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsGreaterSupported(const TensorInfo& input0,
-                                          const TensorInfo& input1,
-                                          const TensorInfo& output,
+bool LayerSupportBase::IsGreaterSupported(const TensorInfo& /*input0*/,
+                                          const TensorInfo& /*input1*/,
+                                          const TensorInfo& /*output*/,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsInputSupported(const TensorInfo& input,
+bool LayerSupportBase::IsInputSupported(const TensorInfo& /*input*/,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsInstanceNormalizationSupported(const TensorInfo& input,
-                                                        const TensorInfo& output,
-                                                        const InstanceNormalizationDescriptor& descriptor,
+bool LayerSupportBase::IsInstanceNormalizationSupported(const TensorInfo& /*input*/,
+                                                        const TensorInfo& /*output*/,
+                                                        const InstanceNormalizationDescriptor& /*descriptor*/,
                                                         Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsL2NormalizationSupported(const TensorInfo& input,
-                                                  const TensorInfo& output,
-                                                  const L2NormalizationDescriptor& descriptor,
+bool LayerSupportBase::IsL2NormalizationSupported(const TensorInfo& /*input*/,
+                                                  const TensorInfo& /*output*/,
+                                                  const L2NormalizationDescriptor& /*descriptor*/,
                                                   Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsLogSoftmaxSupported(const TensorInfo& input,
-                                             const TensorInfo& output,
-                                             const LogSoftmaxDescriptor& descriptor,
+bool LayerSupportBase::IsLogSoftmaxSupported(const TensorInfo& /*input*/,
+                                             const TensorInfo& /*output*/,
+                                             const LogSoftmaxDescriptor& /*descriptor*/,
                                              Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsLstmSupported(const TensorInfo& input,
-                                       const TensorInfo& outputStateIn,
-                                       const TensorInfo& cellStateIn,
-                                       const TensorInfo& scratchBuffer,
-                                       const TensorInfo& outputStateOut,
-                                       const TensorInfo& cellStateOut,
-                                       const TensorInfo& output,
-                                       const LstmDescriptor& descriptor,
-                                       const LstmInputParamsInfo& paramsInfo,
+bool LayerSupportBase::IsLstmSupported(const TensorInfo& /*input*/,
+                                       const TensorInfo& /*outputStateIn*/,
+                                       const TensorInfo& /*cellStateIn*/,
+                                       const TensorInfo& /*scratchBuffer*/,
+                                       const TensorInfo& /*outputStateOut*/,
+                                       const TensorInfo& /*cellStateOut*/,
+                                       const TensorInfo& /*output*/,
+                                       const LstmDescriptor& /*descriptor*/,
+                                       const LstmInputParamsInfo& /*paramsInfo*/,
                                        Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsMaximumSupported(const TensorInfo& input0,
-                                          const TensorInfo& input1,
-                                          const TensorInfo& output,
+bool LayerSupportBase::IsMaximumSupported(const TensorInfo& /*input0*/,
+                                          const TensorInfo& /*input1*/,
+                                          const TensorInfo& /*output*/,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsMeanSupported(const TensorInfo& input,
-                                       const TensorInfo& output,
-                                       const MeanDescriptor& descriptor,
+bool LayerSupportBase::IsMeanSupported(const TensorInfo& /*input*/,
+                                       const TensorInfo& /*output*/,
+                                       const MeanDescriptor& /*descriptor*/,
                                        Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsMemCopySupported(const armnn::TensorInfo& input,
-                                          const armnn::TensorInfo& output,
-                                          armnn::Optional<std::string &> reasonIfUnsupported) const
+bool LayerSupportBase::IsMemCopySupported(const armnn::TensorInfo& /*input*/,
+                                          const armnn::TensorInfo& /*output*/,
+                                          armnn::Optional<std::string &> /*reasonIfUnsupported*/) const
 {
-    boost::ignore_unused(input);
-    boost::ignore_unused(output);
     return true;
 }
 
-bool LayerSupportBase::IsMemImportSupported(const armnn::TensorInfo& input,
-                                            const armnn::TensorInfo& output,
-                                            armnn::Optional<std::string &> reasonIfUnsupported) const
+bool LayerSupportBase::IsMemImportSupported(const armnn::TensorInfo& /*input*/,
+                                            const armnn::TensorInfo& /*output*/,
+                                            armnn::Optional<std::string &> /*reasonIfUnsupported*/) const
 {
-    boost::ignore_unused(input);
-    boost::ignore_unused(output);
     return true;
 }
 
-bool LayerSupportBase::IsMergeSupported(const TensorInfo& input0,
-                                        const TensorInfo& input1,
-                                        const TensorInfo& output,
+bool LayerSupportBase::IsMergeSupported(const TensorInfo& /*input0*/,
+                                        const TensorInfo& /*input1*/,
+                                        const TensorInfo& /*output*/,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
@@ -336,180 +333,180 @@ bool LayerSupportBase::IsMergerSupported(const std::vector<const TensorInfo*> in
     return IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsMinimumSupported(const TensorInfo& input0,
-                                          const TensorInfo& input1,
-                                          const TensorInfo& output,
+bool LayerSupportBase::IsMinimumSupported(const TensorInfo& /*input0*/,
+                                          const TensorInfo& /*input1*/,
+                                          const TensorInfo& /*output*/,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsMultiplicationSupported(const TensorInfo& input0,
-                                                 const TensorInfo& input1,
-                                                 const TensorInfo& output,
+bool LayerSupportBase::IsMultiplicationSupported(const TensorInfo& /*input0*/,
+                                                 const TensorInfo& /*input1*/,
+                                                 const TensorInfo& /*output*/,
                                                  Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsNormalizationSupported(const TensorInfo& input,
-                                                const TensorInfo& output,
-                                                const NormalizationDescriptor& descriptor,
+bool LayerSupportBase::IsNormalizationSupported(const TensorInfo& /*input*/,
+                                                const TensorInfo& /*output*/,
+                                                const NormalizationDescriptor& /*descriptor*/,
                                                 Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsOutputSupported(const TensorInfo& output,
+bool LayerSupportBase::IsOutputSupported(const TensorInfo& /*output*/,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsPadSupported(const TensorInfo& input,
-                                      const TensorInfo& output,
-                                      const PadDescriptor& descriptor,
+bool LayerSupportBase::IsPadSupported(const TensorInfo& /*input*/,
+                                      const TensorInfo& /*output*/,
+                                      const PadDescriptor& /*descriptor*/,
                                       Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsPermuteSupported(const TensorInfo& input,
-                                          const TensorInfo& output,
-                                          const PermuteDescriptor& descriptor,
+bool LayerSupportBase::IsPermuteSupported(const TensorInfo& /*input*/,
+                                          const TensorInfo& /*output*/,
+                                          const PermuteDescriptor& /*descriptor*/,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsPooling2dSupported(const TensorInfo& input,
-                                            const TensorInfo& output,
-                                            const Pooling2dDescriptor& descriptor,
+bool LayerSupportBase::IsPooling2dSupported(const TensorInfo& /*input*/,
+                                            const TensorInfo& /*output*/,
+                                            const Pooling2dDescriptor& /*descriptor*/,
                                             Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsPreCompiledSupported(const TensorInfo& input,
-                                              const PreCompiledDescriptor& descriptor,
+bool LayerSupportBase::IsPreCompiledSupported(const TensorInfo& /*input*/,
+                                              const PreCompiledDescriptor& /*descriptor*/,
                                               Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsPreluSupported(const TensorInfo& input,
-                                        const TensorInfo& alpha,
-                                        const TensorInfo& output,
+bool LayerSupportBase::IsPreluSupported(const TensorInfo& /*input*/,
+                                        const TensorInfo& /*alpha*/,
+                                        const TensorInfo& /*output*/,
                                         Optional<std::string &> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsQuantizeSupported(const armnn::TensorInfo& input,
-                                           const armnn::TensorInfo& output,
+bool LayerSupportBase::IsQuantizeSupported(const armnn::TensorInfo& /*input*/,
+                                           const armnn::TensorInfo& /*output*/,
                                            armnn::Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsQuantizedLstmSupported(const TensorInfo& input,
-                                                const TensorInfo& previousCellStateIn,
-                                                const TensorInfo& previousOutputIn,
-                                                const TensorInfo& cellStateOut,
-                                                const TensorInfo& output,
-                                                const QuantizedLstmInputParamsInfo& paramsInfo,
+bool LayerSupportBase::IsQuantizedLstmSupported(const TensorInfo& /*input*/,
+                                                const TensorInfo& /*previousCellStateIn*/,
+                                                const TensorInfo& /*previousOutputIn*/,
+                                                const TensorInfo& /*cellStateOut*/,
+                                                const TensorInfo& /*output*/,
+                                                const QuantizedLstmInputParamsInfo& /*paramsInfo*/,
                                                 Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsReshapeSupported(const TensorInfo& input,
-                                          const ReshapeDescriptor& descriptor,
+bool LayerSupportBase::IsReshapeSupported(const TensorInfo& /*input*/,
+                                          const ReshapeDescriptor& /*descriptor*/,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsResizeBilinearSupported(const TensorInfo& input,
-                                                 const TensorInfo& output,
+bool LayerSupportBase::IsResizeBilinearSupported(const TensorInfo& /*input*/,
+                                                 const TensorInfo& /*output*/,
                                                  Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsResizeSupported(const TensorInfo& input,
-                                         const TensorInfo& output,
-                                         const ResizeDescriptor& descriptor,
+bool LayerSupportBase::IsResizeSupported(const TensorInfo& /*input*/,
+                                         const TensorInfo& /*output*/,
+                                         const ResizeDescriptor& /*descriptor*/,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsRsqrtSupported(const TensorInfo &input,
-                                        const TensorInfo &output,
+bool LayerSupportBase::IsRsqrtSupported(const TensorInfo &/*input*/,
+                                        const TensorInfo &/*output*/,
                                         Optional<std::string &> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsSliceSupported(const TensorInfo& input,
-                                        const TensorInfo& output,
-                                        const SliceDescriptor& descriptor,
+bool LayerSupportBase::IsSliceSupported(const TensorInfo& /*input*/,
+                                        const TensorInfo& /*output*/,
+                                        const SliceDescriptor& /*descriptor*/,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsSoftmaxSupported(const TensorInfo& input,
-                                          const TensorInfo& output,
-                                          const SoftmaxDescriptor& descriptor,
+bool LayerSupportBase::IsSoftmaxSupported(const TensorInfo& /*input*/,
+                                          const TensorInfo& /*output*/,
+                                          const SoftmaxDescriptor& /*descriptor*/,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsSpaceToBatchNdSupported(const TensorInfo& input,
-                                                 const TensorInfo& output,
-                                                 const SpaceToBatchNdDescriptor& descriptor,
+bool LayerSupportBase::IsSpaceToBatchNdSupported(const TensorInfo& /*input*/,
+                                                 const TensorInfo& /*output*/,
+                                                 const SpaceToBatchNdDescriptor& /*descriptor*/,
                                                  Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsSpaceToDepthSupported(const TensorInfo& input,
-                                               const TensorInfo& output,
-                                               const SpaceToDepthDescriptor& descriptor,
+bool LayerSupportBase::IsSpaceToDepthSupported(const TensorInfo& /*input*/,
+                                               const TensorInfo& /*output*/,
+                                               const SpaceToDepthDescriptor& /*descriptor*/,
                                                Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsSplitterSupported(const TensorInfo& input,
-                                           const ViewsDescriptor& descriptor,
+bool LayerSupportBase::IsSplitterSupported(const TensorInfo& /*input*/,
+                                           const ViewsDescriptor& /*descriptor*/,
                                            Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsSplitterSupported(const TensorInfo& input,
-                                           const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
-                                           const ViewsDescriptor& descriptor,
+bool LayerSupportBase::IsSplitterSupported(const TensorInfo& /*input*/,
+                                           const std::vector<std::reference_wrapper<TensorInfo>>& /*outputs*/,
+                                           const ViewsDescriptor& /*descriptor*/,
                                            Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
-                                        const TensorInfo& output,
-                                        const StackDescriptor& descriptor,
+bool LayerSupportBase::IsStackSupported(const std::vector<const TensorInfo*>& /*inputs*/,
+                                        const TensorInfo& /*output*/,
+                                        const StackDescriptor& /*descriptor*/,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
-                                          const std::vector<const TensorInfo*>& outputs,
-                                          const StandInDescriptor& descriptor,
+bool LayerSupportBase::IsStandInSupported(const std::vector<const TensorInfo*>& /*inputs*/,
+                                          const std::vector<const TensorInfo*>& /*outputs*/,
+                                          const StandInDescriptor& /*descriptor*/,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
     if (reasonIfUnsupported)
@@ -523,36 +520,36 @@ bool LayerSupportBase::IsStandInSupported(const std::vector<const TensorInfo*>&
     return false;
 }
 
-bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo& input,
-                                               const TensorInfo& output,
-                                               const StridedSliceDescriptor& descriptor,
+bool LayerSupportBase::IsStridedSliceSupported(const TensorInfo& /*input*/,
+                                               const TensorInfo& /*output*/,
+                                               const StridedSliceDescriptor& /*descriptor*/,
                                                Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsSubtractionSupported(const TensorInfo& input0,
-                                              const TensorInfo& input1,
-                                              const TensorInfo& output,
+bool LayerSupportBase::IsSubtractionSupported(const TensorInfo& /*input0*/,
+                                              const TensorInfo& /*input1*/,
+                                              const TensorInfo& /*output*/,
                                               Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsSwitchSupported(const TensorInfo& input0,
-                                         const TensorInfo& input1,
-                                         const TensorInfo& output0,
-                                         const TensorInfo& output1,
+bool LayerSupportBase::IsSwitchSupported(const TensorInfo& /*input0*/,
+                                         const TensorInfo& /*input1*/,
+                                         const TensorInfo& /*output0*/,
+                                         const TensorInfo& /*output1*/,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
-bool LayerSupportBase::IsTransposeConvolution2dSupported(const TensorInfo& input,
-                                                         const TensorInfo& output,
-                                                         const TransposeConvolution2dDescriptor& descriptor,
-                                                         const TensorInfo& weights,
-                                                         const Optional<TensorInfo>& biases,
+bool LayerSupportBase::IsTransposeConvolution2dSupported(const TensorInfo& /*input*/,
+                                                         const TensorInfo& /*output*/,
+                                                         const TransposeConvolution2dDescriptor& /*descriptor*/,
+                                                         const TensorInfo& /*weights*/,
+                                                         const Optional<TensorInfo>& /*biases*/,
                                                          Optional<std::string&> reasonIfUnsupported) const
 {
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
diff --git a/src/backends/backendsCommon/LayerSupportRules.hpp b/src/backends/backendsCommon/LayerSupportRules.hpp
index bf997db..08189f9 100644
--- a/src/backends/backendsCommon/LayerSupportRules.hpp
+++ b/src/backends/backendsCommon/LayerSupportRules.hpp
@@ -55,7 +55,7 @@ struct Rule
 };
 
 template<typename T>
-bool AllTypesAreEqualImpl(T t)
+bool AllTypesAreEqualImpl(T)
 {
     return true;
 }
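
This hunk shows a third idiom: dropping the parameter name altogether, the lightest fix when a template base case never inspects its argument. A generic sketch of how such a base case pairs with the variadic recursion it terminates (a hypothetical simplification; the real overloads in LayerSupportRules.hpp compare TensorInfo properties rather than raw values):

    // Base case: a single value is trivially "all equal". The value is
    // never read, so the parameter is left unnamed and no warning fires.
    template<typename T>
    bool AllTypesAreEqualImpl(T)
    {
        return true;
    }

    // Recursive case (sketch): compare adjacent arguments pairwise.
    template<typename T, typename... Rest>
    bool AllTypesAreEqualImpl(T t1, T t2, Rest... rest)
    {
        return (t1 == t2) && AllTypesAreEqualImpl(t2, rest...);
    }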
diff --git a/src/backends/backendsCommon/MakeWorkloadHelper.hpp b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
index 71358bb..9d8174c 100644
--- a/src/backends/backendsCommon/MakeWorkloadHelper.hpp
+++ b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
@@ -31,6 +31,9 @@ struct MakeWorkloadForType<NullWorkload>
                                               const WorkloadInfo& info,
                                               Args&&... args)
     {
+        boost::ignore_unused(descriptor);
+        boost::ignore_unused(info);
+        boost::ignore_unused(args...);
         return nullptr;
     }
 };
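
boost::ignore_unused() is variadic, which is what lets the args... pack above be silenced in a single call. A stand-alone equivalent is just an empty inline function template that uses its arguments (IgnoreUnused is a hypothetical name, not the Boost spelling):

    // Hand-rolled sketch of what boost::ignore_unused does for a pack:
    // the call "uses" every argument, so -Wunused-parameter stays quiet.
    template <typename... Ts>
    inline void IgnoreUnused(Ts&&...) {}

Calling IgnoreUnused(descriptor, info, args...) expands the whole pack into one call, so the stub stays warning-free whatever its arity.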
diff --git a/src/backends/backendsCommon/Workload.hpp b/src/backends/backendsCommon/Workload.hpp
index b5851ad..e030686 100644
--- a/src/backends/backendsCommon/Workload.hpp
+++ b/src/backends/backendsCommon/Workload.hpp
@@ -27,7 +27,7 @@ public:
 
     virtual profiling::ProfilingGuid GetGuid() const = 0;
 
-    virtual void RegisterDebugCallback(const DebugCallbackFunction& func) {}
+    virtual void RegisterDebugCallback(const DebugCallbackFunction& /*func*/) {}
 };
 
 // NullWorkload used to denote an unsupported workload when used by the MakeWorkload<> template
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index d9a1f46..c3dd601 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -2554,7 +2554,7 @@ void SwitchQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
                               "output_1");
 }
 
-void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& /*workloadInfo*/) const
 {
     // This is internally generated so it should not need validation.
 }
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 805ec7b..a4327e4 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -1074,358 +1074,358 @@ bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLaye
 }
 
 // Default Implementations
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
-                                                       const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateAbs(const AbsQueueDescriptor& /*descriptor*/,
+                                                       const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
-                                                              const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
+                                                              const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
-                                                            const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
+                                                            const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
-                                                             const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& /*descriptor*/,
+                                                             const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchNormalization(
-    const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
+    const BatchNormalizationQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
-                                                                  const WorkloadInfo& Info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& /*desc*/,
+                                                                  const WorkloadInfo& /*Info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& descriptor,
-                                                              const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
+                                                              const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& descriptor,
-                                                          const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateConcat(const ConcatQueueDescriptor& /*descriptor*/,
+                                                          const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
-                                                            const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& /*descriptor*/,
+                                                            const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
-                                                                     const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*desc*/,
+                                                                     const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
-                                                                     const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& /*desc*/,
+                                                                     const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
-                                                                 const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& /*descriptor*/,
+                                                                 const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
-                                                         const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& /*descriptor*/,
+                                                         const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
-                                                                const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthToSpace(const DepthToSpaceQueueDescriptor& /*descriptor*/,
+                                                                const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthwiseConvolution2d(
-    const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
+    const DepthwiseConvolution2dQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDequantize(
-    const DequantizeQueueDescriptor& descriptor, const WorkloadInfo& info) const
+    const DequantizeQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDetectionPostProcess(
-    const DetectionPostProcessQueueDescriptor& descriptor, const WorkloadInfo& info) const
+    const DetectionPostProcessQueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& descriptor,
-                                                            const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
+                                                            const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
-                                                         const WorkloadInfo& Info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& /*descriptor*/,
+                                                         const WorkloadInfo& /*Info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
-                                                                    const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*desc*/,
+                                                                    const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
-                                                         const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& /*descriptor*/,
+                                                         const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
-                                                                  const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& /*descriptor*/,
+                                                                  const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& descriptor,
-                                                          const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& /*descriptor*/,
+                                                          const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
-                                                           const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& /*descriptor*/,
+                                                           const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateInstanceNormalization(
-    const InstanceNormalizationQueueDescriptor& descriptor,
-    const WorkloadInfo& info) const
+    const InstanceNormalizationQueueDescriptor& /*descriptor*/,
+    const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
-                                                                   const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& /*desc*/,
+                                                                   const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
-                                                              const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
+                                                              const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
-                                                        const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& /*descriptor*/,
+                                                        const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& descriptor,
-                                                           const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
+                                                           const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
-                                                        const WorkloadInfo& Info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& /*descriptor*/,
+                                                        const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
-                                                           const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& /*descriptor*/,
+                                                           const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& descriptor,
-                                                             const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemImport(const MemImportQueueDescriptor& /*descriptor*/,
+                                                             const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& descriptor,
-                                                         const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerge(const MergeQueueDescriptor& /*descriptor*/,
+                                                         const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor,
-                                                          const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerger(const MergerQueueDescriptor& /*descriptor*/,
+                                                          const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
-                                                           const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
+                                                           const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
-                                                                  const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
+                                                                  const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor,
-                                                                 const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& /*descriptor*/,
+                                                                 const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
-                                                          const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& /*descriptor*/,
+                                                          const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
-                                                       const WorkloadInfo& Info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& /*descriptor*/,
+                                                       const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
-                                                           const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& /*descriptor*/,
+                                                           const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
-                                                             const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& /*descriptor*/,
+                                                             const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
-                                                               const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
+                                                               const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor &descriptor,
-                                                         const WorkloadInfo &info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreatePrelu(const PreluQueueDescriptor &/*descriptor*/,
+                                                         const WorkloadInfo &/*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
-                                                            const WorkloadInfo& Info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& /*descriptor*/,
+                                                            const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
-                                                                 const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& /*descriptor*/,
+                                                                 const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
-                                                           const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
+                                                           const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
-                                                                  const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& /*descriptor*/,
+                                                                  const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
-                                                            const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
+                                                          const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
-                                                         const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& /*descriptor*/,
+                                                         const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
-                                                         const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
+                                                         const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
-                                                           const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/,
+                                                           const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
-                                                            const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& /*descriptor*/,
+                                                            const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
-                                                                  const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& /*descriptor*/,
+                                                                  const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
-                                                                const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& /*descriptor*/,
+                                                                const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
-                                                         const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateStack(const StackQueueDescriptor& /*descriptor*/,
+                                                         const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
-                                                                const WorkloadInfo& Info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& /*descriptor*/,
+                                                                const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
-                                                               const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
+                                                               const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
-std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& descriptor,
-                                                          const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSwitch(const SwitchQueueDescriptor& /*descriptor*/,
+                                                          const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
 
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateTransposeConvolution2d(
-    const TransposeConvolution2dQueueDescriptor& descriptor,
-    const WorkloadInfo& info) const
+    const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
+    const WorkloadInfo& /*info*/) const
 {
     return std::unique_ptr<IWorkload>();
 }
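
The stubs above all use the patch's first idiom: comment out the parameter name so -Wextra has nothing to flag. A minimal standalone sketch of both idioms used in this change, with placeholder types rather than ArmNN's real classes:

    // Minimal sketch (placeholder types, not ArmNN's classes) of the two
    // idioms used throughout this patch.
    // Build check: g++ -std=c++14 -Wall -Wextra -c unused_param_idioms.cpp
    #include <boost/core/ignore_unused.hpp>

    struct QueueDescriptor {};
    struct WorkloadInfo {};

    // Idiom 1: comment out the parameter name. The signature, and therefore
    // any override relationship, is unchanged; there is no name left to warn on.
    int CreateStub(const QueueDescriptor& /*descriptor*/, const WorkloadInfo& /*info*/)
    {
        return 0;
    }

    // Idiom 2: keep the names and mark them as deliberately consumed.
    // boost::ignore_unused is an empty variadic inline function, so it
    // generates no code; it suits bodies where the parameters are used only
    // in some build configurations or only inside asserts.
    int CreateStub2(const QueueDescriptor& descriptor, const WorkloadInfo& info)
    {
        boost::ignore_unused(descriptor, info);
        return 0;
    }
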
index 6f74374..1947c69 100644 (file)
@@ -16,240 +16,242 @@ public:
     bool SupportsSubTensors() const override
     { return false; };
 
-    std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& parent, TensorShape const& subTensorShape,
-                                                         unsigned int const *subTensorOrigin) const override
+    std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& /*parent*/,
+                                                         TensorShape const& /*subTensorShape*/,
+                                                         unsigned int const* /*subTensorOrigin*/) const override
     { return nullptr; };
 
-    std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateInput(const InputQueueDescriptor& /*descriptor*/,
+                                           const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
-                                                      const bool IsMemoryManaged = true) const override
+    std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& /*tensorInfo*/,
+                                                      const bool /*IsMemoryManaged*/) const override
     { return nullptr; }
 
-    std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo, DataLayout dataLayout,
-                                                      const bool IsMemoryManaged = true) const override
+    std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& /*tensorInfo*/,
+                                                      DataLayout /*dataLayout*/,
+                                                      const bool /*IsMemoryManaged*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
-                                         const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& /*descriptor*/,
+                                         const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
-                                                const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& /*descriptor*/,
+                                                const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor,
-                                              const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& /*descriptor*/,
+                                              const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
-                                               const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateArgMinMax(const ArgMinMaxQueueDescriptor& /*descriptor*/,
+                                               const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor,
-                                                        const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& /*descriptor*/,
+                                                        const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
-                                                    const WorkloadInfo& Info) const override
+    std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& /*descriptor*/,
+                                                    const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateComparison(const ComparisonQueueDescriptor& descriptor,
-                                                const WorkloadInfo& Info) const override
+    std::unique_ptr<IWorkload> CreateComparison(const ComparisonQueueDescriptor& /*descriptor*/,
+                                                const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& descriptor,
-                                            const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateConcat(const ConcatQueueDescriptor& /*descriptor*/,
+                                            const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
-                                              const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& /*descriptor*/,
+                                              const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
-                                                       const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& /*descriptor*/,
+                                                       const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
-                                                       const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& /*descriptor*/,
+                                                       const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
-                                                   const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& /*descriptor*/,
+                                                   const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& /*descriptor*/,
+                                           const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateDepthToSpace(const DepthToSpaceQueueDescriptor& descriptor,
-                                                  const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateDepthToSpace(const DepthToSpaceQueueDescriptor& /*descriptor*/,
+                                                  const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor& descriptor,
-                                                            const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(const DepthwiseConvolution2dQueueDescriptor& /*descriptor*/,
+                                                            const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& descriptor,
-                                                const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& /*descriptor*/,
+                                                const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor& descriptor,
-                                                          const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateDetectionPostProcess(const DetectionPostProcessQueueDescriptor& /*descriptor*/,
+                                                          const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
-                                              const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& /*descriptor*/,
+                                              const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
-                                                      const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& /*descriptor*/,
+                                                      const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& /*descriptor*/,
+                                           const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
-                                                    const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& /*descriptor*/,
+                                                    const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
-                                            const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& /*descriptor*/,
+                                            const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor& descriptor,
-                                                           const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateInstanceNormalization(const InstanceNormalizationQueueDescriptor& /*descriptor*/,
+                                                           const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
-                                                     const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& /*descriptor*/,
+                                                     const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
-                                                const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
+                                                const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateLstm(const LstmQueueDescriptor& descriptor,
-                                          const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateLstm(const LstmQueueDescriptor& /*descriptor*/,
+                                          const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateMaximum(const MaximumQueueDescriptor& descriptor,
-                                             const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateMaximum(const MaximumQueueDescriptor& /*descriptor*/,
+                                             const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateMean(const MeanQueueDescriptor& descriptor,
-                                          const WorkloadInfo& Info) const override
+    std::unique_ptr<IWorkload> CreateMean(const MeanQueueDescriptor& /*descriptor*/,
+                                          const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
-                                             const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& /*descriptor*/,
+                                             const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& descriptor,
-                                               const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateMemImport(const MemImportQueueDescriptor& /*descriptor*/,
+                                               const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateMerge(const MergeQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateMerge(const MergeQueueDescriptor& /*descriptor*/,
+                                           const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
-                                             const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& /*descriptor*/,
+                                             const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
-                                                    const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& /*descriptor*/,
+                                                    const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor,
-                                                   const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& /*descriptor*/,
+                                                   const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor,
-                                            const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& /*descriptor*/,
+                                            const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
-                                         const WorkloadInfo& Info) const override
+    std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& /*descriptor*/,
+                                         const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& descriptor,
-                                             const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& /*descriptor*/,
+                                             const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
-                                               const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& /*descriptor*/,
+                                               const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
-                                                 const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
+                                                 const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreatePrelu(const PreluQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreatePrelu(const PreluQueueDescriptor& /*descriptor*/,
+                                           const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
-                                              const WorkloadInfo& Info) const override
+    std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& /*descriptor*/,
+                                              const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
-                                                   const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& /*descriptor*/,
+                                                   const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
-                                             const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
+                                             const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
-                                            const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& /*descriptor*/,
+                                            const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& /*descriptor*/,
+                                           const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& /*descriptor*/,
+                                           const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
-                                             const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& /*descriptor*/,
+                                             const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
-                                                    const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& /*descriptor*/,
+                                                    const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
-                                                  const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& /*descriptor*/,
+                                                  const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
-                                                 const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& /*descriptor*/,
+                                                 const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
-                                              const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& /*descriptor*/,
+                                              const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
-                                           const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& /*descriptor*/,
+                                           const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
-                                                  const WorkloadInfo& Info) const override
+    std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& /*descriptor*/,
+                                                  const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateSwitch(const SwitchQueueDescriptor& descriptor,
-                                            const WorkloadInfo& Info) const override
+    std::unique_ptr<IWorkload> CreateSwitch(const SwitchQueueDescriptor& /*descriptor*/,
+                                            const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
-    std::unique_ptr<IWorkload> CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& descriptor,
-                                                            const WorkloadInfo& info) const override
+    std::unique_ptr<IWorkload> CreateTransposeConvolution2d(const TransposeConvolution2dQueueDescriptor& /*descriptor*/,
+                                                            const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 };
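
A side effect worth noting in the stub factory above: the overrides do not restate the base class's "= true" default. Default arguments bind to the static type at the call site, so the base declaration already supplies them; a reduced sketch with hypothetical names:

    // Reduced sketch (hypothetical types) of why the overrides above omit
    // the "= true" default: defaults are looked up on the static type of
    // the call expression, so the base declaration already supplies them.
    #include <memory>

    struct TensorInfo {};
    struct ITensorHandle { virtual ~ITensorHandle() = default; };

    struct IFactory
    {
        virtual ~IFactory() = default;
        virtual std::unique_ptr<ITensorHandle>
        CreateTensorHandle(const TensorInfo& info, bool isMemoryManaged = true) const = 0;
    };

    struct StubFactory : IFactory
    {
        // No default repeated here; calls through IFactory& still get "true".
        std::unique_ptr<ITensorHandle>
        CreateTensorHandle(const TensorInfo& /*info*/, bool /*isMemoryManaged*/) const override
        {
            return nullptr;
        }
    };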
 
index 7ab5ee4..6924beb 100644 (file)
@@ -414,6 +414,7 @@ struct LayerTypePolicy<armnn::LayerType::name, DataType> \
     static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
         unsigned int nIn, unsigned int nOut) \
     { \
+        boost::ignore_unused(factory, nIn, nOut); \
         return std::unique_ptr<armnn::IWorkload>(); \
     } \
 };
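
Here ignore_unused is the only workable idiom, since the stub body is stamped out by a macro for every layer type and the names come from the shared body. A simplified sketch with a hypothetical macro:

    // Simplified sketch (hypothetical macro): when one body is generated for
    // many layer types, adding boost::ignore_unused to the shared body is
    // easier than editing every generated signature.
    #include <boost/core/ignore_unused.hpp>
    #include <memory>

    struct IWorkload { virtual ~IWorkload() = default; };

    #define DECLARE_DUMMY_POLICY(name)                                      \
    struct name##Policy                                                     \
    {                                                                       \
        static std::unique_ptr<IWorkload> MakeDummyWorkload(void* factory,  \
            unsigned int nIn, unsigned int nOut)                            \
        {                                                                   \
            boost::ignore_unused(factory, nIn, nOut);                       \
            return std::unique_ptr<IWorkload>();                            \
        }                                                                   \
    };

    DECLARE_DUMMY_POLICY(Addition)
    DECLARE_DUMMY_POLICY(Softmax)
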
index c5a4ed9..367d9cb 100644 (file)
@@ -88,7 +88,7 @@ const BackendId& MockBackend::GetIdStatic()
 }
 
 IBackendInternal::IWorkloadFactoryPtr MockBackend::CreateWorkloadFactory(
-    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager) const
+    const IBackendInternal::IMemoryManagerSharedPtr& /*memoryManager*/) const
 {
     return IWorkloadFactoryPtr{};
 }
index 437f23d..771e499 100644 (file)
@@ -37,32 +37,32 @@ public:
 
 class MockLayerSupport : public LayerSupportBase {
 public:
-    bool IsInputSupported(const TensorInfo& input,
-                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override
+    bool IsInputSupported(const TensorInfo& /*input*/,
+                          Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
     {
         return true;
     }
 
-    bool IsOutputSupported(const TensorInfo& input,
-                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override
+    bool IsOutputSupported(const TensorInfo& /*input*/,
+                          Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
     {
         return true;
     }
 
-    bool IsAdditionSupported(const TensorInfo& input0,
-                             const TensorInfo& input1,
-                             const TensorInfo& output,
-                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override
+    bool IsAdditionSupported(const TensorInfo& /*input0*/,
+                             const TensorInfo& /*input1*/,
+                             const TensorInfo& /*output*/,
+                             Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
     {
         return true;
     }
 
-    bool IsConvolution2dSupported(const TensorInfo& input,
-                                  const TensorInfo& output,
-                                  const Convolution2dDescriptor& descriptor,
-                                  const TensorInfo& weights,
-                                  const Optional<TensorInfo>& biases,
-                                  Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override
+    bool IsConvolution2dSupported(const TensorInfo& /*input*/,
+                                  const TensorInfo& /*output*/,
+                                  const Convolution2dDescriptor& /*descriptor*/,
+                                  const TensorInfo& /*weights*/,
+                                  const Optional<TensorInfo>& /*biases*/,
+                                  Optional<std::string&> /*reasonIfUnsupported = EmptyOptional()*/) const override
     {
         return true;
     }
index a53f169..cbfe093 100644 (file)
@@ -7,6 +7,8 @@
 
 #include <armnn/backends/IBackendInternal.hpp>
 
+#include <boost/core/ignore_unused.hpp>
+
 constexpr const char* TestDynamicBackendId()
 {
 #if defined(VALID_TEST_DYNAMIC_BACKEND_1)
@@ -63,6 +65,7 @@ public:
     }
     IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager) const override
     {
+        boost::ignore_unused(memoryManager);
         return IWorkloadFactoryPtr{};
     }
     ILayerSupportSharedPtr GetLayerSupport() const override
index 6e1e9d9..bce91ab 100644 (file)
@@ -88,8 +88,10 @@ bool IsMatchingStride(uint32_t actualStride)
     return IsMatchingStride<FirstStride>(actualStride) || IsMatchingStride<SecondStride, ValidStrides...>(actualStride);
 }
 
-bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported)
+template<typename ... Args>
+bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
 {
+    boost::ignore_unused(reasonIfUnsupported, (args)...);
 #if defined(ARMCOMPUTECL_ENABLED)
     return true;
 #else
@@ -124,7 +126,7 @@ inline bool IsWorkloadSupported(FuncType&& func, Optional<std::string&> reasonIf
     return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
 #else
 #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
-    return IsClBackendSupported(reasonIfUnsupported);
+    return IsClBackendSupported(reasonIfUnsupported, __VA_ARGS__);
 #endif
 
 template<typename FloatFunc, typename Uint8Func, typename ... Params>
@@ -461,7 +463,7 @@ bool ClLayerSupport::IsGreaterSupported(const TensorInfo& input0,
 bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
                                       Optional<std::string&> reasonIfUnsupported) const
 {
-    return IsClBackendSupported(reasonIfUnsupported);
+    return IsClBackendSupported(reasonIfUnsupported, input);
 }
 
 bool ClLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
@@ -579,7 +581,7 @@ bool ClLayerSupport::IsNormalizationSupported(const TensorInfo& input,
 bool ClLayerSupport::IsOutputSupported(const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    return IsClBackendSupported(reasonIfUnsupported);
+    return IsClBackendSupported(reasonIfUnsupported, output);
 }
 
 bool ClLayerSupport::IsPadSupported(const TensorInfo& input,
@@ -758,6 +760,7 @@ bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                        *splitAxis.begin());
     }
 #endif
+    boost::ignore_unused(descriptor);
     for (auto output : outputs)
     {
         if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
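
The IsClBackendSupported change is the one non-mechanical fix in this file: the stub branch of FORWARD_WORKLOAD_VALIDATE_FUNC now forwards __VA_ARGS__, so the function must accept and discard an arbitrary argument list. A standalone sketch, with std::string* standing in for armnn::Optional<std::string&> and BACKEND_ENABLED for ARMCOMPUTECL_ENABLED:

    // Standalone sketch of the variadic fallback pattern; std::string* and
    // BACKEND_ENABLED are stand-ins, not ArmNN's real types or macros.
    #include <boost/core/ignore_unused.hpp>
    #include <string>

    template <typename... Args>
    bool IsBackendSupported(std::string* reasonIfUnsupported, Args... args)
    {
        // Formally consumes every argument; compiles to nothing, so the
        // enabled configuration pays no cost either.
        boost::ignore_unused(reasonIfUnsupported, args...);
    #if defined(BACKEND_ENABLED)
        return true;
    #else
        if (reasonIfUnsupported != nullptr)
        {
            *reasonIfUnsupported = "backend was not compiled in";
        }
        return false;
    #endif
    }

    // The dispatch macro can then pass the same argument list on both paths;
    // func is simply ignored on the stub path, mirroring the patch.
    #define FORWARD_VALIDATE(func, reason, ...) \
        return IsBackendSupported(reason, __VA_ARGS__);
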
index 531f371..4746167 100644 (file)
@@ -23,6 +23,7 @@
 #include <arm_compute/runtime/CL/CLBufferAllocator.h>
 #include <arm_compute/runtime/CL/CLScheduler.h>
 
+#include <boost/core/ignore_unused.hpp>
 #include <boost/polymorphic_cast.hpp>
 #include <boost/format.hpp>
 
@@ -84,6 +85,7 @@ ClWorkloadFactory::ClWorkloadFactory(const std::shared_ptr<ClMemoryManager>& mem
 std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                      const bool IsMemoryManaged) const
 {
+    boost::ignore_unused(IsMemoryManaged);
     std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo);
     tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
 
@@ -94,6 +96,7 @@ std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const Tenso
                                                                      DataLayout dataLayout,
                                                                      const bool IsMemoryManaged) const
 {
+    boost::ignore_unused(IsMemoryManaged);
     std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo, dataLayout);
     tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
 
index 57552d7..ee3c114 100644 (file)
@@ -8,6 +8,8 @@
 #include <string>
 #include <sstream>
 
+#include <boost/core/ignore_unused.hpp>
+
 namespace armnn
 {
 
@@ -29,6 +31,7 @@ void OpenClTimer::Start()
                                 const cl_event * event_wait_list,
                                 cl_event *       event)
         {
+            boost::ignore_unused(event);
             cl_int retVal = 0;
 
             // Get the name of the kernel
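
The OpenClTimer case differs from the factory stubs: the lambda's parameter list is pinned to the OpenCL enqueue signature, so the unused event argument cannot simply be removed. A reduced sketch with a hypothetical callback type:

    // Reduced sketch (hypothetical callback type): when a signature is fixed
    // by an external API, an unused parameter can only be left unnamed or
    // consumed with boost::ignore_unused.
    #include <boost/core/ignore_unused.hpp>

    using EnqueueFn = int (*)(const char* kernelName, void* event);

    int InterceptEnqueue(const char* kernelName, void* event)
    {
        boost::ignore_unused(event);              // required by the signature, unused here
        return (kernelName != nullptr) ? 0 : -1;  // stand-in for the real bookkeeping
    }

    EnqueueFn g_Enqueue = &InterceptEnqueue;      // must match EnqueueFn exactly
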
index 3fc3233..c79aa78 100644 (file)
@@ -66,8 +66,10 @@ namespace armnn
 namespace
 {
 
-bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported)
+template<typename ... Args>
+bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
 {
+    boost::ignore_unused(reasonIfUnsupported, (args)...);
 #if defined(ARMCOMPUTENEON_ENABLED)
     return true;
 #else
@@ -111,7 +113,7 @@ inline bool IsWorkloadSupported(FuncType& func, Optional<std::string&> reasonIfU
     return IsWorkloadSupported(func, reasonIfUnsupported, __VA_ARGS__);
 #else
 #define FORWARD_WORKLOAD_VALIDATE_FUNC(func, reasonIfUnsupported, ...) \
-    return IsNeonBackendSupported(reasonIfUnsupported);
+    return IsNeonBackendSupported(reasonIfUnsupported, __VA_ARGS__);
 #endif
 
 #if defined(ARMCOMPUTENEON_ENABLED)
@@ -427,7 +429,7 @@ bool NeonLayerSupport::IsGreaterSupported(const armnn::TensorInfo& input0,
 bool NeonLayerSupport::IsInputSupported(const TensorInfo& input,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
-    return IsNeonBackendSupported(reasonIfUnsupported);
+    return IsNeonBackendSupported(reasonIfUnsupported, input);
 }
 
 bool NeonLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
@@ -545,7 +547,7 @@ bool NeonLayerSupport::IsNormalizationSupported(const TensorInfo& input,
 bool NeonLayerSupport::IsOutputSupported(const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    return IsNeonBackendSupported(reasonIfUnsupported);
+    return IsNeonBackendSupported(reasonIfUnsupported, output);
 }
 
 bool NeonLayerSupport::IsPadSupported(const TensorInfo& input,
@@ -721,6 +723,7 @@ bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
                                        *splitAxis.begin());
     }
 #endif
+    boost::ignore_unused(descriptor);
     for (auto output : outputs)
     {
         if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
index 19b7615..ebcd1f6 100644 (file)
@@ -648,6 +648,8 @@ bool RefLayerSupport::IsDetectionPostProcessSupported(const TensorInfo& boxEncod
                                                       const DetectionPostProcessDescriptor& descriptor,
                                                       Optional<std::string&> reasonIfUnsupported) const
 {
+    boost::ignore_unused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
+
     bool supported = true;
 
     std::array<DataType,3> supportedInputTypes =
@@ -863,8 +865,8 @@ bool RefLayerSupport::IsGreaterSupported(const TensorInfo& input0,
                                  reasonIfUnsupported);
 }
 
-bool RefLayerSupport::IsInputSupported(const TensorInfo& input,
-                                       Optional<std::string&> reasonIfUnsupported) const
+bool RefLayerSupport::IsInputSupported(const TensorInfo& /*input*/,
+                                       Optional<std::string&> /*reasonIfUnsupported*/) const
 {
     return true;
 }
@@ -1301,8 +1303,8 @@ bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input,
     return supported;
 }
 
-bool RefLayerSupport::IsOutputSupported(const TensorInfo& output,
-                                        Optional<std::string&> reasonIfUnsupported) const
+bool RefLayerSupport::IsOutputSupported(const TensorInfo& /*output*/,
+                                        Optional<std::string&> /*reasonIfUnsupported*/) const
 {
     return true;
 }
@@ -1470,6 +1472,7 @@ bool RefLayerSupport::IsResizeSupported(const TensorInfo& input,
                                         const ResizeDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
+    boost::ignore_unused(descriptor);
     bool supported = true;
     std::array<DataType,4> supportedTypes =
     {
@@ -1524,7 +1527,7 @@ bool RefLayerSupport::IsSliceSupported(const TensorInfo& input,
                                        const SliceDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
+    boost::ignore_unused(descriptor);
     bool supported = true;
 
     std::array<DataType, 3> supportedTypes =
@@ -1551,7 +1554,7 @@ bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                          const SoftmaxDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(output);
+    boost::ignore_unused(descriptor);
     bool supported = true;
     std::array<DataType,4> supportedTypes =
     {
@@ -1578,7 +1581,7 @@ bool RefLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
                                                 const SpaceToBatchNdDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(output);
+    boost::ignore_unused(descriptor);
     bool supported = true;
     std::array<DataType,4> supportedTypes =
     {
@@ -1811,6 +1814,7 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
                                                         const Optional<TensorInfo>& biases,
                                                         Optional<std::string&> reasonIfUnsupported) const
 {
+    boost::ignore_unused(descriptor);
     bool supported = true;
 
     std::array<DataType,4> supportedTypes =
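
Several hunks above also migrate unqualified ignore_unused calls to the fully qualified boost::ignore_unused, keeping the file on one helper. For reference, the helper is essentially a variadic no-op along these lines (a sketch, not Boost's exact source):

    // Sketch of what boost::ignore_unused amounts to: a variadic inline
    // no-op whose only effect is that its arguments count as "used".
    template <typename... Ts>
    inline void ignore_unused(Ts const&...)
    {
    }
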
index 8d044ee..dffb13d 100644 (file)
@@ -77,19 +77,21 @@ bool RefWorkloadFactory::IsLayerSupported(const Layer& layer,
 }
 
 std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
-                                                                      const bool IsMemoryManaged) const
+                                                                      const bool isMemoryManaged) const
 {
     // For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
     // to unmanaged memory. This also ensures memory alignment.
+    boost::ignore_unused(isMemoryManaged);
     return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
 }
 
 std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
                                                                       DataLayout dataLayout,
-                                                                      const bool IsMemoryManaged) const
+                                                                      const bool isMemoryManaged) const
 {
     // For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
     // to unmanaged memory. This also ensures memory alignment.
+    boost::ignore_unused(isMemoryManaged, dataLayout);
     return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
 }
 
@@ -218,6 +220,7 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDivision(const DivisionQueu
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
 {
+    boost::ignore_unused(descriptor);
     ComparisonQueueDescriptor comparisonDescriptor;
     comparisonDescriptor.m_Parameters.m_Operation = ComparisonOperation::Equal;
 
@@ -253,6 +256,7 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGather(const GatherQueueDes
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
 {
+    boost::ignore_unused(descriptor);
     ComparisonQueueDescriptor comparisonDescriptor;
     comparisonDescriptor.m_Parameters.m_Operation = ComparisonOperation::Greater;
 
@@ -410,8 +414,8 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePooling2d(const Pooling2dQu
     return std::make_unique<RefPooling2dWorkload>(descriptor, info);
 }
 
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
-                                                                 const WorkloadInfo& info) const
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& /*descriptor*/,
+                                                                 const WorkloadInfo& /*info*/) const
 {
     return nullptr;
 }
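
The RefWorkloadFactory.cpp hunks mix two styles: boost::ignore_unused where the name is worth keeping (isMemoryManaged, whose irrelevance the comment explains, and the Equal/Greater descriptors, which are converted to the newer ComparisonQueueDescriptor), and commented-out names where the argument is never needed at all, as in the stubbed CreatePreCompiled. The rename from IsMemoryManaged to isMemoryManaged also brings the parameter in line with the lowerCamelCase used by its neighbours. A sketch of the commented-name style, with placeholder types rather than ArmNN declarations:

    // Sketch only: unnamed parameters cannot be referenced, so -Wextra has
    // nothing to flag; this suits stubs that ignore all their arguments.
    #include <memory>

    struct PreCompiledDescriptor {};
    struct StubInfo {};

    std::unique_ptr<int> CreatePreCompiledStub(const PreCompiledDescriptor& /*descriptor*/,
                                               const StubInfo& /*info*/)
    {
        return nullptr;
    }
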
index b49a6dd..10e5b9f 100644 (file)
--- a/src/backends/reference/test/RefWorkloadFactoryHelper.hpp
+++ b/src/backends/reference/test/RefWorkloadFactoryHelper.hpp
@@ -25,6 +25,7 @@ struct WorkloadFactoryHelper<armnn::RefWorkloadFactory>
     static armnn::RefWorkloadFactory GetFactory(
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager = nullptr)
     {
+        boost::ignore_unused(memoryManager);
         return armnn::RefWorkloadFactory();
     }
 };
index 76616f1..db85b95 100644 (file)
--- a/src/backends/reference/workloads/ArgMinMax.cpp
+++ b/src/backends/reference/workloads/ArgMinMax.cpp
@@ -15,6 +15,8 @@ namespace armnn
 void ArgMinMax(Decoder<float>& in, int32_t* out, const TensorInfo& inputTensorInfo,
                const TensorInfo& outputTensorInfo, ArgMinMaxFunction function, int axis)
 {
+    boost::ignore_unused(outputTensorInfo);
+
     unsigned int uAxis = armnnUtils::GetUnsignedAxis(inputTensorInfo.GetNumDimensions(), axis);
 
     const unsigned int outerElements = armnnUtils::GetNumElementsBetween(inputTensorInfo.GetShape(), 0, uAxis);
index fafc03e..4025e8d 100644 (file)
--- a/src/backends/reference/workloads/Dequantize.cpp
+++ b/src/backends/reference/workloads/Dequantize.cpp
@@ -5,6 +5,7 @@
 
 #include "Dequantize.hpp"
 
+#include <boost/core/ignore_unused.hpp>
 namespace armnn
 {
 
@@ -13,6 +14,7 @@ void Dequantize(Decoder<float>& inputDecoder,
                 const TensorInfo& inputInfo,
                 const TensorInfo& outputInfo)
 {
+    boost::ignore_unused(outputInfo);
     BOOST_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
     for (unsigned int i = 0; i < inputInfo.GetNumElements(); i++)
     {
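
Dequantize shows the subtlest case: outputInfo is read, but only inside BOOST_ASSERT, which compiles away under NDEBUG, leaving the parameter unused in release builds. The ignore_unused call keeps both build flavours warning-free. A sketch with illustrative names, using the standard assert in place of BOOST_ASSERT:

    #include <boost/core/ignore_unused.hpp>
    #include <cassert>

    void CopyChecked(const float* src, float* dst, unsigned int count, unsigned int expected)
    {
        boost::ignore_unused(expected); // only read by the assert below
        assert(count == expected);      // vanishes under NDEBUG
        for (unsigned int i = 0; i < count; ++i)
        {
            dst[i] = src[i];
        }
    }
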
index d475dd8..3b384f1 100644 (file)
--- a/src/backends/reference/workloads/DetectionPostProcess.cpp
+++ b/src/backends/reference/workloads/DetectionPostProcess.cpp
@@ -155,6 +155,8 @@ void DetectionPostProcess(const TensorInfo& boxEncodingsInfo,
                           float* detectionScores,
                           float* numDetections)
 {
+    boost::ignore_unused(anchorsInfo, detectionClassesInfo, detectionScoresInfo, numDetectionsInfo);
+
     // Transform center-size format which is (ycenter, xcenter, height, width) to box-corner format,
     // which represents the lower left corner and the upper right corner (ymin, xmin, ymax, xmax)
     std::vector<float> boxCorners(boxEncodingsInfo.GetNumElements());
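
DetectionPostProcess takes several TensorInfo parameters that this implementation never reads; because boost::ignore_unused is variadic, one call absorbs them all. A sketch with illustrative names:

    #include <boost/core/ignore_unused.hpp>

    void PostProcess(int anchorsInfo, int classesInfo, int scoresInfo, int numDetectionsInfo)
    {
        // a single variadic call marks every unused parameter as used
        boost::ignore_unused(anchorsInfo, classesInfo, scoresInfo, numDetectionsInfo);
    }
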
index c848a7c..5416855 100644 (file)
--- a/src/backends/reference/workloads/Gather.cpp
+++ b/src/backends/reference/workloads/Gather.cpp
@@ -9,6 +9,7 @@
 
 #include <backendsCommon/WorkloadData.hpp>
 
+#include <boost/core/ignore_unused.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
 namespace armnn
@@ -21,6 +22,7 @@ void Gather(const TensorInfo& paramsInfo,
             const int32_t* indices,
             Encoder<float>& output)
 {
+    boost::ignore_unused(outputInfo);
     const TensorShape& paramsShape = paramsInfo.GetShape();
 
     unsigned int paramsProduct = 1;
index ea8f4ee..8ff2eb4 100644 (file)
--- a/src/backends/reference/workloads/Pooling2d.cpp
+++ b/src/backends/reference/workloads/Pooling2d.cpp
@@ -84,7 +84,7 @@ namespace
         {
             case PoolingAlgorithm::Max:
             {
-                return [](float & accumulated, float kernelSize) {};
+                return [](float & /*accumulated*/, float /*kernelSize*/) {};
             }
 
             case PoolingAlgorithm::Average:
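
The Pooling2d hunk covers the last variant: unused lambda parameters. Each pooling algorithm is serviced through a callable with one shared signature, and the Max case needs no post-processing, so its lambda ignores both arguments; commenting out the names, exactly as with ordinary functions, silences the warning. A sketch under the assumption that the Average case divides the accumulated value by the kernel size in this step:

    #include <functional>

    using Executor = std::function<void(float&, float)>;

    Executor MakeExecutor(bool isMax)
    {
        if (isMax)
        {
            return [](float& /*accumulated*/, float /*kernelSize*/) {}; // no-op for Max
        }
        return [](float& accumulated, float kernelSize) { accumulated /= kernelSize; };
    }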