IVGCVSW-4246 Enable -Wextra by default
author Derek Lamberti <derek.lamberti@arm.com>
Tue, 10 Dec 2019 22:20:54 +0000 (22:20 +0000)
committer Derek Lamberti <derek.lamberti@arm.com>
Tue, 7 Jan 2020 16:13:46 +0000 (16:13 +0000)
!referencetests:218340

Change-Id: If24a604310d0363b1f09b406e4d53ebfeb106aad
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
13 files changed:
CMakeLists.txt
cmake/GlobalConfig.cmake
src/armnn/CompatibleTypes.hpp
src/armnn/LayerSupportCommon.hpp
src/armnn/test/CreateWorkload.hpp
src/armnnTfParser/test/Split.cpp
src/armnnUtils/TensorUtils.cpp
src/backends/cl/ClWorkloadFactory.cpp
src/backends/cl/ClWorkloadFactory.hpp
src/backends/neon/NeonWorkloadFactory.cpp
src/backends/neon/NeonWorkloadFactory.hpp
src/backends/reference/RefMemoryManager.cpp
tests/DeepSpeechV1InferenceTest.hpp

index d0f3d7a..d268983 100644 (file)
@@ -133,7 +133,7 @@ if(BUILD_TF_PARSER)
         )
     # The generated tensorflow protobuf .cc files are not warning clean and we can't fix them.
     if(COMPILER_IS_GNU_LIKE)
-        set_source_files_properties(${TF_PROTOBUFS} PROPERTIES COMPILE_FLAGS "-Wno-conversion -Wno-sign-conversion")
+        set_source_files_properties(${TF_PROTOBUFS} PROPERTIES COMPILE_FLAGS "-Wno-unused-variable -Wno-unused-parameter -Wno-conversion -Wno-sign-conversion")
     endif()
 
     add_library_ex(armnnTfParser SHARED ${armnn_tf_parser_sources})
index 4a2c026..ccf0ecc 100644 (file)
@@ -55,7 +55,7 @@ endif()
 # Compiler flags that are always set
 set(CMAKE_POSITION_INDEPENDENT_CODE ON)
 if(COMPILER_IS_GNU_LIKE)
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 -Wall -Werror -Wold-style-cast -Wno-missing-braces -Wconversion -Wsign-conversion")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 -Wall -Wextra -Werror -Wold-style-cast -Wno-missing-braces -Wconversion -Wsign-conversion")
 elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL MSVC)
        # Disable C4996 (use of deprecated identifier) due to https://developercommunity.visualstudio.com/content/problem/252574/deprecated-compilation-warning-for-virtual-overrid.html
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc /MP /wd4996")
index fd33f6c..cc545a9 100644 (file)
@@ -12,7 +12,7 @@ namespace armnn
 {
 
 template<typename T>
-bool CompatibleTypes(DataType dataType)
+bool CompatibleTypes(DataType)
 {
     return false;
 }
index 8fca3d4..557e72a 100644 (file)
@@ -70,6 +70,7 @@ bool FalseFunc(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 template<typename ... Params>
 bool FalseFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
+    boost::ignore_unused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type");
     return false;
 }
@@ -77,6 +78,7 @@ bool FalseFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params
 template<typename ... Params>
 bool FalseFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
+    boost::ignore_unused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type");
     return false;
 }
@@ -84,6 +86,7 @@ bool FalseFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params
 template<typename ... Params>
 bool FalseFuncU8(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
+    boost::ignore_unused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with 8-bit data type");
     return false;
 }
@@ -91,6 +94,7 @@ bool FalseFuncU8(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 template<typename ... Params>
 bool FalseFuncI32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
+    boost::ignore_unused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with int32 data type");
     return false;
 }
@@ -98,6 +102,7 @@ bool FalseFuncI32(Optional<std::string&> reasonIfUnsupported, Params&&... params
 template<typename ... Params>
 bool FalseInputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
+    boost::ignore_unused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type input");
     return false;
 }
@@ -105,6 +110,7 @@ bool FalseInputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... p
 template<typename ... Params>
 bool FalseInputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
+    boost::ignore_unused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type input");
     return false;
 }
@@ -112,6 +118,7 @@ bool FalseInputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... p
 template<typename ... Params>
 bool FalseOutputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
+    boost::ignore_unused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type output");
     return false;
 }
@@ -119,6 +126,7 @@ bool FalseOutputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&...
 template<typename ... Params>
 bool FalseOutputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
 {
+    boost::ignore_unused(params...);
     SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type output");
     return false;
 }
index 4e7967b..f6928f8 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <boost/test/unit_test.hpp>
 #include <boost/cast.hpp>
+#include <boost/core/ignore_unused.hpp>
 
 #include <utility>
 
@@ -1324,6 +1325,8 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
     armnn::Graph& graph,
     bool biasEnabled = false)
 {
+    boost::ignore_unused(graph);
+
     // To create a PreCompiled layer, create a network and Optimize it.
     armnn::Network net;
 
index 10ff04d..d53ae67 100644 (file)
@@ -176,6 +176,7 @@ BOOST_FIXTURE_TEST_CASE(ParseSplit, InputFirstSplitFixture)
 struct SplitLastDimFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
 {
     SplitLastDimFixture(bool withDimZero=false) {
+        boost::ignore_unused(withDimZero);
         m_Prototext = R"(
         node {
           name: "Placeholder"
index 84fc8db..535d68a 100644 (file)
@@ -114,7 +114,6 @@ unsigned int GetNumElementsBetween(const TensorShape& shape,
                                    const unsigned int firstAxisInclusive,
                                    const unsigned int lastAxisExclusive)
 {
-    BOOST_ASSERT(0 <= firstAxisInclusive);
     BOOST_ASSERT(firstAxisInclusive <= lastAxisExclusive);
     BOOST_ASSERT(lastAxisExclusive <= shape.GetNumDimensions());
     unsigned int count = 1;
@@ -141,7 +140,6 @@ unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
 unsigned int GetNumElementsAfter(const armnn::TensorShape& shape, unsigned int axis)
 {
     unsigned int numDim = shape.GetNumDimensions();
-    BOOST_ASSERT(0 >= axis);
     BOOST_ASSERT(axis <= numDim - 1);
     unsigned int count = 1;
     for (unsigned int i = axis; i < numDim; i++)
index 4746167..f9e6632 100644 (file)
@@ -260,13 +260,6 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateEqual(const EqualQueueDescri
     return CreateComparison(comparisonDescriptor, info);
 }
 
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFakeQuantization(
-    const FakeQuantizationQueueDescriptor& descriptor,
-    const WorkloadInfo& info) const
-{
-    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
-}
-
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
 {
index 8c94818..8f377e9 100644 (file)
@@ -95,9 +95,6 @@ public:
     std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
-    std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
-                                                      const WorkloadInfo& info) const override;
-
     std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
index 8d798ec..1cc9e50 100644 (file)
@@ -225,13 +225,6 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateEqual(const EqualQueueDesc
     return CreateComparison(comparisonDescriptor, info);
 }
 
-std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFakeQuantization(
-    const FakeQuantizationQueueDescriptor& descriptor,
-    const WorkloadInfo& info) const
-{
-    return nullptr;
-}
-
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
                                                             const WorkloadInfo& info) const
 {
@@ -441,12 +434,6 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSoftmax(const SoftmaxQueue
         descriptor, info, m_MemoryManager->GetIntraLayerManager());
 }
 
-std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
-                                                                     const WorkloadInfo& info) const
-{
-    return nullptr;
-}
-
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSpaceToDepth(
     const armnn::SpaceToDepthQueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const
 {
index 6bdc237..b76a3a3 100644 (file)
@@ -97,9 +97,6 @@ public:
     std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
-    std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
-                                                      const WorkloadInfo& info) const override;
-
     std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
@@ -193,9 +190,6 @@ public:
     std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
-    std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
-                                                    const WorkloadInfo& info) const override;
-
     std::unique_ptr<IWorkload> CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info) const override;
 
index fdd008d..4f15e39 100644 (file)
@@ -88,7 +88,6 @@ void RefMemoryManager::Pool::Reserve(unsigned int numBytes)
 void RefMemoryManager::Pool::Acquire()
 {
     BOOST_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Acquire() called when memory already acquired");
-    BOOST_ASSERT(m_Size >= 0);
     m_Pointer = ::operator new(size_t(m_Size));
 }
 
index c46fa57..ac28bbb 100644 (file)
@@ -8,6 +8,7 @@
 #include "DeepSpeechV1Database.hpp"
 
 #include <boost/assert.hpp>
+#include <boost/core/ignore_unused.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 #include <boost/test/tools/floating_point_comparison.hpp>
 
@@ -36,6 +37,7 @@ public:
 
     TestCaseResult ProcessResult(const InferenceTestOptions& options) override
     {
+        boost::ignore_unused(options);
         const std::vector<float>& output1 = boost::get<std::vector<float>>(this->GetOutputs()[0]); // logits
         BOOST_ASSERT(output1.size() == k_OutputSize1);