[neurun] Enable compile option Wall/Wextra/Werror (#2517)
author이한종/동작제어Lab(SR)/Engineer/삼성전자 <hanjoung.lee@samsung.com>
Thu, 30 Aug 2018 07:51:27 +0000 (16:51 +0900)
committer이춘석/동작제어Lab(SR)/Staff Engineer/삼성전자 <chunseok.lee@samsung.com>
Thu, 30 Aug 2018 07:51:27 +0000 (16:51 +0900)
To keep our code clean, let us introduce these compile options.

- Wall : Enable most (but not all) compiler warnings
- Wextra : Enable extra compiler warnings
- Werror : Treat warnings as errors

Signed-off-by: Hanjoung Lee <hanjoung.lee@samsung.com>
include/util/feature/IndexIterator.h
runtimes/neurun/CMakeLists.txt
runtimes/neurun/src/codegen/Planner.cc
runtimes/neurun/src/frontend/compilation.cc
runtimes/neurun/src/frontend/execution.cc
runtimes/neurun/src/graph/operand/Set.cc
runtimes/neurun/src/internal/Sink.h
runtimes/neurun/src/internal/Source.h

index f076b38..e2a7196 100644 (file)
@@ -37,13 +37,13 @@ public:
 public:
   template <typename Callable> IndexIterator &iter(Callable cb)
   {
-    for (uint32_t batch = 0; batch < _shape.N; ++batch)
+    for (int32_t batch = 0; batch < _shape.N; ++batch)
     {
-      for (uint32_t ch = 0; ch < _shape.C; ++ch)
+      for (int32_t ch = 0; ch < _shape.C; ++ch)
       {
-        for (uint32_t row = 0; row < _shape.H; ++row)
+        for (int32_t row = 0; row < _shape.H; ++row)
         {
-          for (uint32_t col = 0; col < _shape.W; ++col)
+          for (int32_t col = 0; col < _shape.W; ++col)
           {
             cb(batch, ch, row, col);
           }
index 3dc0d03..ebdfcad 100644 (file)
@@ -39,6 +39,8 @@ target_link_libraries(${LIB_NEURUN} nnfw_support_nnapi)
 target_link_libraries(${LIB_NEURUN} ${LIB_NEURUN_BACKEND_CPU})
 target_link_libraries(${LIB_NEURUN} ${LIB_NEURUN_BACKEND_ACL_CL})
 
+target_compile_options(${LIB_NEURUN} PRIVATE -Wall -Wextra -Werror)
+
 set_target_properties(${LIB_NEURUN} PROPERTIES OUTPUT_NAME neuralnetworks)
 
 install(TARGETS ${LIB_NEURUN} DESTINATION lib/neurun)
index 526b5b6..0e7f8c4 100644 (file)
@@ -92,8 +92,6 @@ void Planner::visit(const graph::operation::Concat::Node &node)
   _builder.addShapeConstr(ofm_index, ::internal::asTensorInfo(ofm_shape));
 
   // Set Shape Constraints (for input)
-  uint32_t depth = 0;
-
   for (const auto &index : node.getInputs().list())
   {
     const ::neurun::graph::operand::Index ifm_index{index};
@@ -204,7 +202,7 @@ void Planner::visit(const graph::operation::Softmax::Node &node)
   _builder.addStage(stage_gen->generate(node));
 }
 
-void Planner::visit(const graph::operation::NOP::Node &node)
+void Planner::visit(const graph::operation::NOP::Node & /* node */)
 {
   // DO NOTHING
   // TODO : It's just for graph manipulation test now, it should be added tensor copy stage later.
index af14604..9f3a986 100644 (file)
@@ -45,7 +45,7 @@ void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation *compilation)
 }
 
 int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation *compilation,
-                                             int32_t preference)
+                                             int32_t /* preference */)
 {
   if (compilation == nullptr)
   {
index eb34203..98c502f 100644 (file)
@@ -34,8 +34,8 @@ int ANeuralNetworksExecution_create(ANeuralNetworksCompilation *compilation,
 }
 
 int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution *execution, int32_t index,
-                                      const ANeuralNetworksOperandType *type, const void *buffer,
-                                      size_t length)
+                                      const ANeuralNetworksOperandType * /* type */,
+                                      const void *buffer, size_t length)
 {
   // Don't check type
   // Comment about ANeuralNetworksOperandType in NeuralNetworks.h:
@@ -80,7 +80,7 @@ int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution *execution, int32
 }
 
 int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution *execution, int32_t index,
-                                       const ANeuralNetworksOperandType *type, void *buffer,
+                                       const ANeuralNetworksOperandType * /* type */, void *buffer,
                                        size_t length)
 {
   // Don't check type
@@ -184,12 +184,13 @@ int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution *execution,
   return ANEURALNETWORKS_NO_ERROR;
 }
 
-void ANeuralNetworksExecution_free(ANeuralNetworksExecution *execution) {}
+void ANeuralNetworksExecution_free(ANeuralNetworksExecution * /* execution */) {}
 
-int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution *execution, int32_t index,
-                                                const ANeuralNetworksOperandType *type,
-                                                const ANeuralNetworksMemory *memory, size_t offset,
-                                                size_t length)
+int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution *execution,
+                                                int32_t /* index */,
+                                                const ANeuralNetworksOperandType * /* type */,
+                                                const ANeuralNetworksMemory *memory,
+                                                size_t /* offset */, size_t /* length */)
 {
   if ((execution == nullptr) || (memory == nullptr))
   {
@@ -200,10 +201,11 @@ int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution *execut
   return ANEURALNETWORKS_NO_ERROR;
 }
 
-int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution *execution, int32_t index,
-                                                 const ANeuralNetworksOperandType *type,
-                                                 const ANeuralNetworksMemory *memory, size_t offset,
-                                                 size_t length)
+int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution *execution,
+                                                 int32_t /* index */,
+                                                 const ANeuralNetworksOperandType * /* type */,
+                                                 const ANeuralNetworksMemory *memory,
+                                                 size_t /* offset */, size_t /* length */)
 {
   if ((execution == nullptr) || (memory == nullptr))
   {
index 0e107b9..c5cc17b 100644 (file)
@@ -29,7 +29,7 @@ const Object &Set::at(const Index &index) const { return *(_objects.at(index.asI
 
 Object &Set::at(const Index &index) { return *(_objects.at(index.asInt())); }
 
-bool Set::exist(const Index &index) const { return index.asInt() < _objects.size(); }
+bool Set::exist(const Index &index) const { return index.value() < _objects.size(); }
 
 } // namespace operand
 } // namespace graph
index e300d03..cf0de1c 100644 (file)
@@ -27,6 +27,7 @@ class VectorSink final : public Sink
 public:
   VectorSink(const int32_t vlen, uint8_t *base, const size_t size) : _vlen{vlen}, _base{base}
   {
+    (void)size; // Workaround for unused variable in release mode
     assert(size >= _vlen * sizeof(float));
   }
 
index 7d626fd..b3cad38 100644 (file)
@@ -30,6 +30,7 @@ public:
   VectorSource(const int32_t vlen, const uint8_t *base, const size_t size)
       : _vlen{vlen}, _base{base}
   {
+    (void)size; // Workaround for unused variable in release mode
     assert(size >= _vlen * sizeof(float));
   }