[neurun] Enable compile options -Wall/-Wextra/-Werror for backend modules (#2604)
author Sujin Kim/Motion Control Lab(SR)/Engineer/Samsung Electronics <sjsujin.kim@samsung.com>
Wed, 5 Sep 2018 08:24:12 +0000 (17:24 +0900)
committer Hyeongseok Oh/Motion Control Lab(SR)/Staff Engineer/Samsung Electronics <hseok82.oh@samsung.com>
Wed, 5 Sep 2018 08:24:12 +0000 (17:24 +0900)
This commit enables the -Wall/-Wextra/-Werror compile options for the backend modules.
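
Under -Werror these warnings become build failures: the unsigned loop counters compared against the signed kernel Shape extents trip -Wsign-compare, the unused local bias_size trips -Wunused-variable, and the unused NOP node parameter trips -Wunused-parameter, hence the switch to int32_t counters, the removed local, and the commented-out parameter name below. A minimal sketch of the sign-compare case, assuming a hypothetical Shape with signed int32_t extents like the one IndexIterator iterates over:

#include <cstdint>

// Hypothetical stand-in for the kernel Shape used by IndexIterator;
// its extents are assumed here to be signed int32_t.
struct Shape
{
  int32_t N;
  int32_t C;
  int32_t H;
  int32_t W;
};

void iterate(const Shape &shape)
{
  // An unsigned counter ('uint32_t n') would compare unsigned against the
  // signed extent, which the compiler reports as -Wsign-compare and -Werror
  // turns into an error; a signed counter keeps the comparison well-typed.
  for (int32_t n = 0; n < shape.N; ++n)
  {
    // visit element n (the iteration callback would go here)
  }
}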

Signed-off-by: sjsujinkim <sjsujin.kim@samsung.com>
include/util/kernel/IndexIterator.h
runtimes/neurun/src/backend/acl_cl/CMakeLists.txt
runtimes/neurun/src/backend/acl_cl/InitializerGenerator.cc
runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
runtimes/neurun/src/backend/cpu/CMakeLists.txt
runtimes/neurun/src/backend/cpu/InitializerGenerator.cc
runtimes/neurun/src/backend/cpu/StageGenerator.cc

index 7dab7d3..d016564 100644 (file)
@@ -37,13 +37,13 @@ public:
 public:
   template <typename Callable> IndexIterator &iter(Callable cb)
   {
-    for (uint32_t nth = 0; nth < _shape.N; ++nth)
+    for (int32_t nth = 0; nth < _shape.N; ++nth)
     {
-      for (uint32_t ch = 0; ch < _shape.C; ++ch)
+      for (int32_t ch = 0; ch < _shape.C; ++ch)
       {
-        for (uint32_t row = 0; row < _shape.H; ++row)
+        for (int32_t row = 0; row < _shape.H; ++row)
         {
-          for (uint32_t col = 0; col < _shape.W; ++col)
+          for (int32_t col = 0; col < _shape.W; ++col)
           {
             cb(nth, ch, row, col);
           }
index 2a9fd66..d64c23a 100644 (file)
@@ -10,6 +10,8 @@ target_link_libraries(${LIB_NEURUN_BACKEND_ACL_CL} arm_compute)
 target_link_libraries(${LIB_NEURUN_BACKEND_ACL_CL} nnfw_support_nnapi)
 target_link_libraries(${LIB_NEURUN_BACKEND_ACL_CL} ${LIB_NEURUN_KERNEL_ACL_CL})
 
+target_compile_options(${LIB_NEURUN_BACKEND_ACL_CL} PRIVATE -Wall -Wextra -Werror)
+
 set_target_properties(${LIB_NEURUN_BACKEND_ACL_CL} PROPERTIES POSITION_INDEPENDENT_CODE ON)
 set_target_properties(${LIB_NEURUN_BACKEND_ACL_CL} PROPERTIES OUTPUT_NAME backend_acl_cl)
 install(TARGETS ${LIB_NEURUN_BACKEND_ACL_CL} DESTINATION lib/neurun)
index 5804827..bf9876c 100644 (file)
@@ -87,7 +87,7 @@ Initializer InitializerGenerator::generateBias(const graph::operation::Conv2D::I
   const auto bias_size = _ctx.at(bias_index).shape().asVector();
 
   return [bias_base, bias_size](::arm_compute::ITensor &tensor) {
-    for (uint32_t n = 0; n < bias_size; ++n)
+    for (int32_t n = 0; n < bias_size; ++n)
     {
       const ::arm_compute::Coordinates coordinate{n};
 
@@ -109,7 +109,7 @@ Initializer InitializerGenerator::generateBias(const graph::operation::FullyConn
   const auto bias_size = _ctx.at(bias_index).shape().asVector();
 
   return [bias_base, bias_size](::arm_compute::ITensor &tensor) {
-    for (uint32_t n = 0; n < bias_size; ++n)
+    for (int32_t n = 0; n < bias_size; ++n)
     {
       const ::arm_compute::Coordinates coordinate{n};
 
index 2fda73f..ca76447 100644 (file)
@@ -120,7 +120,6 @@ Stage StageGenerator::generate(const graph::operation::Conv2D::Implicit::Node &n
   const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
   const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
   const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
-  const auto bias_size = _ctx.at(bias_index).shape().asVector();
 
   const PaddingCode padding_type =
       static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
@@ -512,7 +511,7 @@ Stage StageGenerator::generate(const graph::operation::Softmax::Node &node)
   };
 }
 
-Stage StageGenerator::generate(const graph::operation::NOP::Node &node)
+Stage StageGenerator::generate(const graph::operation::NOP::Node & /* node */)
 {
   // DO NOTHING
   return nullptr;
index ba4efb1..95e9af6 100644 (file)
@@ -12,6 +12,8 @@ target_link_libraries(${LIB_NEURUN_BACKEND_CPU} nnfw_util)
 target_link_libraries(${LIB_NEURUN_BACKEND_CPU} nnfw_support_nnapi)
 target_link_libraries(${LIB_NEURUN_BACKEND_CPU} ${LIB_NEURUN_KERNEL_CPU})
 
+target_compile_options(${LIB_NEURUN_BACKEND_CPU} PRIVATE -Wall -Wextra -Werror)
+
 set_target_properties(${LIB_NEURUN_BACKEND_CPU} PROPERTIES POSITION_INDEPENDENT_CODE ON)
 set_target_properties(${LIB_NEURUN_BACKEND_CPU} PROPERTIES OUTPUT_NAME backend_cpu)
 install(TARGETS ${LIB_NEURUN_BACKEND_CPU} DESTINATION lib/neurun)
index 05d2cc1..c524518 100644 (file)
@@ -124,7 +124,7 @@ Initializer InitializerGenerator::generateBias(const graph::operation::Conv2D::I
   const auto bias_size = _ctx.at(bias_index).shape().asVector();
 
   return [bias_base, bias_size](::arm_compute::ITensor &tensor) {
-    for (uint32_t n = 0; n < bias_size; ++n)
+    for (int32_t n = 0; n < bias_size; ++n)
     {
       const ::arm_compute::Coordinates coordinate{n};
 
@@ -151,7 +151,7 @@ Initializer InitializerGenerator::generateBias(const graph::operation::FullyConn
     case ::neurun::graph::operand::DataType::TENSOR_FLOAT32:
     {
       return [bias_base, bias_size](::arm_compute::ITensor &tensor) {
-        for (uint32_t n = 0; n < bias_size; ++n)
+        for (int32_t n = 0; n < bias_size; ++n)
         {
           const ::arm_compute::Coordinates coordinate{n};
 
@@ -167,7 +167,7 @@ Initializer InitializerGenerator::generateBias(const graph::operation::FullyConn
     case ::neurun::graph::operand::DataType::TENSOR_QUANT8_ASYMM:
     {
       return [bias_base, bias_size](::arm_compute::ITensor &tensor) {
-        for (uint32_t n = 0; n < bias_size; ++n)
+        for (int32_t n = 0; n < bias_size; ++n)
         {
           const ::arm_compute::Coordinates coordinate{n};
 
index def1e3c..6488a8d 100644 (file)
@@ -509,7 +509,7 @@ Stage StageGenerator::generate(const graph::operation::Softmax::Node &node)
   };
 }
 
-Stage StageGenerator::generate(const graph::operation::NOP::Node &node)
+Stage StageGenerator::generate(const graph::operation::NOP::Node & /* node */)
 {
   // DO NOTHING
   return nullptr;