Enable the -Wall/-Wextra/-Werror compile options for the backend modules (acl_cl and cpu), and fix the resulting warnings (signed/unsigned loop-counter mismatches, unused parameters, and an unused variable).
Signed-off-by: sjsujinkim <sjsujin.kim@samsung.com>
public:
template <typename Callable> IndexIterator &iter(Callable cb)
{
- for (uint32_t nth = 0; nth < _shape.N; ++nth)
+ for (int32_t nth = 0; nth < _shape.N; ++nth)
{
- for (uint32_t ch = 0; ch < _shape.C; ++ch)
+ for (int32_t ch = 0; ch < _shape.C; ++ch)
{
- for (uint32_t row = 0; row < _shape.H; ++row)
+ for (int32_t row = 0; row < _shape.H; ++row)
{
- for (uint32_t col = 0; col < _shape.W; ++col)
+ for (int32_t col = 0; col < _shape.W; ++col)
{
cb(nth, ch, row, col);
}
target_link_libraries(${LIB_NEURUN_BACKEND_ACL_CL} nnfw_support_nnapi)
target_link_libraries(${LIB_NEURUN_BACKEND_ACL_CL} ${LIB_NEURUN_KERNEL_ACL_CL})
+target_compile_options(${LIB_NEURUN_BACKEND_ACL_CL} PRIVATE -Wall -Wextra -Werror)
+
set_target_properties(${LIB_NEURUN_BACKEND_ACL_CL} PROPERTIES POSITION_INDEPENDENT_CODE ON)
set_target_properties(${LIB_NEURUN_BACKEND_ACL_CL} PROPERTIES OUTPUT_NAME backend_acl_cl)
install(TARGETS ${LIB_NEURUN_BACKEND_ACL_CL} DESTINATION lib/neurun)
const auto bias_size = _ctx.at(bias_index).shape().asVector();
return [bias_base, bias_size](::arm_compute::ITensor &tensor) {
- for (uint32_t n = 0; n < bias_size; ++n)
+ for (int32_t n = 0; n < bias_size; ++n)
{
const ::arm_compute::Coordinates coordinate{n};
const auto bias_size = _ctx.at(bias_index).shape().asVector();
return [bias_base, bias_size](::arm_compute::ITensor &tensor) {
- for (uint32_t n = 0; n < bias_size; ++n)
+ for (int32_t n = 0; n < bias_size; ++n)
{
const ::arm_compute::Coordinates coordinate{n};
const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
- const auto bias_size = _ctx.at(bias_index).shape().asVector();
const PaddingCode padding_type =
static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
};
}
-Stage StageGenerator::generate(const graph::operation::NOP::Node &node)
+Stage StageGenerator::generate(const graph::operation::NOP::Node & /* node */)
{
// DO NOTHING
return nullptr;
target_link_libraries(${LIB_NEURUN_BACKEND_CPU} nnfw_support_nnapi)
target_link_libraries(${LIB_NEURUN_BACKEND_CPU} ${LIB_NEURUN_KERNEL_CPU})
+target_compile_options(${LIB_NEURUN_BACKEND_CPU} PRIVATE -Wall -Wextra -Werror)
+
set_target_properties(${LIB_NEURUN_BACKEND_CPU} PROPERTIES POSITION_INDEPENDENT_CODE ON)
set_target_properties(${LIB_NEURUN_BACKEND_CPU} PROPERTIES OUTPUT_NAME backend_cpu)
install(TARGETS ${LIB_NEURUN_BACKEND_CPU} DESTINATION lib/neurun)
const auto bias_size = _ctx.at(bias_index).shape().asVector();
return [bias_base, bias_size](::arm_compute::ITensor &tensor) {
- for (uint32_t n = 0; n < bias_size; ++n)
+ for (int32_t n = 0; n < bias_size; ++n)
{
const ::arm_compute::Coordinates coordinate{n};
case ::neurun::graph::operand::DataType::TENSOR_FLOAT32:
{
return [bias_base, bias_size](::arm_compute::ITensor &tensor) {
- for (uint32_t n = 0; n < bias_size; ++n)
+ for (int32_t n = 0; n < bias_size; ++n)
{
const ::arm_compute::Coordinates coordinate{n};
case ::neurun::graph::operand::DataType::TENSOR_QUANT8_ASYMM:
{
return [bias_base, bias_size](::arm_compute::ITensor &tensor) {
- for (uint32_t n = 0; n < bias_size; ++n)
+ for (int32_t n = 0; n < bias_size; ++n)
{
const ::arm_compute::Coordinates coordinate{n};
};
}
-Stage StageGenerator::generate(const graph::operation::NOP::Node &node)
+Stage StageGenerator::generate(const graph::operation::NOP::Node & /* node */)
{
// DO NOTHING
return nullptr;