From 82266dba3ffc94f00533a3a4b8244a7902865dac Mon Sep 17 00:00:00 2001
From: =?utf8?q?=EC=98=A4=ED=98=95=EC=84=9D/On-Device=20Lab=28SR=29/Staff?=
 =?utf8?q?=20Engineer/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?=
Date: Tue, 15 Jan 2019 09:18:18 +0900
Subject: [PATCH] Use more strict compile option for tflite library (#4196)

Use more strict compile option for tflite library
Remove use of size_t as possible

Signed-off-by: Hyeongseok Oh
---
 libs/tflite/CMakeLists.txt                          |  1 +
 libs/tflite/include/tflite/TensorLogger.h           |  2 +-
 libs/tflite/include/tflite/ext/kernels/CustomOps.h  | 18 ++++++++-------
 libs/tflite/src/ext/kernels/Abs.cpp                 |  4 ++--
 libs/tflite/src/ext/kernels/SquaredDifference.cpp   |  7 ++----
 libs/tflite/src/ext/kernels/TensorFlowMax.cpp       | 16 ++++++-------
 libs/tflite/src/ext/kernels/TensorFlowSum.cpp       | 16 ++++++-------
 libs/tflite/src/ext/nnapi_delegate.cpp              | 27 +++++++++++-----------
 .../nnapi_delegate_ex_AddOpsAndParams_lambda.inc    | 11 ---------
 9 files changed, 46 insertions(+), 56 deletions(-)

diff --git a/libs/tflite/CMakeLists.txt b/libs/tflite/CMakeLists.txt
index e844d1c..a9351e5 100644
--- a/libs/tflite/CMakeLists.txt
+++ b/libs/tflite/CMakeLists.txt
@@ -7,6 +7,7 @@ set_target_properties(nnfw_lib_tflite PROPERTIES POSITION_INDEPENDENT_CODE ON)
 target_include_directories(nnfw_lib_tflite PUBLIC ${NNFW_INCLUDE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/include)
 target_link_libraries(nnfw_lib_tflite tensorflow-lite ${LIB_PTHREAD} dl)
 target_link_libraries(nnfw_lib_tflite nnfw_lib_misc)
+target_compile_options(nnfw_lib_tflite PRIVATE -Wall -Wextra -Werror)
 
 add_executable(nnfw_lib_tflite_test_TensorView src/TensorView.test.cpp)
 target_link_libraries(nnfw_lib_tflite_test_TensorView nnfw_lib_tflite)
diff --git a/libs/tflite/include/tflite/TensorLogger.h b/libs/tflite/include/tflite/TensorLogger.h
index e56a76b..a878dfe 100644
--- a/libs/tflite/include/tflite/TensorLogger.h
+++ b/libs/tflite/include/tflite/TensorLogger.h
@@ -132,7 +132,7 @@ private:
   {
     _outfile << "tensor_shape_gen.append('{";
 
-    size_t r = 0;
+    int r = 0;
     for (; r < tensor->dims->size - 1; r++)
     {
       _outfile << tensor->dims->data[r] << ", ";
diff --git a/libs/tflite/include/tflite/ext/kernels/CustomOps.h b/libs/tflite/include/tflite/ext/kernels/CustomOps.h
index 3f9459b..e5374fc 100644
--- a/libs/tflite/include/tflite/ext/kernels/CustomOps.h
+++ b/libs/tflite/include/tflite/ext/kernels/CustomOps.h
@@ -36,14 +36,16 @@ namespace tflite
 namespace custom
 {
 
-#define REGISTER_FUNCTION(Name) \
-  TfLiteRegistration *Register_##Name(void) \
-  { \
-    static TfLiteRegistration r = { \
-        Name::Init##Name, Name::Free##Name, Name::Prepare##Name, Name::Eval##Name, \
-    }; \
-    r.custom_name = #Name; \
-    return &r; \
+#define REGISTER_FUNCTION(Name)             \
+  TfLiteRegistration *Register_##Name(void) \
+  {                                         \
+    static TfLiteRegistration r = {};       \
+    r.init = Name::Init##Name;              \
+    r.free = Name::Free##Name;              \
+    r.prepare = Name::Prepare##Name;        \
+    r.invoke = Name::Eval##Name;            \
+    r.custom_name = #Name;                  \
+    return &r;                              \
   }
 
 REGISTER_FUNCTION(TensorFlowMax)
diff --git a/libs/tflite/src/ext/kernels/Abs.cpp b/libs/tflite/src/ext/kernels/Abs.cpp
index 7e9c233..8046f5b 100644
--- a/libs/tflite/src/ext/kernels/Abs.cpp
+++ b/libs/tflite/src/ext/kernels/Abs.cpp
@@ -29,9 +29,9 @@ namespace custom
 namespace Abs
 {
 
-void *InitAbs(TfLiteContext *context, const char *buffer, size_t length) { return nullptr; }
+void *InitAbs(TfLiteContext *, const char *, size_t) { return nullptr; }
 
-void FreeAbs(TfLiteContext *context, void *buffer) {}
+void FreeAbs(TfLiteContext *, void *) {}
 
 TfLiteStatus PrepareAbs(TfLiteContext *context, TfLiteNode *node)
 {
diff --git a/libs/tflite/src/ext/kernels/SquaredDifference.cpp b/libs/tflite/src/ext/kernels/SquaredDifference.cpp
index 8ac2b1d..5301ad5 100644
--- a/libs/tflite/src/ext/kernels/SquaredDifference.cpp
+++ b/libs/tflite/src/ext/kernels/SquaredDifference.cpp
@@ -28,12 +28,9 @@ namespace custom
 namespace SquaredDifference
 {
 
-void *InitSquaredDifference(TfLiteContext *context, const char *buffer, size_t length)
-{
-  return nullptr;
-}
+void *InitSquaredDifference(TfLiteContext *, const char *, size_t) { return nullptr; }
 
-void FreeSquaredDifference(TfLiteContext *context, void *buffer) {}
+void FreeSquaredDifference(TfLiteContext *, void *) {}
 
 TfLiteStatus PrepareSquaredDifference(TfLiteContext *context, TfLiteNode *node)
 {
diff --git a/libs/tflite/src/ext/kernels/TensorFlowMax.cpp b/libs/tflite/src/ext/kernels/TensorFlowMax.cpp
index d72ad24..5d4e821 100644
--- a/libs/tflite/src/ext/kernels/TensorFlowMax.cpp
+++ b/libs/tflite/src/ext/kernels/TensorFlowMax.cpp
@@ -41,7 +41,7 @@ struct TensorFlowMaxOp
   TfLiteTensor *output;
 };
 
-void *InitTensorFlowMax(TfLiteContext *context, const char *buffer, size_t length)
+void *InitTensorFlowMax(TfLiteContext *context, const char *, size_t)
 {
   // Creates two temp tensors to store index and axis for internal
   // implementation only.
@@ -50,7 +50,7 @@ void *InitTensorFlowMax(TfLiteContext *context, const char *buffer, size_t lengt
   return scratch_tensor_index;
 }
 
-void FreeTensorFlowMax(TfLiteContext *context, void *buffer)
+void FreeTensorFlowMax(TfLiteContext *, void *buffer)
 {
   delete static_cast<TensorFlowMaxOp *>(buffer);
 }
@@ -67,15 +67,15 @@ TfLiteStatus ResizeTempAxis(TfLiteContext *context, TensorFlowMaxOp *op_context,
 // Resizes output array based on the input size and resolved axis.
 TfLiteStatus ResizeOutputTensor(TfLiteContext *context, TensorFlowMaxOp *op_context)
 {
-  size_t num_axis = ::tflite::NumElements(op_context->axis);
+  int64_t num_axis = ::tflite::NumElements(op_context->axis);
   TfLiteIntArray *input_dims = op_context->input->dims;
   int input_num_dims = ::tflite::NumDimensions(op_context->input);
   const int *axis = op_context->axis->data.i32;
 
   {
     // Calculates size of reducing axis.
-    int num_reduce_axis = num_axis;
-    for (int i = 0; i < num_axis; ++i)
+    int64_t num_reduce_axis = num_axis;
+    for (int64_t i = 0; i < num_axis; ++i)
     {
       int current = axis[i];
       if (current < 0)
@@ -83,7 +83,7 @@ TfLiteStatus ResizeOutputTensor(TfLiteContext *context, TensorFlowMaxOp *op_cont
         current += input_num_dims;
       }
       TF_LITE_ENSURE(context, current >= 0 && current < input_num_dims);
-      for (int j = 0; j < i; ++j)
+      for (int64_t j = 0; j < i; ++j)
       {
         int previous = axis[j];
         if (previous < 0)
@@ -105,7 +105,7 @@ TfLiteStatus ResizeOutputTensor(TfLiteContext *context, TensorFlowMaxOp *op_cont
   if (input_num_dims == output_num_dims)
   {
     TfLiteIntArray *output_dims = TfLiteIntArrayCopy(input_dims);
-    for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
+    for (int64_t axis_idx = 0; axis_idx < num_axis; ++axis_idx)
     {
       int current = axis[axis_idx];
       output_dims->data[current] = 1;
@@ -279,7 +279,7 @@ template <typename T>
 inline TfLiteStatus CustomMax(TfLiteContext *context, T *input_data, const int *input_dims,
                               const int input_num_dims, T *output_data, const int *output_dims,
                               const int output_num_dims, const int *axis,
-                              const int num_axis_dimensions, bool keep_dims, int *temp_index, int *resolved_axis)
+                              const int num_axis_dimensions, bool /*keep_dims*/, int *temp_index, int *resolved_axis)
 {
   // resolves axis.
   int num_resolved_axis = 0;
diff --git a/libs/tflite/src/ext/kernels/TensorFlowSum.cpp b/libs/tflite/src/ext/kernels/TensorFlowSum.cpp
index cbf9797..b6d5f28 100644
--- a/libs/tflite/src/ext/kernels/TensorFlowSum.cpp
+++ b/libs/tflite/src/ext/kernels/TensorFlowSum.cpp
@@ -41,7 +41,7 @@ struct TensorFlowSumOp
   TfLiteTensor *output;
 };
 
-void *InitTensorFlowSum(TfLiteContext *context, const char *buffer, size_t length)
+void *InitTensorFlowSum(TfLiteContext *context, const char *, size_t)
 {
   // Creates two temp tensors to store index and axis for internal
   // implementation only.
@@ -50,7 +50,7 @@ void *InitTensorFlowSum(TfLiteContext *context, const char *buffer, size_t lengt
   return scratch_tensor_index;
 }
 
-void FreeTensorFlowSum(TfLiteContext *context, void *buffer)
+void FreeTensorFlowSum(TfLiteContext *, void *buffer)
 {
   delete static_cast<TensorFlowSumOp *>(buffer);
 }
@@ -67,15 +67,15 @@ TfLiteStatus ResizeTempAxis(TfLiteContext *context, TensorFlowSumOp *op_context,
 // Resizes output array based on the input size and resolved axis.
 TfLiteStatus ResizeOutputTensor(TfLiteContext *context, TensorFlowSumOp *op_context)
 {
-  size_t num_axis = ::tflite::NumElements(op_context->axis);
+  int64_t num_axis = ::tflite::NumElements(op_context->axis);
   TfLiteIntArray *input_dims = op_context->input->dims;
   int input_num_dims = ::tflite::NumDimensions(op_context->input);
   const int *axis = op_context->axis->data.i32;
 
   {
     // Calculates size of reducing axis.
-    int num_reduce_axis = num_axis;
-    for (int i = 0; i < num_axis; ++i)
+    int64_t num_reduce_axis = num_axis;
+    for (int64_t i = 0; i < num_axis; ++i)
     {
       int current = axis[i];
       if (current < 0)
@@ -83,7 +83,7 @@ TfLiteStatus ResizeOutputTensor(TfLiteContext *context, TensorFlowSumOp *op_cont
         current += input_num_dims;
       }
       TF_LITE_ENSURE(context, current >= 0 && current < input_num_dims);
-      for (int j = 0; j < i; ++j)
+      for (int64_t j = 0; j < i; ++j)
       {
         int previous = axis[j];
         if (previous < 0)
@@ -105,7 +105,7 @@ TfLiteStatus ResizeOutputTensor(TfLiteContext *context, TensorFlowSumOp *op_cont
   if (input_num_dims == output_num_dims)
   {
     TfLiteIntArray *output_dims = TfLiteIntArrayCopy(input_dims);
-    for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
+    for (int64_t axis_idx = 0; axis_idx < num_axis; ++axis_idx)
     {
       int current = axis[axis_idx];
       output_dims->data[current] = 1;
@@ -279,7 +279,7 @@ template <typename T>
 inline TfLiteStatus CustomSum(TfLiteContext *context, T *input_data, const int *input_dims,
                               const int input_num_dims, T *output_data, const int *output_dims,
                               const int output_num_dims, const int *axis,
-                              const int num_axis_dimensions, bool keep_dims, int *temp_index, int *resolved_axis)
+                              const int num_axis_dimensions, bool /*keep_dims*/, int *temp_index, int *resolved_axis)
 {
   // resolves axis.
   int num_resolved_axis = 0;
diff --git a/libs/tflite/src/ext/nnapi_delegate.cpp b/libs/tflite/src/ext/nnapi_delegate.cpp
index 25858a7..40ecbf2 100644
--- a/libs/tflite/src/ext/nnapi_delegate.cpp
+++ b/libs/tflite/src/ext/nnapi_delegate.cpp
@@ -306,7 +306,7 @@ TfLiteStatus AddOpsAndParams(
 
   auto add_scalar_int32 = [&nn_model, &augmented_inputs,
                            &next_id](int value) {
-    ANeuralNetworksOperandType operand_type{.type = ANEURALNETWORKS_INT32};
+    ANeuralNetworksOperandType operand_type{}; operand_type.type = ANEURALNETWORKS_INT32;
     CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
     CHECK_NN(ANeuralNetworksModel_setOperandValue(nn_model, next_id, &value,
                                                   sizeof(int32_t)))
@@ -315,7 +315,7 @@ TfLiteStatus AddOpsAndParams(
 
   auto add_scalar_float32 = [&nn_model, &augmented_inputs,
                              &next_id](float value) {
-    ANeuralNetworksOperandType operand_type{.type = ANEURALNETWORKS_FLOAT32};
+    ANeuralNetworksOperandType operand_type{}; operand_type.type = ANEURALNETWORKS_FLOAT32;
     CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
     CHECK_NN(ANeuralNetworksModel_setOperandValue(nn_model, next_id, &value,
                                                   sizeof(float)))
@@ -323,10 +323,10 @@ TfLiteStatus AddOpsAndParams(
   };
 
   auto add_vector_int32 = [&](const int* values, uint32_t num_values) {
-    ANeuralNetworksOperandType operand_type{
-        .type = ANEURALNETWORKS_TENSOR_INT32,
-        .dimensionCount = 1,
-        .dimensions = &num_values};
+    ANeuralNetworksOperandType operand_type{};
+    operand_type.type = ANEURALNETWORKS_TENSOR_INT32;
+    operand_type.dimensionCount = 1;
+    operand_type.dimensions = &num_values;
     CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
     CHECK_NN(ANeuralNetworksModel_setOperandValue(
         nn_model, next_id, values, sizeof(int32_t) * num_values));
@@ -480,7 +480,7 @@ TfLiteStatus AddOpsAndParams(
   auto add_optional_tensors = [&nn_model, &augmented_inputs,
                                &next_id](int nn_type) {
     for (size_t idx = 0; idx < augmented_inputs.size(); idx++) {
-      if (augmented_inputs[idx] == kOptionalTensor) {
+      if (augmented_inputs[idx] == static_cast<uint32_t>(kOptionalTensor)) {
        const std::vector<uint32_t> dim = {0, 0};
        ANeuralNetworksOperandType operand_type{nn_type, 2, dim.data(), 0, 0};
        CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
@@ -494,7 +494,7 @@ TfLiteStatus AddOpsAndParams(
   int nnapi_version = 10;
 #include "nnapi_delegate_ex_AddOpsAndParams_lambda.inc"
 
-  ANeuralNetworksOperationType nn_op_type;
+  ANeuralNetworksOperationType nn_op_type = -1;
 
   // Using namespace directive to minimize diff with upstream tensorflow
   namespace tflite = ::tflite;
@@ -889,7 +889,7 @@ TfLiteStatus AddOpsAndParams(
       case tflite::BuiltinOperator_SELECT:
       case tflite::BuiltinOperator_SLICE:
       case tflite::BuiltinOperator_SIN:
-      //case tflite::BuiltinOperator_LOG:
+      case tflite::BuiltinOperator_LOG:
       //case tflite::BuiltinOperator_TRANSPOSE_CONV:
 #ifndef OBS_BUILD
       case tflite::BuiltinOperator_TILE:
@@ -962,9 +962,10 @@ TfLiteStatus AddOpsAndParams(
 #endif
     }
 
-    //if (nnapi_version == 11 && GetAndroidSdkVersionCached() < 28) {
-    //  FATAL("Op %d needs NNAPI1.1", builtin);
-    //}
+    if (nnapi_version == 11 && GetAndroidSdkVersionCached() < 28) {
+      //logError("Op %d needs NNAPI1.1", builtin);
+      //return kTfLiteError;
+    }
 
     // Add the operation.
     RETURN_ERROR_IF_NN_FAILED(ANeuralNetworksModel_addOperation(
@@ -991,7 +992,7 @@ TfLiteStatus NNAPIDelegate::BuildGraph(::tflite::Interpreter* interpreter) {
     std::vector<int64_t> tensor_id_to_nnapi_id(interpreter->tensors_size(),
                                                kOperandNotNeeded);
     auto set_ids_to_not_set = [&tensor_id_to_nnapi_id](const int* buf,
-                                                       size_t count) {
+                                                       int count) {
       for (int j = 0; j < count; j++) {
         auto tensor_id = buf[j];
         if (tensor_id != kOptionalTensor) {
diff --git a/libs/tflite/src/ext/nnapi_delegate_ex_AddOpsAndParams_lambda.inc b/libs/tflite/src/ext/nnapi_delegate_ex_AddOpsAndParams_lambda.inc
index a91e4de..8c539d6 100644
--- a/libs/tflite/src/ext/nnapi_delegate_ex_AddOpsAndParams_lambda.inc
+++ b/libs/tflite/src/ext/nnapi_delegate_ex_AddOpsAndParams_lambda.inc
@@ -17,17 +17,6 @@
     add_scalar_int32(width);
   };
 
-  auto check_l2normalization_params = [interpreter, &node](void* data) {
-    auto builtin = reinterpret_cast<TfLiteL2NormParams*>(data);
-    if (builtin->activation != kTfLiteActNone) {
-      FATAL("NNAPI does not support L2Normalization with fused activations");
-    }
-    if ((node.inputs->size > 0) &&
-        (interpreter->tensor(node.inputs->data[0])->dims->size != 4)) {
-      FATAL("NNAPI only supports input rank 4 for L2Normalization");
-    }
-  };
-
   auto add_transpose_conv_params = [&add_scalar_int32](void* data) {
     auto builtin = reinterpret_cast<TfLiteTransposeConvParams*>(data);
     add_scalar_int32(builtin->padding);
-- 
2.7.4