target_include_directories(nnfw_lib_tflite PUBLIC ${NNFW_INCLUDE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/include)
target_link_libraries(nnfw_lib_tflite tensorflow-lite ${LIB_PTHREAD} dl)
target_link_libraries(nnfw_lib_tflite nnfw_lib_misc)
+target_compile_options(nnfw_lib_tflite PRIVATE -Wall -Wextra -Werror)
add_executable(nnfw_lib_tflite_test_TensorView src/TensorView.test.cpp)
target_link_libraries(nnfw_lib_tflite_test_TensorView nnfw_lib_tflite)
{
_outfile << "tensor_shape_gen.append('{";
- size_t r = 0;
+ int r = 0;
for (; r < tensor->dims->size - 1; r++)
{
_outfile << tensor->dims->data[r] << ", ";
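Side note on the hunk above: TfLiteIntArray::size is a plain int, so a size_t index makes r < tensor->dims->size - 1 a signed/unsigned comparison that -Wextra -Werror rejects. A minimal, self-contained sketch of the same "separator between, not after" dump (the helper name is illustrative, not from the patch):

#include <sstream>
#include <string>

// Renders {d0, d1, ..., dn}: a ", " after every element except the last,
// mirroring the loop above plus its elided final write.
std::string format_dims(const int *data, int size)
{
  std::ostringstream os;
  os << '{';
  for (int r = 0; r < size - 1; r++) // int index to match TfLiteIntArray::size
    os << data[r] << ", ";
  if (size > 0)
    os << data[size - 1];
  os << '}';
  return os.str();
}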
namespace custom
{
-#define REGISTER_FUNCTION(Name) \
- TfLiteRegistration *Register_##Name(void) \
- { \
- static TfLiteRegistration r = { \
- Name::Init##Name, Name::Free##Name, Name::Prepare##Name, Name::Eval##Name, \
- }; \
- r.custom_name = #Name; \
- return &r; \
+#define REGISTER_FUNCTION(Name) \
+ TfLiteRegistration *Register_##Name(void) \
+ { \
+ static TfLiteRegistration r = {}; \
+ r.init = Name::Init##Name; \
+ r.free = Name::Free##Name; \
+ r.prepare = Name::Prepare##Name; \
+ r.invoke = Name::Eval##Name; \
+ r.custom_name = #Name; \
+ return &r; \
}
REGISTER_FUNCTION(TensorFlowMax)
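Context for the macro rewrite: brace-initializing only the first four members of TfLiteRegistration leaves the rest implicitly zeroed, which -Wextra flags via -Wmissing-field-initializers, and the positional form silently misassigns callbacks if upstream reorders members. A self-contained sketch of the difference, using a stand-in struct rather than the real TfLiteRegistration:

struct Registration // hypothetical stand-in with fewer fields
{
  void *(*init)(const char *, size_t);
  void (*free)(void *);
  const char *custom_name;
};

Registration old_style()
{
  // Trips -Wmissing-field-initializers: custom_name is only implicitly zeroed.
  Registration r = {nullptr, nullptr};
  return r;
}

Registration new_style()
{
  Registration r = {};   // value-init zeroes every member, no warning
  r.custom_name = "Abs"; // then set fields by name, robust to reordering
  return r;
}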
namespace Abs
{
-void *InitAbs(TfLiteContext *context, const char *buffer, size_t length) { return nullptr; }
+void *InitAbs(TfLiteContext *, const char *, size_t) { return nullptr; }
-void FreeAbs(TfLiteContext *context, void *buffer) {}
+void FreeAbs(TfLiteContext *, void *) {}
TfLiteStatus PrepareAbs(TfLiteContext *context, TfLiteNode *node)
{
namespace SquaredDifference
{
-void *InitSquaredDifference(TfLiteContext *context, const char *buffer, size_t length)
-{
- return nullptr;
-}
+void *InitSquaredDifference(TfLiteContext *, const char *, size_t) { return nullptr; }
-void FreeSquaredDifference(TfLiteContext *context, void *buffer) {}
+void FreeSquaredDifference(TfLiteContext *, void *) {}
TfLiteStatus PrepareSquaredDifference(TfLiteContext *context, TfLiteNode *node)
{
TfLiteTensor *output;
};
-void *InitTensorFlowMax(TfLiteContext *context, const char *buffer, size_t length)
+void *InitTensorFlowMax(TfLiteContext *context, const char *, size_t)
{
// Creates two temp tensors to store index and axis for internal
// implementation only.
return scratch_tensor_index;
}
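The elided Init body follows the stock TFLite scratch-tensor idiom: reserve temporaries through the context, keep the first index on the heap, and hand it back as the op's user data (released in the Free below). A hedged sketch of that idiom, assuming the TfLiteContext header this file already includes and only the standard AddTensors hook:

// Illustrative only: AddTensors() grows context->tensors by the requested
// count and reports the first new index through the out-parameter; the heap
// int comes back to the op later via node->user_data.
void *InitScratchSketch(TfLiteContext *context, const char *, size_t)
{
  auto *scratch_tensor_index = new int;
  context->AddTensors(context, 2, scratch_tensor_index); // index + axis temps
  return scratch_tensor_index;
}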
-void FreeTensorFlowMax(TfLiteContext *context, void *buffer)
+void FreeTensorFlowMax(TfLiteContext *, void *buffer)
{
delete static_cast<TensorFlowMaxOp *>(buffer);
}
// Resizes output array based on the input size and resolved axis.
TfLiteStatus ResizeOutputTensor(TfLiteContext *context, TensorFlowMaxOp *op_context)
{
- size_t num_axis = ::tflite::NumElements(op_context->axis);
+ int64_t num_axis = ::tflite::NumElements(op_context->axis);
TfLiteIntArray *input_dims = op_context->input->dims;
int input_num_dims = ::tflite::NumDimensions(op_context->input);
const int *axis = op_context->axis->data.i32;
{
// Calculates size of reducing axis.
- int num_reduce_axis = num_axis;
- for (int i = 0; i < num_axis; ++i)
+ int64_t num_reduce_axis = num_axis;
+ for (int64_t i = 0; i < num_axis; ++i)
{
int current = axis[i];
if (current < 0)
current += input_num_dims;
}
TF_LITE_ENSURE(context, current >= 0 && current < input_num_dims);
- for (int j = 0; j < i; ++j)
+ for (int64_t j = 0; j < i; ++j)
{
int previous = axis[j];
if (previous < 0)
if (input_num_dims == output_num_dims)
{
TfLiteIntArray *output_dims = TfLiteIntArrayCopy(input_dims);
- for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
+ for (int64_t axis_idx = 0; axis_idx < num_axis; ++axis_idx)
{
int current = axis[axis_idx];
output_dims->data[current] = 1;
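A worked example of what this resize computes: for input shape {2, 3, 4} and axis {-2}, the negative axis resolves to 1, so the keep-dims branch above produces {2, 1, 4}. The resolution step in isolation (the helper name is illustrative):

// Negative axes count from the back, exactly as the loops above shift them
// before the TF_LITE_ENSURE bounds check.
inline int resolve_axis(int axis, int rank) { return axis < 0 ? axis + rank : axis; }

// resolve_axis(-2, 3) == 1, and output_dims->data[1] = 1 rewrites
// {2, 3, 4} into {2, 1, 4}.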
inline TfLiteStatus
CustomMax(TfLiteContext *context, T *input_data, const int *input_dims, const int input_num_dims,
T *output_data, const int *output_dims, const int output_num_dims, const int *axis,
- const int num_axis_dimensions, bool keep_dims, int *temp_index, int *resolved_axis)
+ const int num_axis_dimensions, bool /*keep_dims*/, int *temp_index, int *resolved_axis)
{
// resolves axis.
int num_resolved_axis = 0;
TfLiteTensor *output;
};
-void *InitTensorFlowSum(TfLiteContext *context, const char *buffer, size_t length)
+void *InitTensorFlowSum(TfLiteContext *context, const char *, size_t)
{
// Creates two temp tensors to store index and axis for internal
// implementation only.
return scratch_tensor_index;
}
-void FreeTensorFlowSum(TfLiteContext *context, void *buffer)
+void FreeTensorFlowSum(TfLiteContext *, void *buffer)
{
delete static_cast<TensorFlowSumOp *>(buffer);
}
// Resizes output array based on the input size and resolved axis.
TfLiteStatus ResizeOutputTensor(TfLiteContext *context, TensorFlowSumOp *op_context)
{
- size_t num_axis = ::tflite::NumElements(op_context->axis);
+ int64_t num_axis = ::tflite::NumElements(op_context->axis);
TfLiteIntArray *input_dims = op_context->input->dims;
int input_num_dims = ::tflite::NumDimensions(op_context->input);
const int *axis = op_context->axis->data.i32;
{
// Calculates size of reducing axis.
- int num_reduce_axis = num_axis;
- for (int i = 0; i < num_axis; ++i)
+ int64_t num_reduce_axis = num_axis;
+ for (int64_t i = 0; i < num_axis; ++i)
{
int current = axis[i];
if (current < 0)
current += input_num_dims;
}
TF_LITE_ENSURE(context, current >= 0 && current < input_num_dims);
- for (int j = 0; j < i; ++j)
+ for (int64_t j = 0; j < i; ++j)
{
int previous = axis[j];
if (previous < 0)
if (input_num_dims == output_num_dims)
{
TfLiteIntArray *output_dims = TfLiteIntArrayCopy(input_dims);
- for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
+ for (int64_t axis_idx = 0; axis_idx < num_axis; ++axis_idx)
{
int current = axis[axis_idx];
output_dims->data[current] = 1;
inline TfLiteStatus
CustomSum(TfLiteContext *context, T *input_data, const int *input_dims, const int input_num_dims,
T *output_data, const int *output_dims, const int output_num_dims, const int *axis,
- const int num_axis_dimensions, bool keep_dims, int *temp_index, int *resolved_axis)
+ const int num_axis_dimensions, bool /*keep_dims*/, int *temp_index, int *resolved_axis)
{
// resolves axis.
int num_resolved_axis = 0;
auto add_scalar_int32 = [&nn_model, &augmented_inputs,
&next_id](int value) {
- ANeuralNetworksOperandType operand_type{.type = ANEURALNETWORKS_INT32};
+ ANeuralNetworksOperandType operand_type{};
+ operand_type.type = ANEURALNETWORKS_INT32;
CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
CHECK_NN(ANeuralNetworksModel_setOperandValue(nn_model, next_id, &value,
sizeof(int32_t)))
auto add_scalar_float32 = [&nn_model, &augmented_inputs,
&next_id](float value) {
- ANeuralNetworksOperandType operand_type{.type = ANEURALNETWORKS_FLOAT32};
+ ANeuralNetworksOperandType operand_type{};
+ operand_type.type = ANEURALNETWORKS_FLOAT32;
CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
CHECK_NN(ANeuralNetworksModel_setOperandValue(nn_model, next_id, &value,
sizeof(float)))
};
auto add_vector_int32 = [&](const int* values, uint32_t num_values) {
- ANeuralNetworksOperandType operand_type{
- .type = ANEURALNETWORKS_TENSOR_INT32,
- .dimensionCount = 1,
- .dimensions = &num_values};
+ ANeuralNetworksOperandType operand_type{};
+ operand_type.type = ANEURALNETWORKS_TENSOR_INT32;
+ operand_type.dimensionCount = 1;
+ operand_type.dimensions = &num_values;
CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
CHECK_NN(ANeuralNetworksModel_setOperandValue(
nn_model, next_id, values, sizeof(int32_t) * num_values));
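Why these lambdas changed shape: .type = ... designated initializers are a C99 feature that C++ only standardized in C++20, so on this pre-C++20 codebase GCC accepts them at best as an extension and the stricter warning set turns that into a build break. The portable replacement pattern, sketched on a stand-in struct (not the real NNAPI type):

struct OperandType // hypothetical stand-in for ANeuralNetworksOperandType
{
  int type;
  unsigned dimensionCount;
  const unsigned *dimensions;
};

OperandType make_scalar(int nn_type)
{
  // OperandType t{.type = nn_type}; // designated init: ill-formed pre-C++20
  OperandType t{};                   // value-init zeroes every member...
  t.type = nn_type;                  // ...then assign only what differs
  return t;
}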
auto add_optional_tensors = [&nn_model, &augmented_inputs,
&next_id](int nn_type) {
for (size_t idx = 0; idx < augmented_inputs.size(); idx++) {
- if (augmented_inputs[idx] == kOptionalTensor) {
+ if (augmented_inputs[idx] == static_cast<uint32_t>(kOptionalTensor)) {
const std::vector<uint32_t> dim = {0, 0};
ANeuralNetworksOperandType operand_type{nn_type, 2, dim.data(), 0, 0};
CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
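On the cast above: kOptionalTensor is a negative int sentinel while augmented_inputs stores uint32_t ids, so the bare == is exactly the signed/unsigned mismatch -Wsign-compare exists to catch. The cast is lossless here because the sentinel was stored through the same conversion. In miniature (the sentinel value is an assumption mirroring TFLite's convention):

#include <cstdint>
#include <vector>

constexpr int kSentinel = -1; // stand-in for kOptionalTensor

bool is_optional(const std::vector<uint32_t> &ids, size_t idx)
{
  // ids[idx] == kSentinel would warn; casting makes both sides uint32_t,
  // and -1 stored and compared through uint32_t stays 0xFFFFFFFF.
  return ids[idx] == static_cast<uint32_t>(kSentinel);
}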
int nnapi_version = 10;
#include "nnapi_delegate_ex_AddOpsAndParams_lambda.inc"
- ANeuralNetworksOperationType nn_op_type;
+ // -1 is a sentinel: every delegated case below overwrites nn_op_type, and
+ // the initializer keeps -Wmaybe-uninitialized quiet under the new -Werror.
+ ANeuralNetworksOperationType nn_op_type = -1;
// Namespace alias to minimize diff with upstream tensorflow
namespace tflite = ::tflite;
case tflite::BuiltinOperator_SELECT:
case tflite::BuiltinOperator_SLICE:
case tflite::BuiltinOperator_SIN:
- //case tflite::BuiltinOperator_LOG:
+ case tflite::BuiltinOperator_LOG:
//case tflite::BuiltinOperator_TRANSPOSE_CONV:
#ifndef OBS_BUILD
case tflite::BuiltinOperator_TILE:
#endif
}
- //if (nnapi_version == 11 && GetAndroidSdkVersionCached() < 28) {
- // FATAL("Op %d needs NNAPI1.1", builtin);
- //}
+ if (nnapi_version == 11 && GetAndroidSdkVersionCached() < 28) {
+   // TODO: reject ops that need NNAPI 1.1 on pre-28 SDKs; the old FATAL
+   // aborted the whole process, so the check is kept but disarmed for now.
+   //logError("Op %d needs NNAPI1.1", builtin);
+   //return kTfLiteError;
+ }
// Add the operation.
RETURN_ERROR_IF_NN_FAILED(ANeuralNetworksModel_addOperation(
std::vector<int64_t> tensor_id_to_nnapi_id(interpreter->tensors_size(),
kOperandNotNeeded);
auto set_ids_to_not_set = [&tensor_id_to_nnapi_id](const int* buf,
- size_t count) {
+ int count) {
for (int j = 0; j < count; j++) {
auto tensor_id = buf[j];
if (tensor_id != kOptionalTensor) {
add_scalar_int32(width);
};
- auto check_l2normalization_params = [interpreter, &node](void* data) {
- auto builtin = reinterpret_cast<TfLiteL2NormParams*>(data);
- if (builtin->activation != kTfLiteActNone) {
- FATAL("NNAPI does not support L2Normalization with fused activations");
- }
- if ((node.inputs->size > 0) &&
- (interpreter->tensor(node.inputs->data[0])->dims->size != 4)) {
- FATAL("NNAPI only supports input rank 4 for L2Normalization");
- }
- };
-
auto add_transpose_conv_params = [&add_scalar_int32](void* data) {
auto builtin = reinterpret_cast<TfLiteTransposeConvParams*>(data);
add_scalar_int32(builtin->padding);