private:
::tflite::Interpreter *const _interp;
- nnfw::NNAPIDelegate _delegate;
+ nnfw::tflite::NNAPIDelegate _delegate;
};
} // namespace tflite
#include "tensorflow/contrib/lite/context.h"
-namespace tflite
+namespace nnfw
{
-namespace ops
+namespace tflite
{
namespace custom
{
-namespace nnfw
-{
namespace Abs
{
TfLiteStatus EvalAbs(TfLiteContext *context, TfLiteNode *node);
} // namespace Abs
-} // namespace nnfw
} // namespace custom
-} // namespace ops
} // namespace tflite
+} // namespace nnfw
#endif // __NNFW_TFLITE_EXT_KERNELS_ABS_H__
#include "tflite/ext/kernels/TensorFlowSum.h"
#include "tflite/ext/kernels/Abs.h"
-namespace tflite
+namespace nnfw
{
-namespace ops
+namespace tflite
{
namespace custom
{
-namespace nnfw
-{
#define REGISTER_FUNCTION(Name) \
TfLiteRegistration *Register_##Name(void) \
#undef REGISTER_FUNCTION
-} // namespace nnfw
} // namespace custom
-} // namespace ops
} // namespace tflite
+} // namespace nnfw
#endif // __NNFW_TFLITE_EXT_KERNELS_CUSTOM_OP_H__
#include "tensorflow/contrib/lite/context.h"
-namespace tflite
+namespace nnfw
{
-namespace ops
+namespace tflite
{
namespace custom
{
-namespace nnfw
-{
namespace SquaredDifference
{
TfLiteStatus EvalSquaredDifference(TfLiteContext *context, TfLiteNode *node);
} // namespace SquaredDifference
-} // namespace nnfw
} // namespace custom
-} // namespace ops
} // namespace tflite
+} // namespace nnfw
#endif // __NNFW_TFLITE_EXT_KERNELS_SQUARED_DIFFERENCE_H__
#include "tensorflow/contrib/lite/context.h"
-namespace tflite
+namespace nnfw
{
-namespace ops
+namespace tflite
{
namespace custom
{
-namespace nnfw
-{
namespace TensorFlowMax
{
TfLiteStatus EvalTensorFlowMax(TfLiteContext *context, TfLiteNode *node);
} // namespace TensorFlowMax
-} // namespace nnfw
} // namespace custom
-} // namespace ops
} // namespace tflite
+} // namespace nnfw
#endif // __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_MAX_H__
#include "tensorflow/contrib/lite/context.h"
-namespace tflite
+namespace nnfw
{
-namespace ops
+namespace tflite
{
namespace custom
{
-namespace nnfw
-{
namespace TensorFlowSum
{
TfLiteStatus EvalTensorFlowSum(TfLiteContext *context, TfLiteNode *node);
} // namespace TensorFlowSum
-} // namespace nnfw
} // namespace custom
-} // namespace ops
} // namespace tflite
+} // namespace nnfw
#endif // __NNFW_TFLITE_EXT_KERNELS_TENSORFLOW_SUM_H__
#include "tensorflow/contrib/lite/context.h"
#include "tensorflow/contrib/lite/model.h"
-// TODO Use namespace nnfw
+namespace nnfw {
namespace tflite {
-namespace ops {
-namespace builtin {
-class BuiltinOpResolver : public MutableOpResolver {
+class BuiltinOpResolver : public ::tflite::MutableOpResolver {
public:
BuiltinOpResolver();
};
-} // namespace builtin
-} // namespace ops
} // namespace tflite
+} // namespace nnfw
#endif // __NNFW_TFLITE_EXT_KERNELS_REGISTER_H__
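For reference, a minimal usage sketch (not part of the patch) of the relocated resolver; `BuildInterpreter` and its argument are illustrative names only, and the calls are the contrib-era `::tflite::InterpreterBuilder` API already used elsewhere in this change:

#include <memory>
#include "tensorflow/contrib/lite/model.h"
#include "tflite/ext/kernels/register.h"

// Builds a TFLite interpreter that resolves both builtin and nnfw custom ops.
std::unique_ptr<::tflite::Interpreter> BuildInterpreter(const ::tflite::FlatBufferModel &model)
{
  nnfw::tflite::BuiltinOpResolver resolver; // was ::tflite::ops::builtin::BuiltinOpResolver
  std::unique_ptr<::tflite::Interpreter> interpreter;
  ::tflite::InterpreterBuilder(model, resolver)(&interpreter);
  return interpreter;
}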
class ANeuralNetworksCompilation;
namespace nnfw {
+namespace tflite {
class NNAPIAllocation : public ::tflite::MMAPAllocation {
public:
std::vector<int> model_states_outputs_; // holds TFLite tensor ids
};
+} // namespace tflite
} // namespace nnfw
#endif // __NNFW_TFLITE_EXT_NNAPI_DELEGATE_H__
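Similarly, a hedged sketch (not part of the patch) of how the relocated delegate type is used; `interpreter` stands for a `std::unique_ptr<::tflite::Interpreter>`, mirroring the call sites updated below:

// Attach the NNAPI delegate to an existing interpreter.
nnfw::tflite::NNAPIDelegate delegate; // was nnfw::NNAPIDelegate
if (delegate.BuildGraph(interpreter.get()))
{
  // a non-zero TfLiteStatus means NNAPI graph construction failed
}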
}
else
{
- nnfw::NNAPIDelegate d;
+ nnfw::tflite::NNAPIDelegate d;
if (d.BuildGraph(nnapi.get()))
{
#include <iostream>
#include <cmath>
-namespace tflite
+namespace nnfw
{
-namespace ops
+namespace tflite
{
namespace custom
{
-namespace nnfw
-{
namespace Abs
{
TfLiteStatus PrepareAbs(TfLiteContext *context, TfLiteNode *node)
{
- TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
- TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+ TF_LITE_ENSURE_EQ(context, ::tflite::NumInputs(node), 1);
+ TF_LITE_ENSURE_EQ(context, ::tflite::NumOutputs(node), 1);
- const TfLiteTensor *input = GetInput(context, node, 0);
- TfLiteTensor *output = GetOutput(context, node, 0);
+ const TfLiteTensor *input = ::tflite::GetInput(context, node, 0);
+ TfLiteTensor *output = ::tflite::GetOutput(context, node, 0);
TF_LITE_ENSURE_EQ(context, input->type, output->type);
TfLiteStatus EvalAbs(TfLiteContext *context, TfLiteNode *node)
{
- const TfLiteTensor *input = GetInput(context, node, 0);
- TfLiteTensor *output = GetOutput(context, node, 0);
- size_t elements = NumElements(input);
+ const TfLiteTensor *input = ::tflite::GetInput(context, node, 0);
+ TfLiteTensor *output = ::tflite::GetOutput(context, node, 0);
+ size_t elements = ::tflite::NumElements(input);
switch (input->type)
{
case kTfLiteFloat32:
}
} // namespace Abs
-} // nnfw
} // namespace custom
-} // namespace ops
} // namespace tflite
+} // namespace nnfw
#include <iostream>
-namespace tflite
+namespace nnfw
{
-namespace ops
+namespace tflite
{
namespace custom
{
-namespace nnfw
-{
namespace SquaredDifference
{
TfLiteStatus PrepareSquaredDifference(TfLiteContext *context, TfLiteNode *node)
{
- TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
- TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+ TF_LITE_ENSURE_EQ(context, ::tflite::NumInputs(node), 2);
+ TF_LITE_ENSURE_EQ(context, ::tflite::NumOutputs(node), 1);
- const TfLiteTensor *input1 = GetInput(context, node, 0);
- const TfLiteTensor *input2 = GetInput(context, node, 1);
- TfLiteTensor *output = GetOutput(context, node, 0);
+ const TfLiteTensor *input1 = ::tflite::GetInput(context, node, 0);
+ const TfLiteTensor *input2 = ::tflite::GetInput(context, node, 1);
+ TfLiteTensor *output = ::tflite::GetOutput(context, node, 0);
TF_LITE_ENSURE_EQ(context, input1->type, input2->type);
TF_LITE_ENSURE_EQ(context, input1->type, output->type);
TfLiteStatus EvalSquaredDifference(TfLiteContext *context, TfLiteNode *node)
{
- const TfLiteTensor *input1 = GetInput(context, node, 0);
- const TfLiteTensor *input2 = GetInput(context, node, 1);
+ const TfLiteTensor *input1 = ::tflite::GetInput(context, node, 0);
+ const TfLiteTensor *input2 = ::tflite::GetInput(context, node, 1);
- TfLiteTensor *output = GetOutput(context, node, 0);
+ TfLiteTensor *output = ::tflite::GetOutput(context, node, 0);
- size_t elements = NumElements(input1);
+ size_t elements = ::tflite::NumElements(input1);
switch (input1->type)
{
}
} // namespace SquaredDifference
-} // nnfw
} // namespace custom
-} // namespace ops
} // namespace tflite
+} // namespace nnfw
#include <iostream>
-namespace tflite
+namespace nnfw
{
-namespace ops
+namespace tflite
{
namespace custom
{
-namespace nnfw
-{
namespace TensorFlowMax
{
{
TensorFlowMaxOp(TfLiteContext *context, TfLiteNode *node)
{
- input = tflite::GetInput(context, node, 0);
- axis = tflite::GetInput(context, node, 1);
- output = tflite::GetOutput(context, node, 0);
+ input = ::tflite::GetInput(context, node, 0);
+ axis = ::tflite::GetInput(context, node, 1);
+ output = ::tflite::GetOutput(context, node, 0);
}
const TfLiteTensor *input;
const TfLiteTensor *axis;
TfLiteTensor *resolved_axis)
{
TfLiteIntArray *axis_size = TfLiteIntArrayCreate(1);
- axis_size->data[0] = static_cast<int>(tflite::NumElements(op_context->axis));
+ axis_size->data[0] = static_cast<int>(::tflite::NumElements(op_context->axis));
return context->ResizeTensor(context, resolved_axis, axis_size);
}
// Resizes output array based on the input size and resolved axis.
TfLiteStatus ResizeOutputTensor(TfLiteContext *context, TensorFlowMaxOp *op_context)
{
- size_t num_axis = tflite::NumElements(op_context->axis);
+ size_t num_axis = ::tflite::NumElements(op_context->axis);
TfLiteIntArray *input_dims = op_context->input->dims;
- int input_num_dims = tflite::NumDimensions(op_context->input);
+ int input_num_dims = ::tflite::NumDimensions(op_context->input);
const int *axis = op_context->axis->data.i32;
{
}
}
// Determines output dimensions.
- int output_num_dims = tflite::NumDimensions(op_context->output);
+ int output_num_dims = ::tflite::NumDimensions(op_context->output);
TF_LITE_ENSURE(context, (input_num_dims == output_num_dims) ||
(input_num_dims - num_reduce_axis == output_num_dims));
scratch_tensor->type = kTfLiteInt32;
scratch_tensor->allocation_type = kTfLiteArenaRw;
TfLiteIntArray *index_size = TfLiteIntArrayCreate(1);
- index_size->data[0] = tflite::NumDimensions(op_context->input);
+ index_size->data[0] = ::tflite::NumDimensions(op_context->input);
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_tensor, index_size));
// Creates a temp tensor to store resolved axis given input data.
TfLiteStatus PrepareTensorFlowMax(TfLiteContext *context, TfLiteNode *node)
{
- TF_LITE_ENSURE_EQ(context, tflite::NumInputs(node), 2);
- TF_LITE_ENSURE_EQ(context, tflite::NumOutputs(node), 1);
+ TF_LITE_ENSURE_EQ(context, ::tflite::NumInputs(node), 2);
+ TF_LITE_ENSURE_EQ(context, ::tflite::NumOutputs(node), 1);
TensorFlowMaxOp op_context(context, node);
TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context));
TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
// Leaves work to Eval if axis is not constant; else resizes output.
- if (!tflite::IsConstantTensor(op_context.axis))
+ if (!::tflite::IsConstantTensor(op_context.axis))
{
- tflite::SetTensorToDynamic(op_context.output);
- tflite::SetTensorToDynamic(resolved_axis);
+ ::tflite::SetTensorToDynamic(op_context.output);
+ ::tflite::SetTensorToDynamic(resolved_axis);
return kTfLiteOk;
}
resolved_axis->allocation_type = kTfLiteArenaRw;
{
TensorFlowMaxOp op_context(context, node);
- int num_axis = static_cast<int>(tflite::NumElements(op_context.axis));
+ int num_axis = static_cast<int>(::tflite::NumElements(op_context.axis));
TfLiteTensor *temp_index = &context->tensors[node->temporaries->data[0]];
TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
// Resize the output tensor if the output tensor is dynamic.
- if (tflite::IsDynamicTensor(op_context.output))
+ if (::tflite::IsDynamicTensor(op_context.output))
{
TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis));
TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
return returnStatus;
}
+
} // namespace TensorFlowMax
-} // namespace nnfw
} // namespace custom
-} // namespace ops
} // namespace tflite
+} // namespace nnfw
#include <iostream>
-namespace tflite
+namespace nnfw
{
-namespace ops
+namespace tflite
{
namespace custom
{
-namespace nnfw
-{
namespace TensorFlowSum
{
{
TensorFlowSumOp(TfLiteContext *context, TfLiteNode *node)
{
- input = tflite::GetInput(context, node, 0);
- axis = tflite::GetInput(context, node, 1);
- output = tflite::GetOutput(context, node, 0);
+ input = ::tflite::GetInput(context, node, 0);
+ axis = ::tflite::GetInput(context, node, 1);
+ output = ::tflite::GetOutput(context, node, 0);
}
const TfLiteTensor *input;
const TfLiteTensor *axis;
TfLiteTensor *resolved_axis)
{
TfLiteIntArray *axis_size = TfLiteIntArrayCreate(1);
- axis_size->data[0] = static_cast<int>(tflite::NumElements(op_context->axis));
+ axis_size->data[0] = static_cast<int>(::tflite::NumElements(op_context->axis));
return context->ResizeTensor(context, resolved_axis, axis_size);
}
// Resizes output array based on the input size and resolved axis.
TfLiteStatus ResizeOutputTensor(TfLiteContext *context, TensorFlowSumOp *op_context)
{
- size_t num_axis = tflite::NumElements(op_context->axis);
+ size_t num_axis = ::tflite::NumElements(op_context->axis);
TfLiteIntArray *input_dims = op_context->input->dims;
- int input_num_dims = tflite::NumDimensions(op_context->input);
+ int input_num_dims = ::tflite::NumDimensions(op_context->input);
const int *axis = op_context->axis->data.i32;
{
}
}
// Determines output dimensions.
- int output_num_dims = tflite::NumDimensions(op_context->output);
+ int output_num_dims = ::tflite::NumDimensions(op_context->output);
TF_LITE_ENSURE(context, (input_num_dims == output_num_dims) ||
(input_num_dims - num_reduce_axis == output_num_dims));
scratch_tensor->type = kTfLiteInt32;
scratch_tensor->allocation_type = kTfLiteArenaRw;
TfLiteIntArray *index_size = TfLiteIntArrayCreate(1);
- index_size->data[0] = tflite::NumDimensions(op_context->input);
+ index_size->data[0] = ::tflite::NumDimensions(op_context->input);
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_tensor, index_size));
// Creates a temp tensor to store resolved axis given input data.
TfLiteStatus PrepareTensorFlowSum(TfLiteContext *context, TfLiteNode *node)
{
- TF_LITE_ENSURE_EQ(context, tflite::NumInputs(node), 2);
- TF_LITE_ENSURE_EQ(context, tflite::NumOutputs(node), 1);
+ TF_LITE_ENSURE_EQ(context, ::tflite::NumInputs(node), 2);
+ TF_LITE_ENSURE_EQ(context, ::tflite::NumOutputs(node), 1);
TensorFlowSumOp op_context(context, node);
TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context));
TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
// Leaves work to Eval if axis is not constant; else resizes output.
- if (!tflite::IsConstantTensor(op_context.axis))
+ if (!::tflite::IsConstantTensor(op_context.axis))
{
- tflite::SetTensorToDynamic(op_context.output);
- tflite::SetTensorToDynamic(resolved_axis);
+ ::tflite::SetTensorToDynamic(op_context.output);
+ ::tflite::SetTensorToDynamic(resolved_axis);
return kTfLiteOk;
}
resolved_axis->allocation_type = kTfLiteArenaRw;
{
TensorFlowSumOp op_context(context, node);
- int num_axis = static_cast<int>(tflite::NumElements(op_context.axis));
+ int num_axis = static_cast<int>(::tflite::NumElements(op_context.axis));
TfLiteTensor *temp_index = &context->tensors[node->temporaries->data[0]];
TfLiteTensor *resolved_axis = &context->tensors[node->temporaries->data[1]];
// Resize the output tensor if the output tensor is dynamic.
- if (tflite::IsDynamicTensor(op_context.output))
+ if (::tflite::IsDynamicTensor(op_context.output))
{
TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis));
TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
return returnStatus;
}
+
} // namespace TensorFlowSum
-} // namespace nnfw
} // namespace custom
-} // namespace ops
} // namespace tflite
+} // namespace nnfw
limitations under the License.
==============================================================================*/
+// NOTE To minimize diff with upstream tensorflow, disable clang-format
+// clang-format off
+
// NOTE This code is derived from the following file (in TensorFlow)
// 'externals/tensorflow/tensorflow/contrib/lite/kernels/register.cc'
#include "tflite/ext/kernels/register.h"
#include "tflite/ext/kernels/CustomOps.h"
-// TODO Use namespace nnfw
-namespace tflite
-{
-namespace ops
-{
-namespace builtin
-{
+namespace tflite {
+namespace ops {
+namespace builtin {
TfLiteRegistration *Register_RELU();
TfLiteRegistration *Register_RELU_N1_TO_1();
TfLiteRegistration *Register_ZEROS_LIKE();
#endif // OBS_BUILD
+} // namespace builtin
+} // namespace ops
+} // namespace tflite
+
+namespace nnfw {
+namespace tflite {
+
BuiltinOpResolver::BuiltinOpResolver()
{
+ // Using-directives to minimize diff with upstream tensorflow
+ using namespace ::tflite::ops::builtin;
+ using namespace ::tflite;
+
AddBuiltin(BuiltinOperator_RELU, Register_RELU());
AddBuiltin(BuiltinOperator_RELU_N1_TO_1, Register_RELU_N1_TO_1());
AddBuiltin(BuiltinOperator_RELU6, Register_RELU6());
AddBuiltin(BuiltinOperator_ZEROS_LIKE, Register_ZEROS_LIKE());
#endif // OBS_BUILD
- AddCustom("TensorFlowMax", tflite::ops::custom::nnfw::Register_TensorFlowMax());
- AddCustom("SquaredDifference", tflite::ops::custom::nnfw::Register_SquaredDifference());
- AddCustom("TensorFlowSum", tflite::ops::custom::nnfw::Register_TensorFlowSum());
- AddCustom("Abs", tflite::ops::custom::nnfw::Register_Abs());
+ AddCustom("TensorFlowMax", nnfw::tflite::custom::Register_TensorFlowMax());
+ AddCustom("SquaredDifference", nnfw::tflite::custom::Register_SquaredDifference());
+ AddCustom("TensorFlowSum", nnfw::tflite::custom::Register_TensorFlowSum());
+ AddCustom("Abs", nnfw::tflite::custom::Register_Abs());
}
-} // namespace builtin
-} // namespace ops
-} // namespace tflite
+} // namespace tflite
+} // namespace nnfw
#endif
namespace nnfw {
+namespace tflite {
void logError(const char* format, ...) {
// stderr is convenient for native tests, but is not captured for apps
}
// Adds the tensors of the interpreter to the NN API model.
-TfLiteStatus addTensorOperands(tflite::Interpreter* interpreter,
+TfLiteStatus addTensorOperands(::tflite::Interpreter* interpreter,
ANeuralNetworksModel* nn_model,
uint32_t* no_of_operands_added,
std::vector<int64_t>* nnapi_ids) {
// only memory
if (tensor->allocation_type == kTfLiteMmapRo) {
if (const NNAPIAllocation* alloc = dynamic_cast<const NNAPIAllocation*>(
- static_cast<const tflite::Allocation*>(tensor->allocation))) {
+ static_cast<const ::tflite::Allocation*>(tensor->allocation))) {
RETURN_ERROR_IF_NN_FAILED(
ANeuralNetworksModel_setOperandValueFromMemory(
nn_model, next_id, alloc->memory(),
// Adds the operations and their parameters to the NN API model.
// 'next-id' is the operand ID of the next operand of the model.
TfLiteStatus AddOpsAndParams(
- tflite::Interpreter* interpreter, ANeuralNetworksModel* nn_model,
+ ::tflite::Interpreter* interpreter, ANeuralNetworksModel* nn_model,
uint32_t next_id, std::vector<int>* model_state_inputs,
std::vector<int>* model_state_outputs,
const std::vector<int64_t>& tensor_id_to_nnapi_id) {
const auto* node_and_registration = interpreter->node_and_registration(i);
const TfLiteNode& node = node_and_registration->first;
const TfLiteRegistration& registration = node_and_registration->second;
- tflite::BuiltinOperator builtin =
- static_cast<tflite::BuiltinOperator>(registration.builtin_code);
+ ::tflite::BuiltinOperator builtin =
+ static_cast<::tflite::BuiltinOperator>(registration.builtin_code);
// Add the parameters.
std::vector<uint32_t> augmented_inputs, augmented_outputs;
ANeuralNetworksOperationType nn_op_type;
+ // Namespace alias to minimize diff with upstream tensorflow
+ namespace tflite = ::tflite;
+
switch (builtin) {
case tflite::BuiltinOperator_ADD:
nn_op_type = ANEURALNETWORKS_ADD;
bool NNAPIDelegate::IsSupported() { return nnfw::NNAPIExists(); }
+} // namespace tflite
} // namespace nnfw
// clang-format on
{
std::unique_ptr<::tflite::Interpreter> interpreter;
- ::tflite::ops::builtin::BuiltinOpResolver resolver;
+ nnfw::tflite::BuiltinOpResolver resolver;
::tflite::InterpreterBuilder builder(_model, resolver);
#include <stdexcept>
using namespace tflite;
-using namespace tflite::ops::builtin;
+using namespace nnfw::tflite;
int main(const int argc, char **argv)
{
#include "util/benchmark.h"
using namespace tflite;
-using namespace tflite::ops::builtin;
+using namespace nnfw::tflite;
void help(std::ostream &out, const int argc, char **argv)
{
#include "tflite/ext/nnapi_delegate.h"
namespace {
- nnfw::NNAPIDelegate nnfw_delegate_;
+ nnfw::tflite::NNAPIDelegate nnfw_delegate_;
}
#ifdef TFLITE_CUSTOM_OPS_HEADER
tflite::MutableOpResolver resolver;
RegisterSelectedOps(&resolver);
#else
- tflite::ops::builtin::BuiltinOpResolver resolver;
+ nnfw::tflite::BuiltinOpResolver resolver;
#endif
tflite::InterpreterBuilder(*model, resolver)(&interpreter);
#include <iostream>
using namespace tflite;
-using namespace tflite::ops::builtin;
+using namespace nnfw::tflite;
namespace vector
{
#include <algorithm>
using namespace tflite;
-using namespace tflite::ops::builtin;
+using namespace nnfw::tflite;
using namespace std::placeholders; // for _1, _2 ...
void print_max_idx(float *f, int size)