Previously, tflite_benchmark_model always ran models with the interpreter only.
Now, it uses the NNAPI delegate when `use_nnapi=true` is given:
```
LD_LIBRARY_PATH=Product/out/lib Product/out/bin/tflite_benchmark_model \
--graph=../models/mobilenet_quant_v1_224.tflite \
--input_layer="Placeholder" --input_layer_shape="1,224,224,3" \
--num_threads=1 --use_nnapi=true \
```
Note that this is still under development;
instrumentation code for each operation has not been inserted yet.
Signed-off-by: Sanggyu Lee <sg5.lee@samsung.com>
#include "tensorflow/contrib/lite/op_resolver.h"
#include "tensorflow/contrib/lite/string_util.h"
#include "logging.h"
#include "support/tflite/nnapi_delegate.h"
#ifdef TFLITE_CUSTOM_OPS_HEADER
void RegisterSelectedOps(::tflite::MutableOpResolver* resolver);
}
void BenchmarkTfLiteModel::RunImpl() {
- if (interpreter->Invoke() != kTfLiteOk) {
- TFLITE_LOG(FATAL) << "Failed to invoke!";
+ bool use_nnapi = params_.Get<bool>("use_nnapi");
+ if (use_nnapi) {
+ if (nnfw::NNAPIDelegate().Invoke(interpreter.get()) != kTfLiteOk) {
+ TFLITE_LOG(FATAL) << "Failed to invoke!";
+ }
+ } else {
+ if (interpreter->Invoke() != kTfLiteOk) {
+ TFLITE_LOG(FATAL) << "Failed to invoke!";
+ }
}
}