#include <unordered_set>
#include <vector>
-#ifdef TFLITE_FLEX
-#include "tensorflow/lite/delegates/flex/delegate.h"
-#endif // TFLITE_FLEX
#include "tflite/ext/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/op_resolver.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/tools/benchmark/logging.h"
+#ifdef GEMMLOWP_PROFILING
+#include "gemmlowp/profiling/profiler.h"
+#endif
+
// For profiling nnapi_delegate
#include "profiling/profiling.h"
#include "tflite/ext/nnapi_delegate.h"
summarizer_.ProcessProfiles(profile_events, *interpreter_);
}
// Called once before the benchmark run starts. When gemmlowp profiling is
// compiled in, attaches the current thread to gemmlowp's sampling profiler
// and begins collecting samples; otherwise this is a no-op.
void GemmlowpProfilingListener::OnBenchmarkStart(
    const BenchmarkParams& params) {
#ifdef GEMMLOWP_PROFILING
  gemmlowp::RegisterCurrentThreadForProfiling();
  gemmlowp::StartProfiling();
#endif
}
+
// Called once after the benchmark run finishes. When gemmlowp profiling is
// compiled in, stops sampling and emits the aggregated profile; otherwise a
// no-op.
void GemmlowpProfilingListener::OnBenchmarkEnd(
    const BenchmarkResults& results) {
#ifdef GEMMLOWP_PROFILING
  gemmlowp::FinishProfiling();
#endif
}
+
namespace {
// Splits `str` on the delimiter `delim` into a vector of substrings.
// NOTE(review): this hunk is diff-elided — the visible body (`return true;`)
// cannot be the real implementation of a function returning
// std::vector<std::string>; the interior lines were collapsed out of this
// view. Confirm against the full file before editing.
std::vector<std::string> Split(const std::string& str, const char delim) {
  return true;
}
-BenchmarkParams GetDefaultParams() {
+std::vector<int> TfLiteIntArrayToVector(const TfLiteIntArray* int_array) {
+ std::vector<int> values;
+ values.reserve(int_array->size);
+ for (size_t i = 0; i < int_array->size; i++) {
+ values.push_back(int_array->data[i]);
+ }
+ return values;
+}
+
+} // namespace
+
// Builds the default parameter set for a TFLite model benchmark: the base
// BenchmarkModel defaults extended with TFLite-specific flags ("graph",
// "input_layer", ...).
// NOTE(review): this hunk is diff-elided — the AddParam("input_layer", ...)
// call below is cut mid-statement and subsequent AddParam lines are missing
// from this view; consult the full file before modifying.
BenchmarkParams BenchmarkTfLiteModel::DefaultParams() {
  BenchmarkParams default_params = BenchmarkModel::DefaultParams();
  default_params.AddParam("graph", BenchmarkParam::Create<std::string>(""));
  default_params.AddParam("input_layer",
  return default_params;
}
-} // namespace
-
// Default constructor: delegates to the parameterized constructor with the
// default parameter set, so listener registration lives in exactly one place.
// (This span contained both the pre-patch body — GetDefaultParams() plus an
// AddListener call — and the post-patch delegating form; resolved to the
// post-patch delegating constructor.)
BenchmarkTfLiteModel::BenchmarkTfLiteModel()
    : BenchmarkTfLiteModel(DefaultParams()) {}
// Constructs the benchmark with an explicit parameter set and registers the
// profiling listeners so they receive benchmark lifecycle callbacks
// (OnBenchmarkStart/OnBenchmarkEnd). Registration order is preserved:
// profiling_listener_ first, then gemmlowp_profiling_listener_.
BenchmarkTfLiteModel::BenchmarkTfLiteModel(BenchmarkParams params)
    : BenchmarkModel(std::move(params)) {
  AddListener(&profiling_listener_);
  AddListener(&gemmlowp_profiling_listener_);
}
std::vector<Flag> BenchmarkTfLiteModel::GetFlags() {
void BenchmarkTfLiteModel::PrepareInputsAndOutputs() {
auto interpreter_inputs = interpreter->inputs();
// Set the values of the input tensors.
- for (int j = 0; j < inputs.size(); ++j) {
- const InputLayerInfo& input = inputs[j];
+ for (int j = 0; j < interpreter_inputs.size(); ++j) {
int i = interpreter_inputs[j];
TfLiteTensor* t = interpreter->tensor(i);
- std::vector<int> sizes = input.shape;
-
+ std::vector<int> sizes = TfLiteIntArrayToVector(t->dims);
// TODO(ahentz): below we ignore the O-th dimension (number of batches).
if (t->type == kTfLiteFloat32) {
FillRandomValue<float>(
interpreter->typed_tensor<uint8_t>(i),
std::vector<int>(sizes.begin() + 1, sizes.end()),
[]() { return static_cast<uint8_t>(rand()) % 255; });
+ } else if (t->type == kTfLiteInt8) {
+ FillRandomValue<int8_t>(
+ interpreter->typed_tensor<int8_t>(i),
+ std::vector<int>(sizes.begin() + 1, sizes.end()),
+ []() { return static_cast<int8_t>(rand()) % 255 - 127; });
} else if (t->type == kTfLiteString) {
tflite::DynamicBuffer buffer;
FillRandomString(&buffer, sizes, []() {
return "we're have some friends over saturday to hang out in the yard";
});
- buffer.WriteToTensor(interpreter->tensor(i));
+ buffer.WriteToTensor(interpreter->tensor(i), /*new_shape=*/nullptr);
} else {
TFLITE_LOG(FATAL) << "Don't know how to populate tensor " << t->name
<< " of type " << t->type;
bool use_nnapi = params_.Get<bool>("use_nnapi");
interpreter->UseNNAPI(use_nnapi);
-
if (use_nnapi) {
if (nnfw_delegate_.BuildGraph(interpreter.get()) != kTfLiteOk) {
TFLITE_LOG(FATAL) << "Failed to BuildGraph!";
}
}
-
-#ifdef TFLITE_FLEX
- TFLITE_LOG(INFO) << "Instantiating Flex Delegate";
- delegate_ = FlexDelegate::Create();
- if (delegate_) {
- interpreter->ModifyGraphWithDelegate(delegate_.get(),
- /*allow_dynamic_tensors=*/true);
- }
-#endif // TFLITE_FLEX
+ ApplyDelegates();
auto interpreter_inputs = interpreter->inputs();