#include <unistd.h>
#include <time.h>
#include <queue>
+#include <thread>
// H/W
#define MV_INFERENCE_TFLITE_MAX_THREAD_NUM -1
LOGE("Failed to construct GPU delegate");
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
+ } else {
+ LOGI("Use XNNPACK.");
+
+ // Default to one worker per hardware thread; the
+ // FORCE_TFLITE_NUM_THREADS environment variable is a debug/tuning
+ // override (parsed with atoi, so non-numeric values yield 0, which
+ // XNNPACK treats as single-threaded).
+ int num_threads = std::thread::hardware_concurrency();
+ char *env_tflite_num_threads = getenv("FORCE_TFLITE_NUM_THREADS");
+ if (env_tflite_num_threads)
+ {
+ num_threads = atoi(env_tflite_num_threads);
+ LOGI("@@@@@@ FORCE_TFLITE_NUM_THREADS(XNNPACK)=%d", num_threads);
+ }
+
+ // IMPORTANT: initialize options with TfLiteXNNPackDelegateOptionsDefault() for
+ // API-compatibility with future extensions of the TfLiteXNNPackDelegateOptions
+ // structure.
+ TfLiteXNNPackDelegateOptions xnnpack_options = TfLiteXNNPackDelegateOptionsDefault();
+ xnnpack_options.num_threads = num_threads;
+
+ TfLiteDelegate *delegate = TfLiteXNNPackDelegateCreate(&xnnpack_options);
+ if (!delegate) {
+ // Creation failed: bail out here instead of handing a NULL delegate
+ // to ModifyGraphWithDelegate() below.
+ LOGE("Failed to create XNNPACK delegate. (%s:%d)", __FILE__, __LINE__);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ if (mInterpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk) {
+ LOGE("Failed to modify graph with XNNPACK delegate");
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
}
mInterpreter->SetNumThreads(MV_INFERENCE_TFLITE_MAX_THREAD_NUM);
#include <inference_engine_common.h>
#include "tensorflow2/lite/delegates/gpu/delegate.h"
+#include "tensorflow2/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow2/lite/kernels/register.h"
#include "tensorflow2/lite/model.h"
#include "tensorflow2/lite/optional_debug_tools.h"