#include <queue>
// TODO. Below is test code. DO NOT use ML internal function.
-#define ENABLE_NO_ALLOC
-#if defined(ENABLE_NO_ALLOC)
-extern "C" int ml_single_invoke_no_alloc(ml_single_h single, const ml_tensors_data_h input, ml_tensors_data_h output);
+#define ENABLE_FAST
+#if defined(ENABLE_FAST)
+extern "C" int ml_single_invoke_fast(ml_single_h single, const ml_tensors_data_h input, ml_tensors_data_h output);
#endif
namespace InferenceEngineImpl
if (INFERENCE_BACKEND_NONE >= type || INFERENCE_BACKEND_MAX <= type ||
		INFERENCE_BACKEND_OPENCV == type) {
		// Reject out-of-range backend ids and the explicitly unsupported
		// OpenCV backend; include the offending value to aid debugging.
		LOGE("Invalid backend type.(%d)", type);
		return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
	}

	return err;
}
-#if defined(ENABLE_NO_ALLOC)
- err = ml_single_invoke_no_alloc(mSingle, mInputDataHandle, mOutputDataHandle);
+#if defined(ENABLE_FAST)
+ err = ml_single_invoke_fast(mSingle, mInputDataHandle, mOutputDataHandle);
if (err != ML_ERROR_NONE) {
- LOGE("Failed to request ml_single_invoke_no_alloc(%d).", err);
+ LOGE("Failed to request ml_single_invoke_fast(%d).", err);
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
#else