*/
#include <inference_engine_error.h>
-#include "inference_engine_nnstreamer_private.h"
+#include "inference_engine_mlapi_private.h"
#include <fstream>
#include <iostream>
{
LOGI("ENTER");
- // TODO. Implement this function according to a given nnstreamer backend properly.
+ // TODO. Implement this function properly according to a given ML Single API backend.
LOGI("LEAVE");
{
LOGI("ENTER");
- // Output tensor buffers will be allocated by a backend plugin of nnstreamer
+ // Output tensor buffers will be allocated by a backend plugin of the NNStreamer ML Single API.
// So add a null tensor buffer object. This buffer will be updated at Run callback.
// Caution. This tensor buffer will be checked by the upper framework to verify that it is valid.
inference_engine_tensor_info tensor_info;
- // TODO. Set tensor info from a given nnstreamer backend instead of fixed one.
+ // TODO. Set tensor info from a given NNStreamer ML Single API backend instead of the fixed values below.
tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT16;
tensor_info.shape = { 1, 1001 };
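// A sketch of what replacing the fixed values above could look like once a
// model handle exists. mSingle is an assumed member; the ml_* calls are real
// ML Single API (<nnstreamer-single.h>), the wiring around them is not.
ml_tensors_info_h out_info = NULL;
if (ml_single_get_output_info(mSingle, &out_info) == ML_ERROR_NONE) {
    ml_tensor_type_e type = ML_TENSOR_TYPE_UNKNOWN;
    ml_tensor_dimension dim = { 0, };
    // Query the first output tensor only; a full implementation would loop
    // over ml_tensors_info_get_count().
    ml_tensors_info_get_tensor_type(out_info, 0, &type);
    ml_tensors_info_get_tensor_dimension(out_info, 0, dim);
    if (type == ML_TENSOR_TYPE_FLOAT32)
        tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
    // NNStreamer reports the innermost dimension first, so reverse it here.
    tensor_info.shape = { dim[1], dim[0] };
    ml_tensors_info_destroy(out_info);
}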
mDesignated_inputs.clear();
std::vector<std::string>().swap(mDesignated_inputs);
- // TODO. Request input property information to a given nnstreamer backend,
+ // TODO. Request input property information from a given NNStreamer ML Single API backend,
// and set it instead of the user-given one.
mDesignated_inputs = property.layer_names;
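// A sketch of the TODO above: take the names from the model itself instead
// of the user-given ones (mSingle is an assumed handle; the ml_* calls are
// real ML Single API).
ml_tensors_info_h in_info = NULL;
if (ml_single_get_input_info(mSingle, &in_info) == ML_ERROR_NONE) {
    unsigned int count = 0;
    ml_tensors_info_get_count(in_info, &count);
    for (unsigned int i = 0; i < count; ++i) {
        char *name = NULL;
        // The API allocates the returned name; the caller must free() it.
        if (ml_tensors_info_get_tensor_name(in_info, i, &name) == ML_ERROR_NONE && name) {
            mDesignated_inputs.push_back(name);
            free(name);
        }
    }
    ml_tensors_info_destroy(in_info);
}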
mDesignated_outputs.clear();
std::vector<std::string>().swap(mDesignated_outputs);
- // TODO. Request output property information to a given nnstreamer backend,
+ // TODO. Request output property information from a given NNStreamer ML Single API backend,
// and set it instead of the user-given one.
mDesignated_outputs = property.layer_names;
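// The output case would mirror the input sketch above, substituting
// ml_single_get_output_info() and mDesignated_outputs.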
return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
- // TODO. flag supported accel device types according to a given nnstreamer backend.
+ // TODO. Flag supported accel device types according to a given NNStreamer ML Single API backend.
capacity->supported_accel_devices = INFERENCE_TARGET_CUSTOM;
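// No ML Single API call reports which devices a backend supports, so each
// backend has to encode its own knowledge. A hedged sketch (the
// backend_supports_gpu() helper is hypothetical; the target flags are the
// bitmask values from inference_engine_type.h):
//
//   capacity->supported_accel_devices = INFERENCE_TARGET_CPU;
//   if (backend_supports_gpu())
//       capacity->supported_accel_devices |= INFERENCE_TARGET_GPU;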
LOGI("LEAVE");