return INFERENCE_ENGINE_ERROR_NONE;
}
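+
+ // Build an ml_tensors_info_h handle (tensor count, type and dimensions
+ // per layer) from the given layer property so that it can be passed to
+ // ml_single_open() for backends which cannot deduce the tensor
+ // information from the model file.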
+ int InferenceMLAPI::CreateMLAPITensorInfo(ml_tensors_info_h& tensor_info,
+ inference_engine_layer_property& layer_property)
+ {
+ if (layer_property.layers.empty()) {
+ LOGE("input or output property is empty.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ int err = ml_tensors_info_create(&tensor_info);
+ if (err != ML_ERROR_NONE) {
+ LOGE("Failed to create tensor info(%d).", err);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ err = ml_tensors_info_set_count(tensor_info, layer_property.layers.size());
+ if (err != ML_ERROR_NONE) {
+ LOGE("Failed to set tensor count(%d).", err);
+ ml_tensors_info_destroy(tensor_info);
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ size_t layer_idx = 0;
+
+ for (auto& iter : layer_property.layers) {
+ inference_engine_tensor_info& info = iter.second;
+
+ int tensor_type = 0;
+
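+ // Convert the internal data type to the corresponding ML API tensor
+ // type. ConvertTensorTypeToMLAPI() throws if no mapping exists.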
+ try {
+ tensor_type = ConvertTensorTypeToMLAPI(info.data_type);
+ } catch (const std::invalid_argument& ex) {
+ LOGE("Error (%s) (%d)", ex.what(), info.data_type);
+ ml_tensors_info_destroy(tensor_info);
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ err = ml_tensors_info_set_tensor_type(tensor_info, layer_idx, static_cast<ml_tensor_type_e>(tensor_type));
+ if (err != ML_ERROR_NONE) {
+ LOGE("Failed to set tensor type(%d).", err);
+ ml_tensors_info_destroy(tensor_info);
+ return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+ }
+
+ // TODO. NNStreamer expects a fixed dimension rank of 4 for the nntrainer
+ // tensor filter, presumably due to its internal tensor rank limit, so
+ // pad the shape with 1s. Check whether this restriction can be relaxed.
+ std::vector<unsigned int> indim(4, 1);
+
+ LOGI("Input tensor(%zu) shape:", layer_idx);
+
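+ // Copy the real shape into the leading entries; remaining ranks keep
+ // their default value of 1. This assumes the shape rank is at most 4.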
+ std::copy(info.shape.begin(), info.shape.end(), indim.begin());
+
+ for (auto& shape_value : indim)
+ LOGI("%u", shape_value);
+
+ err = ml_tensors_info_set_tensor_dimension(tensor_info, layer_idx, indim.data());
+ if (err != ML_ERROR_NONE) {
+ LOGE("Failed to set tensor dimension(%d).", err);
+ ml_tensors_info_destroy(tensor_info);
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ layer_idx++;
+ }
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+ }
+
int InferenceMLAPI::Load(std::vector<std::string> model_paths,
inference_model_format_e model_format)
{
LOGI("Model name = %s", model_str.c_str());
- // TODO. create ml_tensor_info for input and output tensor and pass
- // them as parameters of ml_single_open function.
+ ml_tensors_info_h in_info = NULL, out_info = NULL;
+
+ // In the case of the nntrainer tensor filter, input and output tensor
+ // information is required to load a given model.
+ if (mPluginType == INFERENCE_BACKEND_NNTRAINER) {
+ int ret = CreateMLAPITensorInfo(in_info, mInputProperty);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE)
+ return ret;
+
+ ret = CreateMLAPITensorInfo(out_info, mOutputProperty);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ ml_tensors_info_destroy(in_info);
+ return ret;
+ }
+ }
- int err = ml_single_open(&mSingle, model_str.c_str(), NULL, NULL,
+ int err = ml_single_open(&mSingle, model_str.c_str(), in_info, out_info,
nnfw_type, nnfw_hw);
+
+ // ml_single_open() is expected to take its own copy of the tensor
+ // information, so release the handles created above here.
+ if (in_info)
+ ml_tensors_info_destroy(in_info);
+
+ if (out_info)
+ ml_tensors_info_destroy(out_info);
if (err != ML_ERROR_NONE) {
LOGE("Failed to request ml_single_open(%d).", err);
LOGI("input tensor type = %d", in_type);
- int type = ConvertTensorType(in_type);
- if (type == -1) {
- return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+ int type = 0;
+
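+ // ConvertTensorTypeToInternal() throws std::invalid_argument for
+ // unknown ML API types; translate the exception into an engine error.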
+ try {
+ type = ConvertTensorTypeToInternal(in_type);
+ } catch (const std::invalid_argument& ex) {
+ LOGE("Error (%s) (%d)", ex.what(), in_type);
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
in_buffer.data_type = static_cast<inference_tensor_data_type_e>(type);
LOGI("output tensor type = %d", out_type);
- int type = ConvertTensorType(out_type);
- if (type == -1) {
- return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+ int type = 0;
+
+ try {
+ type = ConvertTensorTypeToInternal(out_type);
+ } catch (const std::invalid_argument& ex) {
+ LOGE("Error (%s) (%d)", ex.what(), out_type);
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
out_buffer.data_type = static_cast<inference_tensor_data_type_e>(type);
LOGI("input tensor type = %d", in_type);
- int type = ConvertTensorType(in_type);
- if (type == -1) {
- return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+ int type = 0;
+
+ try {
+ type = ConvertTensorTypeToInternal(in_type);
+ } catch (const std::invalid_argument& ex) {
+ LOGE("Error (%s) (%d)", ex.what(), in_type);
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
ret = ml_tensors_info_get_tensor_dimension(mInputInfoHandle, input.second, in_dim);
LOGI("output tensor type = %d", out_type);
- int type = ConvertTensorType(out_type);
- if (type == -1) {
- return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+ int type = 0;
+
+ try {
+ type = ConvertTensorTypeToInternal(out_type);
+ } catch (const std::invalid_argument& ex) {
+ LOGE("Error (%s) (%d)", ex.what(), out_type);
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
}
ret = ml_tensors_info_get_tensor_dimension(mOutputInfoHandle, output.second, out_dim);
return INFERENCE_ENGINE_ERROR_NONE;
}
- int InferenceMLAPI::ConvertTensorType(int tensor_type)
+ int InferenceMLAPI::ConvertTensorTypeToInternal(int tensor_type)
{
LOGI("ENTER");
+ int converted_type = 0;
+
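+ // Unsupported types are reported by throwing std::invalid_argument
+ // instead of returning a sentinel value, so every valid conversion
+ // result can be returned as-is.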
switch (tensor_type) {
case ML_TENSOR_TYPE_FLOAT32:
- return INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ converted_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ break;
case ML_TENSOR_TYPE_UINT8:
- return INFERENCE_TENSOR_DATA_TYPE_UINT8;
+ converted_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
+ break;
case ML_TENSOR_TYPE_UINT16:
- return INFERENCE_TENSOR_DATA_TYPE_UINT16;
+ converted_type = INFERENCE_TENSOR_DATA_TYPE_UINT16;
+ break;
case ML_TENSOR_TYPE_INT64:
- return INFERENCE_TENSOR_DATA_TYPE_INT64;
+ converted_type = INFERENCE_TENSOR_DATA_TYPE_INT64;
+ break;
case ML_TENSOR_TYPE_UINT64:
- return INFERENCE_TENSOR_DATA_TYPE_UINT64;
+ converted_type = INFERENCE_TENSOR_DATA_TYPE_UINT64;
+ break;
default:
- LOGE("Tensor type(%d) is invalid.", tensor_type);
- return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ throw std::invalid_argument("invalid tensor type.");
+ }
+
+ LOGI("LEAVE");
+
+ return converted_type;
+ }
+
+ int InferenceMLAPI::ConvertTensorTypeToMLAPI(int tensor_type)
+ {
+ LOGI("ENTER");
+
+ int converted_type = 0;
+
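+ // Reverse mapping of ConvertTensorTypeToInternal(): internal data
+ // types to ML API tensor types.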
+ switch (tensor_type) {
+ case INFERENCE_TENSOR_DATA_TYPE_FLOAT32:
+ converted_type = ML_TENSOR_TYPE_FLOAT32;
+ break;
+ case INFERENCE_TENSOR_DATA_TYPE_UINT8:
+ converted_type = ML_TENSOR_TYPE_UINT8;
+ break;
+ case INFERENCE_TENSOR_DATA_TYPE_UINT16:
+ converted_type = ML_TENSOR_TYPE_UINT16;
+ break;
+ case INFERENCE_TENSOR_DATA_TYPE_INT64:
+ converted_type = ML_TENSOR_TYPE_INT64;
+ break;
+ case INFERENCE_TENSOR_DATA_TYPE_UINT64:
+ converted_type = ML_TENSOR_TYPE_UINT64;
+ break;
+ default:
+ throw std::invalid_argument("invalid tensor type.");
}
LOGI("LEAVE");
- return -1;
+ return converted_type;
}
int InferenceMLAPI::UpdateTensorsInfo()