From: Rafal Walczyna Date: Tue, 26 Jan 2021 12:46:17 +0000 (+0100) Subject: [ML][Common] Add TensorsData.setTensorRawData location/size support X-Git-Tag: submit/tizen/20210217.032056~8^2 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=d9b6a627daec6abd5437ebd56441350230259b3a;p=platform%2Fcore%2Fapi%2Fwebapi-plugins.git [ML][Common] Add TensorsData.setTensorRawData location/size support ACR: TWDAPI-273 Test code: var ti = new tizen.ml.TensorsInfo(); ti.addTensorInfo("tensor", "INT16", [3, 3]) var td = ti.getTensorsData(); console.log(td.getTensorRawData(0).data) // Int16Array(9) [0, 0, 0, 0, 0, 0, 0, 0, 0] // 0 0 0 // 0 0 0 // 0 0 0 td.setTensorRawData(0, [1, 2, 3], [0, 2], [3, 1]) console.log(td.getTensorRawData(0).data) // Int16Array(9) [0, 0, 0, 0, 0, 0, 1, 2, 3] // 0 0 0 // 0 0 0 // 1 2 3 td.setTensorRawData(0, [4, 5, 6], [2, 0], [1, 3]) console.log(td.getTensorRawData(0).data) // Int16Array(9) [0, 0, 4, 0, 0, 5, 1, 2, 6] // 0 0 4 // 0 0 5 // 1 2 6 td.setTensorRawData(0, [9], [1, 1], [1, 1]) console.log(td.getTensorRawData(0).data) // Int16Array(9) [0, 0, 4, 0, 9, 5, 1, 2, 6] // 0 0 4 // 0 9 5 // 1 2 6 td.setTensorRawData(0, [-4,3,-7], [0, 2], [-1, 1]) console.log(td.getTensorRawData(0).data) // Int16Array(9) [0, 0, 4, 0, 9, 5, -4, 3, -7] // 0 0 4 // 0 9 5 // -4 3 -7 [Verification] Build successful. Tested in Chrome Dev console. Change-Id: Ibc4ae4ddef35941678a765acd5a300cefa6671b0 Signed-off-by: Rafal Walczyna --- diff --git a/src/ml/ml_instance.cc b/src/ml/ml_instance.cc index da6a3e2c..d86fe9fc 100644 --- a/src/ml/ml_instance.cc +++ b/src/ml/ml_instance.cc @@ -23,6 +23,9 @@ #include "common/platform_result.h" #include "common/tools.h" +static_assert(ML_TENSOR_RANK_LIMIT == 4, + "This implementation requires different ML_TENSOR_RANK_LIMIT. 
Please fix the code."); + namespace extension { namespace ml { @@ -206,57 +209,6 @@ void MlInstance::MLTensorsInfoCountGetter(const picojson::value& args, picojson: ReportSuccess(val, out); } -PlatformResult GetDimensionsFromJsonArray(picojson::array& dim, - unsigned int dimensions[ML_TENSOR_RANK_LIMIT]) { - ScopeLogger(); - bool foundValidValue = false; - unsigned int validDimensions[ML_TENSOR_RANK_LIMIT]; - for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) { - validDimensions[i] = 1; - } - int dimSize = ML_TENSOR_RANK_LIMIT; - if (dim.size() <= ML_TENSOR_RANK_LIMIT) { - dimSize = dim.size(); - } else { - LoggerD("Provided dimensions array is bigger than supported"); - } - - for (int i = dimSize - 1; i >= 0; i--) { - auto& d = dim[i]; - if (!d.is()) { - LoggerE("dimensions array contains an invalid value: %s", d.serialize().c_str()); - return PlatformResult(ErrorCode::INVALID_VALUES_ERR, - "dimensions array contains an invalid value"); - } - - int v = static_cast(d.get()); - if (v <= 0) { - // dimensions with zeros at the end are valid - // 0 after valid value is not accepted - if (foundValidValue || (v < 0)) { - LoggerE("dimensions array contains non-positive value: %d", v); - return PlatformResult(ErrorCode::INVALID_VALUES_ERR, - "dimensions array contains non-positive value"); - } - continue; - } - - foundValidValue = true; - validDimensions[i] = static_cast(v); - } - - if (!foundValidValue) { - LoggerE("No valid values found in dimensions array"); - return PlatformResult(ErrorCode::INVALID_VALUES_ERR, - "dimensions array contains invalid values"); - } - - for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) { - dimensions[i] = validDimensions[i]; - } - return PlatformResult(ErrorCode::NO_ERROR); -} - void MlInstance::MLTensorsInfoAddTensorInfo(const picojson::value& args, picojson::object& out) { ScopeLogger("args: %s", args.serialize().c_str()); CHECK_ARGS(args, kTensorsInfoId, double, out); @@ -291,7 +243,7 @@ void MlInstance::MLTensorsInfoAddTensorInfo(const 
picojson::value& args, picojso unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {}; auto dim = args.get(kDimensions).get(); - result = GetDimensionsFromJsonArray(dim, dimensions); + result = util::GetDimensionsFromJsonArray(dim, dimensions); if (!result) { LogAndReportError(result, &out); return; @@ -352,7 +304,7 @@ void MlInstance::MLTensorsInfoSetDimensions(const picojson::value& args, picojso unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {}; auto dim = args.get(kDimensions).get(); - PlatformResult result = GetDimensionsFromJsonArray(dim, dimensions); + PlatformResult result = util::GetDimensionsFromJsonArray(dim, dimensions); if (!result) { LogAndReportError(result, &out); return; @@ -632,8 +584,8 @@ void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args, pico return; } // TODO: validate location and size - will be done in future commit - int location[ML_TENSOR_RANK_LIMIT]; - int size[ML_TENSOR_RANK_LIMIT]; + unsigned int location[ML_TENSOR_RANK_LIMIT]; + unsigned int size[ML_TENSOR_RANK_LIMIT]; TensorRawData raw_data; PlatformResult result = tensors_data->GetTensorRawData(index, location, size, &raw_data); if (!result) { @@ -641,7 +593,7 @@ void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args, pico return; } - std::vector out_data{raw_data.data, raw_data.data + raw_data.size}; + std::vector out_data{raw_data.data, raw_data.data + raw_data.size_in_bytes}; out[kBuffer] = picojson::value(picojson::string_type, true); common::encode_binary_in_string(out_data, out[kBuffer].get()); @@ -685,12 +637,16 @@ void MlInstance::MLTensorsDataGetTensorType(const picojson::value& args, picojso } void MlInstance::MLTensorsDataSetTensorRawData(const picojson::value& args, picojson::object& out) { - ScopeLogger("args: %s", args.serialize().c_str()); + ScopeLogger(); CHECK_ARGS(args, kTensorsDataId, double, out); CHECK_ARGS(args, kIndex, double, out); CHECK_ARGS(args, kBuffer, std::string, out); CHECK_ARGS(args, kLocation, picojson::array, 
out); CHECK_ARGS(args, kSize, picojson::array, out); + LoggerD("%s, %s", kTensorsDataId.c_str(), args.get(kTensorsDataId).serialize().c_str()); + LoggerD("%s, %s", kIndex.c_str(), args.get(kIndex).serialize().c_str()); + LoggerD("%s, %s", kLocation.c_str(), args.get(kLocation).serialize().c_str()); + LoggerD("%s, %s", kSize.c_str(), args.get(kSize).serialize().c_str()); int tensors_data_id = static_cast(args.get(kTensorsDataId).get()); int index = static_cast(args.get(kIndex).get()); @@ -702,16 +658,34 @@ void MlInstance::MLTensorsDataSetTensorRawData(const picojson::value& args, pico return; } - int location[ML_TENSOR_RANK_LIMIT] = {0, 0, 0, 0}; - int size[ML_TENSOR_RANK_LIMIT] = {-1, -1, -1, -1}; - // TODO: validate location and size - will be done in future commit + unsigned int location[ML_TENSOR_RANK_LIMIT] = {}; + PlatformResult result = + util::GetLocationFromJsonArray(args.get(kLocation).get(), location); + if (!result) { + LogAndReportError(result, &out); + return; + } + + unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {}; + result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions); + if (!result) { + LogAndReportError(result, &out); + return; + } + unsigned int size[ML_TENSOR_RANK_LIMIT] = {}; + result = util::GetSizeFromJsonArray(args.get(kSize).get(), location, dimensions, + size); + if (!result) { + LogAndReportError(result, &out); + return; + } const std::string& str_buffer = args.get(kBuffer).get(); std::vector buffer; common::decode_binary_from_string(str_buffer, buffer); - TensorRawData rawData{.data = buffer.data(), .size = buffer.size()}; - PlatformResult result = tensors_data->SetTensorRawData(index, location, size, rawData); + TensorRawData rawData{.data = buffer.data(), .size_in_bytes = buffer.size()}; + result = tensors_data->SetTensorRawData(index, location, size, rawData); if (!result) { LogAndReportError(result, &out); return; diff --git a/src/ml/ml_tensors_data_manager.cc b/src/ml/ml_tensors_data_manager.cc 
index 410cd68b..aa944643 100644 --- a/src/ml/ml_tensors_data_manager.cc +++ b/src/ml/ml_tensors_data_manager.cc @@ -62,8 +62,8 @@ ml_tensor_type_e TensorsData::GetTensorType(int index) { return tensor_type_enum; } -PlatformResult TensorsData::GetTensorRawData(int index, int location[ML_TENSOR_RANK_LIMIT], - int size[ML_TENSOR_RANK_LIMIT], +PlatformResult TensorsData::GetTensorRawData(int index, unsigned int location[ML_TENSOR_RANK_LIMIT], + unsigned int size[ML_TENSOR_RANK_LIMIT], TensorRawData* tensor_raw_data) { ScopeLogger("id_: %d, index: %d", id_, index); if (nullptr == tensor_raw_data) { @@ -72,16 +72,15 @@ PlatformResult TensorsData::GetTensorRawData(int index, int location[ML_TENSOR_R } void* data; size_t data_size; - int ret = ml_tensors_data_get_tensor_data(handle_, index, &data, &data_size); - if (ML_ERROR_NONE != ret) { - LoggerE("ml_tensors_data_get_tensor_data failed: %d (%s)", ret, get_error_message(ret)); - return util::ToPlatformResult(ret, "Internal TensorsData error"); + PlatformResult result = NativeGetTensorData(index, &data, &data_size); + if (!result) { + return result; } // TODO: add support for location and size - will be done in future commit // Dimensions of whole tensor unsigned int dim[ML_TENSOR_RANK_LIMIT]; - PlatformResult result = tensors_info_->NativeGetTensorDimensions(index, dim); + result = tensors_info_->NativeGetTensorDimensions(index, dim); if (!result) { return result; } @@ -96,19 +95,127 @@ PlatformResult TensorsData::GetTensorRawData(int index, int location[ML_TENSOR_R } tensor_raw_data->data = static_cast(data); - tensor_raw_data->size = data_size; + tensor_raw_data->size_in_bytes = data_size; return PlatformResult(ErrorCode::NO_ERROR); } -PlatformResult TensorsData::SetTensorRawData(int index, int location[ML_TENSOR_RANK_LIMIT], - int size[ML_TENSOR_RANK_LIMIT], +PlatformResult TensorsData::SetTensorRawData(int index, unsigned int location[ML_TENSOR_RANK_LIMIT], + unsigned int size[ML_TENSOR_RANK_LIMIT], TensorRawData& 
tensor_raw_data) { - ScopeLogger("id_: %d, index: %d", id_, index); + ScopeLogger("id_: %d, index: %d, tensor_raw_data.size_in_bytes: %zu", id_, index, + tensor_raw_data.size_in_bytes); + // Dimensions of whole tensor + unsigned int dim[ML_TENSOR_RANK_LIMIT]; + // Dimensions of updated tensors relative to location coordiantes + unsigned int size_rel[ML_TENSOR_RANK_LIMIT]; + + PlatformResult result = tensors_info_->NativeGetTensorDimensions(index, dim); + if (!result) { + return result; + } + uint8_t bytes_per_element = tensors_info_->GetBytesPerElement(index); + + // Check if update is partial due to location change + bool partial = false; + for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) { + if (location[i] >= dim[i]) { + // Input data starts outside of current data + LoggerE("Input data location is invalid on [%d]: %u", i, location[i]); + return PlatformResult{ErrorCode::INVALID_VALUES_ERR, "Input data location is invalid"}; + } else if (location[i] != 0) { + partial = true; + } + } + + // Check if data will fit in TensorData and calculate dimensions + // of modified part, also check if update is partial due to size change + size_t data_to_be_updated_size = bytes_per_element; + for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) { + size_rel[i] = location[i] + size[i]; + if (size_rel[i] > dim[i]) { + LoggerE("Input data will not fit in TensorData [%d]: %u > %u", i, size_rel[i], dim[i]); + return PlatformResult{ErrorCode::INVALID_VALUES_ERR, "Input data will not fit in TensorData"}; + } else { + data_to_be_updated_size *= size[i]; + if (size_rel[i] < dim[i]) { + partial = true; + } + } + } + // Check if provided TensorRawData is big enough + if (data_to_be_updated_size > tensor_raw_data.size_in_bytes) { + LoggerE("Input data is too small, expected: %zu, got: %zu", data_to_be_updated_size, + tensor_raw_data.size_in_bytes); + return PlatformResult{ErrorCode::INVALID_VALUES_ERR, "Input data is too small"}; + } + // Check if provided TensorRawData is not too big + if 
(data_to_be_updated_size < tensor_raw_data.size_in_bytes) { + LoggerE("Input data is too big, expected: %zu, got: %zu", data_to_be_updated_size, + tensor_raw_data.size_in_bytes); + return PlatformResult{ErrorCode::INVALID_VALUES_ERR, "Input data is too big"}; + } + + int ret = 0; + if (partial) { + LoggerD("Partial update of tensor data"); + // Get current data + void* void_data = nullptr; + size_t data_size; + result = NativeGetTensorData(index, &void_data, &data_size); + if (!result) { + return result; + } + uint8_t* data = static_cast(void_data); + // Allocate space for new data + auto new_data = std::make_unique(data_size); + size_t position = 0; + size_t position_in_new_data = 0; + // Modified data is in range from location to size_rel + // boolean values helps to optimize checks for updates + // if value's position in respective axis [a] is outside of range = size_rel[3])) { + update_3 = false; + } + for (unsigned int j = 0; j < dim[2]; j++) { + bool update_2 = update_3; + if (update_2 && ((j < location[2]) || (j >= size_rel[2]))) { + update_2 = false; + } + for (unsigned int k = 0; k < dim[1]; k++) { + bool update_1 = update_2; + if (update_1 && ((k < location[1]) || (k >= size_rel[1]))) { + update_1 = false; + } + for (unsigned int l = 0; l < dim[0]; l++) { + bool update_0 = update_1; + if (update_0 && ((l < location[0]) || (l >= size_rel[0]))) { + update_0 = false; + } + if (update_0) { + mempcpy(&new_data[position], &tensor_raw_data.data[position_in_new_data], + bytes_per_element); + position_in_new_data += bytes_per_element; + } else { + mempcpy(&new_data[position], &data[position], bytes_per_element); + } + position += bytes_per_element; + } + } + } + } + LoggerD("Updated %zu bytes out of %zu bytes", position_in_new_data, position); + ret = ml_tensors_data_set_tensor_data(handle_, index, new_data.get(), data_size); + // new_data is released by unique_ptr + } else { + // All data is changed + ret = ml_tensors_data_set_tensor_data(handle_, index, 
tensor_raw_data.data, + tensor_raw_data.size_in_bytes); + } - // TODO: add support for location and size - will be done in future commit - int ret = - ml_tensors_data_set_tensor_data(handle_, index, tensor_raw_data.data, tensor_raw_data.size); if (ML_ERROR_NONE != ret) { LoggerE("ml_tensors_data_set_tensor_data failed: %d (%s)", ret, get_error_message(ret)); return util::ToPlatformResult(ret, "Internal TensorsData error"); @@ -117,6 +224,10 @@ PlatformResult TensorsData::SetTensorRawData(int index, int location[ML_TENSOR_R return PlatformResult(ErrorCode::NO_ERROR); } +TensorsInfo* TensorsData::GetTensorsInfo() { + return tensors_info_; +} + PlatformResult TensorsData::NativeDestroy() { ScopeLogger("id_: %d", id_); int ret = ml_tensors_data_destroy(handle_); @@ -127,6 +238,15 @@ PlatformResult TensorsData::NativeDestroy() { return PlatformResult(ErrorCode::NO_ERROR); } +PlatformResult TensorsData::NativeGetTensorData(int index, void** raw_data, size_t* size) { + int ret = ml_tensors_data_get_tensor_data(handle_, index, raw_data, size); + if (ML_ERROR_NONE != ret) { + LoggerE("ml_tensors_data_get_tensor_data failed: %d (%s)", ret, get_error_message(ret)); + return util::ToPlatformResult(ret, "Internal TensorsData error"); + } + return PlatformResult(ErrorCode::NO_ERROR); +} + TensorsDataManager::TensorsDataManager() : nextId_(0) { ScopeLogger(); } diff --git a/src/ml/ml_tensors_data_manager.h b/src/ml/ml_tensors_data_manager.h index 41db5e78..f07a7396 100644 --- a/src/ml/ml_tensors_data_manager.h +++ b/src/ml/ml_tensors_data_manager.h @@ -34,7 +34,7 @@ class TensorsInfo; struct TensorRawData { // TensorRawData does not take ownership of data, remember to handle it outside uint8_t* data; - size_t size; + size_t size_in_bytes; std::string type_str; unsigned int shape[ML_TENSOR_RANK_LIMIT]; }; @@ -49,10 +49,13 @@ class TensorsData { int TensorsInfoId(); int Count(); ml_tensor_type_e GetTensorType(int index); - PlatformResult GetTensorRawData(int index, int 
location[ML_TENSOR_RANK_LIMIT], - int size[ML_TENSOR_RANK_LIMIT], TensorRawData* tensor_raw_data); - PlatformResult SetTensorRawData(int index, int location[ML_TENSOR_RANK_LIMIT], - int size[ML_TENSOR_RANK_LIMIT], TensorRawData& tensor_raw_data); + PlatformResult GetTensorRawData(int index, unsigned int location[ML_TENSOR_RANK_LIMIT], + unsigned int size[ML_TENSOR_RANK_LIMIT], + TensorRawData* tensor_raw_data); + PlatformResult SetTensorRawData(int index, unsigned int location[ML_TENSOR_RANK_LIMIT], + unsigned int size[ML_TENSOR_RANK_LIMIT], + TensorRawData& tensor_raw_data); + TensorsInfo* GetTensorsInfo(); PlatformResult NativeDestroy(); @@ -60,6 +63,8 @@ class TensorsData { TensorsData(TensorsData const&) = delete; TensorsData& operator=(TensorsData const&) = delete; + PlatformResult NativeGetTensorData(int index, void** raw_data, size_t* size); + ml_tensors_data_h handle_; int id_; TensorsInfo* tensors_info_; diff --git a/src/ml/ml_tensors_info_manager.cc b/src/ml/ml_tensors_info_manager.cc index 9a052df5..d69c5b5f 100644 --- a/src/ml/ml_tensors_info_manager.cc +++ b/src/ml/ml_tensors_info_manager.cc @@ -280,6 +280,35 @@ PlatformResult TensorsInfo::NativeGetTensorSize(int index, size_t* size) { return PlatformResult(ErrorCode::NO_ERROR); } +uint8_t TensorsInfo::GetBytesPerElement(int index) { + ScopeLogger("id_: %d, index: %d", id_, index); + ml_tensor_type_e type = ML_TENSOR_TYPE_UNKNOWN; + int ret = ml_tensors_info_get_tensor_type(handle_, index, &type); + if (ML_ERROR_NONE == ret) { + switch (type) { + case ML_TENSOR_TYPE_INT8: + case ML_TENSOR_TYPE_UINT8: + case ML_TENSOR_TYPE_UNKNOWN: + return 1; + case ML_TENSOR_TYPE_INT16: + case ML_TENSOR_TYPE_UINT16: + return 2; + case ML_TENSOR_TYPE_INT32: + case ML_TENSOR_TYPE_UINT32: + case ML_TENSOR_TYPE_FLOAT32: + return 4; + case ML_TENSOR_TYPE_INT64: + case ML_TENSOR_TYPE_UINT64: + case ML_TENSOR_TYPE_FLOAT64: + return 8; + default: + return 1; + } + } + LoggerE("ml_tensors_info_get_tensor_type failed: [%d] 
(%s)", ret, get_error_message(ret)); + return 1; +} + TensorsInfoManager::TensorsInfoManager(TensorsDataManager* tensors_data_manager) : nextId_(0), tensors_data_manager_(tensors_data_manager) { ScopeLogger(); diff --git a/src/ml/ml_tensors_info_manager.h b/src/ml/ml_tensors_info_manager.h index 56f1c1dd..3bb3213a 100644 --- a/src/ml/ml_tensors_info_manager.h +++ b/src/ml/ml_tensors_info_manager.h @@ -59,6 +59,8 @@ class TensorsInfo { PlatformResult NativeSetTensorType(int index, const ml_tensor_type_e type); PlatformResult NativeGetTensorSize(int index, size_t* size); + uint8_t GetBytesPerElement(int index); + private: ml_tensors_info_h handle_; int id_; diff --git a/src/ml/ml_utils.cc b/src/ml/ml_utils.cc index e021050f..3c42b184 100644 --- a/src/ml/ml_utils.cc +++ b/src/ml/ml_utils.cc @@ -115,6 +115,115 @@ bool CheckNNFWAvailability(const std::string& nnfw, const std::string& hw) { return available; } +PlatformResult GetDimensionsFromJsonArray(const picojson::array& dim, + unsigned int dimensions[ML_TENSOR_RANK_LIMIT]) { + ScopeLogger(); + bool foundValidValue = false; + unsigned int validDimensions[ML_TENSOR_RANK_LIMIT]; + for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) { + validDimensions[i] = 1; + } + int dimSize = ML_TENSOR_RANK_LIMIT; + if (dim.size() <= ML_TENSOR_RANK_LIMIT) { + dimSize = dim.size(); + } else { + LoggerD("Provided dimensions array is bigger than supported"); + } + + for (int i = dimSize - 1; i >= 0; i--) { + auto& d = dim[i]; + if (!d.is()) { + LoggerE("dimensions array contains an invalid value: %s", d.serialize().c_str()); + return PlatformResult(ErrorCode::INVALID_VALUES_ERR, + "dimensions array contains an invalid value"); + } + + int v = static_cast(d.get()); + if (v <= 0) { + // dimensions with zeros at the end are valid + // 0 after valid value is not accepted + if (foundValidValue || (v < 0)) { + LoggerE("dimensions array contains non-positive value: %d", v); + return PlatformResult(ErrorCode::INVALID_VALUES_ERR, + "dimensions array 
contains non-positive value"); + } + continue; + } + + foundValidValue = true; + validDimensions[i] = static_cast(v); + } + + if (!foundValidValue) { + LoggerE("No valid values found in dimensions array"); + return PlatformResult(ErrorCode::INVALID_VALUES_ERR, + "dimensions array contains invalid values"); + } + + for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) { + dimensions[i] = validDimensions[i]; + } + return PlatformResult(ErrorCode::NO_ERROR); +} + +PlatformResult GetLocationFromJsonArray(const picojson::array& array, + unsigned int location[ML_TENSOR_RANK_LIMIT]) { + if (array.size() > ML_TENSOR_RANK_LIMIT) { + LoggerD("Provided size array is bigger than supported"); + } + int i = 0; + for (const auto& a : array) { + double num = -1; + if (a.is()) { + num = a.get(); + } + if (num < 0) { + LoggerE("location array contains negative value: %s", a.serialize().c_str()); + return PlatformResult(ErrorCode::INVALID_VALUES_ERR, + "location array contains negative value"); + } + location[i] = static_cast(num); + i++; + if (i == ML_TENSOR_RANK_LIMIT) { + break; + } + } + return PlatformResult(ErrorCode::NO_ERROR); +} + +PlatformResult GetSizeFromJsonArray(const picojson::array& array, + unsigned int location[ML_TENSOR_RANK_LIMIT], + unsigned int dimensions[ML_TENSOR_RANK_LIMIT], + unsigned int size[ML_TENSOR_RANK_LIMIT]) { + if (array.size() > ML_TENSOR_RANK_LIMIT) { + LoggerD("Provided size array is bigger than supported"); + } + int i = 0; + for (const auto& a : array) { + double num = 0; + if (a.is()) { + num = a.get(); + } + if (num == 0) { + LoggerE("size array contains zero value: %s", a.serialize().c_str()); + return PlatformResult(ErrorCode::INVALID_VALUES_ERR, "size array contains zero value"); + } else if (num > 0) { + size[i] = static_cast(num); + } else { + // in case of negative value, size becomes size from location to end of axis + size[i] = dimensions[i] - location[i]; + } + i++; + if (i == ML_TENSOR_RANK_LIMIT) { + break; + } + } + for (; i < 
ML_TENSOR_RANK_LIMIT; i++) { + size[i] = dimensions[i] - location[i]; + } + return PlatformResult(ErrorCode::NO_ERROR); +} + } // util } // ml } // extension diff --git a/src/ml/ml_utils.h b/src/ml/ml_utils.h index 481483e0..70ab77c0 100644 --- a/src/ml/ml_utils.h +++ b/src/ml/ml_utils.h @@ -44,6 +44,15 @@ PlatformResult ToPlatformResult(int ml_error_code, const std::string& error_mess bool CheckNNFWAvailability(const std::string& nnfw, const std::string& hw); +PlatformResult GetDimensionsFromJsonArray(const picojson::array& dim, + unsigned int dimensions[ML_TENSOR_RANK_LIMIT]); +PlatformResult GetLocationFromJsonArray(const picojson::array& array, + unsigned int location[ML_TENSOR_RANK_LIMIT]); +PlatformResult GetSizeFromJsonArray(const picojson::array& array, + unsigned int location[ML_TENSOR_RANK_LIMIT], + unsigned int dimensions[ML_TENSOR_RANK_LIMIT], + unsigned int size[ML_TENSOR_RANK_LIMIT]); + } // util } // ml } // extension