From: Rafal Walczyna Date: Mon, 25 Jan 2021 14:31:38 +0000 (+0100) Subject: [ML][Common] Add TensorsData.setTensorRawData method X-Git-Tag: submit/tizen/20210202.064821~2 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=refs%2Fchanges%2F90%2F252290%2F5;p=platform%2Fcore%2Fapi%2Fwebapi-plugins.git [ML][Common] Add TensorsData.setTensorRawData method ACR: TWDAPI-273 Test code: var ti = new tizen.ml.TensorsInfo(); ti.addTensorInfo("tensor1", "UINT8", [1, 1]) var td = ti.getTensorsData(); console.log(td.getTensorRawData(0)); td.setTensorRawData(0, [13]); console.log(td.getTensorRawData(0)); [Verification] Built successful. Tested in Chrome Dev console. Change-Id: I522112df9a69f1a5ec7bbcbc12094296a11ec1c9 Signed-off-by: Rafal Walczyna --- diff --git a/src/ml/js/ml_common.js b/src/ml/js/ml_common.js index eeb8838..0208213 100755 --- a/src/ml/js/ml_common.js +++ b/src/ml/js/ml_common.js @@ -175,9 +175,97 @@ TensorsData.prototype.getTensorRawData = function() { return new ArrayType(data.buffer); }; -TensorsData.prototype.setTensorData = function() { - _CheckIfTensorsDataNotDisposed(); - throw new WebAPIException(WebAPIException.ABORT_ERR, 'Not implemented'); +var TensorsDataSetTensorRawDataExceptions = [ + 'InvalidValuesError', + 'TypeMismatchError', + 'NotSupportedError', + 'AbortError' +]; + +function ValidateBufferForTensorsData(tensorsData, index, buffer) { + var result = native_.callSync('MLTensorsDataGetTensorType', { + tensorsDataId: tensorsData._id, + index: index + }); + + if (native_.isFailure(result)) { + throw AbortError; + } + var tensorType = native_.getResultObject(result); + var ret = buffer; + + var ArrayType = _GetBufferTypeFromTensorType(tensorType); + if (Array.isArray(buffer)) { + // in case of standard Array - create TypedArray from it + ret = new ArrayType(buffer); + } else if (false == buffer instanceof ArrayType) { + throw new WebAPIException( + WebAPIException.TYPE_MISMATCH_ERR, + 'buffer array has incompatible type, expected: ' + + 
ArrayType.name +
+                ', got: ' +
+                buffer.constructor.name
+        );
+    }
+    return ret;
+}
+
+TensorsData.prototype.setTensorRawData = function() {
+    _CheckIfTensorsDataNotDisposed(this._id);
+    var argsIndex = validator_.validateArgs(arguments, [
+        {
+            name: 'index',
+            type: types_.LONG
+        }
+    ]);
+    var argsLocSize = validator_.validateArgs(Array.prototype.slice.call(arguments, 2), [
+        {
+            name: 'location',
+            type: types_.ARRAY,
+            optional: true
+        },
+        {
+            name: 'size',
+            type: types_.ARRAY,
+            optional: true
+        }
+    ]);
+
+    if (!argsIndex.has.index) {
+        throw new WebAPIException(
+            WebAPIException.INVALID_VALUES_ERR,
+            'Invalid parameter: index is undefined'
+        );
+    }
+
+    if (arguments.length < 2) {
+        throw new WebAPIException(
+            WebAPIException.INVALID_VALUES_ERR,
+            'Invalid parameter: buffer is undefined'
+        );
+    }
+    var buffer = ValidateBufferForTensorsData(this, argsIndex.index, arguments[1]);
+
+    // TODO: validate location and size - will be done in future commit
+
+    // TODO: modify ArrayToString to accept also float types, not only int
+    var encodedData = privUtils_.ArrayToString(new Uint8Array(buffer.buffer));
+    var callArgs = {
+        index: argsIndex.index,
+        tensorsDataId: this._id,
+        buffer: encodedData,
+        location: argsLocSize.location ? argsLocSize.location : [],
+        size: argsLocSize.size ?
argsLocSize.size : []
+    };
+    var result = native_.callSync('MLTensorsDataSetTensorRawData', callArgs);
+
+    if (native_.isFailure(result)) {
+        throw native_.getErrorObjectAndValidate(
+            result,
+            TensorsDataSetTensorRawDataExceptions,
+            AbortError
+        );
+    }
 };
 
 TensorsData.prototype.dispose = function() {
diff --git a/src/ml/ml_instance.cc b/src/ml/ml_instance.cc
index c439672..3969f57 100644
--- a/src/ml/ml_instance.cc
+++ b/src/ml/ml_instance.cc
@@ -107,6 +107,8 @@ MlInstance::MlInstance()
   REGISTER_METHOD(MLTensorsDataDispose);
   REGISTER_METHOD(MLTensorsDataGetTensorRawData);
+  REGISTER_METHOD(MLTensorsDataGetTensorType);
+  REGISTER_METHOD(MLTensorsDataSetTensorRawData);
 
   // Single API begin
   REGISTER_METHOD(MLSingleManagerOpenModel);
@@ -611,7 +613,6 @@ void MlInstance::MLTensorsDataDispose(const picojson::value& args, picojson::obj
   }
   ReportSuccess(out);
 }
-
 void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args, picojson::object& out) {
   ScopeLogger("args: %s", args.serialize().c_str());
   CHECK_ARGS(args, kTensorsDataId, double, out);
@@ -646,6 +647,71 @@ void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args, pico
   ReportSuccess(out);
 }
+
+void MlInstance::MLTensorsDataGetTensorType(const picojson::value& args, picojson::object& out) {
+  ScopeLogger("args: %s", args.serialize().c_str());
+  CHECK_ARGS(args, kTensorsDataId, double, out);
+  CHECK_ARGS(args, kIndex, double, out);
+
+  int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
+  int index = static_cast<int>(args.get(kIndex).get<double>());
+
+  TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+  if (nullptr == tensors_data) {
+    LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
+                      ("Could not find TensorsData handle with given id: %d", tensors_data_id));
+    return;
+  }
+
+  std::string tensor_type_string;
+  PlatformResult result =
+      types::TensorTypeEnum.getName(tensors_data->GetTensorType(index),
&tensor_type_string);
+  if (!result) {
+    LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting name of TensorType"),
+                      &out,
+                      ("TensorTypeEnum.getName() failed, error: %s", result.message().c_str()));
+    return;
+  }
+
+  picojson::value val = picojson::value{tensor_type_string};
+  ReportSuccess(val, out);
+}
+
+void MlInstance::MLTensorsDataSetTensorRawData(const picojson::value& args, picojson::object& out) {
+  ScopeLogger("args: %s", args.serialize().c_str());
+  CHECK_ARGS(args, kTensorsDataId, double, out);
+  CHECK_ARGS(args, kIndex, double, out);
+  CHECK_ARGS(args, kBuffer, std::string, out);
+  CHECK_ARGS(args, kLocation, picojson::array, out);
+  CHECK_ARGS(args, kSize, picojson::array, out);
+
+  int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
+  int index = static_cast<int>(args.get(kIndex).get<double>());
+
+  TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+  if (nullptr == tensors_data) {
+    LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
+                      ("Could not find TensorsData handle with given id: %d", tensors_data_id));
+    return;
+  }
+
+  int location[ML_TENSOR_RANK_LIMIT] = {0, 0, 0, 0};
+  int size[ML_TENSOR_RANK_LIMIT] = {-1, -1, -1, -1};
+  // TODO: validate location and size - will be done in future commit
+
+  const std::string& str_buffer = args.get(kBuffer).get<std::string>();
+  std::vector<std::uint8_t> buffer;
+  common::decode_binary_from_string(str_buffer, buffer);
+
+  TensorRawData rawData{.data = buffer.data(), .size = buffer.size()};
+  PlatformResult result = tensors_data->SetTensorRawData(index, location, size, rawData);
+  if (!result) {
+    LogAndReportError(result, &out);
+    return;
+  }
+
+  ReportSuccess(out);
+}
 // Common ML API end
 
 // Single API begin
diff --git a/src/ml/ml_instance.h b/src/ml/ml_instance.h
index 5828b93..6bbc16e 100644
--- a/src/ml/ml_instance.h
+++ b/src/ml/ml_instance.h
@@ -57,6 +57,8 @@ class MlInstance : public common::ParsedInstance {
   void
MLTensorsDataDispose(const picojson::value& args, picojson::object& out); void MLTensorsDataGetTensorRawData(const picojson::value& args, picojson::object& out); + void MLTensorsDataGetTensorType(const picojson::value& args, picojson::object& out); + void MLTensorsDataSetTensorRawData(const picojson::value& args, picojson::object& out); TensorsInfoManager tensors_info_manager_; TensorsDataManager tensors_data_manager_; diff --git a/src/ml/ml_tensors_data_manager.cc b/src/ml/ml_tensors_data_manager.cc index 39b4281..eff05da 100644 --- a/src/ml/ml_tensors_data_manager.cc +++ b/src/ml/ml_tensors_data_manager.cc @@ -52,6 +52,16 @@ int TensorsData::Count() { return tensors_info_->Count(); } +ml_tensor_type_e TensorsData::GetTensorType(int index) { + ScopeLogger("id_: %d, index: %d", id_, index); + ml_tensor_type_e tensor_type_enum = ML_TENSOR_TYPE_UNKNOWN; + PlatformResult result = tensors_info_->NativeGetTensorType(index, &tensor_type_enum); + if (!result) { + LoggerE("Failed to get tensor type"); + } + return tensor_type_enum; +} + PlatformResult TensorsData::GetTensorRawData(int index, int location[ML_TENSOR_RANK_LIMIT], int size[ML_TENSOR_RANK_LIMIT], TensorRawData* tensor_raw_data) { @@ -69,13 +79,8 @@ PlatformResult TensorsData::GetTensorRawData(int index, int location[ML_TENSOR_R } // TODO: add support for location and size - will be done in future commit - ml_tensor_type_e type_enum = ML_TENSOR_TYPE_UNKNOWN; - PlatformResult result = tensors_info_->NativeGetTensorType(index, &type_enum); - if (!result) { - return result; - } - - result = types::TensorTypeEnum.getName(type_enum, &tensor_raw_data->type_str); + PlatformResult result = + types::TensorTypeEnum.getName(this->GetTensorType(index), &tensor_raw_data->type_str); if (!result) { return result; } @@ -86,6 +91,22 @@ PlatformResult TensorsData::GetTensorRawData(int index, int location[ML_TENSOR_R return PlatformResult(ErrorCode::NO_ERROR); } +PlatformResult TensorsData::SetTensorRawData(int index, int 
location[ML_TENSOR_RANK_LIMIT], + int size[ML_TENSOR_RANK_LIMIT], + TensorRawData& tensor_raw_data) { + ScopeLogger("id_: %d, index: %d", id_, index); + + // TODO: add support for location and size - will be done in future commit + int ret = + ml_tensors_data_set_tensor_data(handle_, index, tensor_raw_data.data, tensor_raw_data.size); + if (ML_ERROR_NONE != ret) { + LoggerE("ml_tensors_data_set_tensor_data failed: %d (%s)", ret, get_error_message(ret)); + return util::ToPlatformResult(ret, "Internal TensorsData error"); + } + + return PlatformResult(ErrorCode::NO_ERROR); +} + PlatformResult TensorsData::NativeDestroy() { ScopeLogger("id_: %d", id_); int ret = ml_tensors_data_destroy(handle_); diff --git a/src/ml/ml_tensors_data_manager.h b/src/ml/ml_tensors_data_manager.h index 5faeddd..c8e4ca9 100644 --- a/src/ml/ml_tensors_data_manager.h +++ b/src/ml/ml_tensors_data_manager.h @@ -47,8 +47,11 @@ class TensorsData { int Id(); int TensorsInfoId(); int Count(); + ml_tensor_type_e GetTensorType(int index); PlatformResult GetTensorRawData(int index, int location[ML_TENSOR_RANK_LIMIT], int size[ML_TENSOR_RANK_LIMIT], TensorRawData* tensor_raw_data); + PlatformResult SetTensorRawData(int index, int location[ML_TENSOR_RANK_LIMIT], + int size[ML_TENSOR_RANK_LIMIT], TensorRawData& tensor_raw_data); PlatformResult NativeDestroy();