From: Piotr Kosko/Tizen API (PLT) /SRPOL/Engineer/Samsung Electronics
Date: Mon, 2 Aug 2021 08:15:03 +0000 (+0200)
Subject: [ml][single] Implemented communication in ML API
X-Git-Tag: submit/tizen/20210806.090850~1
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=6cb7804b36e6717fb2e042ad61b437291221203a;p=platform%2Fcore%2Fapi%2Fwebapi-plugins.git

[ml][single] Implemented communication in ML API

[Verification] Performance increase verified in Chromium console.
TCT pass rate: 100%

Change-Id: I7079ec1a690696dc4acc5d827920603e3a12e9ab
---

diff --git a/src/ml/js/ml_common.js b/src/ml/js/ml_common.js
index b90ca069..ffe4e7b9 100755
--- a/src/ml/js/ml_common.js
+++ b/src/ml/js/ml_common.js
@@ -234,6 +234,15 @@ function ValidateBufferForTensorsData(tensorsData, index, buffer) {
     return ret;
 }
 
+function stringToUint8Array(str) {
+    // utf-8
+    var buf = new Uint8Array(str.length);
+    for (var i = 0, strLen = str.length; i < strLen; i++) {
+        buf[i] = str.charCodeAt(i);
+    }
+    return buf;
+}
+
 TensorsData.prototype.setTensorRawData = function() {
     _CheckIfTensorsDataNotDisposed(this._id);
     var argsIndex = validator_.validateArgs(arguments, [
@@ -273,16 +282,42 @@ TensorsData.prototype.setTensorRawData = function() {
     _CheckIfArrayHasOnlyNumbersAndThrow(argsLocSize.location, 'location');
     _CheckIfArrayHasOnlyNumbersAndThrow(argsLocSize.size, 'size');
 
-    var encodedData = privUtils_.ArrayToString(new Uint8Array(buffer.buffer));
+    var methodIndex = 0;
     var callArgs = {
         index: argsIndex.index,
         tensorsDataId: this._id,
-        buffer: encodedData,
         location: argsLocSize.location ? argsLocSize.location : [],
         size: argsLocSize.size ? argsLocSize.size : []
     };
-    var result = native_.callSync('MLTensorsDataSetTensorRawData', callArgs);
-
+    var callArgsCoded = stringToUint8Array(JSON.stringify(callArgs));
+
+    var buffer = new Uint8Array(buffer.buffer);
+
+    // FORMAT:
+    // 1 byte === methodIndex
+    // 4 bytes === JSON length (N)
+    // 4 bytes === buffer length (M)
+    // N bytes === JSON data
+    // M bytes === buffer data
+    var headerData = [
+        methodIndex,
+        callArgsCoded.length >> 24,
+        callArgsCoded.length >> 16,
+        callArgsCoded.length >> 8,
+        callArgsCoded.length,
+        buffer.length >> 24,
+        buffer.length >> 16,
+        buffer.length >> 8,
+        buffer.length
+    ];
+    var mergedData = new Uint8Array(
+        headerData.length + callArgsCoded.length + buffer.length
+    );
+    mergedData.set(headerData);
+    mergedData.set(callArgsCoded, headerData.length);
+    mergedData.set(buffer, headerData.length + callArgsCoded.length);
+
+    var result = native_.callSyncBinaryWithJSONAnswer(mergedData);
     if (native_.isFailure(result)) {
         throw native_.getErrorObjectAndValidate(
             result,
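For reference, the frame assembled in setTensorRawData above is laid out as 1 byte of method index, a 4-byte big-endian JSON length N, a 4-byte big-endian buffer length M, then the N JSON bytes and the M raw-buffer bytes; the shifted length values work because a Uint8Array keeps only the low 8 bits of each stored element. The snippet below is an illustrative C++ sketch of the same framing, not part of the patch, and the helper name PackBinaryMessage is hypothetical.

// Illustrative only: builds a frame in the same layout the JS side produces
// (1-byte method index, 4-byte big-endian JSON length, 4-byte big-endian
// buffer length, JSON bytes, buffer bytes). The function name is hypothetical.
#include <cstdint>
#include <string>
#include <vector>

std::vector<uint8_t> PackBinaryMessage(uint8_t method_index, const std::string& json,
                                       const std::vector<uint8_t>& buffer) {
  std::vector<uint8_t> frame;
  frame.reserve(1 + 4 + 4 + json.size() + buffer.size());
  frame.push_back(method_index);

  // Append a 32-bit value as four big-endian bytes, mirroring the JS header.
  auto push_u32_be = [&frame](uint32_t value) {
    frame.push_back(static_cast<uint8_t>(value >> 24));
    frame.push_back(static_cast<uint8_t>(value >> 16));
    frame.push_back(static_cast<uint8_t>(value >> 8));
    frame.push_back(static_cast<uint8_t>(value));
  };
  push_u32_be(static_cast<uint32_t>(json.size()));    // N
  push_u32_be(static_cast<uint32_t>(buffer.size()));  // M

  frame.insert(frame.end(), json.begin(), json.end());      // N bytes of JSON
  frame.insert(frame.end(), buffer.begin(), buffer.end());  // M bytes of raw data
  return frame;
}

Packing the JSON call arguments and the raw tensor bytes into one contiguous frame keeps the transfer in a single synchronous message, which is what removes the privUtils_.ArrayToString encoding step the old callSync path needed.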
diff --git a/src/ml/ml_instance.cc b/src/ml/ml_instance.cc
index 27f74b09..63ac32d5 100644
--- a/src/ml/ml_instance.cc
+++ b/src/ml/ml_instance.cc
@@ -108,6 +108,7 @@ MlInstance::MlInstance()
   using namespace std::placeholders;
 
 #define REGISTER_METHOD(M) RegisterSyncHandler(#M, std::bind(&MlInstance::M, this, _1, _2))
+#define REGISTER_BINARY_METHOD(M) RegisterBinaryHandler(std::bind(&MlInstance::M, this, _1, _2, _3))
 
   REGISTER_METHOD(MLCheckNNFWAvailability);
   REGISTER_METHOD(MLTensorsInfoCountGetter);
@@ -131,6 +132,7 @@ MlInstance::MlInstance()
   REGISTER_METHOD(MLTensorsDataGetTensorRawData);
   REGISTER_METHOD(MLTensorsDataGetTensorType);
   REGISTER_METHOD(MLTensorsDataSetTensorRawData);
+  REGISTER_BINARY_METHOD(MLTensorsDataSetTensorRawDataBinary);
 
   REGISTER_METHOD(MLSingleManagerOpenModel);
   REGISTER_METHOD(MLSingleShotGetTensorsInfo);
@@ -740,6 +742,90 @@ void MlInstance::MLTensorsDataSetTensorRawData(const picojson::value& args, pico
   ReportSuccess(out);
 }
 
+void MlInstance::MLTensorsDataSetTensorRawDataBinary(const char* data, size_t data_size,
+                                                     picojson::object& out) {
+  ScopeLogger();
+  /*
+    METHOD_ID WAS ALREADY REMOVED during message handling;
+    the remaining data is packed with the following format:
+    // FORMAT:
+    // 1 byte === methodIndex /// already parsed
+    // 4 bytes === JSON length (N)
+    // 4 bytes === buffer length (M)
+    // N bytes === JSON data
+    // M bytes === buffer data
+  */
+  unsigned int call_args_len_begin = 0;
+  unsigned int call_args_len = static_cast<unsigned int>(
+      (data[call_args_len_begin] << 24) + (data[call_args_len_begin + 1] << 16) +
+      (data[call_args_len_begin + 2] << 8) + (data[call_args_len_begin + 3]));
+
+  unsigned int buffer_len_begin = call_args_len_begin + 4;
+  unsigned int buffer_len = static_cast<unsigned int>(
+      (data[buffer_len_begin] << 24) + (data[buffer_len_begin + 1] << 16) +
+      (data[buffer_len_begin + 2] << 8) + (data[buffer_len_begin + 3]));
+
+  unsigned int call_args_begin = buffer_len_begin + 4;
+  std::string call_args(data + call_args_begin, call_args_len);
+
+  picojson::value args;
+  picojson::parse(args, call_args);
+
+  unsigned int buffer_begin = call_args_begin + call_args_len;
+
+  CHECK_ARGS(args, kTensorsDataId, double, out);
+  CHECK_ARGS(args, kIndex, double, out);
+  CHECK_ARGS(args, kLocation, picojson::array, out);
+  CHECK_ARGS(args, kSize, picojson::array, out);
+  LoggerD("%s, %s", kTensorsDataId.c_str(), args.get(kTensorsDataId).serialize().c_str());
+  LoggerD("%s, %s", kIndex.c_str(), args.get(kIndex).serialize().c_str());
+  LoggerD("%s, %s", kLocation.c_str(), args.get(kLocation).serialize().c_str());
+  LoggerD("%s, %s", kSize.c_str(), args.get(kSize).serialize().c_str());
+
+  int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
+  int index = static_cast<int>(args.get(kIndex).get<double>());
+
+  TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+  if (nullptr == tensors_data) {
+    LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
+                      ("Could not find TensorsData handle with given id: %d", tensors_data_id));
+    return;
+  }
+
+  unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
+  PlatformResult result =
+      util::GetLocationFromJsonArray(args.get(kLocation).get<picojson::array>(), location);
+  if (!result) {
+    LogAndReportError(result, &out);
+    return;
+  }
+
+  unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
+  result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
+  if (!result) {
+    LogAndReportError(result, &out);
+    return;
+  }
+
+  unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
+  result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(), location, dimensions,
+                                      size);
+  if (!result) {
+    LogAndReportError(result, &out);
+    return;
+  }
+
+  TensorRawData raw_data{reinterpret_cast<uint8_t*>(const_cast<char*>(data + buffer_begin)),
+                         buffer_len};
+  result = tensors_data->SetTensorRawData(index, location, size, raw_data);
+  if (!result) {
+    LogAndReportError(result, &out);
+    return;
+  }
+
+  ReportSuccess(out);
+}
+
 void MlInstance::MLSingleManagerOpenModel(const picojson::value& args, picojson::object& out) {
   ScopeLogger("args: %s", args.serialize().c_str());
   CHECK_ARGS(args, kModelPath, std::string, out);
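On the native side, MLTensorsDataSetTensorRawDataBinary reads the two length fields from the first eight bytes it receives (the method-index byte is consumed earlier, during message handling). Purely as a hedged sketch, not part of the patch and with the helper name ParseBinaryHeader hypothetical, the code below shows an equivalent header parse that first widens each byte through unsigned char, which sidesteps sign-extension when char is signed and a length byte is 0x80 or above, and that validates the declared lengths against the size actually received.

// Illustrative only: reads the two big-endian length fields that follow the
// (already consumed) method-index byte and checks them against the payload
// size. ParseBinaryHeader and BinaryHeader are hypothetical names.
#include <cstddef>
#include <cstdint>

struct BinaryHeader {
  uint32_t json_length = 0;    // N
  uint32_t buffer_length = 0;  // M
};

bool ParseBinaryHeader(const char* data, size_t data_size, BinaryHeader* header) {
  if (nullptr == data || nullptr == header || data_size < 8) {
    return false;
  }
  // Casting through unsigned char avoids sign-extension on platforms with signed char.
  auto read_u32_be = [data](size_t offset) {
    return (static_cast<uint32_t>(static_cast<unsigned char>(data[offset])) << 24) |
           (static_cast<uint32_t>(static_cast<unsigned char>(data[offset + 1])) << 16) |
           (static_cast<uint32_t>(static_cast<unsigned char>(data[offset + 2])) << 8) |
           static_cast<uint32_t>(static_cast<unsigned char>(data[offset + 3]));
  };
  header->json_length = read_u32_be(0);
  header->buffer_length = read_u32_be(4);
  // The declared JSON and buffer regions must fit inside what was actually received.
  return 8 + static_cast<uint64_t>(header->json_length) + header->buffer_length <= data_size;
}

A caller could reject the frame before touching the JSON or buffer regions whenever ParseBinaryHeader returns false.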
diff --git a/src/ml/ml_instance.h b/src/ml/ml_instance.h
index 11af609f..d4984e20 100644
--- a/src/ml/ml_instance.h
+++ b/src/ml/ml_instance.h
@@ -59,6 +59,7 @@ class MlInstance : public common::ParsedInstance {
   void MLTensorsDataGetTensorRawData(const picojson::value& args, picojson::object& out);
   void MLTensorsDataGetTensorType(const picojson::value& args, picojson::object& out);
   void MLTensorsDataSetTensorRawData(const picojson::value& args, picojson::object& out);
+  void MLTensorsDataSetTensorRawDataBinary(const char* data, size_t size, picojson::object& out);
 
   /*
    * ########## IMPORTANT ##########
diff --git a/src/ml/ml_utils.h b/src/ml/ml_utils.h
index 70ab77c0..34dbb635 100644
--- a/src/ml/ml_utils.h
+++ b/src/ml/ml_utils.h
@@ -48,6 +48,7 @@ PlatformResult GetDimensionsFromJsonArray(const picojson::array& dim,
                                           unsigned int dimensions[ML_TENSOR_RANK_LIMIT]);
 PlatformResult GetLocationFromJsonArray(const picojson::array& array,
                                         unsigned int location[ML_TENSOR_RANK_LIMIT]);
+
 PlatformResult GetSizeFromJsonArray(const picojson::array& array,
                                     unsigned int location[ML_TENSOR_RANK_LIMIT],
                                     unsigned int dimensions[ML_TENSOR_RANK_LIMIT],
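The REGISTER_BINARY_METHOD macro registers the new handler through RegisterBinaryHandler, whose implementation in common::ParsedInstance is not shown in this patch. As a rough illustration only, with every name below hypothetical and simplified, a dispatcher of this kind could strip the leading method-index byte and hand the rest of the frame to the registered handler like this:

// Illustrative only: a simplified stand-in for the binary dispatch that the
// REGISTER_BINARY_METHOD macro plugs into. It strips the 1-byte method index
// and passes the remaining frame (lengths + JSON + buffer) to the handler.
// This is not the actual common::ParsedInstance API.
#include <cstddef>
#include <cstdint>
#include <functional>
#include <map>
#include <string>
#include <utility>

// Stand-in for the picojson::object the real handlers fill with the answer.
using JsonAnswer = std::string;

using BinaryHandler = std::function<void(const char* data, size_t size, JsonAnswer& out)>;

class BinaryDispatcher {
 public:
  void RegisterBinaryHandler(uint8_t method_index, BinaryHandler handler) {
    handlers_[method_index] = std::move(handler);
  }

  // Returns false when the frame is empty or no handler matches its method index.
  bool Dispatch(const char* frame, size_t frame_size, JsonAnswer& out) {
    if (nullptr == frame || frame_size < 1) {
      return false;
    }
    auto it = handlers_.find(static_cast<uint8_t>(frame[0]));
    if (it == handlers_.end()) {
      return false;
    }
    // The handler never sees the method-index byte, matching the
    // "METHOD_ID WAS ALREADY REMOVED" comment in the patch.
    it->second(frame + 1, frame_size - 1, out);
    return true;
  }

 private:
  std::map<uint8_t, BinaryHandler> handlers_;
};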