return new ArrayType(data.buffer);
};
-TensorsData.prototype.setTensorData = function() {
- _CheckIfTensorsDataNotDisposed();
- throw new WebAPIException(WebAPIException.ABORT_ERR, 'Not implemented');
+// Whitelist of error names that the native MLTensorsDataSetTensorRawData call
+// is allowed to propagate to JS; setTensorRawData maps any other native error
+// to AbortError via native_.getErrorObjectAndValidate().
+var TensorsDataSetTensorRawDataExceptions = [
+ 'InvalidValuesError',
+ 'TypeMismatchError',
+ 'NotSupportedError',
+ 'AbortError'
+];
+
+function ValidateBufferForTensorsData(tensorsData, index, buffer) {
+ var result = native_.callSync('MLTensorsDataGetTensorType', {
+ tensorsDataId: tensorsData._id,
+ index: index
+ });
+
+ if (native_.isFailure(result)) {
+ throw AbortError;
+ }
+ var tensorType = native_.getResultObject(result);
+ var ret = buffer;
+
+ var ArrayType = _GetBufferTypeFromTensorType(tensorType);
+ if (Array.isArray(buffer)) {
+ // in case of standard Array - create TypedArray from it
+ ret = new ArrayType(buffer);
+ } else if (false == buffer instanceof ArrayType) {
+ throw new WebAPIException(
+ WebAPIException.TYPE_MISMATCH_ERR,
+ 'buffer array has incompatible type, expected: ' +
+ ArrayType.name +
+ ', got: ' +
+ x.constructor.name
+ );
+ }
+ return ret;
+}
+
+// Sets the raw data of the tensor at position `index` inside this TensorsData.
+// Expected call shape: setTensorRawData(index, buffer[, location[, size]]).
+// Throws if the object was already disposed, if index/buffer are missing, or
+// if the buffer type does not match the tensor's type.
+TensorsData.prototype.setTensorRawData = function() {
+ _CheckIfTensorsDataNotDisposed(this._id);
+ // Only `index` goes through the validator here; `buffer` (arguments[1]) is
+ // type-checked separately by ValidateBufferForTensorsData below.
+ var argsIndex = validator_.validateArgs(arguments, [
+ {
+ name: 'index',
+ type: types_.LONG
+ }
+ ]);
+ // slice(2) skips index and buffer so that the optional location/size arrays
+ // are validated as the first two entries of the remaining arguments.
+ var argsLocSize = validator_.validateArgs(Array.prototype.slice.call(arguments, 2), [
+ {
+ name: 'location',
+ type: types_.ARRAY,
+ optional: true
+ },
+ {
+ name: 'size',
+ type: types_.ARRAY,
+ optional: true
+ }
+ ]);
+
+ if (!argsIndex.has.index) {
+ throw new WebAPIException(
+ WebAPIException.INVALID_VALUES_ERR,
+ 'Invalid parameter: index is undefined'
+ );
+ }
+
+ // buffer is mandatory but deliberately bypasses validator_ (see above).
+ if (arguments.length < 2) {
+ throw new WebAPIException(
+ WebAPIException.INVALID_VALUES_ERR,
+ 'Invalid parameter: buffer is undefined'
+ );
+ }
+ var buffer = ValidateBufferForTensorsData(this, argsIndex.index, arguments[1]);
+
+ // TODO: validate location and size - will be done in future commit
+
+ // TODO: modify ArrayToString to accept also float types, not only int
+ // View the TypedArray's backing store as bytes and encode it as a string so
+ // it can cross the JS <-> native bridge.
+ var encodedData = privUtils_.ArrayToString(new Uint8Array(buffer.buffer));
+ var callArgs = {
+ index: argsIndex.index,
+ tensorsDataId: this._id,
+ buffer: encodedData,
+ // Omitted location/size are sent as empty arrays; native side applies
+ // its own defaults.
+ location: argsLocSize.location ? argsLocSize.location : [],
+ size: argsLocSize.size ? argsLocSize.size : []
+ };
+ var result = native_.callSync('MLTensorsDataSetTensorRawData', callArgs);
+
+ if (native_.isFailure(result)) {
+ // Only whitelisted error names pass through; anything else -> AbortError.
+ throw native_.getErrorObjectAndValidate(
+ result,
+ TensorsDataSetTensorRawDataExceptions,
+ AbortError
+ );
+ }
+};
TensorsData.prototype.dispose = function() {
REGISTER_METHOD(MLTensorsDataDispose);
REGISTER_METHOD(MLTensorsDataGetTensorRawData);
+ REGISTER_METHOD(MLTensorsDataGetTensorType);
+ REGISTER_METHOD(MLTensorsDataSetTensorRawData);
// Single API begin
REGISTER_METHOD(MLSingleManagerOpenModel);
}
ReportSuccess(out);
}
-
// Stub handler: currently only validates that kTensorsDataId is present and a
// double, then reports success without returning any raw data.
// NOTE(review): presumably the actual data extraction is implemented in a
// later commit — confirm against the full file.
void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args, picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsDataId, double, out);
ReportSuccess(out);
}
+
+// Handler for the JS MLTensorsDataGetTensorType call: resolves the TensorsData
+// object by id and reports the string name of the tensor type at `index`.
+// Reports ABORT_ERR when the id is unknown or the type name lookup fails.
+void MlInstance::MLTensorsDataGetTensorType(const picojson::value& args, picojson::object& out) {
+ ScopeLogger("args: %s", args.serialize().c_str());
+ // CHECK_ARGS presumably reports an error and returns early on a missing or
+ // mistyped argument — confirm against the macro definition.
+ CHECK_ARGS(args, kTensorsDataId, double, out);
+ CHECK_ARGS(args, kIndex, double, out);
+
+ int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
+ int index = static_cast<int>(args.get(kIndex).get<double>());
+
+ TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+ if (nullptr == tensors_data) {
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
+ ("Could not find TensorsData handle with given id: %d", tensors_data_id));
+ return;
+ }
+
+ // GetTensorType() falls back to ML_TENSOR_TYPE_UNKNOWN on native failure,
+ // in which case getName() below is what surfaces the error.
+ std::string tensor_type_string;
+ PlatformResult result =
+ types::TensorTypeEnum.getName(tensors_data->GetTensorType(index), &tensor_type_string);
+ if (!result) {
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting name of TensorType"),
+ &out,
+ ("TensorTypeEnum.getName() failed, error: %s", result.message().c_str()));
+ return;
+ }
+
+ picojson::value val = picojson::value{tensor_type_string};
+ ReportSuccess(val, out);
+}
+
+// Handler for the JS MLTensorsDataSetTensorRawData call: decodes the
+// string-encoded byte buffer and writes it into tensor `index` of the
+// TensorsData object identified by kTensorsDataId.
+void MlInstance::MLTensorsDataSetTensorRawData(const picojson::value& args, picojson::object& out) {
+ ScopeLogger("args: %s", args.serialize().c_str());
+ CHECK_ARGS(args, kTensorsDataId, double, out);
+ CHECK_ARGS(args, kIndex, double, out);
+ CHECK_ARGS(args, kBuffer, std::string, out);
+ CHECK_ARGS(args, kLocation, picojson::array, out);
+ CHECK_ARGS(args, kSize, picojson::array, out);
+
+ int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
+ int index = static_cast<int>(args.get(kIndex).get<double>());
+
+ TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+ if (nullptr == tensors_data) {
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
+ ("Could not find TensorsData handle with given id: %d", tensors_data_id));
+ return;
+ }
+
+ // Placeholder defaults: location {0,...} and size {-1,...} (-1 presumably
+ // meaning "whole dimension") are passed through unmodified for now.
+ int location[ML_TENSOR_RANK_LIMIT] = {0, 0, 0, 0};
+ int size[ML_TENSOR_RANK_LIMIT] = {-1, -1, -1, -1};
+ // TODO: validate location and size - will be done in future commit
+
+ // Decode the bridge's string encoding back into raw bytes.
+ const std::string& str_buffer = args.get(kBuffer).get<std::string>();
+ std::vector<std::uint8_t> buffer;
+ common::decode_binary_from_string(str_buffer, buffer);
+
+ // rawData is a non-owning view into `buffer`; it must not outlive this scope.
+ TensorRawData rawData{.data = buffer.data(), .size = buffer.size()};
+ PlatformResult result = tensors_data->SetTensorRawData(index, location, size, rawData);
+ if (!result) {
+ LogAndReportError(result, &out);
+ return;
+ }
+
+ ReportSuccess(out);
+}
// Common ML API end
// Single API begin
void MLTensorsDataDispose(const picojson::value& args, picojson::object& out);
void MLTensorsDataGetTensorRawData(const picojson::value& args, picojson::object& out);
+ void MLTensorsDataGetTensorType(const picojson::value& args, picojson::object& out);
+ void MLTensorsDataSetTensorRawData(const picojson::value& args, picojson::object& out);
TensorsInfoManager tensors_info_manager_;
TensorsDataManager tensors_data_manager_;
return tensors_info_->Count();
}
+// Returns the ml_tensor_type_e of tensor |index|, falling back to
+// ML_TENSOR_TYPE_UNKNOWN (with an error log) when the native query fails.
+ml_tensor_type_e TensorsData::GetTensorType(int index) {
+  ScopeLogger("id_: %d, index: %d", id_, index);
+
+  ml_tensor_type_e type = ML_TENSOR_TYPE_UNKNOWN;
+  if (!tensors_info_->NativeGetTensorType(index, &type)) {
+    LoggerE("Failed to get tensor type");
+  }
+  return type;
+}
+
PlatformResult TensorsData::GetTensorRawData(int index, int location[ML_TENSOR_RANK_LIMIT],
int size[ML_TENSOR_RANK_LIMIT],
TensorRawData* tensor_raw_data) {
}
// TODO: add support for location and size - will be done in future commit
- ml_tensor_type_e type_enum = ML_TENSOR_TYPE_UNKNOWN;
- PlatformResult result = tensors_info_->NativeGetTensorType(index, &type_enum);
- if (!result) {
- return result;
- }
-
- result = types::TensorTypeEnum.getName(type_enum, &tensor_raw_data->type_str);
+ PlatformResult result =
+ types::TensorTypeEnum.getName(this->GetTensorType(index), &tensor_raw_data->type_str);
if (!result) {
return result;
}
return PlatformResult(ErrorCode::NO_ERROR);
}
+// Writes |tensor_raw_data| into tensor |index| via the native
+// ml_tensors_data_set_tensor_data() call; location/size are not yet honored.
+PlatformResult TensorsData::SetTensorRawData(int index, int location[ML_TENSOR_RANK_LIMIT],
+                                             int size[ML_TENSOR_RANK_LIMIT],
+                                             TensorRawData& tensor_raw_data) {
+  ScopeLogger("id_: %d, index: %d", id_, index);
+
+  // TODO: add support for location and size - will be done in future commit
+  int error_code =
+      ml_tensors_data_set_tensor_data(handle_, index, tensor_raw_data.data, tensor_raw_data.size);
+  if (ML_ERROR_NONE == error_code) {
+    return PlatformResult(ErrorCode::NO_ERROR);
+  }
+
+  LoggerE("ml_tensors_data_set_tensor_data failed: %d (%s)", error_code,
+          get_error_message(error_code));
+  return util::ToPlatformResult(error_code, "Internal TensorsData error");
+}
+
PlatformResult TensorsData::NativeDestroy() {
ScopeLogger("id_: %d", id_);
int ret = ml_tensors_data_destroy(handle_);
int Id();
int TensorsInfoId();
int Count();
+ ml_tensor_type_e GetTensorType(int index);
PlatformResult GetTensorRawData(int index, int location[ML_TENSOR_RANK_LIMIT],
int size[ML_TENSOR_RANK_LIMIT], TensorRawData* tensor_raw_data);
+ PlatformResult SetTensorRawData(int index, int location[ML_TENSOR_RANK_LIMIT],
+ int size[ML_TENSOR_RANK_LIMIT], TensorRawData& tensor_raw_data);
PlatformResult NativeDestroy();