UNKNOWN: 'UNKNOWN'
};
// Maps a tensor type name (e.g. 'INT8', 'FLOAT32') to the matching TypedArray
// constructor. Unrecognized type names fall back to Uint8Array.
function _GetBufferTypeFromTensorType(tensorType) {
    var typeToArrayCtor = {
        INT8: Int8Array,
        UINT8: Uint8Array,
        INT16: Int16Array,
        UINT16: Uint16Array,
        FLOAT32: Float32Array,
        INT32: Int32Array,
        UINT32: Uint32Array,
        FLOAT64: Float64Array,
        INT64: BigInt64Array,
        UINT64: BigUint64Array
    };
    // hasOwnProperty guard keeps inherited Object.prototype members from
    // leaking through the lookup for unexpected inputs.
    return typeToArrayCtor.hasOwnProperty(tensorType)
        ? typeToArrayCtor[tensorType]
        : Uint8Array;
}
+
// TensorsData
var _ValidTensorsDataIds = new Set();
};
// getTensorRawData(index, location?, size?): fetches the raw buffer of the
// tensor at |index| from the native side and returns it as a TypedArray whose
// element type matches the tensor's declared type.
TensorsData.prototype.getTensorRawData = function() {
-    _CheckIfTensorsDataNotDisposed();
-    throw new WebAPIException(WebAPIException.ABORT_ERR, 'Not implemented');
+ // Throws if this TensorsData object was already disposed.
+ _CheckIfTensorsDataNotDisposed(this._id);
+ // index is a required LONG; location and size are optional arrays.
+ var args = validator_.validateArgs(arguments, [
+ {
+ name: 'index',
+ type: types_.LONG
+ },
+ {
+ name: 'location',
+ type: types_.ARRAY,
+ optional: true
+ },
+ {
+ name: 'size',
+ type: types_.ARRAY,
+ optional: true
+ }
+ ]);
+
+ if (!args.has.index) {
+ throw new WebAPIException(
+ WebAPIException.INVALID_VALUES_ERR,
+ 'Invalid parameter: index is undefined'
+ );
+ }
+ // TODO: validate location and size - will be done in future commit
+
+ // Omitted optional location/size default to empty arrays for the native call.
+ var callArgs = {
+ tensorsDataId: this._id,
+ index: args.index,
+ location: args.location ? args.location : [],
+ size: args.size ? args.size : []
+ };
+
+ // Synchronous round-trip to the native MLTensorsDataGetTensorRawData handler.
+ var result = native_.callSync('MLTensorsDataGetTensorRawData', callArgs);
+
+ if (native_.isFailure(result)) {
+ throw native_.getErrorObjectAndValidate(
+ result,
+ TensorsInfoGettersSettersValidExceptions,
+ AbortError
+ );
+ }
+
+ // result.buffer is a binary-encoded string produced by the native side;
+ // decode it into bytes, then reinterpret the underlying ArrayBuffer with
+ // the constructor matching the tensor's element type.
+ // TODO: modify StringToArray to accept also float types, not only int
+ var data = privUtils_.StringToArray(result.buffer, Uint8Array);
+ var ArrayType = _GetBufferTypeFromTensorType(result.type);
+ // TODO: return TensorRawData
+ return new ArrayType(data.buffer);
};
TensorsData.prototype.setTensorData = function() {
// Keys of fields in the picojson argument/response objects exchanged with the
// JS layer (used e.g. by MLTensorsDataGetTensorRawData).
const std::string kProperty = "property";
const std::string kBOOLEAN = "BOOLEAN";
const std::string kSTRING = "STRING";
+const std::string kBuffer = "buffer";
+const std::string kSize = "size";
+const std::string kLocation = "location";
} // namespace
using namespace common;
REGISTER_METHOD(MLPipelineValveIsOpen);
REGISTER_METHOD(MLTensorsDataDispose);
+ REGISTER_METHOD(MLTensorsDataGetTensorRawData);
// Single API begin
REGISTER_METHOD(MLSingleManagerOpenModel);
}
ReportSuccess(out);
}
+
+void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args, picojson::object& out) {
+ ScopeLogger("args: %s", args.serialize().c_str());
+ CHECK_ARGS(args, kTensorsDataId, double, out);
+ CHECK_ARGS(args, kIndex, double, out);
+ CHECK_ARGS(args, kLocation, picojson::array, out);
+ CHECK_ARGS(args, kSize, picojson::array, out);
+
+ int tensor_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
+ int index = static_cast<int>(args.get(kIndex).get<double>());
+
+ TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensor_data_id);
+ if (nullptr == tensors_data) {
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
+ ("Could not find TensorsData handle with given id: %d", tensor_data_id));
+ return;
+ }
+ // TODO: validate location and size - will be done in future commit
+ int location[ML_TENSOR_RANK_LIMIT];
+ int size[ML_TENSOR_RANK_LIMIT];
+ TensorRawData raw_data;
+ PlatformResult result = tensors_data->GetTensorRawData(index, location, size, &raw_data);
+ if (!result) {
+ LogAndReportError(result, &out);
+ return;
+ }
+
+ std::vector<std::uint8_t> out_data{raw_data.data, raw_data.data + raw_data.size};
+ out[kBuffer] = picojson::value(picojson::string_type, true);
+ common::encode_binary_in_string(out_data, out[kBuffer].get<std::string>());
+
+ out[kType] = picojson::value(raw_data.type_str);
+
+ ReportSuccess(out);
+}
// Common ML API end
// Single API begin
void MLTensorsInfoDispose(const picojson::value& args, picojson::object& out);
void MLTensorsDataDispose(const picojson::value& args, picojson::object& out);
+ void MLTensorsDataGetTensorRawData(const picojson::value& args, picojson::object& out);
TensorsInfoManager tensors_info_manager_;
TensorsDataManager tensors_data_manager_;
return tensors_info_->Count();
}
+// Fills |tensor_raw_data| with the buffer pointer, byte size and type name of
+// the tensor at |index|. The data pointer references memory owned by the
+// native handle_ — it is NOT copied here and must not outlive this object.
+// location/size are currently unused (see TODO below).
+PlatformResult TensorsData::GetTensorRawData(int index, int location[ML_TENSOR_RANK_LIMIT],
+ int size[ML_TENSOR_RANK_LIMIT],
+ TensorRawData* tensor_raw_data) {
+ ScopeLogger("id_: %d, index: %d", id_, index);
+ if (nullptr == tensor_raw_data) {
+ LoggerE("Invalid tensor_raw_data");
+ return PlatformResult(ErrorCode::ABORT_ERR);
+ }
+ void* data;
+ size_t data_size;
+ // Ask the ML C API for the tensor's raw buffer (pointer + byte size).
+ int ret = ml_tensors_data_get_tensor_data(handle_, index, &data, &data_size);
+ if (ML_ERROR_NONE != ret) {
+ LoggerE("ml_tensors_data_get_tensor_data failed: %d (%s)", ret, get_error_message(ret));
+ return util::ToPlatformResult(ret, "Internal TensorsData error");
+ }
+ // TODO: add support for location and size - will be done in future commit
+
+ // Resolve the tensor's element type and translate it to its string name.
+ ml_tensor_type_e type_enum = ML_TENSOR_TYPE_UNKNOWN;
+ PlatformResult result = tensors_info_->NativeGetTensorType(index, &type_enum);
+ if (!result) {
+ return result;
+ }
+
+ result = types::TensorTypeEnum.getName(type_enum, &tensor_raw_data->type_str);
+ if (!result) {
+ return result;
+ }
+
+ // Output fields are written only after every fallible step has succeeded.
+ tensor_raw_data->data = static_cast<uint8_t*>(data);
+ tensor_raw_data->size = data_size;
+
+ return PlatformResult(ErrorCode::NO_ERROR);
+}
+
PlatformResult TensorsData::NativeDestroy() {
ScopeLogger("id_: %d", id_);
int ret = ml_tensors_data_destroy(handle_);
class TensorsInfo;
+// Non-owning view of a single tensor's raw buffer, filled in by
+// TensorsData::GetTensorRawData().
+struct TensorRawData {
+ // TensorRawData does not take ownership of data, remember to handle it outside
+ uint8_t* data;
+ // Size of the buffer pointed to by |data|, in bytes.
+ size_t size;
+ // Tensor type name (e.g. "FLOAT32"), as produced by types::TensorTypeEnum.
+ std::string type_str;
+};
+
class TensorsData {
public:
TensorsData(ml_tensors_data_h handle, int id, TensorsInfo* tensors_info);
int Id();
int TensorsInfoId();
int Count();
+ PlatformResult GetTensorRawData(int index, int location[ML_TENSOR_RANK_LIMIT],
+ int size[ML_TENSOR_RANK_LIMIT], TensorRawData* tensor_raw_data);
PlatformResult NativeDestroy();