return new TensorsData(result.tensorsDataId, result.tensorsInfoId);
};
+// Error names the native layer may legitimately report from an async invoke;
+// anything else is coerced to AbortError by getErrorObjectAndValidate().
+var ValidInvokeAsyncErrors = ['TimeoutError', 'NotSupportedError', 'AbortError'];
+// Runs the model asynchronously on the given input tensors.
+// successCallback receives a TensorsData wrapping the native result ids;
+// errorCallback (optional, nullable) receives the mapped WebAPIException.
+SingleShot.prototype.invokeAsync = function() {
+ var args = validator_.validateArgs(arguments, [
+ {
+ name: 'inTensorsData',
+ type: types_.PLATFORM_OBJECT,
+ values: TensorsData
+ },
+ {
+ name: 'successCallback',
+ type: types_.FUNCTION
+ },
+ { name: 'errorCallback', type: types_.FUNCTION, optional: true, nullable: true }
+ ]);
+
+ // Fired by the native layer when the worker-thread invoke completes.
+ var callback = function(result) {
+ if (native_.isFailure(result)) {
+ // setTimeout(..., 0) defers the error callback to its own task so it
+ // does not run re-entrantly inside the native message handler.
+ setTimeout(function() {
+ native_.callIfPossible(
+ args.errorCallback,
+ native_.getErrorObjectAndValidate(
+ result,
+ ValidInvokeAsyncErrors,
+ AbortError
+ )
+ );
+ }, 0);
+ } else {
+ // Wrap the native result ids in a TensorsData handle for the app.
+ native_.callIfPossible(
+ args.successCallback,
+ new TensorsData(result.tensorsDataId, result.tensorsInfoId)
+ );
+ }
+ };
+
+ var nativeArgs = {
+ id: this._id,
+ tensorsDataId: args.inTensorsData._id,
+ // Tells the native side to copy the input and reply via the callback.
+ async: true
+ };
+
+ // Listener-style native call: 'callback' fires when the job finishes.
+ var result = native_.call('MLSingleShotInvoke', nativeArgs, callback);
+ // A synchronous failure (e.g. bad handle) is thrown directly to the caller.
+ if (native_.isFailure(result)) {
+ throw native_.getErrorObjectAndValidate(
+ result,
+ ValidInvokeAsyncErrors,
+ AbortError
+ );
+ }
+};
+
var GetSetValueValidExceptions = [
'AbortError',
'InvalidValuesError',
int id = static_cast<int>(args.get(kId).get<double>());
int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
+ // Optional 'async' flag from JS; absent or non-bool defaults to synchronous.
+ bool async =
+ (args.contains(kAsync) && args.get(kAsync).is<bool>()) ? args.get(kAsync).get<bool>() : false;
TensorsData* in_tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+ if (async && in_tensors_data) {
+ // in case of async flow need to prevent destroying entry data during invoke
+ // from JS, creation of a copy
+ in_tensors_data = GetTensorsInfoManager().CreateTensorsData(in_tensors_data->GetTensorsInfo());
+ }
if (nullptr == in_tensors_data) {
LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
("Could not find TensorsData handle with given id: %d", tensors_data_id));
return;
}
- TensorsData* out_tensors_data = nullptr;
- auto ret = single_manager_.Invoke(id, in_tensors_data, &out_tensors_data);
- if (!ret) {
- ReportError(ret, &out);
- return;
- }
+ // Shared invoke body, parameterized on the output object so the same code
+ // serves both paths: the sync path writes into 'out', the async path writes
+ // into the worker response. decltype(out) keeps the parameter a reference.
+ // NOTE(review): tensors_data_id is captured but unused inside the lambda.
+ auto logic = [this, id, tensors_data_id, in_tensors_data, async](decltype(out) out) {
+ TensorsData* out_tensors_data = nullptr;
+ auto ret = single_manager_.Invoke(id, in_tensors_data, &out_tensors_data);
+ if (async) {
+ // in case of async flow, the in_tensor_data with underlying TensorsInfo
+ // was copied, thus need to be released here
+ GetTensorsInfoManager().DisposeTensorsInfo(in_tensors_data->GetTensorsInfo());
+ GetTensorsDataManager().DisposeTensorsData(in_tensors_data);
+ }
+ if (!ret) {
+ ReportError(ret, &out);
+ return;
+ }
- out[kTensorsDataId] = picojson::value(static_cast<double>(out_tensors_data->Id()));
- out[kTensorsInfoId] = picojson::value(static_cast<double>(out_tensors_data->TensorsInfoId()));
- ReportSuccess(out);
+ out[kTensorsDataId] = picojson::value(static_cast<double>(out_tensors_data->Id()));
+ out[kTensorsInfoId] = picojson::value(static_cast<double>(out_tensors_data->TensorsInfoId()));
+ ReportSuccess(out);
+ };
+
+ if (!async) {
+ // Synchronous path: run inline; result or error goes straight into 'out'.
+ logic(out);
+ } else {
+ CHECK_ARGS(args, kCallbackId, double, out);
+ double callback_id = args.get(kCallbackId).get<double>();
+ // Asynchronous path: run on the worker thread, tag the response with the
+ // JS callback id, and post it back to the renderer when done.
+ this->worker_.add_job([this, callback_id, logic] {
+ picojson::value response = picojson::value(picojson::object());
+ picojson::object& async_out = response.get<picojson::object>();
+ async_out[kCallbackId] = picojson::value(callback_id);
+ logic(async_out);
+ this->PostMessage(response.serialize().c_str());
+ });
+
+ // Sync return
+ ReportSuccess(out);
+ }
}
void MlInstance::MLSingleShotGetValue(const picojson::value& args, picojson::object& out) {