var types_ = validator_.Types;
var type_ = xwalk.utils.type;
var native_ = new xwalk.utils.NativeManager(extension);
+var converter_ = xwalk.utils.converter;
var AbortError = new WebAPIException('AbortError', 'An unknown error occurred');
}
}
-var TensorsData = function(id, tensorsInfoId) {
+var TensorsData = function(id, tensorsInfoId, disposable) {
Object.defineProperties(this, {
count: {
enumerable: true,
value: new TensorsInfo(tensorsInfoId),
writable: false,
enumerable: false
+ },
+ _disposable: {
+ value: type_.isNullOrUndefined(disposable) ? true : disposable,
+ writable: false,
+ enumerable: false
}
});
_ValidTensorsDataIds.add(id);
};
TensorsData.prototype.dispose = function() {
+ if (!this._disposable) {
+ return;
+ }
+
if (false == _ValidTensorsDataIds.has(this._id)) {
privUtils_.log('TensorsData already disposed');
return;
exports = new MachineLearningManager();
exports.TensorsInfo = TensorsInfo;
-exports.CustomFilterOutput = CustomFilterOutput;
MachineLearningPipeline.prototype.createPipeline = CreatePipeline;
//Pipeline::registerCustomFilter() begin
-var CustomFilterOutput = function() {
- validator_.isConstructorCall(this, CustomFilterOutput);
-
- var args = validator_.validateArgs(arguments, [
- {
- name: 'status',
- type: validator_.Types.LONG
- },
- {
- name: 'data',
- type: types_.PLATFORM_OBJECT,
- values: TensorsData,
- optional: true,
- nullable: true
- }
- ]);
-
- if (!args.has.data) {
- args.data = null;
- }
-
- if (args.status > 0 && args.status !== 1) {
- throw new WebAPIException(
- WebAPIException.INVALID_VALUES_ERR,
- 'CustomFilterOutput.status === 1 is the only legal positive value'
- );
- }
-
- if (args.status === 0 && args.data === null) {
- throw new WebAPIException(
- WebAPIException.INVALID_VALUES_ERR,
- 'CustomFilterOutput.data === null is illegal when ' +
- 'CustomFilterOutput.status === 0'
- );
- }
-
- Object.defineProperties(this, {
- status: {
- enumerable: true,
- value: args.status
- },
- data: {
- enumerable: true,
- value: args.data
- }
- });
-};
-
var ValidRegisterCustomFilterExceptions = [
'InvalidValuesError',
'NotSupportedError',
];
var ValidCustomFilterOutputErrors = ['InvalidValuesError', 'AbortError'];
+
MachineLearningPipeline.prototype.registerCustomFilter = function() {
var args = validator_.validateArgs(arguments, [
{
/*
* CustomFilter processing has 4 stages (the description below assumes
* the typical scenario with no errors):
- * 1. (C++) C++ callback is called by the native API with input data.
- * The C++ callback clones the tensors data and associated info and
- * sends it to JS.
- * 2. (JS) customFilterWrapper is called with the input data from C++
- * as one of its arguments. User-provided callback processes the data
- * and the output is sent to C++ by a call of asynchronous function.
- * 3. (C++) C++ callback is woken up and clones the output from user
- * callback to native tensors data. It replies to JS with success/error.
- * 4. (JS) If C++ responded with success, the operation stops.
+ * 1. (C++; non-main thread) C++ callback is called by the native API with input data.
+ * The C++ callback wraps native ml_tensors_data_h handles in TensorsData
+ * objects and sends them together with associated TensorsInfo to JS.
+ * 2. (JS; main thread) customFilterWrapper is called with the input data from C++
+ * as one of its arguments. User-provided callback processes the data.
+ * The input/output TensorsData that arrive to JS as CustomFilter arguments
+ * are unique in that they:
+ * - cannot be disposed, i.e. calling {input, output}.dispose() is a no-op
+ * - input is immutable, i.e. calling input.setTensorRawData() is a no-op;
+ *   output.setTensorRawData() modifies the native nnstreamer object directly.
+ * 3. (C++; main thread) Sleeping callback thread is notified. If anything
+ * goes wrong, C++ function returns an error synchronously to stage 4.
+ * 4. (JS; main thread) If C++ returned a success, the operation stops.
* Otherwise, the error callback provided by the user is called.
+ * 5. (C++; non-main thread) C++ callback is woken up and returns the status
+ * received from user to pipeline.
*/
var customFilterWrapper = function(msg) {
/*
return;
}
- var inputData = new TensorsData(msg.tensorsDataId, msg.tensorsInfoId);
+ var inputData = new TensorsData(
+ msg.inputTensorsDataId,
+ msg.inputTensorsInfoId,
+ false
+ );
+ var outputData = new TensorsData(
+ msg.outputTensorsDataId,
+ msg.outputTensorsInfoId,
+ false
+ );
+
/*
* customFilterErrorInJs records errors caused by the CustomFilter callback
* provided by the user.
var customFilterErrorInJs = null;
var jsResponse = {
status: -1,
- dataId: -1,
name: nativeArgs.name,
requestId: msg.requestId
};
- var output = null;
try {
- output = args.customFilter(inputData);
+ jsResponse.status = converter_.toLong(
+ args.customFilter(inputData, outputData)
+ );
} catch (exception) {
+ var exceptionString =
+ typeof exception.toString === 'function'
+ ? exception.toString()
+ : JSON.stringify(exception);
customFilterErrorInJs = new WebAPIException(
WebAPIException.ABORT_ERR,
- 'CustomFilter has thrown exception: ' + xwalk.JSON.stringify(exception)
+ 'CustomFilter has thrown exception: ' + exceptionString
);
}
- if (output instanceof CustomFilterOutput) {
- jsResponse.status = output.status;
- jsResponse.dataId = type_.isNullOrUndefined(output.data)
- ? -1
- : output.data._id;
- } else if (customFilterErrorInJs === null) {
+ if (!customFilterErrorInJs && jsResponse.status > 0 && jsResponse.status !== 1) {
customFilterErrorInJs = new WebAPIException(
- WebAPIException.TYPE_MISMATCH_ERR,
- 'The value returned from CustomFilter is not a CustomFilterOutput object'
+ WebAPIException.INVALID_VALUES_ERR,
+ 'The only legal positive value of status returned from CustomFilter is 1'
);
+ jsResponse.status = -1;
}
/*
- * Callback called in stage 4.
- *
- * It is used to process success/error messages that come from
- * C++ (stage 3).
- * It does not handle errors caused by the user-provided CustomFilter
- * which we detect in JS.
+ * Entering stage 3.
*/
- function filterOutputCallback(result) {
- if (native_.isSuccess(result)) {
- return;
- }
-
- var error = native_.getErrorObjectAndValidate(
- result,
- ValidCustomFilterOutputErrors,
- AbortError
- );
-
- native_.callIfPossible(args.errorCallback, error);
- }
+ var result = native_.callSync('MLPipelineManagerCustomFilterOutput', jsResponse);
/*
- * Entering stage 3.
+ * Stage 4.
*/
- var result = native_.call(
- 'MLPipelineManagerCustomFilterOutput',
- jsResponse,
- filterOutputCallback
- );
-
if (customFilterErrorInJs) {
/*
* If we detect that user-provided CustomFilter callback caused
* any errors in JS, the C++ layer gets the message to stop the
* pipeline (status == -1) and does not reply to JS with errors.
- * Thus, filterOutputCallback is not called and this is why we
- * call the user-provided error callback from JS.
+ * Thus, "result" is a success and we call the user-provided error
+ * callback here.
*/
native_.callIfPossible(args.errorCallback, customFilterErrorInJs);
} else if (native_.isFailure(result)) {
- filterOutputCallback(result);
+ var error = native_.getErrorObjectAndValidate(
+ result,
+ ValidCustomFilterOutputErrors,
+ AbortError
+ );
+
+ native_.callIfPossible(args.errorCallback, error);
}
};
("Could not find TensorsData handle with given id: %d", tensors_data_id));
return;
}
+
+ if (!tensors_data->DisposableFromJS()) {
+ ReportSuccess(out);
+ return;
+ }
+
// Dispose underlying tensorsInfo
PlatformResult result = GetTensorsInfoManager().DisposeTensorsInfo(tensors_data->TensorsInfoId());
if (!result) {
CHECK_ARGS(args, kName, std::string, out);
CHECK_ARGS(args, kStatus, double, out);
CHECK_ARGS(args, kRequestId, double, out);
- CHECK_ARGS(args, kDataId, double, out);
- CHECK_ARGS(args, kCallbackId, double, out);
const auto& custom_filter_name = args.get(kName).get<std::string>();
auto status = static_cast<int>(args.get(kStatus).get<double>());
auto request_id = static_cast<int>(args.get(kRequestId).get<double>());
- auto data_id = static_cast<int>(args.get(kDataId).get<double>());
- auto callback_id = static_cast<int>(args.get(kCallbackId).get<double>());
- auto ret = pipeline_manager_.CustomFilterOutput(custom_filter_name, request_id, status, data_id,
- callback_id);
+ auto ret = pipeline_manager_.CustomFilterOutput(custom_filter_name, request_id, status);
if (!ret) {
LogAndReportError(ret, &out);
return;
#include <atomic>
#include <utility>
+#include "common/scope_exit.h"
#include "common/tools.h"
#include "ml_pipeline_custom_filter.h"
#include "ml_utils.h"
const std::string kCallbackId = "callbackId";
const std::string kListenerId = "listenerId";
const std::string kRequestId = "requestId";
-const std::string kTensorsDataId = "tensorsDataId";
-const std::string kTensorsInfoId = "tensorsInfoId";
+const std::string kInputTensorsDataId = "inputTensorsDataId";
+const std::string kInputTensorsInfoId = "inputTensorsInfoId";
+const std::string kOutputTensorsDataId = "outputTensorsDataId";
+const std::string kOutputTensorsInfoId = "outputTensorsInfoId";
} // namespace
const int CustomFilter::kCustomFilterIgnoreData = 1;
const int CustomFilter::kCustomFilterSuccess = 0;
-CustomFilter::JSResponse::JSResponse(int status, int callback_id, TensorsData* tensors_data_ptr,
- TensorsDataManager* tensors_data_manager_ptr,
- TensorsInfoManager* tensors_info_manager_ptr)
- : status{status},
- callback_id{callback_id},
- tensors_data_ptr{tensors_data_ptr},
- tensors_data_manager_ptr{tensors_data_manager_ptr},
- tensors_info_manager_ptr{tensors_info_manager_ptr} {
- ScopeLogger("status: [%d], callback_id: [%d], tensors_data_ptr: %p]", status, callback_id,
- tensors_data_ptr);
-}
-
-CustomFilter::JSResponse::~JSResponse() {
- ScopeLogger("status: [%d], callback_id: [%d], tensors_data_ptr: [%p]", status, callback_id,
- tensors_data_ptr);
-
- if (!tensors_data_ptr) {
- return;
- }
- // We ignore errors, because we can't do anything about them and these methods
- // will log error messages
- tensors_info_manager_ptr->DisposeTensorsInfo(tensors_data_ptr->TensorsInfoId());
- tensors_data_manager_ptr->DisposeTensorsData(tensors_data_ptr);
-}
-
-CustomFilter::JSResponse::JSResponse(JSResponse&& other)
- : status{other.status},
- callback_id{other.callback_id},
- tensors_data_ptr{other.tensors_data_ptr},
- tensors_data_manager_ptr{other.tensors_data_manager_ptr},
- tensors_info_manager_ptr{other.tensors_info_manager_ptr} {
- other.tensors_data_ptr = nullptr;
-}
-
PlatformResult CustomFilter::CreateAndRegisterCustomFilter(
const std::string& name, const std::string& listener_name, TensorsInfo* input_tensors_info_ptr,
TensorsInfo* output_tensors_info_ptr, common::Instance* instance_ptr,
return PlatformResult{};
}
-void CustomFilter::NotifyAboutJSResponse(int request_id, int status, int callback_id,
- TensorsData* tensors_data_ptr) {
- ScopeLogger("request_id: [%d], status: [%d], callback_id: [%d], tensors_data_ptr: [%p]",
- request_id, status, callback_id, tensors_data_ptr);
+void CustomFilter::NotifyAboutJSResponse(int request_id, int status) {
+  ScopeLogger("request_id: [%d], status: [%d]", request_id, status);
-  std::lock_guard<std::mutex>{request_id_to_js_response_mutex_};
+  // An unnamed std::lock_guard temporary is destroyed at the end of the full
+  // expression, leaving the map update and the notify below unprotected.
+  // Name the guard so the mutex stays held for the rest of this function.
+  std::lock_guard<std::mutex> lock{request_id_to_js_response_mutex_};
-  request_id_to_js_response_.emplace(
-      request_id, JSResponse{status, callback_id, tensors_data_ptr, tensors_data_manager_ptr_,
-                            tensors_info_manager_ptr_});
+  request_id_to_js_response_status_[request_id] = status;
  cv_.notify_all();
}
return data_id++;
}
-bool CustomFilter::PrepareMessageWithInputData(const ml_tensors_data_h input_tensors_data,
-                                               picojson::value* out_message, int* out_request_id) {
-  ScopeLogger("input_tensors_data: [%p]", input_tensors_data);
+bool CustomFilter::PrepareMessageWithInputData(
+    const ml_tensors_data_h native_input_tensors_data_handle,
+    const ml_tensors_data_h native_output_tensors_data_handle, picojson::value* out_message,
+    int* out_request_id, TensorsData** input_tensors_data_ptr,
+    TensorsData** output_tensors_data_ptr) {
+  ScopeLogger("native_input_tensors_data_handle: [%p], native_output_tensors_data_handle: [%p]",
+              native_input_tensors_data_handle, native_output_tensors_data_handle);
  auto& message_obj = out_message->get<picojson::object>();
  message_obj[kListenerId] = picojson::value{listener_name_};
    return false;
  }
+  *input_tensors_data_ptr = tensors_data_manager_ptr_->CreateTensorsData(
+      input_tensors_info_ptr_, native_input_tensors_data_handle, false, true);
+  // Test the created object, not the out-parameter itself: callers always pass
+  // a valid TensorsData**, so "!input_tensors_data_ptr" could never detect a
+  // nullptr returned by CreateTensorsData().
+  if (!*input_tensors_data_ptr) {
    LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal CustomFilter error"),
                      &message_obj,
-                      ("Could not clone tensors data. Custom filter won't be triggered."));
+                      ("Could not create TensorsData. Custom filter won't be triggered."));
    return false;
  }
-  message_obj[kTensorsDataId] =
-      picojson::value{static_cast<double>(input_tensors_data_clone_ptr->Id())};
-  message_obj[kTensorsInfoId] =
-      picojson::value{static_cast<double>(input_tensors_data_clone_ptr->TensorsInfoId())};
+  *output_tensors_data_ptr = tensors_data_manager_ptr_->CreateTensorsData(
+      output_tensors_info_ptr_, native_output_tensors_data_handle, false, false);
+  // Same as above: dereference to test the creation result.
+  if (!*output_tensors_data_ptr) {
+    LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal CustomFilter error"),
+                      &message_obj,
+                      ("Could not create TensorsData. Custom filter won't be triggered."));
+    return false;
+  }
+
+  message_obj[kInputTensorsDataId] =
+      picojson::value{static_cast<double>((*input_tensors_data_ptr)->Id())};
+  message_obj[kInputTensorsInfoId] =
+      picojson::value{static_cast<double>((*input_tensors_data_ptr)->TensorsInfoId())};
+  message_obj[kOutputTensorsDataId] =
+      picojson::value{static_cast<double>((*output_tensors_data_ptr)->Id())};
+  message_obj[kOutputTensorsInfoId] =
+      picojson::value{static_cast<double>((*output_tensors_data_ptr)->TensorsInfoId())};
  *out_request_id = getRequestId();
  message_obj[kRequestId] = picojson::value{static_cast<double>(*out_request_id)};
  return true;
}
-int CustomFilter::CopyJsFilterOutputToNativeObject(int request_id, const JSResponse& js_response,
- ml_tensors_data_h output_tensors_data,
- picojson::value* out_response_to_js) {
- ScopeLogger("request_id: [%d]", request_id);
-
- auto& response_to_js_obj = out_response_to_js->get<picojson::object>();
- response_to_js_obj[kCallbackId] = picojson::value{static_cast<double>(js_response.callback_id)};
-
- int custom_filter_status = kCustomFilterError;
- if (kCustomFilterIgnoreData == js_response.status || js_response.status < 0) {
- /*
- * Although js_response.status < 0 means "error", we respond with "success" message
- * to JS, because this status came from JS and the problem, if any, is already handled there.
- */
- ReportSuccess(response_to_js_obj);
- custom_filter_status = js_response.status;
- } else if (kCustomFilterSuccess == js_response.status) {
- auto* js_response_tensors_info_ptr =
- tensors_info_manager_ptr_->GetTensorsInfo(js_response.tensors_data_ptr->TensorsInfoId());
-
- if (!js_response_tensors_info_ptr) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal CustomFilter error"),
- &response_to_js_obj,
- ("Could not get tensors info. Custom filter won't be triggered."));
- return kCustomFilterError;
- }
-
- if (!output_tensors_info_ptr_->Equals(js_response_tensors_info_ptr)) {
- LogAndReportError(PlatformResult(ErrorCode::INVALID_VALUES_ERR,
- "Output's TensorsInfo is not equal to expected"),
- &response_to_js_obj);
- return kCustomFilterError;
- }
-
- auto tensors_count = js_response_tensors_info_ptr->Count();
- for (int i = 0; i < tensors_count; ++i) {
- void* data = nullptr;
- size_t data_size = 0;
- auto ret = ml_tensors_data_get_tensor_data(js_response.tensors_data_ptr->Handle(), i, &data,
- &data_size);
- if (ML_ERROR_NONE != ret) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal CustomFilter error"),
- &response_to_js_obj,
- ("ml_tensors_data_get_tensor_data() failed: [%d] (%s), i: [%d]", ret,
- get_error_message(ret), i));
- return kCustomFilterError;
- }
-
- ret = ml_tensors_data_set_tensor_data(output_tensors_data, i, data, data_size);
- if (ML_ERROR_NONE != ret) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal CustomFilter error"),
- &response_to_js_obj,
- ("ml_tensors_data_set_tensor_data() failed: [%d] (%s), i: [%d]", ret,
- get_error_message(ret), i));
- return kCustomFilterError;
- }
- }
-
- custom_filter_status = kCustomFilterSuccess;
- } else {
- ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Internal Customfilter error"},
- &response_to_js_obj);
- custom_filter_status = kCustomFilterError;
- }
-
- return custom_filter_status;
-}
-
-int CustomFilter::CustomFilterListener(const ml_tensors_data_h input_tensors_data,
- ml_tensors_data_h output_tensors_data, void* user_data) {
- ScopeLogger("input_tensors_data: [%p], tensors_info_out: [%p], user_data: [%p]",
- input_tensors_data, output_tensors_data, user_data);
+int CustomFilter::CustomFilterListener(const ml_tensors_data_h native_input_tensors_data_handle,
+ ml_tensors_data_h native_output_tensors_data_handle,
+ void* user_data) {
+ ScopeLogger(
+ "native_input_tensors_data_handle: [%p], native_output_tensors_data_handle: [%p], user_data: "
+ "[%p]",
+ native_input_tensors_data_handle, native_output_tensors_data_handle, user_data);
if (!user_data) {
LoggerE("user_data is a nullptr");
- return -1;
+ return kCustomFilterError;
}
CustomFilter* custom_filter_ptr = static_cast<CustomFilter*>(user_data);
picojson::value message{picojson::object{}};
int request_id = -1;
- auto success =
- custom_filter_ptr->PrepareMessageWithInputData(input_tensors_data, &message, &request_id);
+ TensorsData *input_tensors_data_ptr = nullptr, *output_tensors_data_ptr = nullptr;
+
+ SCOPE_EXIT {
+ custom_filter_ptr->tensors_data_manager_ptr_->DisposeTensorsData(input_tensors_data_ptr);
+ custom_filter_ptr->tensors_data_manager_ptr_->DisposeTensorsData(output_tensors_data_ptr);
+ };
+
+ auto success = custom_filter_ptr->PrepareMessageWithInputData(
+ native_input_tensors_data_handle, native_output_tensors_data_handle, &message, &request_id,
+ &input_tensors_data_ptr, &output_tensors_data_ptr);
std::unique_lock<std::mutex> lock{custom_filter_ptr->request_id_to_js_response_mutex_};
common::Instance::PostMessage(custom_filter_ptr->instance_ptr_, message);
* (the main thread).
*/
custom_filter_ptr->cv_.wait(lock, [custom_filter_ptr, request_id]() {
- return custom_filter_ptr->request_id_to_js_response_.count(request_id) != 0;
+ return custom_filter_ptr->request_id_to_js_response_status_.count(request_id) != 0;
});
/*
- * Stage 3. of data processing starts here.
+ * Stage 5. of data processing starts here.
*/
- auto js_response{std::move(custom_filter_ptr->request_id_to_js_response_[request_id])};
- custom_filter_ptr->request_id_to_js_response_.erase(request_id);
+ auto js_response_status = custom_filter_ptr->request_id_to_js_response_status_[request_id];
+ custom_filter_ptr->request_id_to_js_response_status_.erase(request_id);
lock.unlock();
- picojson::value response_to_js{picojson::object{}};
- auto custom_filter_status = custom_filter_ptr->CopyJsFilterOutputToNativeObject(
- request_id, js_response, output_tensors_data, &response_to_js);
-
- common::Instance::PostMessage(custom_filter_ptr->instance_ptr_, response_to_js);
-
- return custom_filter_status;
+ return js_response_status;
}
} // namespace pipeline
CustomFilter(const CustomFilter&) = delete;
CustomFilter& operator=(const CustomFilter&) = delete;
- void NotifyAboutJSResponse(int request_id, int status, int callback_id,
- TensorsData* tensors_data_ptr);
+ void NotifyAboutJSResponse(int request_id, int status);
static const int kCustomFilterError;
static const int kCustomFilterIgnoreData;
static int CustomFilterListener(const ml_tensors_data_h tensors_data_in,
ml_tensors_data_h tensors_data_out, void* user_data);
- struct JSResponse {
- JSResponse() = default;
- JSResponse(int status, int callback_id, TensorsData* tensors_data_ptr,
- TensorsDataManager* tensors_data_manager_ptr,
- TensorsInfoManager* tensors_info_manager_ptr);
- JSResponse(JSResponse&& other);
-
- ~JSResponse();
-
- JSResponse(const JSResponse&) = delete;
- JSResponse& operator=(const JSResponse&) = delete;
-
- int status;
- int callback_id;
-
- TensorsData* tensors_data_ptr = nullptr;
-
- // We need these managers to properly dispose
- // tensors_data_ptr and the associated TensorsInfo object
- TensorsDataManager* tensors_data_manager_ptr;
- TensorsInfoManager* tensors_info_manager_ptr;
- };
-
/*
* Returns "false" if any error occurs and "true" otherwise.
*/
bool PrepareMessageWithInputData(const ml_tensors_data_h input_tensors_data,
- picojson::value* out_message, int* out_request_id);
-
- /*
- * Returns the value to be returned from CustomFilter, which
- * implements ml_custom_easy_invoke_cb;
- */
- int CopyJsFilterOutputToNativeObject(int request_id, const JSResponse& js_response,
- ml_tensors_data_h output_tensors_data,
- picojson::value* out_response_to_js);
+ ml_tensors_data_h output_tensors_data,
+ picojson::value* out_message, int* out_request_id,
+ TensorsData** input_tensors_data_ptr,
+ TensorsData** output_tensors_data_ptr);
int getRequestId();
TensorsDataManager* tensors_data_manager_ptr_;
std::mutex request_id_to_js_response_mutex_;
- std::unordered_map<int, JSResponse> request_id_to_js_response_;
+ std::unordered_map<int, int> request_id_to_js_response_status_;
std::condition_variable cv_;
std::thread::id main_thread_id_;
};
// Pipeline::registerCustomFilter() end
PlatformResult PipelineManager::CustomFilterOutput(const std::string& custom_filter_name,
- int request_id, int status, int data_id,
- int callback_id) {
- ScopeLogger(
- "custom_filter_name: [%s], request_id: [%d], status: [%d], data_id: [%d], callback_id: [%d]",
- custom_filter_name.c_str(), request_id, status, data_id, callback_id);
+ int request_id, int status) {
+ ScopeLogger("custom_filter_name: [%s], request_id: [%d], status: [%d]",
+ custom_filter_name.c_str(), request_id, status);
auto filter_it = custom_filters_.find(custom_filter_name);
if (custom_filters_.end() == filter_it) {
return PlatformResult{ErrorCode::ABORT_ERR, "Internal CustomFilter error"};
}
- if (CustomFilter::kCustomFilterSuccess != status) {
- filter_it->second->NotifyAboutJSResponse(request_id, status, callback_id, nullptr);
- return PlatformResult{};
- }
-
- auto* output_from_js_tensors_data = tensors_data_manager_->GetTensorsData(data_id);
- if (!output_from_js_tensors_data) {
- LoggerE("Could not get TensorsData: [%d]", data_id);
- filter_it->second->NotifyAboutJSResponse(request_id, CustomFilter::kCustomFilterError,
- callback_id, nullptr);
- return PlatformResult{ErrorCode::ABORT_ERR, "Internal CustomFilter error"};
- }
-
- auto* output_from_js_tensors_info =
- tensors_info_manager_->GetTensorsInfo(output_from_js_tensors_data->TensorsInfoId());
- if (!output_from_js_tensors_info) {
- LoggerE("Could not get TensorsInfo: [%d]", output_from_js_tensors_data->TensorsInfoId());
- filter_it->second->NotifyAboutJSResponse(request_id, CustomFilter::kCustomFilterError,
- callback_id, nullptr);
- return PlatformResult{ErrorCode::ABORT_ERR, "Internal CustomFilter error"};
- }
-
- /*
- * We clone this tensors data to be sure, that the user won't dispose it before it will be cloned
- * by CustomFilter::CustomFilterListener.
- */
- auto* output_from_js_tensors_data_clone = tensors_info_manager_->CloneNativeTensorWithData(
- output_from_js_tensors_info->Handle(), output_from_js_tensors_data->Handle());
- if (!output_from_js_tensors_data_clone) {
- LoggerE("Could not clone TensorsData: [%d] with TensorsInfo: [%d]", data_id,
- output_from_js_tensors_info->Id());
- filter_it->second->NotifyAboutJSResponse(request_id, CustomFilter::kCustomFilterError,
- callback_id, nullptr);
- return PlatformResult{ErrorCode::ABORT_ERR, "Internal CustomFilter error"};
- }
-
- filter_it->second->NotifyAboutJSResponse(request_id, status, callback_id,
- output_from_js_tensors_data_clone);
+ filter_it->second->NotifyAboutJSResponse(request_id, status);
return PlatformResult{};
}
TensorsInfo* output_tensors_info_ptr);
PlatformResult CustomFilterOutput(const std::string& custom_filter_name, int request_id,
- int status, int data_id, int callback_id);
+ int status);
// Pipeline::registerCustomFilter() end
// Pipeline::unregisterCustomFilter() begin
namespace extension {
namespace ml {
-TensorsData::TensorsData(ml_tensors_data_h handle, int id, TensorsInfo* tensors_info)
- : handle_(handle), id_(id), tensors_info_(tensors_info) {
+TensorsData::TensorsData(ml_tensors_data_h handle, int id, TensorsInfo* tensors_info,
+ bool owns_native_handle, bool immutable)
+ : handle_(handle),
+ id_(id),
+ tensors_info_(tensors_info),
+ owns_native_handle_(owns_native_handle),
+ immutable_(immutable) {
ScopeLogger();
}
TensorsData::~TensorsData() {
-  ScopeLogger();
-  if (!this->NativeDestroy()) {
-    LoggerE("TensorsData NativeDestroy failed");
+  ScopeLogger("id_: %d, owns_native_handle_: %s", id_, owns_native_handle_ ? "true" : "false");
+  // Wrappers created with owns_native_handle == false merely borrow a handle
+  // managed by nnstreamer; only an owning TensorsData may destroy it here.
+  if (owns_native_handle_) {
+    if (!this->NativeDestroy()) {
+      LoggerE("TensorsData NativeDestroy failed");
+    }
  }
  // TensorsDataManager releases tensors_info_
}
return tensors_info_->Count();
}
+bool TensorsData::DisposableFromJS() {
+ return owns_native_handle_;
+}
+
ml_tensor_type_e TensorsData::GetTensorType(int index) {
ScopeLogger("id_: %d, index: %d", id_, index);
ml_tensor_type_e tensor_type_enum = ML_TENSOR_TYPE_UNKNOWN;
PlatformResult TensorsData::SetTensorRawData(int index, unsigned int location[ML_TENSOR_RANK_LIMIT],
unsigned int size[ML_TENSOR_RANK_LIMIT],
TensorRawData& tensor_raw_data) {
- ScopeLogger("id_: %d, index: %d, tensor_raw_data.size_in_bytes: %zu", id_, index,
- tensor_raw_data.size_in_bytes);
+ ScopeLogger("id_: %d, index: %d, tensor_raw_data.size_in_bytes: %zu, immutable_: %s", id_, index,
+ tensor_raw_data.size_in_bytes, immutable_ ? "true" : "false");
+
+ if (immutable_) {
+ return PlatformResult(ErrorCode::NO_ERROR);
+ }
+
// Dimensions of whole tensor
unsigned int dim[ML_TENSOR_RANK_LIMIT];
// Dimensions of updated tensors relative to location coordiantes
map_.clear();
};
-TensorsData* TensorsDataManager::CreateTensorsData(TensorsInfo* tensors_info,
- ml_tensors_data_h tensors_data_handle) {
+TensorsData* TensorsDataManager::CreateTensorsData(TensorsInfo* tensors_info) {
ScopeLogger();
if (nullptr == tensors_info) {
LoggerE("Could not find tensor");
return nullptr;
}
+ ml_tensors_data_h tensors_data_handle;
+ int ret = ml_tensors_data_create(tensors_info->Handle(), &tensors_data_handle);
+ if (ML_ERROR_NONE != ret) {
+ LoggerE("ml_tensors_data_create failed: %d (%s)", ret, get_error_message(ret));
+ return nullptr;
+ }
+
std::lock_guard<std::mutex> lock{map_and_next_id_mutex_};
int id = nextId_++;
auto t = std::make_unique<TensorsData>(tensors_data_handle, id, tensors_info);
map_[id] = std::move(t);
return map_[id].get();
-}
+};
+
+TensorsData* TensorsDataManager::CreateTensorsData(TensorsInfo* tensors_info,
+ const ml_tensors_data_h tensors_data_handle,
+ bool owns_native_handle, bool immutable) {
+ ScopeLogger("owns_native_handle: %s, immutable: %s", owns_native_handle ? "true" : "false",
+ immutable ? "true" : "false");
-TensorsData* TensorsDataManager::CreateTensorsData(TensorsInfo* tensors_info) {
- ScopeLogger();
if (nullptr == tensors_info) {
- LoggerE("Could not find tensor");
+ LoggerE("tensors_info is a nullptr");
return nullptr;
}
- ml_tensors_data_h tensors_data_handle;
- int ret = ml_tensors_data_create(tensors_info->Handle(), &tensors_data_handle);
- if (ML_ERROR_NONE != ret) {
- LoggerE("ml_tensors_data_create failed: %d (%s)", ret, get_error_message(ret));
+ if (nullptr == tensors_data_handle) {
+ LoggerE("tensors_data_handle is nullptr");
return nullptr;
}
std::lock_guard<std::mutex> lock{map_and_next_id_mutex_};
int id = nextId_++;
- auto t = std::make_unique<TensorsData>(tensors_data_handle, id, tensors_info);
+ auto t = std::make_unique<TensorsData>(tensors_data_handle, id, tensors_info, owns_native_handle,
+ immutable);
map_[id] = std::move(t);
return map_[id].get();
class TensorsData {
public:
- TensorsData(ml_tensors_data_h handle, int id, TensorsInfo* tensors_info);
+ TensorsData(ml_tensors_data_h handle, int id, TensorsInfo* tensors_info,
+ bool owns_native_handle = true, bool immutable = false);
~TensorsData();
ml_tensors_data_h Handle();
int Id();
int TensorsInfoId();
int Count();
+ bool DisposableFromJS();
ml_tensor_type_e GetTensorType(int index);
PlatformResult GetTensorRawData(int index, unsigned int location[ML_TENSOR_RANK_LIMIT],
unsigned int size[ML_TENSOR_RANK_LIMIT],
ml_tensors_data_h handle_;
int id_;
TensorsInfo* tensors_info_;
+ /*
+ * Some TensorsData don't own handle_ but are used as wrappers for handles
+ * managed by nnstreamer itself. We mustn't release their handle_s.
+ * To ensure this won't happen, set this field to "false" in the constructor.
+ */
+ const bool owns_native_handle_;
+ /*
+ * Some TensorsData coming from the native API and exposed to JS must remain
+ * unchanged, e.g. CustomFilter's input. To make all TensorsData.setTensorRawData()
+ * calls no-ops, set this field to "true" in the constructor.
+ */
+ const bool immutable_;
};
class TensorsDataManager {
~TensorsDataManager();
TensorsData* CreateTensorsData(TensorsInfo* tensors_info);
- TensorsData* CreateTensorsData(TensorsInfo* tensors_info, ml_tensors_data_h tensors_data_handle);
+ TensorsData* CreateTensorsData(TensorsInfo* tensors_info,
+ const ml_tensors_data_h tensors_data_handle,
+ bool owns_native_handle = true, bool immutable = false);
TensorsData* GetTensorsData(int id);
PlatformResult DisposeTensorsData(int id);