[ML][Pipeline] Change CustomFilter interface and implementation 23/254323/9
author    Pawel Wasowski <p.wasowski2@samsung.com>
          Thu, 25 Feb 2021 19:02:20 +0000 (20:02 +0100)
committer Rafal Walczyna <r.walczyna@samsung.com>
          Tue, 2 Mar 2021 11:57:47 +0000 (12:57 +0100)
ACR: TWDAPI-274

CustomFilter API and implementation are changed to avoid
copying TensorsData between the native layer and JS. The user callback
now receives both input and output TensorsData wrapping the native
buffers and returns a status code instead of a CustomFilterOutput object.
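
A rough before/after sketch of the callback shape (illustrative only; the
variable names here are placeholders, see the verification snippets below
for complete examples):

// Old interface: the callback received only the input and had to return
// a CustomFilterOutput wrapping a separately allocated TensorsData
var oldStyleFilter = function(input) {
    return new tizen.ml.CustomFilterOutput(0, outputTensorsData);
}

// New interface: the callback receives both input and output TensorsData
// wrapping the native buffers, fills the output in place with
// output.setTensorRawData() and returns a status code
// (0 - success, 1 - ignore data, negative value - error)
var newStyleFilter = function(input, output) {
    output.setTensorRawData(0, rawOutputData);
    return 0;
}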

[Verification] Code tested with the snippets below works fine

// Valid CustomFilter callback - the happy scenario
var inputTI = new tizen.ml.TensorsInfo();
inputTI.addTensorInfo('ti1', 'UINT8', [4, 20, 15, 1]);
var outputTI = new tizen.ml.TensorsInfo();
outputTI.addTensorInfo('ti1', 'UINT8', [1200]);
var flattenAndSet123 = function(input, output) {
    console.log("Custom filter called");

    var rawOutputData = new Uint8Array(1200);
    for (var i = 0; i < rawOutputData.length; ++i) {
        rawOutputData[i] = 123;
    }

    output.setTensorRawData(0, rawOutputData);
    return 0;
}

tizen.ml.pipeline.registerCustomFilter('testfilter2', flattenAndSet123, inputTI,
    outputTI, function errorCallback(error) {
        console.warn('custom filter error:'); console.warn(error);
    });

var pipeline_def = "videotestsrc num-buffers=3 "
                   + "! video/x-raw,width=20,height=15,format=BGRA "
                   + "! tensor_converter "
                   + "! tensor_filter framework=custom-easy model=testfilter2 "
                   + "! appsink name=mysink";

var pipeline = tizen.ml.pipeline.createPipeline(pipeline_def,
                                                state => {console.log(state);})

pipeline.registerSinkListener('mysink', function(sinkName, data) {
    console.log('SinkListener for "' + sinkName + '" sink called');
    console.log(data);
})

// READY
// Custom filter called
// PAUSED

pipeline.start()

// PLAYING
// <CustomFilter and SinkListener callbacks' outputs 3 times>

////////////////////////////////////////////////////////////

// Valid CustomFilter callback - the happy scenario; ignore the data

var inputTI = new tizen.ml.TensorsInfo();
inputTI.addTensorInfo('ti1', 'UINT8', [4, 20, 15, 1]);
var outputTI = new tizen.ml.TensorsInfo();
outputTI.addTensorInfo('ti1', 'UINT8', [1200]);
var flattenPlusOne = function(input, output) {
    console.log("Custom filter called");
    return 1; // ignore data
}

tizen.ml.pipeline.registerCustomFilter('testfilter2', flattenPlusOne, inputTI,
    outputTI, function errorCallback(error) {
        console.warn('custom filter error:'); console.warn(error);
    });

var pipeline_def = "videotestsrc num-buffers=3 "
                   + "! video/x-raw,width=20,height=15,format=BGRA "
                   + "! tensor_converter "
                   + "! tensor_filter framework=custom-easy model=testfilter2 "
                   + "! appsink name=mysink";

var pipeline = tizen.ml.pipeline.createPipeline(pipeline_def,
                                                state => {console.log(state);})

pipeline.registerSinkListener('mysink', function(sinkName, data) {
    console.log('SinkListener for "' + sinkName + '" sink called');
    console.log(data);
})

// READY
// Custom filter called
// Custom filter called
// Custom filter called
// PAUSED

pipeline.start()

// PLAYING

////////////////////////////////////////////////////////////

// Valid CustomFilter callback - CustomFilter returns an error

var inputTI = new tizen.ml.TensorsInfo();
inputTI.addTensorInfo('ti1', 'UINT8', [4, 20, 15, 1]);
var outputTI = new tizen.ml.TensorsInfo();
outputTI.addTensorInfo('ti1', 'UINT8', [1200]);
var flattenPlusOne = function(input, output) {
    console.log("Custom filter called");
    return -1;
}

tizen.ml.pipeline.registerCustomFilter('testfilter2', flattenPlusOne, inputTI,
    outputTI, function errorCallback(error) {
        console.warn('custom filter error:'); console.warn(error);
    });

var pipeline_def = "videotestsrc num-buffers=3 "
                   + "! video/x-raw,width=20,height=15,format=BGRA "
                   + "! tensor_converter "
                   + "! tensor_filter framework=custom-easy model=testfilter2 "
                   + "! appsink name=mysink";

var pipeline = tizen.ml.pipeline.createPipeline(pipeline_def,
                                                state => {console.log(state);})

pipeline.registerSinkListener('mysink', function(sinkName, data) {
    console.log('SinkListener for "' + sinkName + '" sink called');
    console.log(data);
})

// READY
// Custom filter called
// PAUSED

pipeline.start()

// PLAYING

////////////////////////////////////////////////////////////

// Invalid status value returned from the CustomFilter callback
var inputTI = new tizen.ml.TensorsInfo();
inputTI.addTensorInfo('ti1', 'UINT8', [4, 20, 15, 1]);
var outputTI = new tizen.ml.TensorsInfo();
outputTI.addTensorInfo('ti1', 'UINT8', [1200]);
var flattenPlusOne = function(input) {
    console.log("Custom filter called");

    return 123;
}

tizen.ml.pipeline.registerCustomFilter('testfilter2', flattenPlusOne, inputTI,
    outputTI, function errorCallback(error) {
        console.warn('custom filter error:'); console.warn(error);
    });

var pipeline_def = "videotestsrc num-buffers=3 "
                   + "! video/x-raw,width=20,height=15,format=BGRA "
                   + "! tensor_converter "
                   + "! tensor_filter framework=custom-easy model=testfilter2 "
                   + "! appsink name=mysink";

var pipeline = tizen.ml.pipeline.createPipeline(pipeline_def,
                                                state => {console.log(state);})

pipeline.registerSinkListener('mysink', function(sinkName, data) {
    console.log('SinkListener for "' + sinkName + '" sink called');
    console.log(data);
})

// InvalidValuesError,
//  message: "The only legal positive value of status returned from CustomFilter is 1"

////////////////////////////////////////////////////////////
// Check if {input, output}.dispose() and input.setTensorRawData()
// have any effect

var inputTI = new tizen.ml.TensorsInfo();
inputTI.addTensorInfo('ti1', 'UINT8', [4, 20, 15, 1]);
var outputTI = new tizen.ml.TensorsInfo();
outputTI.addTensorInfo('ti1', 'UINT8', [1200]);
var flattenAndSet123 = function(input, output) {
    console.log("Custom filter called");

    // dispose should have no effect
    input.dispose();
    console.log('input count: ' + input.tensorsInfo.count);
    // dispose should have no effect
    output.dispose();
    console.log('output count: ' + output.tensorsInfo.count);

    var rawOutputData = new Uint8Array(1200);
    for (var i = 0; i < rawOutputData.length; ++i) {
        rawOutputData[i] = 123;
    }

    output.setTensorRawData(0, rawOutputData);

    // this call should have no effect
    input.setTensorRawData(0, rawOutputData);

    return 0;
}

tizen.ml.pipeline.registerCustomFilter('testfilter2', flattenAndSet123, inputTI,
    outputTI, function errorCallback(error) {
        console.warn('custom filter error:'); console.warn(error);
    });

var pipeline_def = "videotestsrc num-buffers=3 "
                   + "! video/x-raw,width=20,height=15,format=BGRA "
                   + "! tensor_converter "
                   + "! tensor_filter framework=custom-easy model=testfilter2 "
                   + "! appsink name=mysink";

var pipeline = tizen.ml.pipeline.createPipeline(pipeline_def,
                                                state => {console.log(state);})

pipeline.registerSinkListener('mysink', function(sinkName, data) {
    console.log('SinkListener for "' + sinkName + '" sink called');
    console.log(data);
})
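
// Expected result (a hedged sketch, not verbatim device output): the
// dispose() and input.setTensorRawData() calls above should be silently
// ignored, so both logged counts should be 1 and the pipeline should reach
// PAUSED and start playing as in the first snippet.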

Change-Id: Id48b774c86b65f5b45f3dd74733c006b6a729b8c
Signed-off-by: Pawel Wasowski <p.wasowski2@samsung.com>
src/ml/js/ml_common.js
src/ml/js/ml_manager.js
src/ml/js/ml_pipeline.js
src/ml/ml_instance.cc
src/ml/ml_pipeline_custom_filter.cc
src/ml/ml_pipeline_custom_filter.h
src/ml/ml_pipeline_manager.cc
src/ml/ml_pipeline_manager.h
src/ml/ml_tensors_data_manager.cc
src/ml/ml_tensors_data_manager.h

src/ml/js/ml_common.js
index 21c5dcc..cb863c8 100755 (executable)
@@ -19,6 +19,7 @@ var validator_ = privUtils_.validator;
 var types_ = validator_.Types;
 var type_ = xwalk.utils.type;
 var native_ = new xwalk.utils.NativeManager(extension);
+var converter_ = xwalk.utils.converter;
 
 var AbortError = new WebAPIException('AbortError', 'An unknown error occurred');
 
@@ -112,7 +113,7 @@ function _CheckIfTensorsDataNotDisposed(id) {
     }
 }
 
-var TensorsData = function(id, tensorsInfoId) {
+var TensorsData = function(id, tensorsInfoId, disposable) {
     Object.defineProperties(this, {
         count: {
             enumerable: true,
@@ -132,6 +133,11 @@ var TensorsData = function(id, tensorsInfoId) {
             value: new TensorsInfo(tensorsInfoId),
             writable: false,
             enumerable: false
+        },
+        _disposable: {
+            value: type_.isNullOrUndefined(disposable) ? true : disposable,
+            writable: false,
+            enumerable: false
         }
     });
     _ValidTensorsDataIds.add(id);
@@ -285,6 +291,10 @@ TensorsData.prototype.setTensorRawData = function() {
 };
 
 TensorsData.prototype.dispose = function() {
+    if (!this._disposable) {
+        return;
+    }
+
     if (false == _ValidTensorsDataIds.has(this._id)) {
         privUtils_.log('TensorsData already disposed');
         return;
src/ml/js/ml_manager.js
index 66e3935..69d96a3 100755 (executable)
@@ -78,4 +78,3 @@ MachineLearningManager.prototype.checkNNFWAvailability = function() {
 
 exports = new MachineLearningManager();
 exports.TensorsInfo = TensorsInfo;
-exports.CustomFilterOutput = CustomFilterOutput;
src/ml/js/ml_pipeline.js
index 5ace36b..f9a285f 100755 (executable)
@@ -720,54 +720,6 @@ var MachineLearningPipeline = function() {};
 MachineLearningPipeline.prototype.createPipeline = CreatePipeline;
 
 //Pipeline::registerCustomFilter() begin
-var CustomFilterOutput = function() {
-    validator_.isConstructorCall(this, CustomFilterOutput);
-
-    var args = validator_.validateArgs(arguments, [
-        {
-            name: 'status',
-            type: validator_.Types.LONG
-        },
-        {
-            name: 'data',
-            type: types_.PLATFORM_OBJECT,
-            values: TensorsData,
-            optional: true,
-            nullable: true
-        }
-    ]);
-
-    if (!args.has.data) {
-        args.data = null;
-    }
-
-    if (args.status > 0 && args.status !== 1) {
-        throw new WebAPIException(
-            WebAPIException.INVALID_VALUES_ERR,
-            'CustomFilterOutput.status === 1 is the only legal positive value'
-        );
-    }
-
-    if (args.status === 0 && args.data === null) {
-        throw new WebAPIException(
-            WebAPIException.INVALID_VALUES_ERR,
-            'CustomFilterOutput.data === null is illegal when ' +
-                'CustomFilterOutput.status === 0'
-        );
-    }
-
-    Object.defineProperties(this, {
-        status: {
-            enumerable: true,
-            value: args.status
-        },
-        data: {
-            enumerable: true,
-            value: args.data
-        }
-    });
-};
-
 var ValidRegisterCustomFilterExceptions = [
     'InvalidValuesError',
     'NotSupportedError',
@@ -776,6 +728,7 @@ var ValidRegisterCustomFilterExceptions = [
 ];
 
 var ValidCustomFilterOutputErrors = ['InvalidValuesError', 'AbortError'];
+
 MachineLearningPipeline.prototype.registerCustomFilter = function() {
     var args = validator_.validateArgs(arguments, [
         {
@@ -814,16 +767,22 @@ MachineLearningPipeline.prototype.registerCustomFilter = function() {
     /*
      * CustomFilter processing has 4 stages (the description below assumes
      * the typical scenario with no errors):
-     * 1. (C++) C++ callback is called by the native API with input data.
-     * The C++ callback clones the tensors data and associated info and
-     * sends it to JS.
-     * 2. (JS) customFilterWrapper is called with the input data from C++
-     * as one of its arguments. User-provided callback processes the data
-     * and the output is sent to C++ by a call of asynchronous function.
-     * 3. (C++) C++ callback is woken up and clones the output from user
-     * callback to native tensors data. It replies to JS with success/error.
-     * 4. (JS) If C++ responded with success, the operation stops.
+     * 1. (C++; non-main thread) C++ callback is called by the native API with input data.
+     * The C++ callback wraps native ml_tensors_data_h handles in TensorsData
+     * objects and sends them together with associated TensorsInfo to JS.
+     * 2. (JS; main thread) customFilterWrapper is called with the input data from C++
+     * as one of its arguments. User-provided callback processes the data.
+     * The input/output TensorsData that arrive to JS as CustomFilter arguments
+     * are unique in that they:
+     * - cannot be disposed, i.e. calling {input, output}.dispose() is a no-op
+     * - input is immutable, i.e. calling input.setTensorRawData() is a no-op
+     * - output.setTensorRawData() modifies the native nnstreamer object directly.
+     * 3. (C++; main thread) Sleeping callback thread is notified. If anything
+     * goes wrong, C++ function returns an error synchronously to stage 4.
+     * 4. (JS; main thread) If C++ returned a success, the operation stops.
      * Otherwise, the error callback provided by the user is called.
+     * 5. (C++; non-main thread) C++ callback is woken up and returns the status
+     * received from user to pipeline.
      */
     var customFilterWrapper = function(msg) {
         /*
@@ -834,7 +793,17 @@ MachineLearningPipeline.prototype.registerCustomFilter = function() {
             return;
         }
 
-        var inputData = new TensorsData(msg.tensorsDataId, msg.tensorsInfoId);
+        var inputData = new TensorsData(
+            msg.inputTensorsDataId,
+            msg.inputTensorsInfoId,
+            false
+        );
+        var outputData = new TensorsData(
+            msg.outputTensorsDataId,
+            msg.outputTensorsInfoId,
+            false
+        );
+
         /*
          * customFilterErrorInJs records errors caused by the CustomFilter callback
          * provided by the user.
@@ -842,75 +811,58 @@ MachineLearningPipeline.prototype.registerCustomFilter = function() {
         var customFilterErrorInJs = null;
         var jsResponse = {
             status: -1,
-            dataId: -1,
             name: nativeArgs.name,
             requestId: msg.requestId
         };
-        var output = null;
 
         try {
-            output = args.customFilter(inputData);
+            jsResponse.status = converter_.toLong(
+                args.customFilter(inputData, outputData)
+            );
         } catch (exception) {
+            var exceptionString =
+                typeof exception.toString === 'function'
+                    ? exception.toString()
+                    : JSON.stringify(exception);
             customFilterErrorInJs = new WebAPIException(
                 WebAPIException.ABORT_ERR,
-                'CustomFilter has thrown exception: ' + xwalk.JSON.stringify(exception)
+                'CustomFilter has thrown exception: ' + exceptionString
             );
         }
 
-        if (output instanceof CustomFilterOutput) {
-            jsResponse.status = output.status;
-            jsResponse.dataId = type_.isNullOrUndefined(output.data)
-                ? -1
-                : output.data._id;
-        } else if (customFilterErrorInJs === null) {
+        if (!customFilterErrorInJs && jsResponse.status > 0 && jsResponse.status !== 1) {
             customFilterErrorInJs = new WebAPIException(
-                WebAPIException.TYPE_MISMATCH_ERR,
-                'The value returned from CustomFilter is not a CustomFilterOutput object'
+                WebAPIException.INVALID_VALUES_ERR,
+                'The only legal positive value of status returned from CustomFilter is 1'
             );
+            jsResponse.status = -1;
         }
 
         /*
-         * Callback called in stage 4.
-         *
-         * It is used to process success/error messages that come from
-         * C++ (stage 3).
-         * It does not handle errors caused by the user-provided CustomFilter
-         * which we detect in JS.
+         * Entering stage 3.
          */
-        function filterOutputCallback(result) {
-            if (native_.isSuccess(result)) {
-                return;
-            }
-
-            var error = native_.getErrorObjectAndValidate(
-                result,
-                ValidCustomFilterOutputErrors,
-                AbortError
-            );
-
-            native_.callIfPossible(args.errorCallback, error);
-        }
+        var result = native_.callSync('MLPipelineManagerCustomFilterOutput', jsResponse);
 
         /*
-         * Entering stage 3.
+         * Stage 4.
          */
-        var result = native_.call(
-            'MLPipelineManagerCustomFilterOutput',
-            jsResponse,
-            filterOutputCallback
-        );
-
         if (customFilterErrorInJs) {
             /*
              * If we detect that user-provided CustomFilter callback caused
              * any errors in JS, the C++ layer gets the message to stop the
              * pipeline (status == -1) and does not reply to JS with errors.
-             * Thus, filterOutputCallback is not called and this is why we
-             * call the user-provided error callback from JS.
+             * Thus, "result" is a success and we call the user-provided error
+             * callback here.
              */
             native_.callIfPossible(args.errorCallback, customFilterErrorInJs);
         } else if (native_.isFailure(result)) {
-            filterOutputCallback(result);
+            var error = native_.getErrorObjectAndValidate(
+                result,
+                ValidCustomFilterOutputErrors,
+                AbortError
+            );
+
+            native_.callIfPossible(args.errorCallback, error);
         }
     };
 
src/ml/ml_instance.cc
index 1e188e1..166c1d9 100644 (file)
@@ -564,6 +564,12 @@ void MlInstance::MLTensorsDataDispose(const picojson::value& args, picojson::obj
                       ("Could not find TensorsData handle with given id: %d", tensors_data_id));
     return;
   }
+
+  if (!tensors_data->DisposableFromJS()) {
+    ReportSuccess(out);
+    return;
+  }
+
   // Dispose underlying tensorsInfo
   PlatformResult result = GetTensorsInfoManager().DisposeTensorsInfo(tensors_data->TensorsInfoId());
   if (!result) {
@@ -1275,17 +1281,12 @@ void MlInstance::MLPipelineManagerCustomFilterOutput(const picojson::value& args
   CHECK_ARGS(args, kName, std::string, out);
   CHECK_ARGS(args, kStatus, double, out);
   CHECK_ARGS(args, kRequestId, double, out);
-  CHECK_ARGS(args, kDataId, double, out);
-  CHECK_ARGS(args, kCallbackId, double, out);
 
   const auto& custom_filter_name = args.get(kName).get<std::string>();
   auto status = static_cast<int>(args.get(kStatus).get<double>());
   auto request_id = static_cast<int>(args.get(kRequestId).get<double>());
-  auto data_id = static_cast<int>(args.get(kDataId).get<double>());
-  auto callback_id = static_cast<int>(args.get(kCallbackId).get<double>());
 
-  auto ret = pipeline_manager_.CustomFilterOutput(custom_filter_name, request_id, status, data_id,
-                                                  callback_id);
+  auto ret = pipeline_manager_.CustomFilterOutput(custom_filter_name, request_id, status);
   if (!ret) {
     LogAndReportError(ret, &out);
     return;
src/ml/ml_pipeline_custom_filter.cc
index 53e7891..8373d9b 100644 (file)
@@ -18,6 +18,7 @@
 #include <atomic>
 #include <utility>
 
+#include "common/scope_exit.h"
 #include "common/tools.h"
 #include "ml_pipeline_custom_filter.h"
 #include "ml_utils.h"
@@ -32,8 +33,10 @@ namespace {
 const std::string kCallbackId = "callbackId";
 const std::string kListenerId = "listenerId";
 const std::string kRequestId = "requestId";
-const std::string kTensorsDataId = "tensorsDataId";
-const std::string kTensorsInfoId = "tensorsInfoId";
+const std::string kInputTensorsDataId = "inputTensorsDataId";
+const std::string kInputTensorsInfoId = "inputTensorsInfoId";
+const std::string kOutputTensorsDataId = "outputTensorsDataId";
+const std::string kOutputTensorsInfoId = "outputTensorsInfoId";
 
 }  //  namespace
 
@@ -45,40 +48,6 @@ const int CustomFilter::kCustomFilterError = -1;
 const int CustomFilter::kCustomFilterIgnoreData = 1;
 const int CustomFilter::kCustomFilterSuccess = 0;
 
-CustomFilter::JSResponse::JSResponse(int status, int callback_id, TensorsData* tensors_data_ptr,
-                                     TensorsDataManager* tensors_data_manager_ptr,
-                                     TensorsInfoManager* tensors_info_manager_ptr)
-    : status{status},
-      callback_id{callback_id},
-      tensors_data_ptr{tensors_data_ptr},
-      tensors_data_manager_ptr{tensors_data_manager_ptr},
-      tensors_info_manager_ptr{tensors_info_manager_ptr} {
-  ScopeLogger("status: [%d], callback_id: [%d], tensors_data_ptr: %p]", status, callback_id,
-              tensors_data_ptr);
-}
-
-CustomFilter::JSResponse::~JSResponse() {
-  ScopeLogger("status: [%d], callback_id: [%d], tensors_data_ptr: [%p]", status, callback_id,
-              tensors_data_ptr);
-
-  if (!tensors_data_ptr) {
-    return;
-  }
-  // We ignore errors, because we can't do anything about them and these methods
-  // will log error messages
-  tensors_info_manager_ptr->DisposeTensorsInfo(tensors_data_ptr->TensorsInfoId());
-  tensors_data_manager_ptr->DisposeTensorsData(tensors_data_ptr);
-}
-
-CustomFilter::JSResponse::JSResponse(JSResponse&& other)
-    : status{other.status},
-      callback_id{other.callback_id},
-      tensors_data_ptr{other.tensors_data_ptr},
-      tensors_data_manager_ptr{other.tensors_data_manager_ptr},
-      tensors_info_manager_ptr{other.tensors_info_manager_ptr} {
-  other.tensors_data_ptr = nullptr;
-}
-
 PlatformResult CustomFilter::CreateAndRegisterCustomFilter(
     const std::string& name, const std::string& listener_name, TensorsInfo* input_tensors_info_ptr,
     TensorsInfo* output_tensors_info_ptr, common::Instance* instance_ptr,
@@ -186,15 +155,11 @@ PlatformResult CustomFilter::Unregister() {
   return PlatformResult{};
 }
 
-void CustomFilter::NotifyAboutJSResponse(int request_id, int status, int callback_id,
-                                         TensorsData* tensors_data_ptr) {
-  ScopeLogger("request_id: [%d], status: [%d], callback_id: [%d], tensors_data_ptr: [%p]",
-              request_id, status, callback_id, tensors_data_ptr);
+void CustomFilter::NotifyAboutJSResponse(int request_id, int status) {
+  ScopeLogger("request_id: [%d], status: [%d]", request_id, status);
 
   std::lock_guard<std::mutex>{request_id_to_js_response_mutex_};
-  request_id_to_js_response_.emplace(
-      request_id, JSResponse{status, callback_id, tensors_data_ptr, tensors_data_manager_ptr_,
-                             tensors_info_manager_ptr_});
+  request_id_to_js_response_status_[request_id] = status;
   cv_.notify_all();
 }
 
@@ -205,9 +170,13 @@ int CustomFilter::getRequestId() {
   return data_id++;
 }
 
-bool CustomFilter::PrepareMessageWithInputData(const ml_tensors_data_h input_tensors_data,
-                                               picojson::value* out_message, int* out_request_id) {
-  ScopeLogger("input_tensors_data: [%p]", input_tensors_data);
+bool CustomFilter::PrepareMessageWithInputData(
+    const ml_tensors_data_h native_input_tensors_data_handle,
+    const ml_tensors_data_h native_output_tensors_data_handle, picojson::value* out_message,
+    int* out_request_id, TensorsData** input_tensors_data_ptr,
+    TensorsData** output_tensors_data_ptr) {
+  ScopeLogger("native_input_tensors_data_handle: [%p], native_output_tensors_data_handle: [%p]",
+              native_input_tensors_data_handle, native_output_tensors_data_handle);
 
   auto& message_obj = out_message->get<picojson::object>();
   message_obj[kListenerId] = picojson::value{listener_name_};
@@ -226,109 +195,65 @@ bool CustomFilter::PrepareMessageWithInputData(const ml_tensors_data_h input_ten
     return false;
   }
 
-  auto* input_tensors_data_clone_ptr = tensors_info_manager_ptr_->CloneNativeTensorWithData(
-      input_tensors_info_ptr_->Handle(), input_tensors_data);
-  if (!input_tensors_data_clone_ptr) {
+  *input_tensors_data_ptr = tensors_data_manager_ptr_->CreateTensorsData(
+      input_tensors_info_ptr_, native_input_tensors_data_handle, false, true);
+  if (!*input_tensors_data_ptr) {
     LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal CustomFilter error"),
                       &message_obj,
-                      ("Could not clone tensors data. Custom filter won't be triggered."));
+                      ("Could not create TensorsData. Custom filter won't be triggered."));
     return false;
   }
 
-  message_obj[kTensorsDataId] =
-      picojson::value{static_cast<double>(input_tensors_data_clone_ptr->Id())};
-  message_obj[kTensorsInfoId] =
-      picojson::value{static_cast<double>(input_tensors_data_clone_ptr->TensorsInfoId())};
+  *output_tensors_data_ptr = tensors_data_manager_ptr_->CreateTensorsData(
+      output_tensors_info_ptr_, native_output_tensors_data_handle, false, false);
+  if (!*output_tensors_data_ptr) {
+    LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal CustomFilter error"),
+                      &message_obj,
+                      ("Could not create TensorsData. Custom filter won't be triggered."));
+    return false;
+  }
+
+  message_obj[kInputTensorsDataId] =
+      picojson::value{static_cast<double>((*input_tensors_data_ptr)->Id())};
+  message_obj[kInputTensorsInfoId] =
+      picojson::value{static_cast<double>((*input_tensors_data_ptr)->TensorsInfoId())};
+  message_obj[kOutputTensorsDataId] =
+      picojson::value{static_cast<double>((*output_tensors_data_ptr)->Id())};
+  message_obj[kOutputTensorsInfoId] =
+      picojson::value{static_cast<double>((*output_tensors_data_ptr)->TensorsInfoId())};
   *out_request_id = getRequestId();
   message_obj[kRequestId] = picojson::value{static_cast<double>(*out_request_id)};
 
   return true;
 }
 
-int CustomFilter::CopyJsFilterOutputToNativeObject(int request_id, const JSResponse& js_response,
-                                                   ml_tensors_data_h output_tensors_data,
-                                                   picojson::value* out_response_to_js) {
-  ScopeLogger("request_id: [%d]", request_id);
-
-  auto& response_to_js_obj = out_response_to_js->get<picojson::object>();
-  response_to_js_obj[kCallbackId] = picojson::value{static_cast<double>(js_response.callback_id)};
-
-  int custom_filter_status = kCustomFilterError;
-  if (kCustomFilterIgnoreData == js_response.status || js_response.status < 0) {
-    /*
-     * Although js_response.status < 0 means "error", we respond with "success" message
-     * to JS, because this status came from JS and the problem, if any, is already handled there.
-     */
-    ReportSuccess(response_to_js_obj);
-    custom_filter_status = js_response.status;
-  } else if (kCustomFilterSuccess == js_response.status) {
-    auto* js_response_tensors_info_ptr =
-        tensors_info_manager_ptr_->GetTensorsInfo(js_response.tensors_data_ptr->TensorsInfoId());
-
-    if (!js_response_tensors_info_ptr) {
-      LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal CustomFilter error"),
-                        &response_to_js_obj,
-                        ("Could not get tensors info. Custom filter won't be triggered."));
-      return kCustomFilterError;
-    }
-
-    if (!output_tensors_info_ptr_->Equals(js_response_tensors_info_ptr)) {
-      LogAndReportError(PlatformResult(ErrorCode::INVALID_VALUES_ERR,
-                                       "Output's TensorsInfo is not equal to expected"),
-                        &response_to_js_obj);
-      return kCustomFilterError;
-    }
-
-    auto tensors_count = js_response_tensors_info_ptr->Count();
-    for (int i = 0; i < tensors_count; ++i) {
-      void* data = nullptr;
-      size_t data_size = 0;
-      auto ret = ml_tensors_data_get_tensor_data(js_response.tensors_data_ptr->Handle(), i, &data,
-                                                 &data_size);
-      if (ML_ERROR_NONE != ret) {
-        LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal CustomFilter error"),
-                          &response_to_js_obj,
-                          ("ml_tensors_data_get_tensor_data() failed: [%d] (%s), i: [%d]", ret,
-                           get_error_message(ret), i));
-        return kCustomFilterError;
-      }
-
-      ret = ml_tensors_data_set_tensor_data(output_tensors_data, i, data, data_size);
-      if (ML_ERROR_NONE != ret) {
-        LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal CustomFilter error"),
-                          &response_to_js_obj,
-                          ("ml_tensors_data_set_tensor_data() failed: [%d] (%s), i: [%d]", ret,
-                           get_error_message(ret), i));
-        return kCustomFilterError;
-      }
-    }
-
-    custom_filter_status = kCustomFilterSuccess;
-  } else {
-    ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Internal Customfilter error"},
-                &response_to_js_obj);
-    custom_filter_status = kCustomFilterError;
-  }
-
-  return custom_filter_status;
-}
-
-int CustomFilter::CustomFilterListener(const ml_tensors_data_h input_tensors_data,
-                                       ml_tensors_data_h output_tensors_data, void* user_data) {
-  ScopeLogger("input_tensors_data: [%p], tensors_info_out: [%p], user_data: [%p]",
-              input_tensors_data, output_tensors_data, user_data);
+int CustomFilter::CustomFilterListener(const ml_tensors_data_h native_input_tensors_data_handle,
+                                       ml_tensors_data_h native_output_tensors_data_handle,
+                                       void* user_data) {
+  ScopeLogger(
+      "native_input_tensors_data_handle: [%p], native_output_tensors_data_handle: [%p], user_data: "
+      "[%p]",
+      native_input_tensors_data_handle, native_output_tensors_data_handle, user_data);
 
   if (!user_data) {
     LoggerE("user_data is a nullptr");
-    return -1;
+    return kCustomFilterError;
   }
 
   CustomFilter* custom_filter_ptr = static_cast<CustomFilter*>(user_data);
 
   picojson::value message{picojson::object{}};
   int request_id = -1;
-  auto success =
-      custom_filter_ptr->PrepareMessageWithInputData(input_tensors_data, &message, &request_id);
+  TensorsData *input_tensors_data_ptr = nullptr, *output_tensors_data_ptr = nullptr;
+
+  SCOPE_EXIT {
+    custom_filter_ptr->tensors_data_manager_ptr_->DisposeTensorsData(input_tensors_data_ptr);
+    custom_filter_ptr->tensors_data_manager_ptr_->DisposeTensorsData(output_tensors_data_ptr);
+  };
+
+  auto success = custom_filter_ptr->PrepareMessageWithInputData(
+      native_input_tensors_data_handle, native_output_tensors_data_handle, &message, &request_id,
+      &input_tensors_data_ptr, &output_tensors_data_ptr);
 
   std::unique_lock<std::mutex> lock{custom_filter_ptr->request_id_to_js_response_mutex_};
   common::Instance::PostMessage(custom_filter_ptr->instance_ptr_, message);
@@ -346,23 +271,17 @@ int CustomFilter::CustomFilterListener(const ml_tensors_data_h input_tensors_dat
    * (the main thread).
    */
   custom_filter_ptr->cv_.wait(lock, [custom_filter_ptr, request_id]() {
-    return custom_filter_ptr->request_id_to_js_response_.count(request_id) != 0;
+    return custom_filter_ptr->request_id_to_js_response_status_.count(request_id) != 0;
   });
 
   /*
-   * Stage 3. of data processing starts here.
+   * Stage 5. of data processing starts here.
    */
-  auto js_response{std::move(custom_filter_ptr->request_id_to_js_response_[request_id])};
-  custom_filter_ptr->request_id_to_js_response_.erase(request_id);
+  auto js_response_status = custom_filter_ptr->request_id_to_js_response_status_[request_id];
+  custom_filter_ptr->request_id_to_js_response_status_.erase(request_id);
   lock.unlock();
 
-  picojson::value response_to_js{picojson::object{}};
-  auto custom_filter_status = custom_filter_ptr->CopyJsFilterOutputToNativeObject(
-      request_id, js_response, output_tensors_data, &response_to_js);
-
-  common::Instance::PostMessage(custom_filter_ptr->instance_ptr_, response_to_js);
-
-  return custom_filter_status;
+  return js_response_status;
 }
 
 }  // namespace pipeline
src/ml/ml_pipeline_custom_filter.h
index ad953fd..5210fd4 100644 (file)
@@ -58,8 +58,7 @@ class CustomFilter {
   CustomFilter(const CustomFilter&) = delete;
   CustomFilter& operator=(const CustomFilter&) = delete;
 
-  void NotifyAboutJSResponse(int request_id, int status, int callback_id,
-                             TensorsData* tensors_data_ptr);
+  void NotifyAboutJSResponse(int request_id, int status);
 
   static const int kCustomFilterError;
   static const int kCustomFilterIgnoreData;
@@ -75,42 +74,14 @@ class CustomFilter {
   static int CustomFilterListener(const ml_tensors_data_h tensors_data_in,
                                   ml_tensors_data_h tensors_data_out, void* user_data);
 
-  struct JSResponse {
-    JSResponse() = default;
-    JSResponse(int status, int callback_id, TensorsData* tensors_data_ptr,
-               TensorsDataManager* tensors_data_manager_ptr,
-               TensorsInfoManager* tensors_info_manager_ptr);
-    JSResponse(JSResponse&& other);
-
-    ~JSResponse();
-
-    JSResponse(const JSResponse&) = delete;
-    JSResponse& operator=(const JSResponse&) = delete;
-
-    int status;
-    int callback_id;
-
-    TensorsData* tensors_data_ptr = nullptr;
-
-    // We need these managers to properly dispose
-    // tensors_data_ptr and the associated TensorsInfo object
-    TensorsDataManager* tensors_data_manager_ptr;
-    TensorsInfoManager* tensors_info_manager_ptr;
-  };
-
   /*
    * Returns "false" if any error occurs and "true" otherwise.
    */
   bool PrepareMessageWithInputData(const ml_tensors_data_h input_tensors_data,
-                                   picojson::value* out_message, int* out_request_id);
-
-  /*
-   * Returns the value to be returned from CustomFilter, which
-   * implements ml_custom_easy_invoke_cb;
-   */
-  int CopyJsFilterOutputToNativeObject(int request_id, const JSResponse& js_response,
-                                       ml_tensors_data_h output_tensors_data,
-                                       picojson::value* out_response_to_js);
+                                   ml_tensors_data_h output_tensors_data,
+                                   picojson::value* out_message, int* out_request_id,
+                                   TensorsData** input_tensors_data_ptr,
+                                   TensorsData** output_tensors_data_ptr);
 
   int getRequestId();
 
@@ -124,7 +95,7 @@ class CustomFilter {
   TensorsDataManager* tensors_data_manager_ptr_;
 
   std::mutex request_id_to_js_response_mutex_;
-  std::unordered_map<int, JSResponse> request_id_to_js_response_;
+  std::unordered_map<int, int> request_id_to_js_response_status_;
   std::condition_variable cv_;
   std::thread::id main_thread_id_;
 };
src/ml/ml_pipeline_manager.cc
index 4278e19..91bbdcd 100644 (file)
@@ -257,11 +257,9 @@ PlatformResult PipelineManager::RegisterCustomFilter(const std::string& custom_f
 // Pipeline::registerCustomFilter() end
 
 PlatformResult PipelineManager::CustomFilterOutput(const std::string& custom_filter_name,
-                                                   int request_id, int status, int data_id,
-                                                   int callback_id) {
-  ScopeLogger(
-      "custom_filter_name: [%s], request_id: [%d], status: [%d], data_id: [%d], callback_id: [%d]",
-      custom_filter_name.c_str(), request_id, status, data_id, callback_id);
+                                                   int request_id, int status) {
+  ScopeLogger("custom_filter_name: [%s], request_id: [%d], status: [%d]",
+              custom_filter_name.c_str(), request_id, status);
 
   auto filter_it = custom_filters_.find(custom_filter_name);
   if (custom_filters_.end() == filter_it) {
@@ -269,44 +267,7 @@ PlatformResult PipelineManager::CustomFilterOutput(const std::string& custom_fil
     return PlatformResult{ErrorCode::ABORT_ERR, "Internal CustomFilter error"};
   }
 
-  if (CustomFilter::kCustomFilterSuccess != status) {
-    filter_it->second->NotifyAboutJSResponse(request_id, status, callback_id, nullptr);
-    return PlatformResult{};
-  }
-
-  auto* output_from_js_tensors_data = tensors_data_manager_->GetTensorsData(data_id);
-  if (!output_from_js_tensors_data) {
-    LoggerE("Could not get TensorsData: [%d]", data_id);
-    filter_it->second->NotifyAboutJSResponse(request_id, CustomFilter::kCustomFilterError,
-                                             callback_id, nullptr);
-    return PlatformResult{ErrorCode::ABORT_ERR, "Internal CustomFilter error"};
-  }
-
-  auto* output_from_js_tensors_info =
-      tensors_info_manager_->GetTensorsInfo(output_from_js_tensors_data->TensorsInfoId());
-  if (!output_from_js_tensors_info) {
-    LoggerE("Could not get TensorsInfo: [%d]", output_from_js_tensors_data->TensorsInfoId());
-    filter_it->second->NotifyAboutJSResponse(request_id, CustomFilter::kCustomFilterError,
-                                             callback_id, nullptr);
-    return PlatformResult{ErrorCode::ABORT_ERR, "Internal CustomFilter error"};
-  }
-
-  /*
-   * We clone this tensors data to be sure, that the user won't dispose it before it will be cloned
-   * by CustomFilter::CustomFilterListener.
-   */
-  auto* output_from_js_tensors_data_clone = tensors_info_manager_->CloneNativeTensorWithData(
-      output_from_js_tensors_info->Handle(), output_from_js_tensors_data->Handle());
-  if (!output_from_js_tensors_data_clone) {
-    LoggerE("Could not clone TensorsData: [%d] with TensorsInfo: [%d]", data_id,
-            output_from_js_tensors_info->Id());
-    filter_it->second->NotifyAboutJSResponse(request_id, CustomFilter::kCustomFilterError,
-                                             callback_id, nullptr);
-    return PlatformResult{ErrorCode::ABORT_ERR, "Internal CustomFilter error"};
-  }
-
-  filter_it->second->NotifyAboutJSResponse(request_id, status, callback_id,
-                                           output_from_js_tensors_data_clone);
+  filter_it->second->NotifyAboutJSResponse(request_id, status);
   return PlatformResult{};
 }
 
src/ml/ml_pipeline_manager.h
index 66c3e14..d39244f 100644 (file)
@@ -94,7 +94,7 @@ class PipelineManager {
                                       TensorsInfo* output_tensors_info_ptr);
 
   PlatformResult CustomFilterOutput(const std::string& custom_filter_name, int request_id,
-                                    int status, int data_id, int callback_id);
+                                    int status);
   // Pipeline::registerCustomFilter() end
 
   // Pipeline::unregisterCustomFilter() begin
src/ml/ml_tensors_data_manager.cc
index 949a390..c8276b5 100644 (file)
@@ -23,15 +23,22 @@ using common::PlatformResult;
 namespace extension {
 namespace ml {
 
-TensorsData::TensorsData(ml_tensors_data_h handle, int id, TensorsInfo* tensors_info)
-    : handle_(handle), id_(id), tensors_info_(tensors_info) {
+TensorsData::TensorsData(ml_tensors_data_h handle, int id, TensorsInfo* tensors_info,
+                         bool owns_native_handle, bool immutable)
+    : handle_(handle),
+      id_(id),
+      tensors_info_(tensors_info),
+      owns_native_handle_(owns_native_handle),
+      immutable_(immutable) {
   ScopeLogger();
 }
 
 TensorsData::~TensorsData() {
-  ScopeLogger();
-  if (!this->NativeDestroy()) {
-    LoggerE("TensorsData NativeDestroy failed");
+  ScopeLogger("id_: %d, owns_native_handle_: %s", id_, owns_native_handle_ ? "true" : "false");
+  if (owns_native_handle_) {
+    if (!this->NativeDestroy()) {
+      LoggerE("TensorsData NativeDestroy failed");
+    }
   }
   // TensorsDataManager releases tensors_info_
 }
@@ -52,6 +59,10 @@ int TensorsData::Count() {
   return tensors_info_->Count();
 }
 
+bool TensorsData::DisposableFromJS() {
+  return owns_native_handle_;
+}
+
 ml_tensor_type_e TensorsData::GetTensorType(int index) {
   ScopeLogger("id_: %d, index: %d", id_, index);
   ml_tensor_type_e tensor_type_enum = ML_TENSOR_TYPE_UNKNOWN;
@@ -170,8 +181,13 @@ PlatformResult TensorsData::GetTensorRawData(int index, unsigned int location[ML
 PlatformResult TensorsData::SetTensorRawData(int index, unsigned int location[ML_TENSOR_RANK_LIMIT],
                                              unsigned int size[ML_TENSOR_RANK_LIMIT],
                                              TensorRawData& tensor_raw_data) {
-  ScopeLogger("id_: %d, index: %d, tensor_raw_data.size_in_bytes: %zu", id_, index,
-              tensor_raw_data.size_in_bytes);
+  ScopeLogger("id_: %d, index: %d, tensor_raw_data.size_in_bytes: %zu, immutable_: %s", id_, index,
+              tensor_raw_data.size_in_bytes, immutable_ ? "true" : "false");
+
+  if (immutable_) {
+    return PlatformResult(ErrorCode::NO_ERROR);
+  }
+
   // Dimensions of whole tensor
   unsigned int dim[ML_TENSOR_RANK_LIMIT];
   // Dimensions of updated tensors relative to location coordiantes
@@ -325,39 +341,48 @@ TensorsDataManager::~TensorsDataManager() {
   map_.clear();
 };
 
-TensorsData* TensorsDataManager::CreateTensorsData(TensorsInfo* tensors_info,
-                                                   ml_tensors_data_h tensors_data_handle) {
+TensorsData* TensorsDataManager::CreateTensorsData(TensorsInfo* tensors_info) {
   ScopeLogger();
   if (nullptr == tensors_info) {
     LoggerE("Could not find tensor");
     return nullptr;
   }
 
+  ml_tensors_data_h tensors_data_handle;
+  int ret = ml_tensors_data_create(tensors_info->Handle(), &tensors_data_handle);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_data_create failed: %d (%s)", ret, get_error_message(ret));
+    return nullptr;
+  }
+
   std::lock_guard<std::mutex> lock{map_and_next_id_mutex_};
   int id = nextId_++;
   auto t = std::make_unique<TensorsData>(tensors_data_handle, id, tensors_info);
   map_[id] = std::move(t);
 
   return map_[id].get();
-}
+};
+
+TensorsData* TensorsDataManager::CreateTensorsData(TensorsInfo* tensors_info,
+                                                   const ml_tensors_data_h tensors_data_handle,
+                                                   bool owns_native_handle, bool immutable) {
+  ScopeLogger("owns_native_handle: %s, immutable: %s", owns_native_handle ? "true" : "false",
+              immutable ? "true" : "false");
 
-TensorsData* TensorsDataManager::CreateTensorsData(TensorsInfo* tensors_info) {
-  ScopeLogger();
   if (nullptr == tensors_info) {
-    LoggerE("Could not find tensor");
+    LoggerE("tensors_info is a nullptr");
     return nullptr;
   }
 
-  ml_tensors_data_h tensors_data_handle;
-  int ret = ml_tensors_data_create(tensors_info->Handle(), &tensors_data_handle);
-  if (ML_ERROR_NONE != ret) {
-    LoggerE("ml_tensors_data_create failed: %d (%s)", ret, get_error_message(ret));
+  if (nullptr == tensors_data_handle) {
+    LoggerE("tensors_data_handle is nullptr");
     return nullptr;
   }
 
   std::lock_guard<std::mutex> lock{map_and_next_id_mutex_};
   int id = nextId_++;
-  auto t = std::make_unique<TensorsData>(tensors_data_handle, id, tensors_info);
+  auto t = std::make_unique<TensorsData>(tensors_data_handle, id, tensors_info, owns_native_handle,
+                                         immutable);
   map_[id] = std::move(t);
 
   return map_[id].get();
src/ml/ml_tensors_data_manager.h
index 12f5da0..b1d6c03 100644 (file)
@@ -65,13 +65,15 @@ struct TensorRawData {
 
 class TensorsData {
  public:
-  TensorsData(ml_tensors_data_h handle, int id, TensorsInfo* tensors_info);
+  TensorsData(ml_tensors_data_h handle, int id, TensorsInfo* tensors_info,
+              bool owns_native_handle = true, bool immutable = false);
   ~TensorsData();
 
   ml_tensors_data_h Handle();
   int Id();
   int TensorsInfoId();
   int Count();
+  bool DisposableFromJS();
   ml_tensor_type_e GetTensorType(int index);
   PlatformResult GetTensorRawData(int index, unsigned int location[ML_TENSOR_RANK_LIMIT],
                                   unsigned int size[ML_TENSOR_RANK_LIMIT],
@@ -92,6 +94,18 @@ class TensorsData {
   ml_tensors_data_h handle_;
   int id_;
   TensorsInfo* tensors_info_;
+  /*
+   * Some TensorsData don't own handle_ but are used as wrappers for handles
+   * managed by nnstreamer itself. We mustn't release their handle_s.
+   * To ensure this won't happen, set this field to "false" in the constructor.
+   */
+  const bool owns_native_handle_;
+  /*
+   * Some TensorsData from the native API, exposed to JS, must remain unchanged,
+   * e.g. CustomFilter's input. To ignore all TensorsData.setTensorRawData()
+   * calls, set this field to "true" in the constructor.
+   */
+  const bool immutable_;
 };
 
 class TensorsDataManager {
@@ -100,7 +114,9 @@ class TensorsDataManager {
   ~TensorsDataManager();
 
   TensorsData* CreateTensorsData(TensorsInfo* tensors_info);
-  TensorsData* CreateTensorsData(TensorsInfo* tensors_info, ml_tensors_data_h tensors_data_handle);
+  TensorsData* CreateTensorsData(TensorsInfo* tensors_info,
+                                 const ml_tensors_data_h tensors_data_handle,
+                                 bool owns_native_handle = true, bool immutable = false);
   TensorsData* GetTensorsData(int id);
 
   PlatformResult DisposeTensorsData(int id);