[ML][Common] Add TensorsInfo constructor and TensorsInfo::addTensorInfo 58/251158/10
authorRafal Walczyna <r.walczyna@samsung.com>
Fri, 8 Jan 2021 15:40:47 +0000 (16:40 +0100)
committerRafal Walczyna <r.walczyna@samsung.com>
Tue, 19 Jan 2021 10:04:13 +0000 (11:04 +0100)
ACR: TWDAPI-273

Added:
- TensorsInfo constructor
- TensorsInfo::addTensorInfo method
- TensorsInfo::count property

Test code:
var ti = new tizen.ml.TensorsInfo();
console.log(ti.count)
ti.addTensorInfo(null, "UINT8", [1, 1])
console.log(ti.count)
console.log(JSON.stringify(ti))

[Verification] Build successful, tested in Chrome Dev console

Change-Id: Ic194d71439e4c4ce30b9722de031e9312c1c8183
Signed-off-by: Rafal Walczyna <r.walczyna@samsung.com>
src/ml/js/ml_common.js
src/ml/ml_instance.cc
src/ml/ml_instance.h
src/ml/ml_tensors_info_manager.cc
src/ml/ml_tensors_info_manager.h
src/ml/ml_utils.cc
src/ml/ml_utils.h

index 415ff53..77c30eb 100755 (executable)
@@ -21,6 +21,9 @@ var native_ = new xwalk.utils.NativeManager(extension);
 
 var AbortError = new WebAPIException('AbortError', 'An unknown error occurred');
 
+// Constants
+var MAX_TENSORS_INFO_COUNT = 16;
+
 // TensorRawData
 
 var TensorRawData = function() {
@@ -93,20 +96,87 @@ TensorsData.prototype.dispose = function() {
 
 // TensorsInfo
 
+// Reads the current tensor count for the TensorsInfo instance identified by
+// the given native id. Returns 0 when the native call fails (e.g. unknown
+// id), so the JS `count` getter never throws.
+function tensorsInfoCountGetter(id) {
+    var result = native_.callSync('MLTensorsInfoCountGetter', { tensorsInfoId: id });
+
+    if (native_.isFailure(result)) {
+        return 0;
+    } else {
+        return native_.getResultObject(result);
+    }
+}
+
+// TensorsInfo constructor. Creates the backing native tensors-info object
+// and stores its id in the non-enumerable, read-only `_id` property.
 var TensorsInfo = function() {
     validator_.isConstructorCall(this, TensorsInfo);
+
+    var result = native_.callSync('MLTensorsInfoCreate');
+    if (native_.isFailure(result)) {
+        throw AbortError;
+    }
+
     Object.defineProperties(this, {
         count: {
             enumerable: true,
             get: function() {
-                throw new WebAPIException(WebAPIException.ABORT_ERR, 'Not implemented');
+                // Always read from the native layer so the value reflects
+                // changes made via addTensorInfo().
+                return tensorsInfoCountGetter(this._id);
             }
-        }
+        },
+        // Internal link between this JS object and its native handle.
+        _id: { value: result.id, writable: false, enumerable: false }
     });
 };
 
+// Error names the native addTensorInfo handler is allowed to report;
+// anything else is coerced to AbortError.
+var TensorsInfoAddTensorInfoValidExceptions = [
+    'InvalidValuesError',
+    'TypeMismatchError',
+    'NotSupportedError',
+    'AbortError'
+];
+
+// Validates (name, type, dimensions) and forwards them to the native layer.
+// name is nullable - passing null keeps the native default name.
 TensorsInfo.prototype.addTensorInfo = function() {
-    throw new WebAPIException(WebAPIException.ABORT_ERR, 'Not implemented');
+    var args = validator_.validateArgs(arguments, [
+        {
+            name: 'name',
+            type: types_.STRING,
+            optional: false,
+            nullable: true
+        },
+        {
+            name: 'type',
+            type: types_.ENUM,
+            values: Object.values(TensorType),
+            optional: false
+        },
+        {
+            name: 'dimensions',
+            type: types_.ARRAY,
+            optional: false
+        }
+    ]);
+
+    // NOTE(review): only integer-ness is verified here; non-positive values
+    // fall through to the native layer - confirm they are rejected there.
+    args.dimensions.forEach(function(d) {
+        if (Number.isInteger(d) == false) {
+            throw new WebAPIException(
+                WebAPIException.INVALID_VALUES_ERR,
+                'dimensions array has to contain only integers'
+            );
+        }
+    });
+
+    var callArgs = {
+        name: args.name,
+        type: args.type,
+        dimensions: args.dimensions,
+        tensorsInfoId: this._id
+    };
+
+    var result = native_.callSync('MLTensorsInfoAddTensorInfo', callArgs);
+    if (native_.isFailure(result)) {
+        throw native_.getErrorObjectAndValidate(
+            result,
+            TensorsInfoAddTensorInfoValidExceptions,
+            AbortError
+        );
+    }
 };
 
 TensorsInfo.prototype.clone = function() {
index 97d1c37..301333e 100644 (file)
@@ -28,16 +28,44 @@ namespace ml {
 namespace {
 const std::string kNnfw = "nnfw";
 const std::string kHw = "hw";
-}
+const std::string kTensorsInfoId = "tensorsInfoId";
+const std::string kIndex = "index";
+const std::string kType = "type";
+const std::string kName = "name";
+const std::string kDimensions = "dimensions";
+const std::string kId = "id";
+const std::string kDefinition = "definition";
+const std::string kPipelineStateChangeListenerName = "listenerName";
+
+}  //  namespace
 
 using namespace common;
 
-#define CHECK_EXIST(args, name, out)                                                            \
-  if (!args.contains(name)) {                                                                   \
-    LogAndReportError(TypeMismatchException(std::string(name) + " is required argument"), out); \
-    return;                                                                                     \
+#define CHECK_EXIST(args, name, out)                                            \
+  if (!args.contains(name)) {                                                   \
+    std::string msg = std::string(name) + " is required argument";              \
+    LogAndReportError(PlatformResult(ErrorCode::TYPE_MISMATCH_ERR, msg), &out); \
+    return;                                                                     \
   }
 
+// CHECK_TYPE will throw AbortError by default, but it can be changed by providing
+// additional parameter to the macro, i.e.:
+// CHECK_TYPE(args, "name", std::string, out, ErrorCode::TYPE_MISMATCH_ERR)
+#define CHECK_TYPE(...) CHECK_TYPE_X(__VA_ARGS__, CHECK_TYPE_5, CHECK_TYPE_4)(__VA_ARGS__)
+// Arity dispatcher: with 4 user arguments EXC_TYPE lands on CHECK_TYPE_4,
+// with 5 arguments it lands on CHECK_TYPE_5.
+#define CHECK_TYPE_X(_1, _2, _3, _4, _5, EXC_TYPE, ...) EXC_TYPE
+// Reports `error_type` (and returns) when args[name] is not of C++ type `type`.
+#define CHECK_TYPE_5(args, name, type, out, error_type)        \
+  if (!args.get(name).is<type>()) {                            \
+    std::string msg = std::string(name) + " has invalid type"; \
+    LogAndReportError(PlatformResult(error_type, msg), &out);  \
+    return;                                                    \
+  }
+#define CHECK_TYPE_4(args, name, type, out) \
+  CHECK_TYPE_5(args, name, type, out, ErrorCode::ABORT_ERR)
+
+// Convenience macro: presence check followed by type check.
+#define CHECK_ARGS(args, name, type, out) \
+  CHECK_EXIST(args, name, out)            \
+  CHECK_TYPE(args, name, type, out)
+
 MlInstance::MlInstance() : pipeline_manager_{this} {
   ScopeLogger();
   using namespace std::placeholders;
@@ -46,7 +74,9 @@ MlInstance::MlInstance() : pipeline_manager_{this} {
 
   // Common ML API begin
   REGISTER_METHOD(MLCheckNNFWAvailability);
-
+  REGISTER_METHOD(MLTensorsInfoCountGetter);
+  REGISTER_METHOD(MLTensorsInfoAddTensorInfo);
+  REGISTER_METHOD(MLTensorsInfoCreate);
   // Common ML API end
 
   // Single API begin
@@ -87,6 +117,104 @@ void MlInstance::MLCheckNNFWAvailability(const picojson::value& args, picojson::
   picojson::value available = picojson::value{availability_val};
   ReportSuccess(available, out);
 }
+
+// Handles MLTensorsInfoCreate from JS: creates a native TensorsInfo object
+// via the manager and returns its numeric id in out["id"]. The JS layer uses
+// this id to address the handle in subsequent calls.
+void MlInstance::MLTensorsInfoCreate(const picojson::value& args, picojson::object& out) {
+  ScopeLogger("args: %s", args.serialize().c_str());
+
+  TensorsInfo* tensorsInfo = GetTensorsInfoManager().CreateTensorsInfo();
+  if (nullptr == tensorsInfo) {
+    LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
+                      ("Could not create new TensorsInfo handle"));
+    return;
+  }
+  out["id"] = picojson::value(static_cast<double>(tensorsInfo->Id()));
+  ReportSuccess(out);
+}
+
+// Handles the JS `count` getter: looks up the TensorsInfo by id and reports
+// the tensor count read from the native handle.
+void MlInstance::MLTensorsInfoCountGetter(const picojson::value& args, picojson::object& out) {
+  ScopeLogger("args: %s", args.serialize().c_str());
+  CHECK_ARGS(args, kTensorsInfoId, double, out);
+
+  int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
+  TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+  if (nullptr == tensorsInfo) {
+    LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
+                      ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+    return;
+  }
+  unsigned int count = 0;
+  PlatformResult result = tensorsInfo->NativeGetCount(&count);
+  if (!result) {
+    ReportError(result, &out);
+    return;
+  }
+  picojson::value val = picojson::value{static_cast<double>(count)};
+  ReportSuccess(val, out);
+}
+
+void MlInstance::MLTensorsInfoAddTensorInfo(const picojson::value& args, picojson::object& out) {
+  ScopeLogger("args: %s", args.serialize().c_str());
+  CHECK_ARGS(args, kTensorsInfoId, double, out);
+  CHECK_ARGS(args, kType, std::string, out);
+  // kName is nullable
+  CHECK_EXIST(args, kName, out);
+  CHECK_ARGS(args, kDimensions, picojson::array, out);
+
+  int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
+  TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+  if (nullptr == tensorsInfo) {
+    LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
+                      ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+    return;
+  }
+
+  const std::string& tensorType = args.get(kType).get<std::string>();
+  ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
+  PlatformResult result = types::TensorTypeEnum.getValue(tensorType, &tensorTypeEnum);
+  if (!result) {
+    LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting value of TensorType"),
+                      &out,
+                      ("TensorTypeEnum.getValue() failed, error: %s", result.message().c_str()));
+    return;
+  }
+
+  std::string name;
+  if (args.get(kName).is<std::string>()) {
+    name = args.get(kName).get<std::string>();
+    LoggerD("name: %s", name.c_str());
+  }
+
+  unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {1, 1, 1, 1};
+
+  // CHECK_ARGS has already validated type of kDimensions
+  auto dim = args.get(kDimensions).get<picojson::array>();
+  int i = 0;
+  for (const auto& d : dim) {
+    if (i >= ML_TENSOR_RANK_LIMIT) {
+      LoggerD("Provided dimensions array is bigger than supported");
+      break;
+    }
+
+    if (!d.is<double>()) {
+      LogAndReportError(PlatformResult(ErrorCode::INVALID_VALUES_ERR,
+                                       "dimensions array contains an invalid value"),
+                        &out,
+                        ("dimensions array contains an invalid value: %s", d.serialize().c_str()));
+      return;
+    }
+
+    dimensions[i] = static_cast<unsigned int>(d.get<double>());
+    i++;
+  }
+
+  result = tensorsInfo->AddTensorInfo(name, tensorTypeEnum, dimensions);
+  if (!result) {
+    LogAndReportError(result, &out);
+    return;
+  }
+
+  ReportSuccess(out);
+}
 // Common ML API end
 
 // Single API begin
@@ -95,15 +223,6 @@ void MlInstance::MLCheckNNFWAvailability(const picojson::value& args, picojson::
 
 // Pipeline API begin
 
-namespace {
-
-const std::string kId = "id";
-const std::string kName = "name";
-const std::string kDefinition = "definition";
-const std::string kPipelineStateChangeListenerName = "listenerName";
-
-}  //  namespace
-
 // PipelineManager::createPipeline() begin
 namespace {
 
@@ -349,6 +468,11 @@ void MlInstance::MLPipelineGetSwitch(const picojson::value& args, picojson::obje
 // Pipeline API end
 
 #undef CHECK_EXIST
+#undef CHECK_TYPE
+#undef CHECK_TYPE_X
+#undef CHECK_TYPE_4
+#undef CHECK_TYPE_5
+#undef CHECK_ARGS
 
 }  // namespace ml
 }  // namespace extension
index ff2f74b..f8141a9 100644 (file)
@@ -37,6 +37,9 @@ class MlInstance : public common::ParsedInstance {
  private:
   // Common ML API begin
   void MLCheckNNFWAvailability(const picojson::value& args, picojson::object& out);
+  void MLTensorsInfoCountGetter(const picojson::value& args, picojson::object& out);
+  void MLTensorsInfoCreate(const picojson::value& args, picojson::object& out);
+  void MLTensorsInfoAddTensorInfo(const picojson::value& args, picojson::object& out);
 
   TensorsInfoManager tensors_info_manager_;
   // Common ML API end
index ea80daf..eaf0f1c 100644 (file)
@@ -24,7 +24,7 @@ using common::PlatformResult;
 namespace extension {
 namespace ml {
 
-TensorsInfo::TensorsInfo(ml_tensors_info_h handle, int id) : handle_(handle), id_(id) {
+// count_ starts at 0 and mirrors the native handle's tensor count; it is
+// kept in sync by NativeGetCount()/NativeSetCount().
+TensorsInfo::TensorsInfo(ml_tensors_info_h handle, int id) : handle_(handle), id_(id), count_(0) {
   ScopeLogger();
 }
 
@@ -41,6 +41,47 @@ int TensorsInfo::Id() {
   return this->id_;
 }
 
+// Returns the locally cached tensor count for this TensorsInfo.
+int TensorsInfo::Count() {
+  return this->count_;
+}
+
+PlatformResult TensorsInfo::AddTensorInfo(std::string name, ml_tensor_type_e type,
+                                          unsigned int dim[ML_TENSOR_RANK_LIMIT]) {
+  ScopeLogger("id_: %d", id_);
+  if (Count() >= ML_TENSOR_SIZE_LIMIT) {
+    return (PlatformResult(ErrorCode::ABORT_ERR, "Maximum size of tensors info reached."));
+  }
+  int index = Count();
+
+  PlatformResult result = NativeSetCount(index + 1);
+  if (!result) {
+    return result;
+  }
+
+  if (!name.empty()) {
+    result = NativeSetTensorName(index, name);
+    if (!result) {
+      LoggerD("Failed to set name, reducing tensorsInfo count");
+      NativeSetCount(index);
+      return result;
+    }
+  }
+
+  result = NativeSetTensorType(index, type);
+  if (!result) {
+    LoggerD("Failed to set type, reducing tensorsInfo count");
+    NativeSetCount(index);
+    return result;
+  }
+
+  result = NativeSetTensorDimensions(index, dim);
+  if (!result) {
+    LoggerD("Failed to set type, reducing tensorsInfo count");
+    NativeSetCount(index);
+  }
+  return result;
+}
+
 PlatformResult TensorsInfo::NativeDestroy() {
   ScopeLogger("id_: %d", id_);
   int ret = ml_tensors_info_destroy(handle_);
@@ -59,6 +100,7 @@ PlatformResult TensorsInfo::NativeGetCount(unsigned int* count) {
     LoggerE("ml_tensors_info_get_count failed: %d (%s)", ret, get_error_message(ret));
     return util::ToPlatformResult(ret, "Failed to get count");
   }
+  count_ = *count;
   return PlatformResult(ErrorCode::NO_ERROR);
 }
 
@@ -70,6 +112,7 @@ PlatformResult TensorsInfo::NativeSetCount(unsigned int count) {
     LoggerE("ml_tensors_info_set_count failed: %d (%s)", ret, get_error_message(ret));
     return util::ToPlatformResult(ret, "Failed to set count");
   }
+  count_ = count;
   return PlatformResult(ErrorCode::NO_ERROR);
 }
 
index 734a56a..29923b6 100644 (file)
@@ -39,6 +39,10 @@ class TensorsInfo {
 
   ml_tensors_info_h Handle();
   int Id();
+  int Count();
+
+  PlatformResult AddTensorInfo(std::string name, ml_tensor_type_e type,
+                               unsigned int dim[ML_TENSOR_RANK_LIMIT]);
 
   PlatformResult NativeDestroy();
   PlatformResult NativeGetCount(unsigned int* count);
@@ -54,6 +58,7 @@ class TensorsInfo {
  private:
   ml_tensors_info_h handle_;
   int id_;
+  int count_;
 };
 
 class TensorsInfoManager {
index d29605d..e021050 100644 (file)
@@ -46,6 +46,15 @@ const PlatformEnum<ml_nnfw_type_e> NNFWTypeEnum{{"ANY", ML_NNFW_TYPE_ANY},
                                                 {"TENSORFLOW", ML_NNFW_TYPE_TENSORFLOW},
                                                 {"TENSORFLOW_LITE", ML_NNFW_TYPE_TENSORFLOW_LITE},
                                                 {"VIVANTE", ML_NNFW_TYPE_VIVANTE}};
+
+// Maps Web API TensorType enum strings onto native ml_tensor_type_e values.
+const PlatformEnum<ml_tensor_type_e> TensorTypeEnum{
+    {"INT8", ML_TENSOR_TYPE_INT8},       {"UINT8", ML_TENSOR_TYPE_UINT8},
+    {"INT16", ML_TENSOR_TYPE_INT16},     {"UINT16", ML_TENSOR_TYPE_UINT16},
+    {"FLOAT32", ML_TENSOR_TYPE_FLOAT32}, {"INT32", ML_TENSOR_TYPE_INT32},
+    {"UINT32", ML_TENSOR_TYPE_UINT32},   {"FLOAT64", ML_TENSOR_TYPE_FLOAT64},
+    {"INT64", ML_TENSOR_TYPE_INT64},     {"UINT64", ML_TENSOR_TYPE_UINT64},
+    {"UNKNOWN", ML_TENSOR_TYPE_UNKNOWN}};
+
 }  // types
 
 namespace util {
index bc476b1..481483e 100644 (file)
@@ -34,6 +34,7 @@ namespace types {
 
 extern const PlatformEnum<ml_nnfw_hw_e> HWTypeEnum;
 extern const PlatformEnum<ml_nnfw_type_e> NNFWTypeEnum;
+extern const PlatformEnum<ml_tensor_type_e> TensorTypeEnum;
 
 }  // types