+// Shared error instance thrown when a native call fails for an unexpected reason.
var AbortError = new WebAPIException('AbortError', 'An unknown error occurred');
+// Constants
+// NOTE(review): MAX_TENSORS_INFO_COUNT is not referenced in this chunk — presumably
+// a JS-side mirror of the native ML_TENSOR_SIZE_LIMIT; confirm the values match.
+var MAX_TENSORS_INFO_COUNT = 16;
+
// TensorRawData
var TensorRawData = function() {
// TensorsInfo
+// Synchronously queries the native layer for the tensor count of the
+// TensorsInfo object identified by |id|.
+// On native failure the error is swallowed and 0 is returned, so the `count`
+// property getter built on top of this helper never throws.
+function tensorsInfoCountGetter(id) {
+ var result = native_.callSync('MLTensorsInfoCountGetter', { tensorsInfoId: id });
+
+ if (native_.isFailure(result)) {
+ return 0;
+ } else {
+ return native_.getResultObject(result);
+ }
+}
+
+// TensorsInfo wraps a native tensors-info handle. The constructor creates the
+// native object synchronously and stores its numeric id in the hidden,
+// read-only `_id` property, which prototype methods use to address the handle.
+// Throws AbortError when the native object cannot be created.
var TensorsInfo = function() {
validator_.isConstructorCall(this, TensorsInfo);
+
+ var result = native_.callSync('MLTensorsInfoCreate');
+ if (native_.isFailure(result)) {
+ throw AbortError;
+ }
+
Object.defineProperties(this, {
count: {
enumerable: true,
get: function() {
- throw new WebAPIException(WebAPIException.ABORT_ERR, 'Not implemented');
+ return tensorsInfoCountGetter(this._id);
}
- }
+ },
+ _id: { value: result.id, writable: false, enumerable: false }
});
};
+var TensorsInfoAddTensorInfoValidExceptions = [
+ 'InvalidValuesError',
+ 'TypeMismatchError',
+ 'NotSupportedError',
+ 'AbortError'
+];
+
TensorsInfo.prototype.addTensorInfo = function() {
- throw new WebAPIException(WebAPIException.ABORT_ERR, 'Not implemented');
+ var args = validator_.validateArgs(arguments, [
+ {
+ name: 'name',
+ type: types_.STRING,
+ optional: false,
+ nullable: true
+ },
+ {
+ name: 'type',
+ type: types_.ENUM,
+ values: Object.values(TensorType),
+ optional: false
+ },
+ {
+ name: 'dimensions',
+ type: types_.ARRAY,
+ optional: false
+ }
+ ]);
+
+ args.dimensions.forEach(function(d) {
+ if (Number.isInteger(d) == false) {
+ throw new WebAPIException(
+ WebAPIException.INVALID_VALUES_ERR,
+ 'dimensions array has to contain only integers'
+ );
+ }
+ });
+
+ var callArgs = {
+ name: args.name,
+ type: args.type,
+ dimensions: args.dimensions,
+ tensorsInfoId: this._id
+ };
+
+ var result = native_.callSync('MLTensorsInfoAddTensorInfo', callArgs);
+ if (native_.isFailure(result)) {
+ throw native_.getErrorObjectAndValidate(
+ result,
+ TensorsInfoAddTensorInfoValidExceptions,
+ AbortError
+ );
+ }
};
TensorsInfo.prototype.clone = function() {
namespace {
const std::string kNnfw = "nnfw";
const std::string kHw = "hw";
-}
+// JSON argument/field keys shared by the ML instance message handlers.
+const std::string kTensorsInfoId = "tensorsInfoId";
+const std::string kIndex = "index";
+const std::string kType = "type";
+const std::string kName = "name";
+const std::string kDimensions = "dimensions";
+const std::string kId = "id";
+const std::string kDefinition = "definition";
+const std::string kPipelineStateChangeListenerName = "listenerName";
+
+} // namespace
using namespace common;
-#define CHECK_EXIST(args, name, out) \
- if (!args.contains(name)) { \
- LogAndReportError(TypeMismatchException(std::string(name) + " is required argument"), out); \
- return; \
+// Reports TypeMismatchError and returns from the enclosing handler when the
+// required argument |name| is absent from |args|.
+#define CHECK_EXIST(args, name, out) \
+ if (!args.contains(name)) { \
+ std::string msg = std::string(name) + " is required argument"; \
+ LogAndReportError(PlatformResult(ErrorCode::TYPE_MISMATCH_ERR, msg), &out); \
+ return; \
  }
+// CHECK_TYPE will throw AbortError by default, but it can be changed by providing
+// additional parameter to the macro, i.e.:
+// CHECK_TYPE(args, "name", type, out, ErrorCode::TYPE_MISMATCH_ERR)
+// CHECK_TYPE_X is an arity-dispatch helper: the extra CHECK_TYPE_5/CHECK_TYPE_4
+// arguments shift so that the 6th token names the variant matching the caller's
+// argument count (5 args -> CHECK_TYPE_5, 4 args -> CHECK_TYPE_4).
+#define CHECK_TYPE(...) CHECK_TYPE_X(__VA_ARGS__, CHECK_TYPE_5, CHECK_TYPE_4)(__VA_ARGS__)
+#define CHECK_TYPE_X(_1, _2, _3, _4, _5, EXC_TYPE, ...) EXC_TYPE
+#define CHECK_TYPE_5(args, name, type, out, error_type) \
+ if (!args.get(name).is<type>()) { \
+ std::string msg = std::string(name) + " has invalid type"; \
+ LogAndReportError(PlatformResult(error_type, msg), &out); \
+ return; \
+ }
+#define CHECK_TYPE_4(args, name, type, out) \
+ CHECK_TYPE_5(args, name, type, out, ErrorCode::ABORT_ERR)
+
+// Convenience: presence check followed by type check for one argument.
+#define CHECK_ARGS(args, name, type, out) \
+ CHECK_EXIST(args, name, out) \
+ CHECK_TYPE(args, name, type, out)
+
+
MlInstance::MlInstance() : pipeline_manager_{this} {
ScopeLogger();
using namespace std::placeholders;
// Common ML API begin
REGISTER_METHOD(MLCheckNNFWAvailability);
-
+ REGISTER_METHOD(MLTensorsInfoCountGetter);
+ REGISTER_METHOD(MLTensorsInfoAddTensorInfo);
+ REGISTER_METHOD(MLTensorsInfoCreate);
// Common ML API end
// Single API begin
picojson::value available = picojson::value{availability_val};
ReportSuccess(available, out);
}
+
+// Handler for MLTensorsInfoCreate: creates a native TensorsInfo object via the
+// manager and returns its numeric id to the JS layer in the "id" field.
+void MlInstance::MLTensorsInfoCreate(const picojson::value& args, picojson::object& out) {
+ ScopeLogger("args: %s", args.serialize().c_str());
+
+ TensorsInfo* tensorsInfo = GetTensorsInfoManager().CreateTensorsInfo();
+ if (nullptr == tensorsInfo) {
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
+ ("Could not create new TensorsInfo handle"));
+ return;
+ }
+ out["id"] = picojson::value(static_cast<double>(tensorsInfo->Id()));
+ ReportSuccess(out);
+}
+
+// Handler for MLTensorsInfoCountGetter: looks up the TensorsInfo addressed by
+// tensorsInfoId and reports its tensor count (as a double — picojson's only
+// numeric type).
+void MlInstance::MLTensorsInfoCountGetter(const picojson::value& args, picojson::object& out) {
+ ScopeLogger("args: %s", args.serialize().c_str());
+ CHECK_ARGS(args, kTensorsInfoId, double, out);
+
+ int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
+ TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ if (nullptr == tensorsInfo) {
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ return;
+ }
+ unsigned int count = 0;
+ PlatformResult result = tensorsInfo->NativeGetCount(&count);
+ if (!result) {
+ ReportError(result, &out);
+ return;
+ }
+ picojson::value val = picojson::value{static_cast<double>(count)};
+ ReportSuccess(val, out);
+}
+
+// Handler for MLTensorsInfoAddTensorInfo: validates the JSON arguments and
+// appends one tensor description (name, type, dimensions) to the TensorsInfo
+// addressed by tensorsInfoId.
+void MlInstance::MLTensorsInfoAddTensorInfo(const picojson::value& args, picojson::object& out) {
+ ScopeLogger("args: %s", args.serialize().c_str());
+ CHECK_ARGS(args, kTensorsInfoId, double, out);
+ CHECK_ARGS(args, kType, std::string, out);
+ // kName is nullable, so only its presence is checked; the type is probed below.
+ CHECK_EXIST(args, kName, out);
+ CHECK_ARGS(args, kDimensions, picojson::array, out);
+
+ int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
+ TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ if (nullptr == tensorsInfo) {
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ return;
+ }
+
+ const std::string& tensorType = args.get(kType).get<std::string>();
+ ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
+ PlatformResult result = types::TensorTypeEnum.getValue(tensorType, &tensorTypeEnum);
+ if (!result) {
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting value of TensorType"),
+ &out,
+ ("TensorTypeEnum.getValue() failed, error: %s", result.message().c_str()));
+ return;
+ }
+
+ // Null name arrives as a non-string JSON value and leaves `name` empty.
+ std::string name;
+ if (args.get(kName).is<std::string>()) {
+ name = args.get(kName).get<std::string>();
+ LoggerD("name: %s", name.c_str());
+ }
+
+ // Missing trailing dimensions default to 1.
+ unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {1, 1, 1, 1};
+
+ // CHECK_ARGS has already validated type of kDimensions
+ auto dim = args.get(kDimensions).get<picojson::array>();
+ int i = 0;
+ for (const auto& d : dim) {
+ if (i >= ML_TENSOR_RANK_LIMIT) {
+ LoggerD("Provided dimensions array is bigger than supported");
+ break;
+ }
+
+ if (!d.is<double>()) {
+ LogAndReportError(PlatformResult(ErrorCode::INVALID_VALUES_ERR,
+ "dimensions array contains an invalid value"),
+ &out,
+ ("dimensions array contains an invalid value: %s", d.serialize().c_str()));
+ return;
+ }
+
+ // NOTE(review): entries are only checked to be doubles; negative or zero
+ // values wrap/truncate in this cast — confirm the JS layer guarantees
+ // positive integers here.
+ dimensions[i] = static_cast<unsigned int>(d.get<double>());
+ i++;
+ }
+
+ result = tensorsInfo->AddTensorInfo(name, tensorTypeEnum, dimensions);
+ if (!result) {
+ LogAndReportError(result, &out);
+ return;
+ }
+
+ ReportSuccess(out);
+}
// Common ML API end
// Single API begin
// Pipeline API begin
-namespace {
-
-const std::string kId = "id";
-const std::string kName = "name";
-const std::string kDefinition = "definition";
-const std::string kPipelineStateChangeListenerName = "listenerName";
-
-} // namespace
-
// PipelineManager::createPipeline() begin
namespace {
// Pipeline API end
#undef CHECK_EXIST
+#undef CHECK_TYPE
+#undef CHECK_TYPE_X
+#undef CHECK_TYPE_4
+#undef CHECK_TYPE_5
+#undef CHECK_ARGS
} // namespace ml
} // namespace extension
namespace extension {
namespace ml {
+// count_ caches the tensor count so Count() avoids a native query; it is
+// updated by the NativeGetCount()/NativeSetCount() wrappers.
-TensorsInfo::TensorsInfo(ml_tensors_info_h handle, int id) : handle_(handle), id_(id) {
+TensorsInfo::TensorsInfo(ml_tensors_info_h handle, int id) : handle_(handle), id_(id), count_(0) {
  ScopeLogger();
 }
return this->id_;
}
+// Returns the cached tensor count without touching the native handle.
+int TensorsInfo::Count() {
+ return this->count_;
+}
+
+// Appends one tensor description to the native handle. The operation is applied
+// stepwise (count, then name, type, dimensions); when a later step fails, the
+// count is rolled back so the handle is left as it was before the call.
+// Returns ABORT_ERR when the tensor limit is already reached.
+PlatformResult TensorsInfo::AddTensorInfo(std::string name, ml_tensor_type_e type,
+ unsigned int dim[ML_TENSOR_RANK_LIMIT]) {
+ ScopeLogger("id_: %d", id_);
+ if (Count() >= ML_TENSOR_SIZE_LIMIT) {
+ return (PlatformResult(ErrorCode::ABORT_ERR, "Maximum size of tensors info reached."));
+ }
+ int index = Count();
+
+ PlatformResult result = NativeSetCount(index + 1);
+ if (!result) {
+ return result;
+ }
+
+ if (!name.empty()) {
+ result = NativeSetTensorName(index, name);
+ if (!result) {
+ LoggerD("Failed to set name, reducing tensorsInfo count");
+ NativeSetCount(index);
+ return result;
+ }
+ }
+
+ result = NativeSetTensorType(index, type);
+ if (!result) {
+ LoggerD("Failed to set type, reducing tensorsInfo count");
+ NativeSetCount(index);
+ return result;
+ }
+
+ result = NativeSetTensorDimensions(index, dim);
+ if (!result) {
+ // Fixed copy-paste: this log previously said "type".
+ LoggerD("Failed to set dimensions, reducing tensorsInfo count");
+ NativeSetCount(index);
+ }
+ return result;
+}
+
PlatformResult TensorsInfo::NativeDestroy() {
ScopeLogger("id_: %d", id_);
int ret = ml_tensors_info_destroy(handle_);
LoggerE("ml_tensors_info_get_count failed: %d (%s)", ret, get_error_message(ret));
return util::ToPlatformResult(ret, "Failed to get count");
}
+ count_ = *count;
return PlatformResult(ErrorCode::NO_ERROR);
}
LoggerE("ml_tensors_info_set_count failed: %d (%s)", ret, get_error_message(ret));
return util::ToPlatformResult(ret, "Failed to set count");
}
+ count_ = count;
return PlatformResult(ErrorCode::NO_ERROR);
}