var TensorsInfo = function() {
    validator_.isConstructorCall(this, TensorsInfo);
-    var result = native_.callSync('MLTensorsInfoCreate');
-    if (native_.isFailure(result)) {
-        throw AbortError;
+    var id;
+    if (arguments.length == 0) {
+        var result = native_.callSync('MLTensorsInfoCreate');
+        if (native_.isFailure(result)) {
+            throw AbortError;
+        }
+        id = result.id;
+    } else {
+        var args = validator_.validateArgs(arguments, [
+            {
+                name: 'id',
+                type: types_.LONG
+            }
+        ]);
+        id = args.id;
    }
    Object.defineProperties(this, {
                return tensorsInfoCountGetter(this._id);
            }
        },
-        _id: { value: result.id, writable: false, enumerable: false }
+        _id: { value: id, writable: false, enumerable: false }
    });
};
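The constructor now has two modes: called with no arguments it creates a fresh native TensorsInfo via MLTensorsInfoCreate, while a numeric id argument (an internal path, used by clone() below) merely wraps a native object that already exists. Within the module this roughly amounts to the following sketch (the id value is illustrative):

    var fresh = new TensorsInfo();    // allocates a new native tensors-info object
    var wrapped = new TensorsInfo(7); // wraps the already existing native object with id 7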
];
TensorsInfo.prototype.clone = function() {
-    throw new WebAPIException(WebAPIException.ABORT_ERR, 'Not implemented');
+    var callArgs = {
+        tensorsInfoId: this._id
+    };
+
+    var result = native_.callSync('MLTensorsInfoClone', callArgs);
+
+    if (native_.isFailure(result)) {
+        throw AbortError;
+    }
+
+    return new TensorsInfo(result.id);
};
TensorsInfo.prototype.equals = function() {
-    throw new WebAPIException(WebAPIException.ABORT_ERR, 'Not implemented');
+    var args = validator_.validateArgs(arguments, [
+        {
+            name: 'other',
+            type: types_.PLATFORM_OBJECT,
+            values: TensorsInfo
+        }
+    ]);
+
+    if (this._id == args.other._id) {
+        return true;
+    }
+
+    var callArgs = {
+        tensorsInfoId: this._id,
+        otherId: args.other._id
+    };
+
+    var result = native_.callSync('MLTensorsInfoEquals', callArgs);
+
+    if (native_.isFailure(result)) {
+        return false;
+    }
+
+    return native_.getResultObject(result);
};
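A rough usage sketch of the two new methods from application code; the tizen.ml.TensorsInfo constructor, addTensorInfo() and the string tensor types are assumed from the rest of the module and may differ slightly, only the clone() and equals() behaviour follows this patch:

    var a = new tizen.ml.TensorsInfo();
    a.addTensorInfo('tensorA', 'UINT8', [2, 2]);

    var b = a.clone();              // independent native copy wrapped in a new TensorsInfo
    console.log(a.equals(b));       // true

    // equals() compares count, per-tensor type and dimensions; names are ignored.
    var c = new tizen.ml.TensorsInfo();
    c.addTensorInfo('differentName', 'UINT8', [2, 2]);
    console.log(a.equals(c));       // true

    c.addTensorInfo('extra', 'FLOAT32', [1]);
    console.log(a.equals(c));       // false, tensor counts differ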
TensorsInfo.prototype.getDimensions = function() {
const std::string kId = "id";
const std::string kDefinition = "definition";
const std::string kPipelineStateChangeListenerName = "listenerName";
-
+const std::string kOtherId = "otherId";
} // namespace
using namespace common;
  REGISTER_METHOD(MLTensorsInfoSetTensorName);
  REGISTER_METHOD(MLTensorsInfoGetTensorType);
  REGISTER_METHOD(MLTensorsInfoSetTensorType);
-
-  // Common ML API end
+  REGISTER_METHOD(MLTensorsInfoClone);
+  REGISTER_METHOD(MLTensorsInfoEquals);
+
+  // Common ML API end
  // Single API begin
}
ReportSuccess(out);
}
+
+void MlInstance::MLTensorsInfoClone(const picojson::value& args, picojson::object& out) {
+  ScopeLogger("args: %s", args.serialize().c_str());
+  CHECK_ARGS(args, kTensorsInfoId, double, out);
+
+  int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
+  TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+  if (nullptr == tensorsInfo) {
+    LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
+                      ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+    return;
+  }
+
+  TensorsInfo* cloned = GetTensorsInfoManager().CloneTensorsInfo(tensorsInfo);
+  if (nullptr == cloned) {
+    LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
+                      ("Could not clone TensorsInfo with given id: %d", tensorsInfoId));
+    return;
+  }
+
+  // Return the id of the clone so the JS layer can wrap it in a new TensorsInfo instance.
+  out["id"] = picojson::value(static_cast<double>(cloned->Id()));
+  ReportSuccess(out);
+}
+
+void MlInstance::MLTensorsInfoEquals(const picojson::value& args, picojson::object& out) {
+  ScopeLogger("args: %s", args.serialize().c_str());
+  CHECK_ARGS(args, kTensorsInfoId, double, out);
+  CHECK_ARGS(args, kOtherId, double, out);
+
+  int firstId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
+  int secondId = static_cast<int>(args.get(kOtherId).get<double>());
+
+  TensorsInfo* first = GetTensorsInfoManager().GetTensorsInfo(firstId);
+  if (nullptr == first) {
+    LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
+                      ("Could not find TensorsInfo handle with given id: %d", firstId));
+    return;
+  }
+
+  TensorsInfo* second = GetTensorsInfoManager().GetTensorsInfo(secondId);
+  if (nullptr == second) {
+    LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
+                      ("Could not find TensorsInfo handle with given id: %d", secondId));
+    return;
+  }
+
+  bool equals = first->Equals(second);
+  picojson::value val = picojson::value{equals};
+  ReportSuccess(val, out);
+}
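For orientation, the synchronous messages behind the two handlers look roughly like this. The tensorsInfoId, otherId and id fields come from the patch; the status/result envelope added by ReportSuccess() and unwrapped by native_.getResultObject() is assumed from the common plugin utilities:

    // TensorsInfo.prototype.clone():
    //   JS -> native  { tensorsInfoId: 1 }                -> MLTensorsInfoClone
    //   native -> JS  { status: 'success', id: 2 }        -> new TensorsInfo(result.id)
    //
    // TensorsInfo.prototype.equals(other):
    //   JS -> native  { tensorsInfoId: 1, otherId: 2 }    -> MLTensorsInfoEquals
    //   native -> JS  { status: 'success', result: true } -> native_.getResultObject(result)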
// Common ML API end
// Single API begin
  void MLTensorsInfoSetTensorName(const picojson::value& args, picojson::object& out);
  void MLTensorsInfoGetTensorType(const picojson::value& args, picojson::object& out);
  void MLTensorsInfoSetTensorType(const picojson::value& args, picojson::object& out);
+  void MLTensorsInfoClone(const picojson::value& args, picojson::object& out);
+  void MLTensorsInfoEquals(const picojson::value& args, picojson::object& out);
  TensorsInfoManager tensors_info_manager_;
  // Common ML API end
  return result;
}
+std::shared_ptr<TensorsInfo> TensorsInfo::CreateClone(int cloneId) {
+  ScopeLogger("id_: %d, cloneId: %d", id_, cloneId);
+  ml_tensors_info_h clone_h;
+
+  int ret = ml_tensors_info_create(&clone_h);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_create failed: %d (%s)", ret, get_error_message(ret));
+    return nullptr;
+  }
+
+  ret = ml_tensors_info_clone(clone_h, this->Handle());
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_clone failed: %d (%s)", ret, get_error_message(ret));
+    // Avoid leaking the freshly created handle when cloning fails.
+    ml_tensors_info_destroy(clone_h);
+    return nullptr;
+  }
+
+  auto t = std::make_shared<TensorsInfo>(clone_h, cloneId);
+  // Carry over the cached tensor count so the clone reports the same count as the source.
+  t->count_ = this->Count();
+  return t;
+}
+
+bool TensorsInfo::Equals(TensorsInfo* other) {
+  ScopeLogger();
+  if (nullptr == other) {
+    return false;
+  }
+  int count1 = this->Count(), count2 = other->Count();
+  if (count1 != count2) {
+    LoggerD("Tensors count not equal");
+    return false;
+  }
+  for (int i = 0; i < count1; i++) {
+    ml_tensor_type_e type1 = ML_TENSOR_TYPE_UNKNOWN, type2 = ML_TENSOR_TYPE_UNKNOWN;
+    if ((!this->NativeGetTensorType(i, &type1) || !other->NativeGetTensorType(i, &type2)) ||
+        (type1 != type2)) {
+      LoggerD("Tensors type not equal at index %d, [%d, %d]", i, type1, type2);
+      return false;
+    }
+
+    unsigned int dim1[ML_TENSOR_RANK_LIMIT], dim2[ML_TENSOR_RANK_LIMIT];
+    if ((!this->NativeGetTensorDimensions(i, dim1) || !other->NativeGetTensorDimensions(i, dim2))) {
+      LoggerD("Tensors dimensions getters error");
+      return false;
+    }
+    for (int j = 0; j < ML_TENSOR_RANK_LIMIT; j++) {
+      if (dim1[j] != dim2[j]) {
+        LoggerD("Tensors dimensions not equal at index %d, [%d][%u - %u]", i, j, dim1[j], dim2[j]);
+        return false;
+      }
+    }
+    // Tensor names are deliberately not compared: only type and dimensions matter,
+    // since "name" is not used when negotiating the pipeline. [Sangjung Woo 18.01.2020]
+  }
+  return true;
+}
+
PlatformResult TensorsInfo::NativeDestroy() {
  ScopeLogger("id_: %d", id_);
  int ret = ml_tensors_info_destroy(handle_);
  return t.get();
};
+TensorsInfo* TensorsInfoManager::CloneTensorsInfo(TensorsInfo* src) {
+  ScopeLogger();
+  if (nullptr == src) {
+    LoggerE("Source TensorsInfo is null");
+    return nullptr;
+  }
+
+  int id = nextId_++;
+  auto t = src->CreateClone(id);
+  if (nullptr == t) {
+    return nullptr;
+  }
+
+  // Register the clone in both lookup maps, so it can be resolved by id and by native handle.
+  map_by_id_[t->Id()] = t;
+  map_by_handle_[t->Handle()] = t;
+
+  return t.get();
+};
+
TensorsInfo* TensorsInfoManager::GetTensorsInfo(int id) {
  ScopeLogger("id: %d", id);
  PlatformResult AddTensorInfo(std::string name, ml_tensor_type_e type,
                               unsigned int dim[ML_TENSOR_RANK_LIMIT]);
+  std::shared_ptr<TensorsInfo> CreateClone(int cloneId);
+  bool Equals(TensorsInfo* other);
+
  PlatformResult NativeDestroy();
  PlatformResult NativeGetCount(unsigned int* count);
  PlatformResult NativeSetCount(unsigned int count);
  TensorsInfoManager();
  ~TensorsInfoManager();
  TensorsInfo* CreateTensorsInfo();
+  TensorsInfo* CloneTensorsInfo(TensorsInfo* src);
  TensorsInfo* GetTensorsInfo(int id);
  TensorsInfo* GetTensorsInfo(ml_tensors_info_h handle);