throw new WebAPIException(WebAPIException.ABORT_ERR, 'Not implemented');
}
},
- tensorsInfo : {
+ tensorsInfo: {
enumerable: true,
get: function() {
throw new WebAPIException(WebAPIException.ABORT_ERR, 'Not implemented');
MachineLearningManager.prototype.pipeline = new MachineLearningPipeline();
+// Neural network framework identifiers accepted by
+// MachineLearningManager.checkNNFWAvailability(). The string values map 1:1
+// to the native ml_nnfw_type_e enum (see NNFWTypeEnum on the native side).
+var NNFWType = {
+    ANY: 'ANY',
+    ARM_NN: 'ARM_NN',
+    CUSTOM_FILTER: 'CUSTOM_FILTER',
+    EDGE_TPU: 'EDGE_TPU',
+    MVNC: 'MVNC',
+    NNFW: 'NNFW',
+    OPEN_VINO: 'OPEN_VINO',
+    SNPE: 'SNPE',
+    TENSORFLOW: 'TENSORFLOW',
+    TENSORFLOW_LITE: 'TENSORFLOW_LITE',
+    VIVANTE: 'VIVANTE'
+};
+
+// Hardware targets accepted by MachineLearningManager.checkNNFWAvailability().
+// The string values map 1:1 to the native ml_nnfw_hw_e enum (see HWTypeEnum
+// on the native side).
+var HWType = {
+    ANY: 'ANY',
+    AUTO: 'AUTO',
+    CPU: 'CPU',
+    CPU_NEON: 'CPU_NEON',
+    CPU_SIMD: 'CPU_SIMD',
+    GPU: 'GPU',
+    NPU: 'NPU',
+    NPU_EDGE_TPU: 'NPU_EDGE_TPU',
+    NPU_MOVIDIUS: 'NPU_MOVIDIUS',
+    NPU_SR: 'NPU_SR',
+    NPU_VIVANTE: 'NPU_VIVANTE'
+};
+
+// Synchronously checks whether the given neural network framework (nnfw) is
+// available on the given hardware (hw). Both arguments are validated against
+// the NNFWType/HWType string enums; any native-side failure is reported as
+// "not available" (false) rather than thrown.
MachineLearningManager.prototype.checkNNFWAvailability = function() {
-    throw new WebAPIException(WebAPIException.ABORT_ERR, 'Not implemented');
+    // validator_ throws on a type/enum mismatch, so past this point
+    // args.nnfw and args.hw are known-good strings.
+    var args = validator_.validateArgs(arguments, [
+        {
+            name: 'nnfw',
+            type: types_.ENUM,
+            values: Object.values(NNFWType),
+            optional: false
+        },
+        {
+            name: 'hw',
+            type: types_.ENUM,
+            values: Object.values(HWType),
+            optional: false
+        }
+    ]);
+    var callArgs = {
+        nnfw: args.nnfw,
+        hw: args.hw
+    };
+
+    var result = native_.callSync('MLCheckNNFWAvailability', callArgs);
+
+    // A failed native call is mapped to "not available".
+    if (native_.isFailure(result)) {
+        return false;
+    }
+    return native_.getResultObject(result);
};
exports = new MachineLearningManager();
*/
#include "ml_instance.h"
+#include "ml_utils.h"
#include "common/logger.h"
#include "common/picojson.h"
+#include "common/platform_result.h"
namespace extension {
namespace ml {
+namespace {
+// picojson keys of the arguments passed in from the JS layer.
+const std::string kNnfw = "nnfw";
+const std::string kHw = "hw";
+}
+
using namespace common;
+// Reports a TypeMismatchException through |out| and returns from the
+// enclosing handler when the |name| key is missing from |args|.
+// Intentionally #undef'd again after the handlers in this file.
+#define CHECK_EXIST(args, name, out) \
+  if (!args.contains(name)) { \
+    LogAndReportError(TypeMismatchException(std::string(name) + " is required argument"), out); \
+    return; \
+  }
+
MlInstance::MlInstance() : pipeline_manager_{this} {
  ScopeLogger();
  using namespace std::placeholders;
#define REGISTER_METHOD(M) RegisterSyncHandler(#M, std::bind(&MlInstance::M, this, _1, _2))
  // Common ML API begin
+  // Exposes the handler below to JS as the synchronous
+  // "MLCheckNNFWAvailability" native call.
+  REGISTER_METHOD(MLCheckNNFWAvailability);
  // Common ML API end
}
// Common ML API begin
+// Native handler for the JS checkNNFWAvailability() call. Expects "nnfw" and
+// "hw" entries in |args| and reports a boolean availability flag on success.
+void MlInstance::MLCheckNNFWAvailability(const picojson::value& args, picojson::object& out) {
+  ScopeLogger("args: %s", args.serialize().c_str());
+  CHECK_EXIST(args, kNnfw, out)
+  CHECK_EXIST(args, kHw, out)
+
+  // NOTE(review): only the presence of the keys is checked above; the values
+  // are assumed to be strings here (the JS layer validates them as ENUMs).
+  std::string nnfw = args.get(kNnfw).get<std::string>();
+  std::string hw = args.get(kHw).get<std::string>();
+  bool availability_val = util::CheckNNFWAvailability(nnfw, hw);
+  picojson::value available = picojson::value{availability_val};
+  ReportSuccess(available, out);
+}
// Common ML API end
// Single API begin
// Valve::setOpen() end
// Pipeline API end
+#undef CHECK_EXIST
+
} // namespace ml
} // namespace extension
private:
// Common ML API begin
+ void MLCheckNNFWAvailability(const picojson::value& args, picojson::object& out);
// Common ML API end
#include <memory>
-#include "ml_utils.h"
-
#include "common/logger.h"
+#include "ml_utils.h"
namespace extension {
namespace ml {
+
+namespace types {
+// Maps the JS-facing HWType strings to native ml_nnfw_hw_e values.
+const PlatformEnum<ml_nnfw_hw_e> HWTypeEnum{{"ANY", ML_NNFW_HW_ANY},
+                                            {"AUTO", ML_NNFW_HW_AUTO},
+                                            {"CPU", ML_NNFW_HW_CPU},
+                                            {"CPU_NEON", ML_NNFW_HW_CPU_NEON},
+                                            {"CPU_SIMD", ML_NNFW_HW_CPU_SIMD},
+                                            {"GPU", ML_NNFW_HW_GPU},
+                                            {"NPU", ML_NNFW_HW_NPU},
+                                            {"NPU_EDGE_TPU", ML_NNFW_HW_NPU_EDGE_TPU},
+                                            {"NPU_MOVIDIUS", ML_NNFW_HW_NPU_MOVIDIUS},
+                                            {"NPU_SR", ML_NNFW_HW_NPU_SR},
+                                            {"NPU_VIVANTE", ML_NNFW_HW_NPU_VIVANTE}};
+
+// Maps the JS-facing NNFWType strings to native ml_nnfw_type_e values.
+const PlatformEnum<ml_nnfw_type_e> NNFWTypeEnum{{"ANY", ML_NNFW_TYPE_ANY},
+                                                {"ARM_NN", ML_NNFW_TYPE_ARMNN},
+                                                {"CUSTOM_FILTER", ML_NNFW_TYPE_CUSTOM_FILTER},
+                                                {"EDGE_TPU", ML_NNFW_TYPE_EDGE_TPU},
+                                                {"MVNC", ML_NNFW_TYPE_MVNC},
+                                                {"NNFW", ML_NNFW_TYPE_NNFW},
+                                                {"OPEN_VINO", ML_NNFW_TYPE_OPENVINO},
+                                                {"SNPE", ML_NNFW_TYPE_SNPE},
+                                                {"TENSORFLOW", ML_NNFW_TYPE_TENSORFLOW},
+                                                {"TENSORFLOW_LITE", ML_NNFW_TYPE_TENSORFLOW_LITE},
+                                                {"VIVANTE", ML_NNFW_TYPE_VIVANTE}};
+} // types
+
namespace util {
PlatformResult ToPlatformResult(int ml_error_code, const std::string& error_message_beginning) {
}
}
-using namespace common;
+// Returns true when the native layer reports that the |nnfw| framework can
+// run on the |hw| hardware. Unknown enum strings and native errors are
+// logged and mapped to false ("not available").
+bool CheckNNFWAvailability(const std::string& nnfw, const std::string& hw) {
+  ScopeLogger();
+  ml_nnfw_type_e nnfw_e = ML_NNFW_TYPE_ANY;
+  ml_nnfw_hw_e hw_e = ML_NNFW_HW_ANY;
+
+  // Translate the JS-facing enum strings into native enum values.
+  PlatformResult result = types::NNFWTypeEnum.getValue(nnfw, &nnfw_e);
+  if (!result) {
+    LoggerE("NNFWTypeEnum.getValue() failed, error: %s", result.message().c_str());
+    return false;
+  }
+  result = types::HWTypeEnum.getValue(hw, &hw_e);
+  if (!result) {
+    LoggerE("HWTypeEnum.getValue() failed, error: %s", result.message().c_str());
+    return false;
+  }
+  bool available = false;
+  int ret = ml_check_nnfw_availability(nnfw_e, hw_e, &available);
+
+  // Any native error is treated as "not available" rather than propagated.
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_check_nnfw_availability failed: %d (%s)", ret, get_error_message(ret));
+    return false;
+  }
+
+  LoggerD("ml_check_nnfw_availability: %s", available ? "true" : "false");
+  return available;
+}
} // util
} // ml
#include <nnstreamer/nnstreamer.h>
#include "common/picojson.h"
+#include "common/platform_enum.h"
#include "common/platform_result.h"
+using common::PlatformEnum;
using common::PlatformResult;
using common::ErrorCode;
namespace extension {
namespace ml {
+
+namespace types {
+
+// String <-> native enum mappings shared by the ML handlers; the definitions
+// live in the corresponding .cc file.
+extern const PlatformEnum<ml_nnfw_hw_e> HWTypeEnum;
+extern const PlatformEnum<ml_nnfw_type_e> NNFWTypeEnum;
+
+} // types
+
namespace util {
PlatformResult ToPlatformResult(int ml_error_code, const std::string& error_message);
+// True when |nnfw| can run on |hw|; false on unknown names or native errors.
+bool CheckNNFWAvailability(const std::string& nnfw, const std::string& hw);
+
} // util
} // ml
} // extension