3 * Copyright (C) 2020 Samsung Electronics
4 * Copyright (C) 2020 Dongju Chae <dongju.chae@samsung.com>
7 * @file ne-host-handler.cc
9 * @brief Implementation of APIs to access NPU from Host
10 * @see https://code.sec.samsung.net/confluence/display/ODLC/2020+Overall+Software+Stack
11 * @author Dongju Chae <dongju.chae@samsung.com>
12 * @bug No known bugs except for NYI items
15 #include <npubinfmt.h>
16 #include <NPUdrvAPI.h>
17 #include <CommPlugin.h>
21 #include "ne-scheduler.h"
22 #include "ne-handler.h"
27 #include <condition_variable>
/**
 * @brief Resolve a HostHandler from an opaque npudev_h handle.
 * Declares a local `handler` in the enclosing function; makes the caller
 * return -EINVAL when the device handle or its handler is null.
 * Used at the top of every libnpuhost API wrapper below.
 */
#define INIT_HOST_HANDLER(handler, dev) \
  Device *tdev = static_cast <Device *> (dev); \
  if (tdev == nullptr) return -EINVAL; \
  HostHandler *handler = tdev->getHostHandler (); \
  if (handler == nullptr) return -EINVAL;
/** @brief device class. it contains all related instances */
  /** @brief Factory method to create a trinity device depending on dev type */
  static Device *createInstance (dev_type device_type, int device_id);
46 /** @brief constructor of device */
47 Device (dev_type type, int id, bool need_model = true)
48 : comm_(CommPlugin::getCommPlugin()), type_ (type), id_ (id),
49 need_model_ (true), mode_ (NPUASYNC_WAIT), initialized_ (ATOMIC_FLAG_INIT) {}
  /** @brief destructor of device */

  /** @brief initialization; builds the driver API, host handler, scheduler,
   *  and memory allocator. Runs the body at most once: the atomic_flag is
   *  set on first entry, so later calls skip re-initialization. */
  if (!initialized_.test_and_set()) {
    /** create the corresponding driver API */
    api_ = DriverAPI::createDriverAPI (type_, id_);
    if (api_.get() == nullptr) {
      logerr (TAG, "Failed to create driver API\n");
  /* all components below borrow api_ (raw pointer); the device owns them */
  handler_.reset (new HostHandler (this));
  scheduler_.reset (new Scheduler (api_.get()));
  mem_ = MemAllocator::createInstance (api_.get());
  /** trivial accessors for device-owned components and settings */
  HostHandler *getHostHandler () { return handler_.get(); }
  dev_type getType () { return type_; }
  int getID () { return id_; }
  bool needModel () { return need_model_; }
  void setAsyncMode (npu_async_mode mode) { mode_ = mode; }

  /** memory allocation helpers delegated to the device's allocator */
  HWmem * allocMemory () { return mem_->allocMemory (); }
  void deallocMemory (int dmabuf_fd) { mem_->deallocMemory (dmabuf_fd); }
  /** it stops all requests in this device (choose wait or force) */
  int stop (bool force_stop) {
    /* the scheduler takes ownership of the submitted request */
    Request *req = new Request (NPUINPUT_STOP);
    req->setForceStop (force_stop);
    return scheduler_->submitRequest (req);
  /** @brief register the given model buffer with this device (device-specific) */
  virtual Model * registerModel (const generic_buffer *model) = 0;
  /** @brief run one inference; fills *sequence with the request id */
  virtual int run (npu_input_opmode opmode, const Model *model,
      const input_buffers *input, npuOutputNotify cb, void *cb_data,
      uint64_t *sequence) = 0;

  /** the device instance has ownership of all related components */
  std::unique_ptr<DriverAPI> api_;        /**< device api */
  std::unique_ptr<MemAllocator> mem_;     /**< memory allocator */
  std::unique_ptr<HostHandler> handler_;  /**< host handler */
  std::unique_ptr<Scheduler> scheduler_;  /**< scheduler */

  CommPlugin& comm_;                      /**< plugin communicator */

  dev_type type_;                         /**< device type */
  int id_;                                /**< device id */
  bool need_model_;                       /**< indicates whether the device needs model */
  npu_async_mode mode_;                   /**< async run mode */

  /** guards one-time initialization in init() */
  std::atomic_flag initialized_;
/** @brief Trinity Vision (TRIV) class */
class TrinityVision : public Device {
  TrinityVision (int id) : Device (NPUCOND_TRIV_CONN_SOCIP, id) {}

  /** @brief layout/quantization conversion between app and NPU HW formats;
   *  defined at file scope below */
  static size_t manipulateData (const Model *model, uint32_t idx, bool is_input,
      void *dst, void *src, size_t size);

  /** @brief register a model: map or copy the model buffer, parse its
   *  metadata, and push the program/weight layout to the driver */
  Model * registerModel (const generic_buffer *model_buf) {
    Model *model = mem_->allocModel ();
    if (model == nullptr) {
      logerr (TAG, "Failed to allocate model\n");

    if (model_buf->type == BUFFER_DMABUF) {
      /* dmabuf input: reference the caller's buffer, no copy */
      model->setDmabuf (model_buf->dmabuf);
      model->setOffset (model_buf->offset);
      model->setSize (model_buf->size);
      /* non-dmabuf input: allocate device memory and copy the content in */
      status = model->alloc (model_buf->size);
        logerr (TAG, "Failed to allocate model: %d\n", status);

      status = comm_.extractGenericBuffer (model_buf, model->getData(), nullptr);
        logerr (TAG, "Failed to extract generic buffer: %d\n", status);

    status = model->setMetadata (model->getData());

    /* program section follows the metadata; weights follow the program */
    model_config_t config;
    config.dmabuf_id = model->getDmabuf();
    config.program_size = model->getMetadata()->getProgramSize();
    config.program_offset_addr = model->getOffset() + model->getMetadata()->getMetaSize();
    config.weight_offset_addr = config.program_offset_addr + config.program_size;

    status = api_->setModel (&config);
168 Buffer * prepareInputBuffers (const Model *model, const input_buffers *input) {
169 const Metadata *meta = model->getMetadata();
170 const generic_buffer *first = &input->bufs[0];
172 if (meta->getInputNum() != input->num_buffers)
175 Buffer * buffer = mem_->allocBuffer ();
176 if (buffer != nullptr) {
179 if (first->type == BUFFER_DMABUF) {
180 buffer->setDmabuf (first->dmabuf);
181 buffer->setOffset (first->offset);
182 buffer->setSize (meta->getBufferSize());
184 status = buffer->alloc (meta->getBufferSize ());
186 logerr (TAG, "Failed to allocate buffer: %d\n", status);
191 status = buffer->createTensors (meta);
193 logerr (TAG, "Failed to allocate tensors: %s\n", status);
  /** @brief run inference on TRIV: feed inputs (with layout/quantization
   *  manipulation), then submit an async request to the scheduler.
   *  @return 0 on success, otherwise a negative errno */
  int run (npu_input_opmode opmode, const Model *model,
      const input_buffers *input, npuOutputNotify cb, void *cb_data,
      uint64_t *sequence) {
    /* TRIV accepts host-provided input only */
    if (opmode != NPUINPUT_HOST)

    Buffer *buffer = prepareInputBuffers (model, input);
    if (buffer == nullptr)

    for (uint32_t idx = 0; idx < input->num_buffers; idx++) {
      /* bind model/idx/is_input=true; extractGenericBuffer supplies
       * dst/src/size via the placeholders */
      auto func = std::bind (TrinityVision::manipulateData, model, idx, true,
          std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
      int status = comm_.extractGenericBuffer (&input->bufs[idx],
          buffer->getInputTensor(idx)->getData(), func);
        logerr (TAG, "Failed to feed input buffer: %d\n", status);

    /** this device uses CMA buffer */

    Request *req = new Request (opmode);
    req->setModel (model);
    req->setBuffer (buffer);
    req->setCallback (std::bind (&TrinityVision::callback, this, req, cb, cb_data));

    *sequence = req->getID();

    return scheduler_->submitRequest (req);

  /** @brief completion callback: copy each output tensor into a freshly
   *  malloc'ed buffer (the user must free it) and invoke the user cb */
  void callback (Request *req, npuOutputNotify cb, void *cb_data) {
    const Model *model = req->getModel ();
    Buffer *buffer = req->getBuffer ();
    output_buffers output = {
      .num_buffers = buffer->getOutputNum ()

    for (uint32_t idx = 0; idx < output.num_buffers; idx++) {
      uint32_t output_tensor_size = model->getOutputTensorSize (idx);

      output.bufs[idx].type = BUFFER_MAPPED;
      output.bufs[idx].size = output_tensor_size;
      /** user needs to free this */
      output.bufs[idx].addr = malloc (output_tensor_size);

      /* is_input=false: convert NPU HW layout back to app layout */
      auto func = std::bind (TrinityVision::manipulateData, model, idx, false,
          std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
      int status = comm_.insertGenericBuffer (buffer->getOutputTensor(idx)->getData(),
          &output.bufs[idx], func);
        logerr (TAG, "Failed to return output buffer: %d\n", status);

    cb (&output, req->getID(), cb_data);
/** @brief Trinity Vision2 (TRIV2) class */
class TrinityVision2 : public Device {
  TrinityVision2 (int id) : Device (NPUCOND_TRIV2_CONN_SOCIP, id) {}
  ~TrinityVision2 () {}

  /** @brief TRIV2 performs no layout conversion: plain memcpy */
  static size_t manipulateData (const Model *model, uint32_t idx, bool is_input,
      void *dst, void *src, size_t size) {
    memcpy (dst, src, size);

  Model * registerModel (const generic_buffer *model_buf) {
    /** TODO: model's weight values are stored in segments */

  /** @brief run inference on TRIV2; also accepts HW-recurring input mode */
  int run (npu_input_opmode opmode, const Model *model,
      const input_buffers *input, npuOutputNotify cb, void *cb_data,
      uint64_t *sequence) {
    if (opmode != NPUINPUT_HOST && opmode != NPUINPUT_HW_RECURRING)

    /** this device uses segment table */

    Request *req = new Request (opmode);
    req->setModel (model);
    req->setSegmentTable (segt);
    req->setCallback (std::bind (&TrinityVision2::callback, this, req, cb, cb_data));

    *sequence = req->getID();

    return scheduler_->submitRequest (req);

  /** @brief completion callback (body elided in this view) */
  void callback (Request *req, npuOutputNotify cb, void *cb_data) {
/** @brief Trinity Asr (TRIA) class */
class TrinityAsr : public Device {
  /* need_model = false: ASR models are embedded in the device */
  TrinityAsr (int id) : Device (NPUCOND_TRIA_CONN_SOCIP, id, false) {}

  /** @brief no layout conversion for ASR: plain memcpy */
  static size_t manipulateData (const Model *model, uint32_t idx, bool is_input,
      void *dst, void *src, size_t size) {
    memcpy (dst, src, size);

  /** @brief model registration is a no-op for ASR devices */
  Model * registerModel (const generic_buffer *model_buf) { return nullptr; }

  /** @brief run ASR inference on a single input tensor (no model needed) */
  int run (npu_input_opmode opmode, const Model *model,
      const input_buffers *input, npuOutputNotify cb, void *cb_data,
      uint64_t *sequence) {
    if (opmode != NPUINPUT_HOST)

    /** ASR does not require model and support only a single tensor */
    const generic_buffer *first_buf = &input->bufs[0];
    Buffer * buffer = mem_->allocBuffer ();

    if (first_buf->type == BUFFER_DMABUF) {
      /* dmabuf input: reference the caller's buffer directly */
      buffer->setDmabuf (first_buf->dmabuf);
      buffer->setOffset (first_buf->offset);
      buffer->setSize (first_buf->size);
      status = buffer->alloc (first_buf->size);

    buffer->createTensors ();

    /* no manipulation callback: raw copy into the single input tensor */
    status = comm_.extractGenericBuffer (first_buf,
        buffer->getInputTensor(0)->getData(), nullptr);

    Request *req = new Request (opmode);
    req->setBuffer (buffer);
    req->setCallback (std::bind (&TrinityAsr::callback, this, req, cb, cb_data));

    *sequence = req->getID();

    return scheduler_->submitRequest (req);

  void callback (Request *req, npuOutputNotify cb, void *cb_data) {
/**
 * @brief element-wise copy with (de)quantization, expanded per element type.
 * Quantize path (first loop): read `type` elements from src, clamp, and
 * store uint8_t into dst. Dequantize path (second loop): read uint8_t from
 * src, convert to `type`, advancing dst by data_size and src by one byte
 * per element. Relies on idx/val/num_elems/data_size from the expansion site.
 * NOTE(review): the visible clamp `val = (val > 255.0) ? 255.0 : 0.0;`
 * maps every in-range value to 0.0 when read in isolation; the scale/
 * zero-point arithmetic and lower-bound handling are presumably on elided
 * lines — confirm against the full source before changing.
 */
#define do_quantized_memcpy(type) do {\
    while (idx < num_elems) {\
      val = ((type *) src)[idx];\
      val = (val > 255.0) ? 255.0 : 0.0;\
      ((uint8_t *) dst)[idx++] = (uint8_t) val;\
    while (idx < num_elems) {\
      val = *(uint8_t *) src;\
      ((type *) dst)[idx++] = (type) val;\
      dst = (void*)(((uint8_t *) dst) + data_size);\
      src = (void*)(((uint8_t *) src) + 1);\
/**
 * @brief memcpy during quantization
 * @param[in] quant true to quantize (type -> uint8), false to dequantize
 * @param[in] type element data type of the non-quantized side
 * @param[in] scale quantization scale
 * @param[in] zero_point quantization zero point
 * @param[out] dst destination buffer
 * @param[in] src source buffer
 * @param[in] num_elems number of elements to copy
 */
static void memcpy_with_quant (bool quant, data_type type, float scale, uint32_t zero_point,
    void *dst, const void *src, uint32_t num_elems)
{
  /* widen once; used by the do_quantized_memcpy expansions */
  double _scale = (double) scale;
  double _zero_point = (double) zero_point;

  uint32_t data_size = get_data_size (type);

  /* dispatch on element type; each case expands do_quantized_memcpy */
      do_quantized_memcpy (int8_t);
    case DATA_TYPE_UINT8:
      do_quantized_memcpy (uint8_t);
    case DATA_TYPE_INT16:
      do_quantized_memcpy (int16_t);
    case DATA_TYPE_UINT16:
      do_quantized_memcpy (uint16_t);
    case DATA_TYPE_INT32:
      do_quantized_memcpy (int32_t);
    case DATA_TYPE_UINT32:
      do_quantized_memcpy (uint32_t);
    case DATA_TYPE_INT64:
      do_quantized_memcpy (int64_t);
    case DATA_TYPE_UINT64:
      do_quantized_memcpy (uint64_t);
    case DATA_TYPE_FLOAT32:
      do_quantized_memcpy (float);
    case DATA_TYPE_FLOAT64:
      do_quantized_memcpy (double);
      logerr (TAG, "Unsupported datatype %d\n", type);
/**
 * @brief perform data manipulation
 * @param[in] model model instance
 * @param[in] idx tensor index
 * @param[in] is_input indicate it's input manipulation
 * @param[out] dst destination buffer
 * @param[in] src source buffer (feature map)
 * @param[in] size size to be copied
 * @return size of memory copy if no error, otherwise zero
 *
 * @note the input data format should be NHWC
 * @detail rules for the memory address of activations in NPU HW.
 *         (https://code.sec.samsung.net/confluence/pages/viewpage.action?pageId=146491864)
 *
 * 1) Special case (depth == 3)
 * - addr(x,y,z) = addr(0,0,0) + (z) + 3 * (x + width * y)
 *
 * - addr(x,y,z) = addr(0,0,0) + (z % MPA_L) + MPA_L * (x + width * (y + height * (z / MPA_L)))
 *
 * Thus, if depth is not a multiple of MPA_L (i.e., 64), zero padding is required
 */
TrinityVision::manipulateData (const Model *model, uint32_t idx, bool is_input,
    void *dst, void *src, size_t size)
{
  const Metadata *meta = model->getMetadata();
  const tensor_data_info* info;
  const uint32_t *dims;

  /** extract required information from the metadata */
  if (idx >= meta->getInputNum()) {
    logerr (TAG, "Wrong information for input tensors in metadata\n");

  info = model->getInputDataInfo (idx);
  dims = meta->getInputDims (idx);
  zero_point = meta->getInputQuantZero (idx);
  scale = meta->getInputQuantScale (idx);

  /* output-tensor branch (selector elided; mirrors the input branch) */
  if (idx >= meta->getOutputNum()) {
    logerr (TAG, "Wrong information for output tensors in metadata\n");

  info = model->getOutputDataInfo (idx);
  dims = meta->getOutputDims (idx);
  zero_point = meta->getOutputQuantZero (idx);
  scale = meta->getOutputQuantScale (idx);

  if (info == nullptr) {
    logerr (TAG, "Unmatched tensors info\n");

  /* dims are NHWC per the note above */
  uint32_t batch = dims[0];
  uint32_t height = dims[1];
  uint32_t width = dims[2];
  uint32_t depth = dims[3];

  uint32_t data_size = get_data_size (info->type);
  if (data_size == 0) {
    logerr (TAG, "Invalid data size\n");

  bool need_quantization = false;
  /**
   * note that we assume DATA_TYPE_SRNPU is the smallest data type that we consider.
   * Also, DATA_TYPE_SRNPU and uint8_t may be regarded as the same in the view of apps.
   */
  if (info->type != DATA_TYPE_SRNPU) {
    assert (data_size >= get_data_size (DATA_TYPE_SRNPU));

    if (data_size > get_data_size (DATA_TYPE_SRNPU) ||
        !(zero_point == DEFAULT_ZERO_POINT && scale == DEFAULT_SCALE))
      need_quantization = true;

  /** check data manipulation is required */
  if (depth != 3 && depth != 64 && info->layout != DATA_LAYOUT_SRNPU) {
    uint32_t MPA_L = DATA_GRANULARITY;
    uint32_t std_offset;  /* standard offset in NHWC data format */
    uint32_t npu_offset;  /* npu offset in NPU HW data format*/

    /* @todo we currently support only NHWC */
    if (info->layout != DATA_LAYOUT_NHWC) {
      logerr (TAG, "data manipulation is supported for NHWC only\n");

    /* walk NHWC positions in MPA_L-wide depth slices */
    for (n = 0; n < batch; n++) {
      for (h = 0; h < height; h++) {
        for (w = 0; w < width; w++) {
          for (d = 0; d < depth; d += MPA_L) {
            std_offset = d + depth * (w + width * (h + n * height));
            npu_offset = MPA_L * (w + width * (h + (n + d / MPA_L) * height));
            slice_size = (depth - d >= MPA_L) ? MPA_L : depth - d;

            /* direction selector elided: presumably std->npu on input */
            src_offset = std_offset * data_size;
            dst_offset = npu_offset;

            /* ...and npu->std on output — confirm against full source */
            src_offset = npu_offset;
            dst_offset = std_offset * data_size;

            /* if depth is not a multiple of MPA_L, add zero paddings (not exact values) */
            if (need_quantization) {
              memcpy_with_quant (is_input, info->type, scale, zero_point,
                  static_cast<char*>(dst) + dst_offset,
                  static_cast<char*>(src) + src_offset,

                  static_cast<char*>(dst) + dst_offset,
                  static_cast<char*>(src) + src_offset,

  } else if (need_quantization) {
    /** depth == 3 || depth == 64; special cases which can directly copy input tensor data */
    /* convert byte size to element count for memcpy_with_quant */
    size = size / data_size;

    memcpy_with_quant (is_input, info->type, scale, zero_point,

    memcpy (dst, src, size);
/** @brief fallback variant: straight memcpy with no layout conversion
 *  (presumably the build where data manipulation is disabled — confirm) */
TrinityVision::manipulateData (const Model *model, uint32_t idx, bool is_input,
    void *dst, void *src, size_t size)
  memcpy (dst, src, size);
/**
 * @brief create device instance depending on device type and id
 * @param[in] type device type
 * @param[in] id device id
 * @return device instance
 */
Device::createInstance (dev_type type, int id)
{
  Device *device = nullptr;

  /* mask off connection bits; dispatch on the base device type */
  switch (type & DEVICETYPE_MASK) {
    case DEVICETYPE_TRIV:
      device = new TrinityVision (id);
    case DEVICETYPE_TRIV2:
      device = new TrinityVision2 (id);
    case DEVICETYPE_TRIA:
      device = new TrinityAsr (id);

  /* a device that fails init() is unusable (cleanup elided here) */
  if (device != nullptr && device->init () != 0) {
/** @brief host handler constructor */
HostHandler::HostHandler (Device *device)
  /* ignored as we don't use double buffering anymore, but for backward-compatibility */
  async_mode_ (NPUASYNC_WAIT)

/** @brief host handler destructor */
HostHandler::~HostHandler ()

/**
 * @brief register model from generic buffer
 * @param[in] model_buf model buffer
 * @param[out] modelid model id
 * @return 0 if no error. otherwise a negative errno
 */
HostHandler::registerModel (generic_buffer *model_buf, uint32_t *modelid)
{
  /* delegate to the device-specific registration */
  Model *model = device_->registerModel (model_buf);
  if (model == nullptr) {
    logerr (TAG, "Failed to register model\n");

  /* track the model in the handler's map, keyed by its id */
  int status = models_.insert (model->getID(), model);
    logerr (TAG, "Failed to insert model id\n");

  *modelid = model->getID();

/**
 * @brief remove the registered model
 * @param[in] modelid model id
 * @return 0 if no error. otherwise a negative errno
 */
HostHandler::unregisterModel (uint32_t modelid)
{
  return models_.remove (modelid);

/**
 * @brief remove all registered models
 */
HostHandler::unregisterModels ()
/**
 * @brief Set the data layout for input/output tensors
 * @param[in] modelid The ID of model whose layouts are set
 * @param[in] in the layout/type info for input tensors
 * @param[in] out the layout/type info for output tensors
 * @return @c 0 if no error. otherwise a negative error value
 * @note if this function is not called, default layout/type will be used.
 */
HostHandler::setDataInfo (uint32_t modelid, tensors_data_info *in,
    tensors_data_info *out)
{
  Model *model = models_.find (modelid);
  if (model == nullptr)

  return model->setDataInfo (in, out);

/**
 * @brief Set the inference constraint for next NPU inferences
 * @param[in] modelid The target model id
 * @param[in] constraint inference constraint (e.g., timeout, priority)
 * @return @c 0 if no error. otherwise a negative error value
 * @note If this function is not called, default values are used.
 */
HostHandler::setConstraint (uint32_t modelid, npuConstraint constraint)
{
  Model *model = models_.find (modelid);
  if (model == nullptr)

  model->setConstraint (constraint);

/**
 * @brief find and return model instance
 * @param[in] modelid model id
 * @return model instance if found. otherwise nullptr
 */
HostHandler::getModel (uint32_t modelid)
{
  return models_.find (modelid);
/** @brief dummy callback object for runSync; blocks the caller until the
 *  async callback fires and the output has been copied. */
  callbackSync (output_buffers *output) : output_(output), done_(false) {}

  /** @brief static trampoline matching the npuOutputNotify C signature */
  static void callback (output_buffers *output, uint64_t sequence, void *data) {
    callbackSync *sync = static_cast<callbackSync *>(data);
    sync->callback (output, sequence);

  void callback (output_buffers *output, uint64_t sequence) {
    /** just copy internal variables of output buffers */
    memcpy (output_, output, sizeof (output_buffers));

  /** @brief block until done_ is set by the callback */
    std::unique_lock<std::mutex> lock (m_);
    cv_.wait (lock, [this]() { return done_; });

  std::condition_variable cv_;
  output_buffers *output_;    /**< where the callback copies the results */
/**
 * @brief Execute inference. Wait (block) until the output is available.
 * @param[in] modelid The model to be inferred.
 * @param[in] input The input data to be inferred.
 * @param[out] output The output result.
 * @return @c 0 if no error. otherwise a negative error value
 */
HostHandler::runSync (uint32_t modelid, const input_buffers *input,
    output_buffers *output)
{
  /* wrap the async path with a blocking helper */
  callbackSync sync (output);
  int status = runAsync (modelid, input, callbackSync::callback,
      static_cast <void*> (&sync), NPUASYNC_DROP_OLD, nullptr);
    /** sync needs to wait callback */

/**
 * @brief Invoke NPU inference. Unblocking call.
 * @param[in] modelid The model to be inferred.
 * @param[in] input The input data to be inferred.
 * @param[in] cb The output buffer handler.
 * @param[in] cb_data The data given as a parameter to the runNPU_async call.
 * @param[in] mode Configures how this operation works.
 * @param[out] sequence The sequence number returned with runNPU_async.
 * @return @c 0 if no error. otherwise a negative error value
 */
HostHandler::runAsync (uint32_t modelid, const input_buffers *input,
    npuOutputNotify cb, void *cb_data, npu_async_mode mode, uint64_t *sequence)
{
  Model *model = nullptr;

  /* model lookup is skipped for model-less devices (e.g., ASR) */
  if (device_->needModel()) {
    model = getModel (modelid);
    if (model == nullptr)

  device_->setAsyncMode (mode);
  return device_->run (NPUINPUT_HOST, model, input, cb, cb_data, sequence);
/**
 * @brief get number of available devices
 * @param[in] type device type
 * @return number of devices
 */
HostHandler::getNumDevices (dev_type type)
{
  return DriverAPI::getNumDevices (type);

/**
 * @brief get device instance
 * @param[out] dev device instance
 * @param[in] type device type
 * @param[in] id device id
 * @return 0 if no error. otherwise a negative errno
 */
HostHandler::getDevice (npudev_h *dev, dev_type type, uint32_t id)
{
  int num_devices = getNumDevices (type);

  /** check the validity of device id */
  if (!(num_devices > 0 && id < static_cast<uint32_t>(num_devices))) {
    logerr (TAG, "Invalid arguments provided\n");

  Device *device = Device::createInstance (type, id);
  if (device == nullptr) {
    logerr (TAG, "Failed to create a device with the given type\n");

  /** This is just for backward-compatibility; we don't guarantee its correctness */
/**
 * @brief allocate generic buffer (just for users)
 * @param[out] buffer buffer instance
 * @return 0 if no error. otherwise a negative errno
 */
HostHandler::allocGenericBuffer (generic_buffer *buffer)
{
  /* sizes are carried in 32-bit fields downstream */
  if (buffer->size > UINT32_MAX) {
    logerr (TAG, "Don't support such a large size");

  if (buffer->type == BUFFER_FILE) {
    /* file-backed buffers only need a valid path; no allocation here */
    if (buffer->filepath == nullptr)

  /* now, npu-engine always provides dmabuf-based allocation */
  HWmem *hwmem = device_->allocMemory ();
  if (hwmem == nullptr || hwmem->alloc (buffer->size) < 0)

  /* expose the dmabuf identity and mapping to the caller */
  buffer->dmabuf = hwmem->getDmabuf();
  buffer->offset = hwmem->getOffset();
  buffer->addr = hwmem->getData();

/**
 * @brief deallocate generic buffer (just for users)
 * @param[in] buffer buffer instance
 * @return 0 if no error. otherwise a negative errno
 */
HostHandler::deallocGenericBuffer (generic_buffer *buffer)
{
  /* file-backed buffers own no device memory */
  if (buffer->type != BUFFER_FILE)
    device_->deallocMemory (buffer->dmabuf);
/**
 * @brief allocate multiple generic buffers (just for users)
 * @param[out] buffers multi-buffer instance
 * @return 0 if no error. otherwise a negative errno
 * @detail performs one contiguous allocation sized for all buffers, then
 *         points each subsequent buffer at its offset within bufs[0]'s dmabuf.
 */
HostHandler::allocGenericBuffer (generic_buffers *buffers)
{
  if (buffers == NULL || buffers->num_buffers < 1)

  buffer_types type = buffers->bufs[0].type;
  if (type == BUFFER_FILE)

  /* request a single allocation covering the sum of all buffer sizes */
  uint64_t total_size = 0;
  for (uint32_t idx = 0; idx < buffers->num_buffers; idx++)
    total_size += buffers->bufs[idx].size;

  uint64_t first_size = buffers->bufs[0].size;
  buffers->bufs[0].size = total_size;
  int status = allocGenericBuffer (&buffers->bufs[0]);

  /* carve the remaining buffers out of the single dmabuf */
  uint64_t offset = first_size;
  for (uint32_t idx = 1; idx < buffers->num_buffers; idx++) {
    buffers->bufs[idx].dmabuf = buffers->bufs[0].dmabuf;
    buffers->bufs[idx].offset = buffers->bufs[0].offset + offset;
    buffers->bufs[idx].type = type;

    offset += buffers->bufs[idx].size;

/**
 * @brief deallocate multiple generic buffers (just for users)
 * @param[in] buffers multi-buffer instance
 * @return 0 if no error. otherwise a negative errno
 */
HostHandler::deallocGenericBuffer (generic_buffers *buffers)
{
  if (buffers == NULL || buffers->num_buffers < 1)

  /* one contiguous allocation backs all buffers; freeing bufs[0] suffices */
  return deallocGenericBuffer (&buffers->bufs[0]);
/** just for backward-compatibility */
npudev_h HostHandler::latest_dev_ = nullptr;

/** implementation of libnpuhost APIs */
/**
 * @brief Returns the number of available NPU devices.
 * @return @c The number of NPU devices.
 * @retval 0 if no NPU devices available. if positive (number of NPUs) if NPU devices available. otherwise, a negative error value.
 * @note the caller should call putNPUdevice() to release the device handle
 */
int getnumNPUdeviceByType (dev_type type)
{
  return HostHandler::getNumDevices (type);

/**
 * @brief Returns the handle of the chosen NPU devices.
 * @param[out] dev The NPU device handle
 * @param[in] id The NPU id to get the handle. 0 <= id < getnumNPUdeviceByType().
 * @return @c 0 if no error. otherwise a negative error value
 * @note the caller should call putNPUdevice() to release the device handle
 */
int getNPUdeviceByType (npudev_h *dev, dev_type type, uint32_t id)
{
  return HostHandler::getDevice (dev, type, id);

/**
 * @brief Returns the handle of an NPU device meeting the condition
 * @param[out] dev The NPU device handle
 * @param[in] cond The condition for device search.
 * @return @c 0 if no error. otherwise a negative error value
 * @note the caller should call putNPUdevice() to release the device handle
 * @note it's not supported yet
 */
int getNPUdeviceByCondition(npudev_h *dev, const npucondition *cond)
{
  /** not implemented yet; falls back to the first TRIV device */
  return getNPUdeviceByType (dev, NPUCOND_TRIV_CONN_SOCIP, 0);

/**
 * @brief release the NPU device instance obtained by getDevice ()
 * @param[in] dev the NPU device handle
 */
void putNPUdevice (npudev_h dev)
{
  delete static_cast<Device *> (dev);
/**
 * @brief Send the NN model to NPU.
 * @param[in] dev The NPU device handle
 * @param[in] modelfile The filepath to the compiled NPU NN model in any buffer_type
 * @param[out] modelid The modelid allocated for this instance of NN model.
 * @return @c 0 if no error. otherwise a negative error value
 *
 * @detail For ASR devices, which do not accept models, but have models
 *         embedded in devices, you do not need to call register and
 *         register calls for ASR are ignored.
 *
 * @todo Add a variation: in-memory model register.
 */
int registerNPUmodel (npudev_h dev, generic_buffer *modelfile, uint32_t *modelid)
{
  INIT_HOST_HANDLER (host_handler, dev);

  return host_handler->registerModel (modelfile, modelid);

/**
 * @brief Remove the NN model from NPU
 * @param[in] dev The NPU device handle
 * @param[in] modelid The model to be removed from the NPU.
 * @return @c 0 if no error. otherwise a negative error value
 * @detail This may incur some latency with memory compaction.
 */
int unregisterNPUmodel(npudev_h dev, uint32_t modelid)
{
  INIT_HOST_HANDLER (host_handler, dev);

  return host_handler->unregisterModel (modelid);

/**
 * @brief Remove all NN models from NPU
 * @param[in] dev The NPU device handle
 * @return @c 0 if no error. otherwise a negative error value
 */
int unregisterNPUmodel_all(npudev_h dev)
{
  INIT_HOST_HANDLER (host_handler, dev);

  return host_handler->unregisterModels ();

/**
 * @brief [OPTIONAL] Set the data layout for input/output tensors
 * @param[in] dev The NPU device handle
 * @param[in] modelid The ID of model whose layouts are set
 * @param[in] info_in the layout/type info for input tensors
 * @param[in] info_out the layout/type info for output tensors
 * @return @c 0 if no error. otherwise a negative error value
 * @note if this function is not called, default layout/type will be used.
 */
int setNPU_dataInfo(npudev_h dev, uint32_t modelid,
    tensors_data_info *info_in, tensors_data_info *info_out)
{
  INIT_HOST_HANDLER (host_handler, dev);

  return host_handler->setDataInfo (modelid, info_in, info_out);

/**
 * @brief [OPTIONAL] Set the inference constraint for next NPU inferences
 * @param[in] dev The NPU device handle
 * @param[in] modelid The target model id
 * @param[in] constraint inference constraint (e.g., timeout, priority)
 * @return @c 0 if no error. otherwise a negative error value
 * @note If this function is not called, default values are used.
 */
int setNPU_constraint(npudev_h dev, uint32_t modelid, npuConstraint constraint)
{
  INIT_HOST_HANDLER (host_handler, dev);

  return host_handler->setConstraint (modelid, constraint);
/**
 * @brief Execute inference. Wait (block) until the output is available.
 * @param[in] dev The NPU device handle
 * @param[in] modelid The model to be inferred.
 * @param[in] input The input data to be inferred.
 * @param[out] output The output result. The caller MUST allocate appropriately before calling this.
 * @return @c 0 if no error. otherwise a negative error value
 *
 * @detail This is a syntactic sugar of runNPU_async().
 *         CAUTION: There is a memcpy for the output buffer.
 */
int runNPU_sync(npudev_h dev, uint32_t modelid, const input_buffers *input,
    output_buffers *output)
{
  INIT_HOST_HANDLER (host_handler, dev);

  return host_handler->runSync (modelid, input, output);

/**
 * @brief Invoke NPU inference. Unblocking call.
 * @param[in] dev The NPU device handle
 * @param[in] modelid The model to be inferred.
 * @param[in] input The input data to be inferred.
 * @param[in] cb The output buffer handler.
 * @param[out] sequence The sequence number returned with runNPU_async.
 * @param[in] data The data given as a parameter to the runNPU_async call.
 * @param[in] mode Configures how this operation works.
 * @return @c 0 if no error. otherwise a negative error value
 */
int runNPU_async(npudev_h dev, uint32_t modelid, const input_buffers *input,
    npuOutputNotify cb, uint64_t *sequence, void *data,
    npu_async_mode mode)
{
  INIT_HOST_HANDLER (host_handler, dev);

  return host_handler->runAsync (modelid, input, cb, data, mode, sequence);
/**
 * @brief Allocate a buffer for NPU model with the requested buffer type.
 * @param[in] dev The NPU device handle
 * @param[in/out] Buffer the buffer pointer where memory is allocated.
 * @return 0 if no error, otherwise a negative errno.
 */
int allocNPU_modelBuffer (npudev_h dev, generic_buffer *buffer)
{
  INIT_HOST_HANDLER (host_handler, dev);

  return host_handler->allocGenericBuffer (buffer);

/**
 * @brief Free the buffer and remove the address mapping.
 * @param[in] dev The NPU device handle
 * @param[in] buffer the model buffer
 * @return 0 if no error, otherwise a negative errno.
 */
int cleanNPU_modelBuffer (npudev_h dev, generic_buffer *buffer)
{
  INIT_HOST_HANDLER (host_handler, dev);

  return host_handler->deallocGenericBuffer (buffer);

/**
 * @brief Allocate a buffer for NPU input with the requested buffer type.
 * @param[in] dev The NPU device handle
 * @param[in/out] Buffer the buffer pointer where memory is allocated.
 * @return 0 if no error, otherwise a negative errno.
 * @note please utilize allocInputBuffers() for multiple input tensors because subsequent
 *       calls of allocInputBuffer() don't guarantee contiguous allocations between them.
 */
int allocNPU_inputBuffer (npudev_h dev, generic_buffer *buffer)
{
  INIT_HOST_HANDLER (host_handler, dev);

  return host_handler->allocGenericBuffer (buffer);

/**
 * @brief Free the buffer and remove the address mapping.
 * @param[in] dev The NPU device handle
 * @param[in] buffer the input buffer
 * @return 0 if no error, otherwise a negative errno.
 */
int cleanNPU_inputBuffer (npudev_h dev, generic_buffer *buffer)
{
  INIT_HOST_HANDLER (host_handler, dev);

  return host_handler->deallocGenericBuffer (buffer);

/**
 * @brief Allocate input buffers, which have multiple instances of generic_buffer
 * @param[in] dev The NPU device handle
 * @param[in/out] input input buffers.
 * @return 0 if no error, otherwise a negative errno.
 * @note it reuses allocInputBuffer().
 * @details in case of BUFFER_DMABUF, this function can be used to guarantee physically-contiguous
 *          memory mapping for multiple tensors (in a single inference, not batch size).
 */
int allocNPU_inputBuffers (npudev_h dev, input_buffers * input)
{
  INIT_HOST_HANDLER (host_handler, dev);

  return host_handler->allocGenericBuffer (input);

/**
 * @brief Free input buffers allocated by allocInputBuffers().
 * @param[in] dev The NPU device handle
 * @param[in/out] input input buffers.
 * @note it reuses cleanInputbuffer().
 * @return 0 if no error, otherwise a negative errno.
 */
int cleanNPU_inputBuffers (npudev_h dev, input_buffers * input)
{
  INIT_HOST_HANDLER (host_handler, dev);

  return host_handler->deallocGenericBuffer (input);
/**
 * @brief Get metadata for NPU model
 * @param[in] model The path of model binary file
 * @param[in] need_extra whether you want to extract the extra data in metadata
 * @return the metadata structure to be filled if no error, otherwise nullptr
 *
 * @note For most npu-engine users, the extra data is not useful because it will be
 *       used for second-party users (e.g., compiler, simulator).
 *       Also, the caller needs to free the metadata.
 *
 * @note the caller needs to free the metadata
 */
npubin_meta * getNPUmodel_metadata (const char *model, bool need_extra)
{
  fp = fopen (model, "rb");
    logerr (TAG, "Failed to open the model binary: %d\n", -errno);

  meta = (npubin_meta *) malloc (NPUBIN_META_SIZE);
    logerr (TAG, "Failed to allocate metadata\n");

  /* read the fixed-size base metadata first */
  ret = fread (meta, 1, NPUBIN_META_SIZE, fp);
  if (ret != NPUBIN_META_SIZE) {
    logerr (TAG, "Failed to read the metadata\n");

  /* validate the magic code before trusting any field */
  if (!CHECK_NPUBIN (meta->magiccode)) {
    logerr (TAG, "Invalid metadata provided\n");

  /* optionally grow the allocation and append the extra metadata payload */
  if (need_extra && NPUBIN_META_EXTRA (meta->magiccode) > 0) {
    npubin_meta *new_meta;

    new_meta = (npubin_meta *) realloc (meta, NPUBIN_META_TOTAL_SIZE(meta->magiccode));
      logerr (TAG, "Failed to allocate extra metadata\n");

    ret = fread (new_meta->reserved_extra, 1, NPUBIN_META_EXTRA_SIZE (meta->magiccode), fp);
    if (ret != NPUBIN_META_EXTRA_SIZE (meta->magiccode)) {
      logerr (TAG, "Invalid extra metadata provided\n");
1285 /** deprecated buffer APIs; please use the above APIs */
1288 * @brief Returns the number of NPU devices (TRIV).
1290 int getnumNPUdevice (void)
1292 logwarn (TAG, "deprecated. Please use getnumNPUdeviceByType ()\n");
1293 return getnumNPUdeviceByType (NPUCOND_TRIV_CONN_SOCIP);
1297 * @brief Returns the list of ASR devices (TRIA)
1299 int getnumASRdevice (void)
1301 logwarn (TAG, "deprecated. Please use getnumNPUdeviceByType ()\n");
1302 return getnumNPUdeviceByType (NPUCOND_TRIA_CONN_SOCIP);
1306 * @brief Returns the handle of the chosen TRIV device.
1308 int getNPUdevice (npudev_h *dev, uint32_t id)
1310 logwarn (TAG, "deprecated. Please use getNPUdeviceByType ()\n");
1311 return getNPUdeviceByType (dev, NPUCOND_TRIV_CONN_SOCIP, id);
1315 * @brief Returns the handle of the chosen TRIA device.
1317 int getASRdevice (npudev_h *dev, uint32_t id)
1319 logwarn (TAG, "deprecated. Please use getNPUdeviceByType ()\n");
1320 return getNPUdeviceByType (dev, NPUCOND_TRIA_CONN_SOCIP, id);
1323 /** @brief deprecated */
1324 int allocModelBuffer (generic_buffer *buffer)
1326 logwarn (TAG, "deprecated. Please use allocNPU_modelBuffer\n");
1327 return allocNPU_modelBuffer (HostHandler::getLatestDevice(), buffer);
1330 /** @brief deprecated */
1331 int cleanModelBuffer (generic_buffer *buffer)
1333 logwarn (TAG, "deprecated. Please use cleanNPU_modelBuffer\n");
1334 return allocNPU_modelBuffer (HostHandler::getLatestDevice(), buffer);
1337 /** @brief deprecated */
1338 int allocInputBuffer (generic_buffer *buffer)
1340 logwarn (TAG, "deprecated. Please use allocNPU_inputBuffer\n");
1341 return allocNPU_inputBuffer (HostHandler::getLatestDevice(), buffer);
1344 /** @brief deprecated */
1345 int cleanInputBuffer (generic_buffer *buffer)
1347 logwarn (TAG, "deprecated. Please use cleanNPU_inputBuffer\n");
1348 return cleanNPU_inputBuffer (HostHandler::getLatestDevice(), buffer);
1351 /** @brief deprecated */
1352 int allocInputBuffers (input_buffers * input)
1354 logwarn (TAG, "deprecated. Please use allocNPU_inputBuffers\n");
1355 return allocNPU_inputBuffers (HostHandler::getLatestDevice(), input);
1358 /** @brief deprecated */
1359 int cleanInputBuffers (input_buffers * input)
1361 logwarn (TAG, "deprecated. Please use cleanNPU_inputBuffers\n");
1362 return cleanNPU_inputBuffers (HostHandler::getLatestDevice(), input);
1365 /** @brief deprecated */
1366 int allocNPUBuffer (uint64_t size, buffer_types type,
1367 const char * filepath, generic_buffer *buffer)
1370 buffer->size = size;
1371 buffer->type = type;
1372 buffer->filepath = filepath;
1375 logwarn (TAG, "deprecated. Please use allocNPU_* APIs\n");
1376 return allocModelBuffer (buffer);
1379 /** @brief deprecated */
1380 int cleanNPUBuffer (generic_buffer * buffer)
1382 logwarn (TAG, "deprecated. Please use cleanNPU_* APIs\n");
1383 return cleanModelBuffer (buffer);