3 * Copyright (C) 2020 Samsung Electronics
4 * Copyright (C) 2020 Dongju Chae <dongju.chae@samsung.com>
7 * @file ne-host-handler.cc
9 * @brief Implementation of APIs to access NPU from Host
10 * @see https://code.sec.samsung.net/confluence/display/ODLC/2020+Overall+Software+Stack
11 * @author Dongju Chae <dongju.chae@samsung.com>
12 * @bug No known bugs except for NYI items
15 #include "ne-handler.h"
17 #include <libnpuhost.h>
18 #include <npubinfmt.h>
19 #include <NPUdrvAPI.h>
20 #include <CommPlugin.h>
25 #include <condition_variable>
/**
 * INIT_HOST_HANDLER(handler, dev): resolve the HostHandler from an opaque
 * npudev_h. Declares `tdev` and `handler` in the caller's scope and makes
 * the *calling function* return -EINVAL when either pointer is null.
 */
32 #define INIT_HOST_HANDLER(handler, dev) \
33 Device *tdev = static_cast <Device *> (dev); \
34 if (tdev == nullptr) return -EINVAL; \
35 HostHandler *handler = tdev->getHostHandler (); \
36 if (handler == nullptr) return -EINVAL;
38 /** latest opened device handle; kept just for backward-compatibility with the deprecated APIs below */
39 npudev_h HostHandler::latest_dev_ = nullptr;
41 /** implement libnpuhost APIs */
44 * @brief Returns the number of available NPU devices.
45 * @return @c The number of NPU devices.
46 * @retval 0 if no NPU devices available. if positive (number of NPUs) if NPU devices available. otherwise, a negative error value.
47 * @note the caller should call putNPUdevice() to release the device handle
49 int getnumNPUdeviceByType (dev_type type)
51 return HostHandler::getNumDevices (type);
55 * @brief Returns the handle of the chosen NPU devices.
56 * @param[out] dev The NPU device handle
57 * @param[in] id The NPU id to get the handle. 0 <= id < getnumNPUdeviceByType().
58 * @return @c 0 if no error. otherwise a negative error value
59 * @note the caller should call putNPUdevice() to release the device handle
61 int getNPUdeviceByType (npudev_h *dev, dev_type type, uint32_t id)
63 return HostHandler::getDevice (dev, type, id);
67 * @brief Returns the handle of an NPU device meeting the condition
68 * @param[out] dev The NPU device handle
69 * @param[in] cond The condition for device search.
70 * @return @c 0 if no error. otherwise a negative error value
71 * @note the caller should call putNPUdevice() to release the device handle
72 * @note it's not supported yet
74 int getNPUdeviceByCondition(npudev_h *dev, const npucondition *cond)
76 /** not implmeneted yet */
77 return getNPUdeviceByType (dev, NPUCOND_TRIV_CONN_SOCIP, 0);
81 * @brief release the NPU device instance obtained by getDevice ()
82 * @param[in] dev the NPU device handle
84 void putNPUdevice (npudev_h dev)
87 delete static_cast<Device *> (dev);
91 * @brief Send the NN model to NPU.
92 * @param[in] dev The NPU device handle
93 * @param[in] modelfile The filepath to the compiled NPU NN model in any buffer_type
94 * @param[out] modelid The modelid allocated for this instance of NN model.
95 * @return @c 0 if no error. otherwise a negative error value
97 * @detail For ASR devices, which do not accept models, but have models
98 * embedded in devices, you do not need to call register and
99 * register calls for ASR are ignored.
101 * @todo Add a variation: in-memory model register.
103 int registerNPUmodel (npudev_h dev, generic_buffer *modelfile, uint32_t *modelid)
105 INIT_HOST_HANDLER (host_handler, dev);
107 return host_handler->registerModel (modelfile, modelid);
111 * @brief Remove the NN model from NPU
112 * @param[in] dev The NPU device handle
113 * @param[in] modelid The model to be removed from the NPU.
114 * @return @c 0 if no error. otherwise a negative error value
115 * @detail This may incur some latency with memory compatcion.
117 int unregisterNPUmodel(npudev_h dev, uint32_t modelid)
119 INIT_HOST_HANDLER (host_handler, dev);
121 return host_handler->unregisterModel (modelid);
125 * @brief Remove all NN models from NPU
126 * @param[in] dev The NPU device handle
127 * @return @c 0 if no error. otherwise a negative error value
129 int unregisterNPUmodel_all(npudev_h dev)
131 INIT_HOST_HANDLER (host_handler, dev);
133 return host_handler->unregisterModels ();
137 * @brief [OPTIONAL] Set the data layout for input/output tensors
138 * @param[in] dev The NPU device handle
139 * @param[in] modelid The ID of model whose layouts are set
140 * @param[in] info_in the layout/type info for input tensors
141 * @param[in] info_out the layout/type info for output tensors
142 * @return @c 0 if no error. otherwise a negative error value
143 * @note if this function is not called, default layout/type will be used.
145 int setNPU_dataInfo(npudev_h dev, uint32_t modelid,
146 tensors_data_info *info_in, tensors_data_info *info_out)
148 INIT_HOST_HANDLER (host_handler, dev);
150 return host_handler->setDataInfo (modelid, info_in, info_out);
154 * @brief [OPTIONAL] Set the inference constraint for next NPU inferences
155 * @param[in] dev The NPU device handle
156 * @param[in] modelid The target model id
157 * @param[in] constraint inference constraint (e.g., timeout, priority)
158 * @return @c 0 if no error. otherwise a negative error value
159 * @note If this function is not called, default values are used.
161 int setNPU_constraint(npudev_h dev, uint32_t modelid, npuConstraint constraint)
163 INIT_HOST_HANDLER (host_handler, dev);
165 return host_handler->setConstraint (modelid, constraint);
169 * @brief Execute inference. Wait (block) until the output is available.
170 * @param[in] dev The NPU device handle
171 * @param[in] modelid The model to be inferred.
172 * @param[in] input The input data to be inferred.
173 * @param[out] output The output result. The caller MUST allocate appropriately before calling this.
174 * @return @c 0 if no error. otherwise a negative error value
176 * @detail This is a syntactic sugar of runNPU_async().
177 * CAUTION: There is a memcpy for the output buffer.
179 int runNPU_sync(npudev_h dev, uint32_t modelid, const input_buffers *input,
180 output_buffers *output)
182 INIT_HOST_HANDLER (host_handler, dev);
184 return host_handler->runSync (modelid, input, output);
188 * @brief Invoke NPU inference. Unblocking call.
189 * @param[in] dev The NPU device handle
190 * @param[in] modelid The model to be inferred.
191 * @param[in] input The input data to be inferred.
192 * @param[in] cb The output buffer handler.
193 * @param[out] sequence The sequence number returned with runNPU_async.
194 * @param[in] data The data given as a parameter to the runNPU_async call.
195 * @param[in] mode Configures how this operation works.
196 * @return @c 0 if no error. otherwise a negative error value
198 int runNPU_async(npudev_h dev, uint32_t modelid, const input_buffers *input,
199 npuOutputNotify cb, uint64_t *sequence, void *data,
202 INIT_HOST_HANDLER (host_handler, dev);
204 return host_handler->runAsync (modelid, input, cb, data, mode, sequence);
208 * @brief Allocate a buffer for NPU model with the requested buffer type.
209 * @param[in] dev The NPU device handle
210 * @param[in/out] Buffer the buffer pointer where memory is allocated.
211 * @return 0 if no error, otherwise a negative errno.
213 int allocNPU_modelBuffer (npudev_h dev, generic_buffer *buffer)
215 INIT_HOST_HANDLER (host_handler, dev);
217 return host_handler->allocGenericBuffer (buffer);
221 * @brief Free the buffer and remove the address mapping.
222 * @param[in] dev The NPU device handle
223 * @param[in] buffer the model buffer
224 * @return 0 if no error, otherwise a negative errno.
226 int cleanNPU_modelBuffer (npudev_h dev, generic_buffer *buffer)
228 INIT_HOST_HANDLER (host_handler, dev);
230 return host_handler->deallocGenericBuffer (buffer);
234 * @brief Allocate a buffer for NPU input with the requested buffer type.
235 * @param[in] dev The NPU device handle
236 * @param[in/out] Buffer the buffer pointer where memory is allocated.
237 * @return 0 if no error, otherwise a negative errno.
238 * @note please utilize allocInputBuffers() for multiple input tensors because subsequent
239 * calls of allocInputBuffer() don't gurantee contiguous allocations between them.
241 int allocNPU_inputBuffer (npudev_h dev, generic_buffer *buffer)
243 INIT_HOST_HANDLER (host_handler, dev);
245 return host_handler->allocGenericBuffer (buffer);
249 * @brief Free the buffer and remove the address mapping.
250 * @param[in] dev The NPU device handle
251 * @param[in] buffer the input buffer
252 * @return 0 if no error, otherwise a negative errno.
254 int cleanNPU_inputBuffer (npudev_h dev, generic_buffer *buffer)
256 INIT_HOST_HANDLER (host_handler, dev);
258 return host_handler->deallocGenericBuffer (buffer);
262 * @brief Allocate input buffers, which have multiple instances of generic_buffer
263 * @param[in] dev The NPU device handle
264 * @param[in/out] input input buffers.
265 * @return 0 if no error, otherwise a negative errno.
266 * @note it reuses allocInputBuffer().
267 * @details in case of BUFFER_DMABUF, this function can be used to gurantee physically-contiguous
268 * memory mapping for multiple tensors (in a single inference, not batch size).
270 int allocNPU_inputBuffers (npudev_h dev, input_buffers * input)
272 INIT_HOST_HANDLER (host_handler, dev);
274 return host_handler->allocGenericBuffer (input);
278 * @brief Free input buffers allocated by allocInputBuffers().
279 * @param[in] dev The NPU device handle
280 * @param[in/out] input input buffers.
281 * @note it reuses cleanInputbuffer().
282 * @return 0 if no error, otherwise a negative errno.
284 int cleanNPU_inputBuffers (npudev_h dev, input_buffers * input)
286 INIT_HOST_HANDLER (host_handler, dev);
288 return host_handler->deallocGenericBuffer (input);
292 * @brief get the current memory status for the given device
293 * @param[in] dev The NPU device handle
294 * @param[out] alloc_total The size of allocated memory until now
295 * @param[out] free_total The size of freed memory until now
296 * @return @c 0 if no error. otherwise a negatice error value
298 int getNPU_memoryStatus(npudev_h dev, size_t *alloc_total, size_t *free_total)
300 INIT_HOST_HANDLER (host_handler, dev);
302 return host_handler->getMemoryStatus (alloc_total, free_total);
306 * @brief Get metadata for NPU model
307 * @param[in] model The path of model binary file
308 * @param[in] need_extra whether you want to extract the extra data in metadata
309 * @return the metadata structure to be filled if no error, otherwise nullptr
311 * @note For most npu-engine users, the extra data is not useful because it will be
312 * used for second-party users (e.g., compiler, simulator).
313 * Also, the caller needs to free the metadata.
315 * @note the caller needs to free the metadata
317 npubin_meta * getNPUmodel_metadata (const char *model, bool need_extra)
326 fp = fopen (model, "rb");
328 logerr (TAG, "Failed to open the model binary: %d\n", -errno);
332 meta = (npubin_meta *) malloc (NPUBIN_META_SIZE);
334 logerr (TAG, "Failed to allocate metadata\n");
338 ret = fread (meta, 1, NPUBIN_META_SIZE, fp);
339 if (ret != NPUBIN_META_SIZE) {
340 logerr (TAG, "Failed to read the metadata\n");
344 if (!CHECK_NPUBIN (meta->magiccode)) {
345 logerr (TAG, "Invalid metadata provided\n");
349 if (need_extra && NPUBIN_META_EXTRA (meta->magiccode) > 0) {
350 npubin_meta *new_meta;
352 new_meta = (npubin_meta *) realloc (meta, NPUBIN_META_TOTAL_SIZE(meta->magiccode));
354 logerr (TAG, "Failed to allocate extra metadata\n");
358 ret = fread (new_meta->reserved_extra, 1, NPUBIN_META_EXTRA_SIZE (meta->magiccode), fp);
359 if (ret != NPUBIN_META_EXTRA_SIZE (meta->magiccode)) {
360 logerr (TAG, "Invalid extra metadata provided\n");
380 /** deprecated buffer APIs; please use the above APIs */
383 * @brief Returns the number of NPU devices (TRIV).
385 int getnumNPUdevice (void)
387 logwarn (TAG, "deprecated. Please use getnumNPUdeviceByType ()\n");
388 return getnumNPUdeviceByType (NPUCOND_TRIV_CONN_SOCIP);
392 * @brief Returns the list of ASR devices (TRIA)
394 int getnumASRdevice (void)
396 logwarn (TAG, "deprecated. Please use getnumNPUdeviceByType ()\n");
397 return getnumNPUdeviceByType (NPUCOND_TRIA_CONN_SOCIP);
401 * @brief Returns the handle of the chosen TRIV device.
403 int getNPUdevice (npudev_h *dev, uint32_t id)
405 logwarn (TAG, "deprecated. Please use getNPUdeviceByType ()\n");
406 return getNPUdeviceByType (dev, NPUCOND_TRIV_CONN_SOCIP, id);
410 * @brief Returns the handle of the chosen TRIA device.
412 int getASRdevice (npudev_h *dev, uint32_t id)
414 logwarn (TAG, "deprecated. Please use getNPUdeviceByType ()\n");
415 return getNPUdeviceByType (dev, NPUCOND_TRIA_CONN_SOCIP, id);
418 /** @brief deprecated */
419 int allocModelBuffer (generic_buffer *buffer)
421 logwarn (TAG, "deprecated. Please use allocNPU_modelBuffer\n");
422 return allocNPU_modelBuffer (HostHandler::getLatestDevice(), buffer);
425 /** @brief deprecated */
426 int cleanModelBuffer (generic_buffer *buffer)
428 logwarn (TAG, "deprecated. Please use cleanNPU_modelBuffer\n");
429 return allocNPU_modelBuffer (HostHandler::getLatestDevice(), buffer);
432 /** @brief deprecated */
433 int allocInputBuffer (generic_buffer *buffer)
435 logwarn (TAG, "deprecated. Please use allocNPU_inputBuffer\n");
436 return allocNPU_inputBuffer (HostHandler::getLatestDevice(), buffer);
439 /** @brief deprecated */
440 int cleanInputBuffer (generic_buffer *buffer)
442 logwarn (TAG, "deprecated. Please use cleanNPU_inputBuffer\n");
443 return cleanNPU_inputBuffer (HostHandler::getLatestDevice(), buffer);
446 /** @brief deprecated */
447 int allocInputBuffers (input_buffers * input)
449 logwarn (TAG, "deprecated. Please use allocNPU_inputBuffers\n");
450 return allocNPU_inputBuffers (HostHandler::getLatestDevice(), input);
453 /** @brief deprecated */
454 int cleanInputBuffers (input_buffers * input)
456 logwarn (TAG, "deprecated. Please use cleanNPU_inputBuffers\n");
457 return cleanNPU_inputBuffers (HostHandler::getLatestDevice(), input);
/**
 * NOTE(review): this deprecated wrapper appears to fill the caller-provided
 * generic_buffer (size/type/filepath) before delegating to allocModelBuffer ();
 * the size/type assignments are not visible in this chunk -- confirm against
 * the full file.
 */
460 /** @brief deprecated */
461 int allocNPUBuffer (uint64_t size, buffer_types type,
462 const char * filepath, generic_buffer *buffer)
467 buffer->filepath = filepath;
470 logwarn (TAG, "deprecated. Please use allocNPU_* APIs\n");
471 return allocModelBuffer (buffer);
474 /** @brief deprecated */
475 int cleanNPUBuffer (generic_buffer * buffer)
477 logwarn (TAG, "deprecated. Please use cleanNPU_* APIs\n");
478 return cleanModelBuffer (buffer);
481 /** implement methods of HostHandler class */
483 /** @brief host handler constructor */
484 HostHandler::HostHandler (Device *device)
/* NOTE(review): the member-initializer list is only partially in view here;
 * device_/models_ initializers are presumably elided in this chunk. */
486 /* ignored as we don't use double buffering anymore, but for backward-compatibility */
487 async_mode_ (NPUASYNC_WAIT)
491 /** @brief host handler destructor */
492 HostHandler::~HostHandler ()
497 * @brief register model from generic buffer
498 * @param[in] model_buf model buffer
499 * @param[out] modelid model id
500 * @return 0 if no error. otherwise a negative errno
503 HostHandler::registerModel (generic_buffer *model_buf, uint32_t *modelid)
/* reject null buffer/id pointers up front */
505 if (model_buf == nullptr || modelid == nullptr) {
506 logerr (TAG, "Invalid arguments given\n");
/* delegate model parsing/upload to the concrete device */
510 Model *model = nullptr;
511 int status = device_->setModel (model_buf, &model);
513 logerr (TAG, "Failed to set model: %d\n", status);
517 assert (model != nullptr);
/* track the model in the id -> instance map; the id is returned to the caller */
519 status = models_.insert (model->getID(), model);
521 logerr (TAG, "Failed to insert model id\n");
526 *modelid = model->getID();
531 * @brief remove the registered model
532 * @param[in] modelid model id
533 * @return 0 if no error. otherwise a negative errno
536 HostHandler::unregisterModel (uint32_t modelid)
538 return models_.remove (modelid);
542 * @brief remove all registered models
/* NOTE(review): function body not visible in this chunk */
546 HostHandler::unregisterModels ()
553 * @brief Set the data layout for input/output tensors
554 * @param[in] modelid The ID of model whose layouts are set
555 * @param[in] in the layout/type info for input tensors
556 * @param[in] out the layout/type info for output tensors
557 * @return @c 0 if no error. otherwise a negative error value
558 * @note if this function is not called, default layout/type will be used.
561 HostHandler::setDataInfo (uint32_t modelid, tensors_data_info *in,
562 tensors_data_info *out)
564 Model *model = models_.find (modelid);
/* not-found error return is not visible in this chunk */
565 if (model == nullptr)
/* the model instance owns its layout/type info */
568 return model->setDataInfo (in, out);
572 * @brief Set the inference constraint for next NPU inferences
573 * @param[in] modelid The target model id
574 * @param[in] constraint inference constraint (e.g., timeout, priority)
575 * @return @c 0 if no error. otherwise a negative error value
576 * @note If this function is not called, default values are used.
579 HostHandler::setConstraint (uint32_t modelid, npuConstraint constraint)
581 Model *model = models_.find (modelid);
/* not-found error return is not visible in this chunk */
582 if (model == nullptr)
585 model->setConstraint (constraint);
591 * @brief find and return model instance
592 * @param[in] modelid model id
593 * @return model instance if found. otherwise nullptr
596 HostHandler::getModel (uint32_t modelid)
598 return models_.find (modelid);
601 /** @brief dummy callback object for runSync; bridges the async callback to a blocking wait. */
/* constructor: remembers the user's output buffer; completion flag starts false */
604 callbackSync (output_buffers *output) : output_(output), done_(false) {}
/* static trampoline matching npuOutputNotify; `data` is the callbackSync instance */
606 static void callback (output_buffers *output, uint64_t sequence, void *data) {
607 callbackSync *sync = static_cast<callbackSync *>(data);
608 sync->callback (output, sequence);
611 void callback (output_buffers *output, uint64_t sequence) {
612 if (output_ != nullptr) {
613 /** just copy internal variables of output buffers */
614 memcpy (output_, output, sizeof (output_buffers));
/* block until the instance callback has signalled completion */
621 std::unique_lock<std::mutex> lock (m_);
622 cv_.wait (lock, [this]() { return done_; });
627 std::condition_variable cv_;
628 output_buffers *output_;
633 * @brief Execute inference. Wait (block) until the output is available.
634 * @param[in] modelid The model to be inferred.
635 * @param[in] input The input data to be inferred.
636 * @param[out] output The output result.
637 * @return @c 0 if no error. otherwise a negative error value
640 HostHandler::runSync (uint32_t modelid, const input_buffers *input,
641 output_buffers *output)
/* wrap the async API with a condition-variable-based waiter */
643 callbackSync sync (output);
644 int status = runAsync (modelid, input, callbackSync::callback,
645 static_cast <void*> (&sync), NPUASYNC_DROP_OLD, nullptr);
647 /** sync needs to wait callback */
654 * @brief Invoke NPU inference. Unblocking call.
655 * @param[in] modelid The model to be inferred.
656 * @param[in] input The input data to be inferred.
657 * @param[in] cb The output buffer handler.
658 * @param[in] cb_data The data given as a parameter to the runNPU_async call.
659 * @param[in] mode Configures how this operation works.
660 * @param[out] sequence The sequence number returned with runNPU_async.
661 * @return @c 0 if no error. otherwise a negative error value
664 HostHandler::runAsync (uint32_t modelid, const input_buffers *input,
665 npuOutputNotify cb, void *cb_data, npu_async_mode mode, uint64_t *sequence)
667 Model *model = nullptr;
/* some devices (e.g., ASR) carry embedded models and skip this lookup */
669 if (device_->needModel()) {
670 model = getModel (modelid);
/* not-found error return is not visible in this chunk */
671 if (model == nullptr)
675 device_->setAsyncMode (mode);
676 return device_->run (NPUINPUT_HOST, model, input, cb, cb_data, sequence);
680 * @brief get number of available devices
681 * @param[in] type device type
682 * @return number of devices
685 HostHandler::getNumDevices (dev_type type)
687 return DriverAPI::getNumDevices (type);
691 * @brief get device instance
692 * @param[out] dev device instance
693 * @param[in] type device type
694 * @param[in] id device id
695 * @return 0 if no error. otherwise a negative errno
698 HostHandler::getDevice (npudev_h *dev, dev_type type, uint32_t id)
700 int num_devices = getNumDevices (type);
702 /** check the validity of device id */
703 if (!(num_devices > 0 && id < static_cast<uint32_t>(num_devices))) {
704 logerr (TAG, "Invalid arguments provided\n");
708 Device *device = Device::createInstance (type, id);
709 if (device == nullptr) {
710 logerr (TAG, "Failed to create a device with the given type\n");
715 /** This is just for backward-compatibility; we don't guarantee its correctness */
722 * @brief allocate generic buffer (just for users)
723 * @param[out] buffer buffer instance
724 * @return 0 if no error. otherwise a negative errno
727 HostHandler::allocGenericBuffer (generic_buffer *buffer)
/* validate the requested size; error returns are not visible in this chunk */
732 if (buffer->size == 0) {
733 logerr (TAG, "Invalid size\n");
737 if (buffer->size > UINT32_MAX) {
738 logerr (TAG, "Don't support such a large size");
/* dispatch on the requested buffer type (file-backed vs. device memory) */
742 switch (buffer->type) {
745 if (buffer->filepath == nullptr)
751 /* now, npu-engine always provides dmabuf-based allocation */
753 int status = device_->allocMemory (buffer->size, &hwmem);
/* expose the dmabuf handle and mapping back to the caller's buffer */
757 buffer->dmabuf = hwmem->getDmabuf();
758 buffer->offset = hwmem->getOffset();
759 buffer->addr = hwmem->getData();
769 * @brief deallocate generic buffer (just for users)
770 * @param[in] buffer buffer instance
771 * @return 0 if no error. otherwise a negative errno
774 HostHandler::deallocGenericBuffer (generic_buffer *buffer)
/* file-backed buffers need no device-side free; dmabuf buffers do */
780 switch (buffer->type) {
782 status = 0; /** always true cuz nothing to do */
786 status = device_->deallocMemory (buffer->dmabuf);
797 * @brief allocate multiple generic buffers (just for users)
798 * @param[out] buffers multi-buffer instance
799 * @return 0 if no error. otherwise a negative errno
802 HostHandler::allocGenericBuffer (generic_buffers *buffers)
804 if (buffers == NULL || buffers->num_buffers < 1)
/* all sub-buffers share the type of the first one; FILE-backed is rejected */
807 buffer_types type = buffers->bufs[0].type;
808 if (type == BUFFER_FILE)
/* allocate one contiguous region covering the sum of all requested sizes */
811 uint64_t total_size = 0;
812 for (uint32_t idx = 0; idx < buffers->num_buffers; idx++)
813 total_size += buffers->bufs[idx].size;
815 uint64_t first_size = buffers->bufs[0].size;
816 buffers->bufs[0].size = total_size;
817 int status = allocGenericBuffer (&buffers->bufs[0]);
/* carve the remaining sub-buffers out of the single allocation at increasing offsets */
821 uint64_t offset = first_size;
822 for (uint32_t idx = 1; idx < buffers->num_buffers; idx++) {
823 buffers->bufs[idx].dmabuf = buffers->bufs[0].dmabuf;
824 buffers->bufs[idx].offset = buffers->bufs[0].offset + offset;
825 buffers->bufs[idx].addr = static_cast<char*>(buffers->bufs[0].addr) + offset;
826 buffers->bufs[idx].type = type;
828 offset += buffers->bufs[idx].size;
/* restore the first buffer's logical size after the bulk allocation */
831 buffers->bufs[0].size = first_size;
837 * @brief deallocate multiple generic buffers (just for users)
838 * @param[in] buffers multi-buffer instance
839 * @return 0 if no error. otherwise a negative errno
842 HostHandler::deallocGenericBuffer (generic_buffers *buffers)
844 if (buffers == NULL || buffers->num_buffers < 1)
/* the sub-buffers share one allocation, so freeing the first frees them all */
847 return deallocGenericBuffer (&buffers->bufs[0]);
851 * @brief get the current memory status
852 * @param[out] alloc_total The size of allocated memory until now
853 * @param[out] free_total The size of freed memory until now
854 * @return 0 if no error. otherwise a negatice error value
857 HostHandler::getMemoryStatus (size_t *alloc_total, size_t *free_total)
859 /** API is always set in initialize () */
860 const DriverAPI * api = device_->getDriverAPI ();
861 assert (api != nullptr);
863 return api->getMemoryStatus (alloc_total, free_total);
866 /** implement methods of Device class */
868 /** @brief constructor of device */
869 Device::Device (dev_type type, int id, bool need_model)
870 : comm_ (CommPlugin::getCommPlugin()), type_ (type), id_ (id), need_model_ (true),
871 mode_ (NPUASYNC_WAIT), initialized_ (false), atomic_flag_ (ATOMIC_FLAG_INIT)
876 * @brief create device instance depending on device type and id
877 * @param[in] type device type
878 * @param[in] id device id
879 * @return device instance
882 Device::createInstance (dev_type type, int id)
884 Device *device = nullptr;
/* select the concrete device class by the masked device-type bits;
 * break statements and the default case are not visible in this chunk */
886 switch (type & DEVICETYPE_MASK) {
887 case DEVICETYPE_TRIV:
888 device = new TrinityVision (id);
890 case DEVICETYPE_TRIV2:
891 device = new TrinityVision2 (id);
893 case DEVICETYPE_TRIA:
894 device = new TrinityAsr (id);
/* a device that fails init() is presumably destroyed here -- cleanup lines out of view */
900 if (device != nullptr && device->init () != 0) {
909 * @brief device initialization
910 * @return 0 if no error, otherwise a negative errno
911 * @note Init failures come from createDriverAPI() only.
916 /** should be initialized only once */
/* atomic_flag guards against concurrent double-initialization */
917 if (!atomic_flag_.test_and_set()) {
918 /** create the corresponding driver API */
919 api_ = DriverAPI::createDriverAPI (type_, id_);
920 if (api_.get() == nullptr) {
/* clear the flag so a later init() attempt can retry */
921 atomic_flag_.clear();
922 logerr (TAG, "Failed to create driver API\n");
926 handler_.reset (new HostHandler (this));
927 scheduler_.reset (new Scheduler (api_.get()));
928 mem_ = MemAllocator::createInstance (api_.get());
930 initialized_ = true; /** c++11 does not provide test() of atomic flag */
937 * @brief stop all requests from this device
938 * @param[in] force_stop indicate the scheduler waits until to handle previous requests
939 * @return 0 if no error, otherwise a negative errno
942 Device::stop (bool force_stop)
944 if (!initialized ()) {
945 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
/* a STOP request is queued like any other; the scheduler takes ownership */
949 Request *req = new Request (NPUINPUT_STOP);
950 req->setForceStop (force_stop);
951 return scheduler_->submitRequest (req);
955 * @brief allocate generic memory buffer
956 * @param[out] hwmem_ptr hwmem instance pointer
957 * @return 0 if no error, otherwise a negative errno
960 Device::allocMemory (size_t size, HWmem ** hwmem_ptr)
962 if (!initialized ()) {
963 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
/* error returns for invalid args / failed allocation are out of view here */
967 if (size == 0 || hwmem_ptr == nullptr)
970 HWmem *hwmem = mem_->allocMemory (size);
971 if (hwmem == nullptr)
979 * @brief deallocate generic memory buffer
980 * @param[in] dmabuf_fd dmabuf file descriptor
981 * @return 0 if no error, otherwise a negative errno
984 Device::deallocMemory (int dmabuf_fd)
986 if (!initialized ()) {
987 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
/* the allocator tracks buffers by their dmabuf fd */
991 return mem_->deallocMemory (dmabuf_fd);
995 * @brief extract the buffer instance from input generic buffers
996 * @param[in] meta the model metadata
997 * @param[in] input the input generic buffers
998 * @return the buffer instance
1001 TrinityVision::prepareInputBuffers (const Metadata *meta, const input_buffers *input)
1003 if (meta == nullptr || input == nullptr ||
1004 meta->getInputNum() != input->num_buffers) {
1005 logerr (TAG, "Invalid metadata info provided\n");
/* DMABUF inputs wrap the caller's external memory; otherwise a device buffer is allocated */
1010 const generic_buffer *first = &input->bufs[0];
1011 if (first->type == BUFFER_DMABUF) {
1012 buffer = mem_->allocBuffer (new HWmemExternal);
1013 if (buffer == nullptr)
1016 buffer->setDmabuf (first->dmabuf);
1017 buffer->setOffset (first->offset);
1018 buffer->setSize (meta->getBufferSize());
1020 buffer = mem_->allocBuffer (new HWmemDevice);
1021 if (buffer == nullptr)
1024 int status = buffer->alloc (meta->getBufferSize ());
1026 logerr (TAG, "Failed to allocate buffer: %d\n", status);
/* lay out the per-tensor views on top of the (single) buffer */
1032 int status = buffer->createTensors (meta);
1034 logerr (TAG, "Failed to create tensors: %d\n", status);
1043 * @brief implementation of TRIV's setModel ()
1044 * @param[in] model_buf the model generic buffer
1045 * @param[out] model the model instance
1046 * @return 0 if no error, otherwise a negative errno
1049 TrinityVision::setModel (const generic_buffer *model_buf, Model ** model_ptr)
1051 if (!initialized ()) {
1052 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
1056 if (model_buf == nullptr || model_ptr == nullptr)
/* DMABUF models reference the caller's memory; other types are copied into device memory */
1062 switch (model_buf->type) {
1064 model = mem_->allocModel (new HWmemExternal);
1065 if (model == nullptr) {
1066 logerr (TAG, "Failed to allocate model\n");
1070 model->setDmabuf (model_buf->dmabuf);
1071 model->setOffset (model_buf->offset);
1072 model->setSize (model_buf->size);
1076 model = mem_->allocModel (new HWmemDevice);
1077 if (model == nullptr) {
1078 logerr (TAG, "Failed to allocate model\n");
1082 status = model->alloc (model_buf->size);
1084 logerr (TAG, "Failed to allocate model: %d\n", status);
/* copy (or mmap) the model contents from the generic buffer into device memory */
1088 status = comm_.extractGenericBuffer (model_buf, model->getData(), nullptr);
1090 logerr (TAG, "Failed to extract generic buffer: %d\n", status);
/* parse metadata from the start of the model image */
1098 status = model->setMetadata (model->getData());
/* program/weight layout offsets are derived from the metadata */
1102 model_config_t config;
1103 config.dbuf_fd = model->getDmabuf();
1104 config.program_size = model->getMetadata()->getProgramSize();
1105 config.program_offset_addr = model->getOffset() + model->getMetadata()->getMetaSize();
1106 config.weight_offset_addr = config.program_offset_addr + config.program_size;
1108 status = api_->registerModel (&config);
/* remember the driver-assigned id so it can be unregistered later */
1112 model->setInternalID(config.id);
1123 * @brief implementation of TRIV's run()
1124 * @param[in] opmode input opmode
1125 * @param[in] model the model instance
1126 * @param[in] input generic buffers of input data
1127 * @param[in] cb the output callback
1128 * @param[in] cb_data the output callback data
1129 * @param[out] sequence The sequence number returned with runNPU_async.
1132 TrinityVision::run (npu_input_opmode opmode, const Model *model,
1133 const input_buffers *input, npuOutputNotify cb, void *cb_data,
1136 if (!initialized ()) {
1137 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
1141 if (opmode != NPUINPUT_HOST) {
1142 logerr (TAG, "TRIV supports only host inputservice\n");
1146 if (model == nullptr || input == nullptr) {
1147 logerr (TAG, "TRIV requires both model and input buffers\n");
1151 Buffer *buffer = prepareInputBuffers (model->getMetadata(), input);
1152 if (buffer == nullptr) {
1153 logerr (TAG, "Failed to extract buffer instance\n");
/* non-external buffers: copy each input tensor in, applying the
 * model-specific data manipulation (layout/quantization) on the way */
1157 if (!buffer->isExternal ()) {
1158 for (uint32_t idx = 0; idx < input->num_buffers; idx++) {
1159 auto func = std::bind (TrinityVision::manipulateData, model, idx, true,
1160 std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
1161 int status = comm_.extractGenericBuffer (&input->bufs[idx],
1162 buffer->getInputTensor(idx)->getData(), func);
1164 logerr (TAG, "Failed to feed input buffer: %d\n", status);
1170 /** this device uses CMA buffer */
/* queue the inference; the scheduler owns the request from here on */
1172 Request *req = new Request (opmode);
1173 req->setModel (model);
1174 req->setBuffer (buffer);
1177 req->setCallback (std::bind (&TrinityVision::callback, this, req, cb, cb_data));
1179 if (sequence != nullptr)
1180 *sequence = req->getID();
1182 return scheduler_->submitRequest (req);
1186 * @brief callback of TRIV request
1187 * @param[in] req the request instance
1188 * @param[in] cb callback for completion
1189 * @param[in] cb_data callback data
1190 * @note The callback invoke does not guarantee the request was successful
1191 * @todo Check the request failures
1194 TrinityVision::callback (Request *req, npuOutputNotify cb, void *cb_data)
1196 const Model *model = req->getModel ();
1197 Buffer *buffer = req->getBuffer ();
1198 output_buffers output = {
1199 .num_buffers = buffer->getOutputNum ()
1202 for (uint32_t idx = 0; idx < output.num_buffers; idx++) {
1203 uint32_t output_tensor_size = model->getOutputTensorSize (idx);
/* external (DMABUF) buffers are handed back in place; otherwise the output
 * is copied into freshly malloc'ed memory the user must free */
1205 if (buffer->isExternal ()) {
1206 output.bufs[idx].type = BUFFER_DMABUF;
1207 output.bufs[idx].size = output_tensor_size;
1208 output.bufs[idx].addr = buffer->getOutputTensor(idx)->getData();
1210 output.bufs[idx].type = BUFFER_MAPPED;
1211 output.bufs[idx].size = output_tensor_size;
1212 /** user needs to free this */
1213 output.bufs[idx].addr = malloc (output_tensor_size);
/* apply the model-specific inverse data manipulation while copying out */
1215 auto func = std::bind (TrinityVision::manipulateData, model, idx, false,
1216 std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
1217 int status = comm_.insertGenericBuffer (buffer->getOutputTensor(idx)->getData(),
1218 &output.bufs[idx], func);
1220 logerr (TAG, "Failed to return output buffer: %d\n", status);
/* finally notify the user with the assembled output set */
1225 cb (&output, req->getID(), cb_data);
1231 * @brief extract the segment table instance from input generic buffers
1232 * @param[in] model the model instance
1233 * @param[in] input the input generic buffers
1234 * @return the segment table instance
1237 TrinityVision2::prepareSegmentTable (const Model *model, const input_buffers *input)
1239 if (model == nullptr || input == nullptr) {
1240 logerr (TAG, "Invalid arguments provided\n");
1244 const Metadata *meta = model->getMetadata ();
1245 if (meta == nullptr ||
1246 meta->getInputNum() != input->num_buffers) {
1247 logerr (TAG, "Invalid metadata info provided\n");
/* TRIV2 describes all I/O memory through a device-resident segment table */
1251 SegmentTable * segt = mem_->allocSegmentTable (new HWmemDevice);
1252 int status = segt->alloc ();
1254 logerr (TAG, "Failed to allocate segment table: %d\n", status);
1258 status = segt->createSegments (model, input);
1260 logerr (TAG, "Failed to create segments: %d\n", status);
1271 /** @brief implementation of TRIV2's setModel () */
/**
 * @param[in] model_buf generic buffer holding the model binary
 * @param[out] model_ptr receives the constructed Model instance on success
 */
1273 TrinityVision2::setModel (const generic_buffer *model_buf, Model ** model_ptr)
1275 if (!initialized ()) {
1276 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
1280 if (model_buf == nullptr || model_ptr == nullptr)
/** model storage depends on how the caller provided the binary */
1286 switch (model_buf->type) {
/** dmabuf-backed model: wrap the caller's dmabuf, no copy is made */
1288 model = mem_->allocModel (new HWmemExternal);
1289 if (model == nullptr) {
1290 logerr (TAG, "Failed to allocate model\n");
1294 model->setDmabuf (model_buf->dmabuf);
1295 model->setOffset (model_buf->offset);
1296 model->setSize (model_buf->size);
/** otherwise allocate device memory and copy the model binary into it */
1300 model = mem_->allocModel (new HWmemDevice);
1301 if (model == nullptr) {
1302 logerr (TAG, "Failed to allocate model\n");
1306 status = model->alloc (model_buf->size);
1308 logerr (TAG, "Failed to allocate model: %d\n", status);
1312 status = comm_.extractGenericBuffer (model_buf, model->getData(), nullptr);
1314 logerr (TAG, "Failed to extract generic buffer: %d\n", status);
/** parse npubinfmt metadata from the start of the model image */
1322 status = model->setMetadata (model->getData());
/** describe program location to the driver; program follows the metadata */
1326 model_config_t config;
1327 config.dbuf_fd = model->getDmabuf();
1328 config.program_size = model->getMetadata()->getProgramSize();
1329 config.program_offset_addr = model->getOffset() + model->getMetadata()->getMetaSize();
1330 /** [TRIV2] weight is stored in a segment */
1331 model->setWeightData (model->getData () + model->getMetadata()->getMetaSize() +
1332 model->getMetadata()->getProgramSize());
1334 status = api_->registerModel (&config);
/** remember the driver-assigned model id for later requests */
1338 model->setInternalID(config.id);
1347 /** @brief implementation of TRIV2's run() */
/**
 * @param[in] opmode input operation mode (host-fed or HW-recurring)
 * @param[in] model the model to execute
 * @param[in] input the user's input generic buffers
 * @param[in] cb completion callback; cb_data is forwarded to it
 * @param[out] sequence receives the request id (when non-null; the guard is
 *             on a line not visible here)
 */
1349 TrinityVision2::run (npu_input_opmode opmode, const Model *model,
1350 const input_buffers *input, npuOutputNotify cb, void *cb_data,
1353 if (!initialized ()) {
1354 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
/** TRIV2 accepts only host-fed or HW-recurring input modes */
1358 if (opmode != NPUINPUT_HOST && opmode != NPUINPUT_HW_RECURRING)
1361 /** this device uses segment table */
1362 SegmentTable * segt = prepareSegmentTable (model, input);
1363 if (segt == nullptr) {
1364 logerr (TAG, "Failed to create segment table instance\n");
/** non-external models keep weights in host-visible model memory;
 *  copy them into the table's dedicated weight segment */
1368 if (!model->isExternal ()) {
1369 /** set weight data */
1370 HWmem * weight = segt->getWeightSegment ();
1371 if (weight == nullptr) {
1372 logerr (TAG, "Failed to get a weight segment\n");
1376 const Metadata * meta = model->getMetadata ();
1377 memcpy (weight->getData (), model->getWeightData (), meta->getWeightSize ());
1380 if (!segt->isExternal ()) {
1381 /** extract input data */
1382 for (uint32_t idx = 0; idx < input->num_buffers; idx++) {
/** apply device-specific input manipulation (is_input == true) while copying */
1383 auto func = std::bind (TrinityVision2::manipulateData, model, idx, true,
1384 std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
1385 int status = comm_.extractGenericBuffer (
1387 segt->getInputSegment(idx)->getData() + segt->getInputSegmentOffset(idx),
1390 logerr (TAG, "Failed to feed input segment: %d\n", status);
/** hand ownership of segt to the request and queue it on the scheduler */
1396 Request *req = new Request (opmode);
1397 req->setModel (model);
1398 req->setSegmentTable (segt);
1399 req->setCallback (std::bind (&TrinityVision2::callback, this, req, cb, cb_data));
1402 *sequence = req->getID();
1404 return scheduler_->submitRequest (req);
1407 /** @brief callback of TRIV2 request */
/**
 * @param[in] req the finished request, providing the model and segment table
 * @param[in] cb user callback invoked with the populated output buffers
 * @param[in] cb_data opaque user data forwarded unchanged to cb
 */
1409 TrinityVision2::callback (Request *req, npuOutputNotify cb, void *cb_data)
1411 const Model *model = req->getModel ();
1412 SegmentTable *segt = req->getSegmentTable ();
/** one output entry per output segment in the table */
1413 output_buffers output = {
1414 .num_buffers = segt->getNumOutputSegments ()
1417 for (uint32_t idx = 0; idx < output.num_buffers; idx++) {
1418 uint32_t output_tensor_size = model->getOutputTensorSize (idx);
/** TRIV2 always copies outputs into malloc'ed memory owned by the user */
1420 output.bufs[idx].type = BUFFER_MAPPED;
1421 output.bufs[idx].size = output_tensor_size;
1422 /** user needs to free this */
1423 output.bufs[idx].addr = malloc (output_tensor_size);
/** bind device-specific output manipulation (is_input == false) for the copy-out */
1425 auto func = std::bind (TrinityVision2::manipulateData, model, idx, false,
1426 std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
/** source is the output segment's data plus its per-tensor offset */
1427 int status = comm_.insertGenericBuffer (
1428 segt->getOutputSegment(idx)->getData() + segt->getOutputSegmentOffset(idx),
1429 &output.bufs[idx], func);
1431 logerr (TAG, "Failed to return output buffer: %d\n", status);
1435 cb (&output, req->getID(), cb_data);
1440 /** @brief implementation of TRIA's run(): WIP */
/**
 * @param[in] opmode input operation mode; only NPUINPUT_HOST is accepted
 * @param[in] model unused by ASR (see comment below)
 * @param[in] input input buffers; only the first one is consumed
 * @param[in] cb completion callback; cb_data is forwarded to it
 * @param[out] sequence receives the request id (when non-null; the guard is
 *             on a line not visible here)
 */
1442 TrinityAsr::run (npu_input_opmode opmode, const Model *model,
1443 const input_buffers *input, npuOutputNotify cb, void *cb_data,
1446 if (!initialized ()) {
1447 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
1451 if (opmode != NPUINPUT_HOST)
1456 /** ASR does not require model and support only a single tensor */
1457 const generic_buffer *first_buf = &input->bufs[0];
/** dmabuf input: wrap the caller's dmabuf without copying */
1458 if (first_buf->type == BUFFER_DMABUF) {
1459 buffer = mem_->allocBuffer (new HWmemExternal);
1460 if (buffer == nullptr)
1463 buffer->setDmabuf (first_buf->dmabuf);
1464 buffer->setOffset (first_buf->offset);
1465 buffer->setSize (first_buf->size);
/** otherwise allocate device memory sized to the input */
1467 buffer = mem_->allocBuffer (new HWmemDevice);
1468 if (buffer == nullptr)
1471 status = buffer->alloc (first_buf->size);
1478 status = buffer->createTensors ();
1480 logerr (TAG, "Failed to create tensors: %d\n", status);
/** internal buffers need the user data copied in; no manipulation (nullptr) */
1485 if (!buffer->isExternal ()) {
1486 status = comm_.extractGenericBuffer (first_buf,
1487 buffer->getInputTensor(0)->getData(), nullptr);
/** queue the request on the scheduler */
1492 Request *req = new Request (opmode);
1493 req->setBuffer (buffer);
1494 req->setCallback (std::bind (&TrinityAsr::callback, this, req, cb, cb_data));
1497 *sequence = req->getID();
1499 return scheduler_->submitRequest (req);
1502 /** @brief callback of TRIA request: WIP */
/**
 * @param[in] req the finished request
 * @param[in] cb user callback for delivering outputs
 * @param[in] cb_data opaque user data forwarded to cb
 */
1504 TrinityAsr::callback (Request *req, npuOutputNotify cb, void *cb_data)
1508 /** Implement data manipulation (each device may have different impl.) */
/**
 * @brief element-wise quantize/de-quantize copy, expanded per source type.
 * Quantize branch: reads typed values from src, divides by _scale, adds
 * _zero_point, and stores uint8_t into dst. De-quantize branch: reads uint8_t
 * from src, subtracts _zero_point, rescales, stores the destination type, and
 * advances dst by data_size and src by one byte per element.
 * Relies on _scale, _zero_point, num_elems, data_size, idx and val being in
 * scope at the expansion site (see memcpy_with_quant below).
 *
 * NOTE(review): `val = (val > 255.0) ? 255.0 : 0.0;` forces every value that
 * does NOT overflow to 0 instead of keeping it — a saturating clamp to
 * [0, 255], i.e. `(val > 255.0) ? 255.0 : ((val < 0.0) ? 0.0 : val)`, looks
 * intended. Confirm against the quantization spec before changing.
 */
1512 #define do_quantized_memcpy(type) do {\
1515 while (idx < num_elems) {\
1516 val = ((type *) src)[idx];\
1517 val = val / _scale;\
1518 val += _zero_point;\
1519 val = (val > 255.0) ? 255.0 : 0.0;\
1520 ((uint8_t *) dst)[idx++] = (uint8_t) val;\
1523 while (idx < num_elems) {\
1524 val = *(uint8_t *) src;\
1525 val -= _zero_point;\
1527 ((type *) dst)[idx++] = (type) val;\
1528 dst = (void*)(((uint8_t *) dst) + data_size);\
1529 src = (void*)(((uint8_t *) src) + 1);\
1535 * @brief memcpy during quantization
 * @param[in] quant true → quantize src (typed) into dst (uint8_t);
 *                  false → de-quantize src (uint8_t) into dst (typed)
 * @param[in] type element data type of the non-quantized side
 * @param[in] scale quantization scale factor
 * @param[in] zero_point quantization zero point
 * @param[out] dst destination buffer
 * @param[in] src source buffer
 * @param[in] num_elems number of elements to convert
1537 static void memcpy_with_quant (bool quant, data_type type, float scale, uint32_t zero_point,
1538 void *dst, const void *src, uint32_t num_elems)
/** widen to double so the per-element arithmetic in the macro is done in
 *  double precision regardless of the element type */
1540 double _scale = (double) scale;
1541 double _zero_point = (double) zero_point;
1543 uint32_t data_size = get_data_size (type);
/** dispatch on the element type; each case expands do_quantized_memcpy */
1547 case DATA_TYPE_INT8:
1548 do_quantized_memcpy (int8_t);
1550 case DATA_TYPE_UINT8:
1551 do_quantized_memcpy (uint8_t);
1553 case DATA_TYPE_INT16:
1554 do_quantized_memcpy (int16_t);
1556 case DATA_TYPE_UINT16:
1557 do_quantized_memcpy (uint16_t);
1559 case DATA_TYPE_INT32:
1560 do_quantized_memcpy (int32_t);
1562 case DATA_TYPE_UINT32:
1563 do_quantized_memcpy (uint32_t);
1565 case DATA_TYPE_INT64:
1566 do_quantized_memcpy (int64_t);
1568 case DATA_TYPE_UINT64:
1569 do_quantized_memcpy (uint64_t);
1571 case DATA_TYPE_FLOAT32:
1572 do_quantized_memcpy (float);
1574 case DATA_TYPE_FLOAT64:
1575 do_quantized_memcpy (double);
1578 logerr (TAG, "Unsupported datatype %d\n", type);
1583 * @brief perform data manipulation
1584 * @param[in] model model instance
1585 * @param[in] idx tensor index
1586 * @param[in] is_input indicate it's input manipulation
1587 * @param[out] dst destination buffer
1588 * @param[in] src source buffer (feature map)
1589 * @param[in] size size to be copied
1590 * @return size of memory copy if no error, otherwise zero
1592 * @note the input data format should be NHWC
1593 * @detail rules for the memory address of activations in NPU HW.
1594 * (https://code.sec.samsung.net/confluence/pages/viewpage.action?pageId=146491864)
1596 * 1) Special case (depth == 3)
1597 * - addr(x,y,z) = addr(0,0,0) + (z) + 3 * (x + width * y)
1600 * - addr(x,y,z) = addr(0,0,0) + (z % MPA_L) + MPA_L * (x + width * (y + height * (z / MPA_L)))
1602 * Thus, if depth is not a multiple of MPA_L (i.e., 64), zero padding is required
1605 TrinityVision::manipulateData (const Model *model, uint32_t idx, bool is_input,
1606 void *dst, void *src, size_t size)
1608 const Metadata *meta = model->getMetadata();
1609 const tensor_data_info* info;
1610 const uint32_t *dims;
1611 uint32_t zero_point;
1614 /** extract required information from the metadata */
/** input and output tensors have separate metadata tables */
1616 if (idx >= meta->getInputNum()) {
1617 logerr (TAG, "Wrong information for input tensors in metadata\n");
1621 info = model->getInputDataInfo (idx);
1622 dims = meta->getInputDims (idx);
1623 zero_point = meta->getInputQuantZero (idx);
1624 scale = meta->getInputQuantScale (idx);
1626 if (idx >= meta->getOutputNum()) {
1627 logerr (TAG, "Wrong information for output tensors in metadata\n");
1631 info = model->getOutputDataInfo (idx);
1632 dims = meta->getOutputDims (idx);
1633 zero_point = meta->getOutputQuantZero (idx);
1634 scale = meta->getOutputQuantScale (idx);
1637 if (info == nullptr) {
1638 logerr (TAG, "Unmatched tensors info\n");
/** dims is interpreted as NHWC (see @note above) */
1642 uint32_t batch = dims[0];
1643 uint32_t height = dims[1];
1644 uint32_t width = dims[2];
1645 uint32_t depth = dims[3];
1647 uint32_t data_size = get_data_size (info->type);
1648 if (data_size == 0) {
1649 logerr (TAG, "Invalid data size\n");
1653 bool need_quantization = false;
1655 * note that we assume DATA_TYPE_SRNPU is the smallest data type that we consider.
1656 * Also, DATA_TYPE_SRNPU and uint8_t may be regarded as the same in the view of apps.
/** quantization is needed when the app-side type is wider than the NPU type,
 *  or when the quantization parameters differ from the defaults */
1658 if (info->type != DATA_TYPE_SRNPU) {
1659 assert (data_size >= get_data_size (DATA_TYPE_SRNPU));
1661 if (data_size > get_data_size (DATA_TYPE_SRNPU) ||
1662 !(zero_point == default_quant_zero && scale == default_quant_scale))
1663 need_quantization = true;
1666 /** check data manipulation is required */
/** depth 3 and 64 hit the direct-copy special cases described in the header */
1667 if (depth != 3 && depth != 64 && info->layout != DATA_LAYOUT_SRNPU) {
1668 uint32_t MPA_L = DATA_GRANULARITY;
1669 uint32_t n, h, w, d;
1670 uint32_t std_offset; /* standard offset in NHWC data format */
1671 uint32_t npu_offset; /* npu offset in NPU HW data format*/
1672 uint32_t src_offset;
1673 uint32_t dst_offset;
1674 uint32_t slice_size;
1676 /* @todo we currently support only NHWC */
1677 if (info->layout != DATA_LAYOUT_NHWC) {
1678 logerr (TAG, "data manipulation is supported for NHWC only\n");
/** walk the tensor in MPA_L-deep slices, translating between the NHWC
 *  offset and the NPU HW offset (generic-case formula in the header) */
1682 for (n = 0; n < batch; n++) {
1683 for (h = 0; h < height; h++) {
1684 for (w = 0; w < width; w++) {
1685 for (d = 0; d < depth; d += MPA_L) {
1686 std_offset = d + depth * (w + width * (h + n * height));
1687 npu_offset = MPA_L * (w + width * (h + (n + d / MPA_L) * height));
/** last slice may be shorter when depth is not a multiple of MPA_L */
1688 slice_size = (depth - d >= MPA_L) ? MPA_L : depth - d;
/** is_input decides the direction: NHWC→NPU for inputs, NPU→NHWC for outputs;
 *  only the NHWC side is scaled by data_size (the NPU side is byte-granular) */
1691 src_offset = std_offset * data_size;
1692 dst_offset = npu_offset;
1694 src_offset = npu_offset;
1695 dst_offset = std_offset * data_size;
1698 /* if depth is not a multiple of MPA_L, add zero paddings (not exact values) */
1699 if (need_quantization) {
1700 memcpy_with_quant (is_input, info->type, scale, zero_point,
1701 static_cast<char*>(dst) + dst_offset,
1702 static_cast<char*>(src) + src_offset,
1706 static_cast<char*>(dst) + dst_offset,
1707 static_cast<char*>(src) + src_offset,
1714 } else if (need_quantization) {
1715 /** depth == 3 || depth == 64; special cases which can directly copy input tensor data */
/** for inputs size is in bytes of the app type, so convert to element count */
1716 memcpy_with_quant (is_input, info->type, scale, zero_point,
1717 dst, src, is_input ? size / data_size : size);
1719 memcpy (dst, src, size);
/**
 * @brief fallback data manipulation: plain byte copy, no layout or quantization
 * handling. Presumably the build-time alternative to the manipulation-enabled
 * variant above (conditional compilation lines are not visible here — confirm).
 */
1728 TrinityVision::manipulateData (const Model *model, uint32_t idx, bool is_input,
1729 void *dst, void *src, size_t size)
1731 memcpy (dst, src, size);
1737 /** other device types don't have data manip impl. yet */
/** @brief TRIV2 data manipulation placeholder: plain byte copy of size bytes */
1740 TrinityVision2::manipulateData (const Model *model, uint32_t idx, bool is_input,
1741 void *dst, void *src, size_t size)
1743 memcpy (dst, src, size);
/** @brief TRIA data manipulation placeholder: plain byte copy of size bytes */
1748 TrinityAsr::manipulateData (const Model *model, uint32_t idx, bool is_input,
1749 void *dst, void *src, size_t size)
1751 memcpy (dst, src, size);