3 * Copyright (C) 2020 Samsung Electronics
4 * Copyright (C) 2020 Dongju Chae <dongju.chae@samsung.com>
7 * @file ne-host-handler.cc
9 * @brief Implementation of APIs to access NPU from Host
10 * @see https://code.sec.samsung.net/confluence/display/ODLC/2020+Overall+Software+Stack
11 * @author Dongju Chae <dongju.chae@samsung.com>
12 * @bug No known bugs except for NYI items
15 #include "ne-handler.h"
17 #include <libnpuhost.h>
18 #include <npubinfmt.h>
19 #include <NPUdrvAPI.h>
20 #include <CommPlugin.h>
25 #include <condition_variable>
32 #define INIT_HOST_HANDLER(handler, dev) \
33 Device *tdev = static_cast <Device *> (dev); \
34 if (tdev == nullptr) return -EINVAL; \
35 HostHandler *handler = tdev->getHostHandler (); \
36 if (handler == nullptr) return -EINVAL;
38 /** just for backward-compatability */
39 npudev_h HostHandler::latest_dev_ = nullptr;
41 /** implement libnpuhost APIs */
44 * @brief Returns the number of available NPU devices.
45 * @return @c The number of NPU devices.
46 * @retval 0 if no NPU devices available. if positive (number of NPUs) if NPU devices available. otherwise, a negative error value.
47 * @note the caller should call putNPUdevice() to release the device handle
49 int getnumNPUdeviceByType (dev_type type)
51 return HostHandler::getNumDevices (type);
55 * @brief Returns the handle of the chosen NPU devices.
56 * @param[out] dev The NPU device handle
57 * @param[in] id The NPU id to get the handle. 0 <= id < getnumNPUdeviceByType().
58 * @return @c 0 if no error. otherwise a negative error value
59 * @note the caller should call putNPUdevice() to release the device handle
61 int getNPUdeviceByType (npudev_h *dev, dev_type type, uint32_t id)
63 return HostHandler::getDevice (dev, type, id);
67 * @brief release the NPU device instance obtained by getDevice ()
68 * @param[in] dev the NPU device handle
70 void putNPUdevice (npudev_h dev)
73 delete static_cast<Device *> (dev);
77 * @brief Send the NN model to NPU.
78 * @param[in] dev The NPU device handle
79 * @param[in] modelfile The filepath to the compiled NPU NN model in any buffer_type
80 * @param[out] modelid The modelid allocated for this instance of NN model.
81 * @return @c 0 if no error. otherwise a negative error value
83 * @detail For ASR devices, which do not accept models, but have models
84 * embedded in devices, you do not need to call register and
85 * register calls for ASR are ignored.
87 * @todo Add a variation: in-memory model register.
89 int registerNPUmodel (npudev_h dev, generic_buffer *modelfile, uint32_t *modelid)
91 INIT_HOST_HANDLER (host_handler, dev);
93 return host_handler->registerModel (modelfile, modelid);
97 * @brief Remove the NN model from NPU
98 * @param[in] dev The NPU device handle
99 * @param[in] modelid The model to be removed from the NPU.
100 * @return @c 0 if no error. otherwise a negative error value
101 * @detail This may incur some latency with memory compatcion.
103 int unregisterNPUmodel(npudev_h dev, uint32_t modelid)
105 INIT_HOST_HANDLER (host_handler, dev);
107 return host_handler->unregisterModel (modelid);
111 * @brief Remove all NN models from NPU
112 * @param[in] dev The NPU device handle
113 * @return @c 0 if no error. otherwise a negative error value
115 int unregisterNPUmodel_all(npudev_h dev)
117 INIT_HOST_HANDLER (host_handler, dev);
119 return host_handler->unregisterModels ();
123 * @brief [OPTIONAL] Set the data layout for input/output tensors
124 * @param[in] dev The NPU device handle
125 * @param[in] modelid The ID of model whose layouts are set
126 * @param[in] info_in the layout/type info for input tensors
127 * @param[in] info_out the layout/type info for output tensors
128 * @return @c 0 if no error. otherwise a negative error value
129 * @note if this function is not called, default layout/type will be used.
131 int setNPU_dataInfo(npudev_h dev, uint32_t modelid,
132 tensors_data_info *info_in, tensors_data_info *info_out)
134 INIT_HOST_HANDLER (host_handler, dev);
136 return host_handler->setDataInfo (modelid, info_in, info_out);
140 * @brief [OPTIONAL] Set the inference constraint for next NPU inferences
141 * @param[in] dev The NPU device handle
142 * @param[in] modelid The target model id
143 * @param[in] constraint inference constraint (e.g., timeout, priority)
144 * @return @c 0 if no error. otherwise a negative error value
145 * @note If this function is not called, default values are used.
147 int setNPU_constraint(npudev_h dev, uint32_t modelid, npuConstraint constraint)
149 INIT_HOST_HANDLER (host_handler, dev);
151 return host_handler->setConstraint (modelid, constraint);
155 * @brief Execute inference. Wait (block) until the output is available.
156 * @param[in] dev The NPU device handle
157 * @param[in] modelid The model to be inferred.
158 * @param[in] input The input data to be inferred.
159 * @param[out] output The output result. The caller MUST allocate appropriately before calling this.
160 * @return @c 0 if no error. otherwise a negative error value
162 * @detail This is a syntactic sugar of runNPU_async().
163 * CAUTION: There is a memcpy for the output buffer.
165 int runNPU_sync(npudev_h dev, uint32_t modelid, const input_buffers *input,
166 output_buffers *output)
168 INIT_HOST_HANDLER (host_handler, dev);
170 return host_handler->runSync (modelid, input, output);
174 * @brief Invoke NPU inference. Unblocking call.
175 * @param[in] dev The NPU device handle
176 * @param[in] modelid The model to be inferred.
177 * @param[in] input The input data to be inferred.
178 * @param[in] cb The output buffer handler.
179 * @param[out] sequence The sequence number returned with runNPU_async.
180 * @param[in] data The data given as a parameter to the runNPU_async call.
181 * @param[in] mode Configures how this operation works.
182 * @return @c 0 if no error. otherwise a negative error value
184 int runNPU_async(npudev_h dev, uint32_t modelid, const input_buffers *input,
185 npuOutputNotify cb, uint64_t *sequence, void *data,
188 INIT_HOST_HANDLER (host_handler, dev);
190 return host_handler->runAsync (modelid, input, cb, data, mode, sequence);
194 * @brief Allocate a buffer for NPU model with the requested buffer type.
195 * @param[in] dev The NPU device handle
196 * @param[in/out] Buffer the buffer pointer where memory is allocated.
197 * @return 0 if no error, otherwise a negative errno.
199 int allocNPU_modelBuffer (npudev_h dev, generic_buffer *buffer)
201 INIT_HOST_HANDLER (host_handler, dev);
203 return host_handler->allocGenericBuffer (buffer);
207 * @brief Free the buffer and remove the address mapping.
208 * @param[in] dev The NPU device handle
209 * @param[in] buffer the model buffer
210 * @return 0 if no error, otherwise a negative errno.
212 int cleanNPU_modelBuffer (npudev_h dev, generic_buffer *buffer)
214 INIT_HOST_HANDLER (host_handler, dev);
216 return host_handler->deallocGenericBuffer (buffer);
220 * @brief Allocate a buffer for NPU input with the requested buffer type.
221 * @param[in] dev The NPU device handle
222 * @param[in/out] Buffer the buffer pointer where memory is allocated.
223 * @return 0 if no error, otherwise a negative errno.
224 * @note please utilize allocInputBuffers() for multiple input tensors because subsequent
225 * calls of allocInputBuffer() don't gurantee contiguous allocations between them.
227 int allocNPU_inputBuffer (npudev_h dev, generic_buffer *buffer)
229 INIT_HOST_HANDLER (host_handler, dev);
231 return host_handler->allocGenericBuffer (buffer);
235 * @brief Free the buffer and remove the address mapping.
236 * @param[in] dev The NPU device handle
237 * @param[in] buffer the input buffer
238 * @return 0 if no error, otherwise a negative errno.
240 int cleanNPU_inputBuffer (npudev_h dev, generic_buffer *buffer)
242 INIT_HOST_HANDLER (host_handler, dev);
244 return host_handler->deallocGenericBuffer (buffer);
248 * @brief Allocate input buffers, which have multiple instances of generic_buffer
249 * @param[in] dev The NPU device handle
250 * @param[in/out] input input buffers.
251 * @return 0 if no error, otherwise a negative errno.
252 * @note it reuses allocInputBuffer().
253 * @details in case of BUFFER_DMABUF, this function can be used to gurantee physically-contiguous
254 * memory mapping for multiple tensors (in a single inference, not batch size).
256 int allocNPU_inputBuffers (npudev_h dev, input_buffers * input)
258 INIT_HOST_HANDLER (host_handler, dev);
260 return host_handler->allocGenericBuffer (input);
264 * @brief Free input buffers allocated by allocInputBuffers().
265 * @param[in] dev The NPU device handle
266 * @param[in/out] input input buffers.
267 * @note it reuses cleanInputbuffer().
268 * @return 0 if no error, otherwise a negative errno.
270 int cleanNPU_inputBuffers (npudev_h dev, input_buffers * input)
272 INIT_HOST_HANDLER (host_handler, dev);
274 return host_handler->deallocGenericBuffer (input);
278 * @brief get the current memory status for the given device
279 * @param[in] dev The NPU device handle
280 * @param[out] alloc_total The size of allocated memory until now
281 * @param[out] free_total The size of freed memory until now
282 * @return @c 0 if no error. otherwise a negatice error value
284 int getNPU_memoryStatus(npudev_h dev, size_t *alloc_total, size_t *free_total)
286 INIT_HOST_HANDLER (host_handler, dev);
288 return host_handler->getMemoryStatus (alloc_total, free_total);
292 * @brief Get the current device status to be used
293 * @param[in] dev The NPU device handle
294 * @param[out] status the device status
295 * @param[out] num_requests the number of running requests (or pending)
296 * @return 0 if no error, otherwise a negative errno.
298 int getNPU_deviceStatus(npudev_h dev, npu_status *status, uint32_t *num_requests)
300 INIT_HOST_HANDLER (host_handler, dev);
302 return host_handler->getDeviceStatus (status, num_requests);
306 * @brief Get metadata for NPU model
307 * @param[in] model The path of model binary file
308 * @param[in] need_extra whether you want to extract the extra data in metadata
309 * @return the metadata structure to be filled if no error, otherwise nullptr
311 * @note For most npu-engine users, the extra data is not useful because it will be
312 * used for second-party users (e.g., compiler, simulator).
313 * Also, the caller needs to free the metadata.
315 * @note the caller needs to free the metadata
317 npubin_meta * getNPUmodel_metadata (const char *model, bool need_extra)
326 fp = fopen (model, "rb");
328 logerr (TAG, "Failed to open the model binary: %d\n", -errno);
332 meta = (npubin_meta *) malloc (NPUBIN_META_SIZE);
334 logerr (TAG, "Failed to allocate metadata\n");
338 ret = fread (meta, 1, NPUBIN_META_SIZE, fp);
339 if (ret != NPUBIN_META_SIZE) {
340 logerr (TAG, "Failed to read the metadata\n");
344 if (!CHECK_NPUBIN (meta->magiccode)) {
345 logerr (TAG, "Invalid metadata provided\n");
349 if (need_extra && NPUBIN_META_EXTRA (meta->magiccode) > 0) {
350 npubin_meta *new_meta;
352 new_meta = (npubin_meta *) realloc (meta, NPUBIN_META_TOTAL_SIZE(meta->magiccode));
354 logerr (TAG, "Failed to allocate extra metadata\n");
358 ret = fread (new_meta->reserved_extra, 1, NPUBIN_META_EXTRA_SIZE (meta->magiccode), fp);
359 if (ret != NPUBIN_META_EXTRA_SIZE (meta->magiccode)) {
360 logerr (TAG, "Invalid extra metadata provided\n");
380 /** implement methods of HostHandler class */
382 /** @brief host handler constructor */
383 HostHandler::HostHandler (Device *device)
385 /* ignored as we don't use double buffering anymore, but for backward-compatibility */
386 async_mode_ (NPUASYNC_WAIT)
390 /** @brief host handler destructor */
391 HostHandler::~HostHandler ()
396 * @brief register model from generic buffer
397 * @param[in] model_buf model buffer
398 * @param[out] modelid model id
399 * @return 0 if no error. otherwise a negative errno
402 HostHandler::registerModel (generic_buffer *model_buf, uint32_t *modelid)
404 if (model_buf == nullptr || modelid == nullptr) {
405 logerr (TAG, "Invalid arguments given\n");
409 Model *model = nullptr;
410 int status = device_->setModel (model_buf, &model);
412 logerr (TAG, "Failed to set model: %d\n", status);
416 assert (model != nullptr);
418 status = models_.insert (model->getID(), model);
420 logerr (TAG, "Failed to insert model id\n");
425 *modelid = model->getID();
430 * @brief remove the registered model
431 * @param[in] modelid model id
432 * @return 0 if no error. otherwise a negative errno
435 HostHandler::unregisterModel (uint32_t modelid)
437 Model *model = models_.find (modelid);
438 if (model == nullptr)
441 int status = device_->unsetModel (model);
443 logerr (TAG, "Failed to unset model: %d\n", status);
447 return models_.remove (modelid);
451 * @brief remove all registered models
455 HostHandler::unregisterModels ()
462 * @brief Set the data layout for input/output tensors
463 * @param[in] modelid The ID of model whose layouts are set
464 * @param[in] in the layout/type info for input tensors
465 * @param[in] out the layout/type info for output tensors
466 * @return @c 0 if no error. otherwise a negative error value
467 * @note if this function is not called, default layout/type will be used.
470 HostHandler::setDataInfo (uint32_t modelid, tensors_data_info *in,
471 tensors_data_info *out)
473 Model *model = models_.find (modelid);
474 if (model == nullptr)
477 return model->setDataInfo (in, out);
481 * @brief Set the inference constraint for next NPU inferences
482 * @param[in] modelid The target model id
483 * @param[in] constraint inference constraint (e.g., timeout, priority)
484 * @return @c 0 if no error. otherwise a negative error value
485 * @note If this function is not called, default values are used.
488 HostHandler::setConstraint (uint32_t modelid, npuConstraint constraint)
490 Model *model = models_.find (modelid);
491 if (model == nullptr)
494 model->setConstraint (constraint);
500 * @brief find and return model instance
501 * @param[in] modelid model id
502 * @return model instance if found. otherwise nullptr
505 HostHandler::getModel (uint32_t modelid)
507 return models_.find (modelid);
510 /** @brief dummay callback for runSync. */
513 callbackSync (output_buffers *output) : output_(output), done_(false) {}
515 static void callback (output_buffers *output, uint64_t sequence, void *data) {
516 callbackSync *sync = static_cast<callbackSync *>(data);
517 sync->callback (output, sequence);
520 void callback (output_buffers *output, uint64_t sequence) {
521 if (output_ != nullptr) {
522 /** just copy internal variables of output buffers */
523 memcpy (output_, output, sizeof (output_buffers));
530 std::unique_lock<std::mutex> lock (m_);
531 cv_.wait (lock, [this]() { return done_; });
536 std::condition_variable cv_;
537 output_buffers *output_;
542 * @brief Execute inference. Wait (block) until the output is available.
543 * @param[in] modelid The model to be inferred.
544 * @param[in] input The input data to be inferred.
545 * @param[out] output The output result.
546 * @return @c 0 if no error. otherwise a negative error value
549 HostHandler::runSync (uint32_t modelid, const input_buffers *input,
550 output_buffers *output)
552 callbackSync sync (output);
553 int status = runAsync (modelid, input, callbackSync::callback,
554 static_cast <void*> (&sync), NPUASYNC_DROP_OLD, nullptr);
556 /** sync needs to wait callback */
563 * @brief Invoke NPU inference. Unblocking call.
564 * @param[in] modelid The model to be inferred.
565 * @param[in] input The input data to be inferred.
566 * @param[in] cb The output buffer handler.
567 * @param[in] cb_data The data given as a parameter to the runNPU_async call.
568 * @param[in] mode Configures how this operation works.
569 * @param[out] sequence The sequence number returned with runNPU_async.
570 * @return @c 0 if no error. otherwise a negative error value
573 HostHandler::runAsync (uint32_t modelid, const input_buffers *input,
574 npuOutputNotify cb, void *cb_data, npu_async_mode mode, uint64_t *sequence)
576 Model *model = nullptr;
578 if (device_->needModel()) {
579 model = getModel (modelid);
580 if (model == nullptr)
584 device_->setAsyncMode (mode);
585 return device_->run (NPUINPUT_HOST, model, input, cb, cb_data, sequence);
589 * @brief get number of available devices
590 * @param[in] type device type
591 * @return number of devices
594 HostHandler::getNumDevices (dev_type type)
596 return DriverAPI::getNumDevices (type);
600 * @brief get device instance
601 * @param[out] dev device instance
602 * @param[in] type device type
603 * @param[in] id device id
604 * @return 0 if no error. otherwise a negative errno
607 HostHandler::getDevice (npudev_h *dev, dev_type type, uint32_t id)
609 int num_devices = getNumDevices (type);
611 /** check the validity of device id */
612 if (!(num_devices > 0 && id < static_cast<uint32_t>(num_devices))) {
613 logerr (TAG, "Invalid arguments provided\n");
617 Device *device = Device::createInstance (type, id);
618 if (device == nullptr) {
619 logerr (TAG, "Failed to create a device with the given type\n");
624 /** This is just for backward-compatility; we don't guarantee its corresness */
631 * @brief allocate generic buffer (just for users)
632 * @param[out] buffer buffer instance
633 * @return 0 if no error. otherwise a negative errno
636 HostHandler::allocGenericBuffer (generic_buffer *buffer)
641 if (buffer->size == 0) {
642 logerr (TAG, "Invalid size\n");
646 if (buffer->size > UINT32_MAX) {
647 logerr (TAG, "Don't support such a large size");
651 switch (buffer->type) {
654 if (buffer->filepath == nullptr)
660 /* now, npu-engine always provides dmabuf-based allocation */
661 void *addr = nullptr;
662 int dmabuf = device_->allocMemory (buffer->size, &addr);
666 buffer->dmabuf = dmabuf;
678 * @brief deallocate generic buffer (just for users)
679 * @param[in] buffer buffer instance
680 * @return 0 if no error. otherwise a negative errno
683 HostHandler::deallocGenericBuffer (generic_buffer *buffer)
689 switch (buffer->type) {
691 status = 0; /** always true cuz nothing to do */
695 status = device_->deallocMemory (buffer->dmabuf, buffer->size, buffer->addr);
706 * @brief allocate multiple generic buffers (just for users)
707 * @param[out] buffers multi-buffer instance
708 * @return 0 if no error. otherwise a negative errno
711 HostHandler::allocGenericBuffer (generic_buffers *buffers)
713 if (buffers == NULL || buffers->num_buffers < 1)
716 buffer_types type = buffers->bufs[0].type;
717 if (type == BUFFER_FILE)
720 uint64_t total_size = 0;
721 for (uint32_t idx = 0; idx < buffers->num_buffers; idx++)
722 total_size += buffers->bufs[idx].size;
724 uint64_t first_size = buffers->bufs[0].size;
725 buffers->bufs[0].size = total_size;
726 int status = allocGenericBuffer (&buffers->bufs[0]);
730 uint64_t offset = first_size;
731 for (uint32_t idx = 1; idx < buffers->num_buffers; idx++) {
732 buffers->bufs[idx].dmabuf = buffers->bufs[0].dmabuf;
733 buffers->bufs[idx].offset = buffers->bufs[0].offset + offset;
734 buffers->bufs[idx].addr = static_cast<char*>(buffers->bufs[0].addr) + offset;
735 buffers->bufs[idx].type = type;
737 offset += buffers->bufs[idx].size;
740 buffers->bufs[0].size = first_size;
746 * @brief deallocate multiple generic buffers (just for users)
747 * @param[in] buffers multi-buffer instance
748 * @return 0 if no error. otherwise a negative errno
751 HostHandler::deallocGenericBuffer (generic_buffers *buffers)
753 if (buffers == NULL || buffers->num_buffers < 1)
756 return deallocGenericBuffer (&buffers->bufs[0]);
760 * @brief get the current memory status
761 * @param[out] alloc_total The size of allocated memory until now
762 * @param[out] free_total The size of freed memory until now
763 * @return 0 if no error. otherwise a negatice error value
766 HostHandler::getMemoryStatus (size_t *alloc_total, size_t *free_total)
768 /** API is always set in initialize () */
769 const DriverAPI * api = device_->getDriverAPI ();
770 assert (api != nullptr);
772 return api->getMemoryStatus (alloc_total, free_total);
776 * @brief Get the current device status to be used
777 * @param[out] status the device status
778 * @param[out] num_requests the number of running requests (or pending)
779 * @return 0 if no error, otherwise a negative errno.
782 HostHandler::getDeviceStatus (npu_status *status, uint32_t *num_requests)
784 /** API is always set in initialize () */
785 const DriverAPI * api = device_->getDriverAPI ();
786 assert (api != nullptr);
788 device_state_t state = api->isReady ();
789 if (state == device_state_t::STATE_READY) {
790 *num_requests = api->numRequests ();
791 if (*num_requests > 0)
803 /** implement methods of Device class */
805 /** @brief constructor of device */
806 Device::Device (dev_type type, int id, bool need_model)
807 : comm_ (CommPlugin::getCommPlugin()), type_ (type), id_ (id), need_model_ (true),
808 mode_ (NPUASYNC_WAIT), initialized_ (false), atomic_flag_ (ATOMIC_FLAG_INIT)
813 * @brief create device instance depending on device type and id
814 * @param[in] type device type
815 * @param[in] id device id
816 * @return device instance
819 Device::createInstance (dev_type type, int id)
821 Device *device = nullptr;
823 switch (type & DEVICETYPE_MASK) {
824 case DEVICETYPE_TRIV:
825 device = new TrinityVision (id);
827 case DEVICETYPE_TRIV2:
828 device = new TrinityVision2 (id);
830 case DEVICETYPE_TRIA:
831 device = new TrinityAsr (id);
837 if (device != nullptr && device->init () != 0) {
846 * @brief device initialization
847 * @return 0 if no error, otherwise a negative errno
848 * @note Init failures come from createDriverAPI() only.
853 /** should be initilizaed only once */
854 if (!atomic_flag_.test_and_set()) {
855 /** create the corresponding driver API */
856 api_ = DriverAPI::createDriverAPI (type_, id_);
857 if (api_.get() == nullptr) {
858 atomic_flag_.clear();
859 logerr (TAG, "Failed to create driver API\n");
863 handler_.reset (new HostHandler (this));
864 scheduler_.reset (new Scheduler (api_.get()));
865 mem_ = MemAllocator::createInstance (api_.get());
867 initialized_ = true; /** c++11 does not provide test() of atomic flag */
874 * @brief stop all requests from this device
875 * @param[in] force_stop indicate the schedduler waits until to handle previous requests
876 * @return 0 if no error, otherwise a negative errno
879 Device::stop (bool force_stop)
881 if (!initialized ()) {
882 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
886 Request *req = new Request (NPUINPUT_STOP);
887 req->setForceStop (force_stop);
888 return scheduler_->submitRequest (req);
892 * @brief allocate generic memory buffer
893 * @param[in] size the size to allocate
894 * @param[out] addr the mapped address
895 * @return dmabuf fd if no error, otherwise a negative errno
898 Device::allocMemory (size_t size, void **addr)
900 if (!initialized ()) {
901 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
905 if (size == 0 || addr == nullptr) {
906 logerr (TAG, "Invalid arguments\n");
910 return mem_->allocMemory (size, addr);
914 * @brief deallocate generic memory buffer
915 * @param[in] dmabuf_fd dmabuf file descriptor
916 * @param[in] size buffer size
917 * @param[in] addr mapped addr
918 * @return 0 if no error, otherwise a negative errno
921 Device::deallocMemory (int dmabuf_fd, size_t size, void * addr)
923 if (!initialized ()) {
924 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
928 if (dmabuf_fd < 0 || size == 0 || addr == nullptr) {
929 logerr (TAG, "Invalid arguments\n");
933 return mem_->deallocMemory (dmabuf_fd, size, addr);
937 * @brief extract the buffer instance from input generic buffers
938 * @param[in] meta the model metadata
939 * @param[in] input the input generic buffers
940 * @return the buffer instance
943 TrinityVision::prepareInputBuffers (const Metadata *meta, const input_buffers *input)
945 if (meta == nullptr || input == nullptr ||
946 meta->getInputNum() != input->num_buffers) {
947 logerr (TAG, "Invalid metadata info provided\n");
952 const generic_buffer *first = &input->bufs[0];
953 if (first->type == BUFFER_DMABUF) {
954 buffer = mem_->allocBuffer (new HWmemExternal);
955 if (buffer == nullptr)
958 buffer->setDmabuf (first->dmabuf);
959 buffer->setOffset (first->offset);
960 buffer->setSize (meta->getBufferSize());
962 buffer = mem_->allocBuffer (new HWmemDevice);
963 if (buffer == nullptr)
966 int status = buffer->alloc (meta->getBufferSize ());
968 logerr (TAG, "Failed to allocate buffer: %d\n", status);
974 int status = buffer->createTensors (meta);
976 logerr (TAG, "Failed to create tensors: %d\n", status);
985 * @brief implementation of TRIV's setModel ()
986 * @param[in] model_buf the model generic buffer
987 * @param[out] model the model instance
988 * @return 0 if no error, otherwise a negative errno
991 TrinityVision::setModel (const generic_buffer *model_buf, Model ** model_ptr)
993 if (!initialized ()) {
994 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
998 if (model_buf == nullptr || model_ptr == nullptr)
1001 Model *model = nullptr;
1002 HWmem * hwmem_prog = nullptr;
1003 HWmem * hwmem_weight = nullptr;
1006 /** In TRIV1, model data (including program/weight) should be contiguous */
1008 switch (model_buf->type) {
1011 model = mem_->allocModel (new HWmemDevice);
1012 if (model == nullptr) {
1013 logerr (TAG, "Failed to allocate model\n");
1017 status = model->alloc (model_buf->size);
1019 logerr (TAG, "Failed to allocate model: %d\n", status);
1023 /** extract the whole model data */
1024 status = comm_.extractGenericBuffer (model_buf, model->getData(), nullptr);
1026 logerr (TAG, "Failed to extract generic buffer: %d\n", status);
1034 status = model->setMetadata (model->getData());
1038 /** allocate program (optional; NOP) */
1039 if (model->getMetadata()->getProgramSize() > 0) {
1040 hwmem_prog = new HWmem (new HWmemChunk);
1041 model->setProgramData (hwmem_prog);
1043 hwmem_prog->setParent (model);
1044 hwmem_prog->setOffset (model->getMetadata()->getMetaSize());
1045 status = hwmem_prog->alloc (model->getMetadata()->getProgramSize());
1047 logerr (TAG, "Failed to allocate program\n");
1052 /** allocate weight (optional) */
1053 if (model->getMetadata()->getWeightSize() > 0) {
1054 hwmem_weight = new HWmem (new HWmemChunk);
1055 model->setWeightData (hwmem_weight);
1057 hwmem_weight->setParent (model);
1058 hwmem_weight->setOffset (model->getMetadata()->getMetaSize() +
1059 model->getMetadata()->getProgramSize());
1060 status = hwmem_weight->alloc (model->getMetadata()->getWeightSize());
1062 logerr (TAG, "Failed to allocate program\n");
1067 if (hwmem_prog != nullptr) {
1068 /** register this model to the driver */
1069 model_config_t config;
1070 config.dbuf_fd = hwmem_prog->getDmabuf ();
1071 config.program_size = hwmem_prog->getSize ();
1072 config.program_offset_addr = hwmem_prog->getOffset ();
1073 if (hwmem_weight != nullptr)
1074 config.weight_offset_addr = hwmem_weight->getOffset ();
1076 status = api_->registerModel (&config);
1080 model->setInternalID(config.id);
1092 * @brief implementation of TRIV's unsetModel ()
1093 * @param[in] model the model instance
1094 * @return 0 if no error, otherwise a negative errno
1097 TrinityVision::unsetModel (Model * model)
1099 if (!initialized ()) {
1100 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
1104 if (model == nullptr) {
1105 logerr (TAG, "Invalid model instance\n");
1109 if (model->getMetadata()->getProgramSize() > 0)
1110 return api_->deregisterModel (model->getInternalID ());
1116 * @brief implementation of TRIV's run()
1117 * @param[in] opmode input opmode
1118 * @param[in] model the model instance
1119 * @param[in] input generic buffers of input data
1120 * @param[in] cb the output callback
1121 * @param[in] cb_data the output callback data
1122 * @param[out] sequence The sequence number returned with runNPU_async.
1125 TrinityVision::run (npu_input_opmode opmode, const Model *model,
1126 const input_buffers *input, npuOutputNotify cb, void *cb_data,
1129 if (!initialized ()) {
1130 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
1134 if (opmode != NPUINPUT_HOST) {
1135 logerr (TAG, "TRIV supports only host inputservice\n");
1139 if (model == nullptr || input == nullptr) {
1140 logerr (TAG, "TRIV requires both model and input buffers\n");
1144 Buffer *buffer = prepareInputBuffers (model->getMetadata(), input);
1145 if (buffer == nullptr) {
1146 logerr (TAG, "Failed to extract buffer instance\n");
1150 if (!buffer->isExternal ()) {
1151 for (uint32_t idx = 0; idx < input->num_buffers; idx++) {
1152 auto func = std::bind (TrinityVision::manipulateData, model, idx, true,
1153 std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
1154 int status = comm_.extractGenericBuffer (&input->bufs[idx],
1155 buffer->getInputTensor(idx)->getData(), func);
1157 logerr (TAG, "Failed to feed input buffer: %d\n", status);
1163 /** this device uses CMA buffer */
1165 Request *req = new Request (opmode);
1166 req->setModel (model);
1167 req->setBuffer (buffer);
1170 req->setCallback (std::bind (&TrinityVision::callback, this, req, cb, cb_data));
1172 if (sequence != nullptr)
1173 *sequence = req->getID();
1175 return scheduler_->submitRequest (req);
1179 * @brief callback of TRIV2 request
1180 * @param[in] req the request instance
1181 * @param[in] cb callback for completion
1182 * @param[in] cb_data callback data
1183 * @note The callback invoke does not gurantee the request was successful
1184 * @todo Check the request failures
1187 TrinityVision::callback (Request *req, npuOutputNotify cb, void *cb_data)
1189 const Model *model = req->getModel ();
1190 Buffer *buffer = req->getBuffer ();
1191 output_buffers output = {
1192 .num_buffers = buffer->getOutputNum ()
1195 for (uint32_t idx = 0; idx < output.num_buffers; idx++) {
1196 uint32_t output_tensor_size = model->getOutputTensorSize (idx);
1198 if (buffer->isExternal ()) {
1199 output.bufs[idx].type = BUFFER_DMABUF;
1200 output.bufs[idx].size = output_tensor_size;
1201 output.bufs[idx].addr = buffer->getOutputTensor(idx)->getData();
1203 output.bufs[idx].type = BUFFER_MAPPED;
1204 output.bufs[idx].size = output_tensor_size;
1205 /** user needs to free this */
1206 output.bufs[idx].addr = malloc (output_tensor_size);
1208 auto func = std::bind (TrinityVision::manipulateData, model, idx, false,
1209 std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
1210 int status = comm_.insertGenericBuffer (buffer->getOutputTensor(idx)->getData(),
1211 &output.bufs[idx], func);
1213 logerr (TAG, "Failed to return output buffer: %d\n", status);
1218 cb (&output, req->getID(), cb_data);
1224 * @brief extract the segment table instance from input generic buffers
1225 * @param[in] model the model instance
1226 * @param[in] input the input generic buffers
1227 * @return the segment table instance
1230 TrinityVision2::prepareSegmentTable (const Model *model, const input_buffers *input)
1232 if (model == nullptr || input == nullptr) {
1233 logerr (TAG, "Invalid arguments provided\n");
1237 const Metadata *meta = model->getMetadata ();
1238 if (meta == nullptr ||
1239 meta->getInputNum() != input->num_buffers) {
1240 logerr (TAG, "Invalid metadata info provided\n");
1244 SegmentTable * segt = mem_->allocSegmentTable (new HWmemDevice);
1245 int status = segt->alloc ();
1247 logerr (TAG, "Failed to allocate segment table: %d\n", status);
1251 status = segt->createSegments (model, input);
1253 logerr (TAG, "Failed to create segments: %d\n", status);
1265 * @brief implementation of TRIV2's setModel ()
1266 * @param[in] model_buf the model generic buffer
1267 * @param[out] model the model instance
1268 * @return 0 if no error, otherwise a negative errno
1271 TrinityVision2::setModel (const generic_buffer *model_buf, Model ** model_ptr)
1273 if (!initialized ()) {
1274 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
1278 if (model_buf == nullptr || model_ptr == nullptr)
1284 switch (model_buf->type) {
1287 model = mem_->allocModel (new HWmemDevice);
1288 if (model == nullptr) {
1289 logerr (TAG, "Failed to allocate model\n");
1293 status = model->alloc (NPUBIN_META_SIZE);
1295 logerr (TAG, "Failed to allocate model: %d\n", status);
1299 status = comm_.extractGenericBuffer (model_buf, model->getData(), nullptr,
1300 0, NPUBIN_META_SIZE);
1302 logerr (TAG, "Failed to extract generic buffer: %d\n", status);
1310 status = model->setMetadata (model->getData());
1314 /** allocate program (optional; NOP) */
1315 if (model->getMetadata()->getProgramSize() > 0) {
1316 HWmem * hwmem_prog = new HWmem (new HWmemDevice);
1317 hwmem_prog->setDriverAPI (api_.get());
1319 model->setProgramData (hwmem_prog);
1321 status = hwmem_prog->alloc (model->getMetadata()->getProgramSize());
1323 logerr (TAG, "Failed to allocate program\n");
1327 status = comm_.extractGenericBuffer (model_buf, hwmem_prog->getData(), nullptr,
1328 model->getMetadata()->getMetaSize(),
1329 model->getMetadata()->getProgramSize());
1331 logerr (TAG, "Failed to extract generic buffer: %d\n", status);
1335 /** register this model to the driver */
1336 model_config_t config;
1337 config.dbuf_fd = hwmem_prog->getDmabuf ();
1338 config.program_size = hwmem_prog->getSize ();
1339 config.program_offset_addr = 0;
1341 status = api_->registerModel (&config);
1345 model->setInternalID(config.id);
1348 /** allocate weight (optional) */
1349 if (model->getMetadata()->getWeightSize() > 0) {
1350 HWmem * hwmem_weight = new HWmem (new HWmemDevice);
1351 hwmem_weight->setDriverAPI (api_.get());
1353 model->setWeightData (hwmem_weight);
1355 status = hwmem_weight->alloc (model->getMetadata()->getWeightSize());
1357 logerr (TAG, "Failed to allocate program\n");
1361 status = comm_.extractGenericBuffer (model_buf, hwmem_weight->getData(), nullptr,
1362 model->getMetadata()->getMetaSize() + model->getMetadata()->getProgramSize(),
1363 model->getMetadata()->getWeightSize());
1365 logerr (TAG, "Failed to extract generic buffer: %d\n", status);
1379 * @brief implementation of TRIV2's unsetModel ()
1380 * @param[in] model the model instance
1381 * @return 0 if no error, otherwise a negative errno
1384 TrinityVision2::unsetModel (Model * model)
1386 if (!initialized ()) {
1387 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
1391 if (model == nullptr) {
1392 logerr (TAG, "Invalid model instance\n");
1396 if (model->getMetadata()->getProgramSize() > 0)
1397 return api_->deregisterModel (model->getInternalID ());
1402 /** @brief implementation of TRIV2's run() */
1404 TrinityVision2::run (npu_input_opmode opmode, const Model *model,
1405 const input_buffers *input, npuOutputNotify cb, void *cb_data,
1408 if (!initialized ()) {
1409 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
1413 if (opmode != NPUINPUT_HOST && opmode != NPUINPUT_HW_RECURRING)
1416 /** this device uses segment table */
1417 SegmentTable * segt = prepareSegmentTable (model, input);
1418 if (segt == nullptr) {
1419 logerr (TAG, "Failed to create segment table instance\n");
1423 /** extract input data */
1424 for (uint32_t idx = 0; idx < input->num_buffers; idx++) {
1425 if (!segt->getInputSegment(idx)->isExternal ()) {
1426 auto func = std::bind (TrinityVision2::manipulateData, model, idx, true,
1427 std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
1428 int status = comm_.extractGenericBuffer (
1430 segt->getInputSegment(idx)->getData() + segt->getInputSegmentOffset(idx),
1433 logerr (TAG, "Failed to feed input segment: %d\n", status);
1439 Request *req = new Request (opmode);
1440 req->setModel (model);
1441 req->setSegmentTable (segt);
1442 req->setCallback (std::bind (&TrinityVision2::callback, this, req, cb, cb_data));
1445 *sequence = req->getID();
1447 return scheduler_->submitRequest (req);
1450 /** @brief callback of TRIV2 request */
1452 TrinityVision2::callback (Request *req, npuOutputNotify cb, void *cb_data)
1454 const Model *model = req->getModel ();
1455 SegmentTable *segt = req->getSegmentTable ();
1456 output_buffers output = {
1457 .num_buffers = segt->getNumOutputSegments ()
1460 for (uint32_t idx = 0; idx < output.num_buffers; idx++) {
1461 uint32_t output_tensor_size = model->getOutputTensorSize (idx);
1463 output.bufs[idx].type = BUFFER_MAPPED;
1464 output.bufs[idx].size = output_tensor_size;
1465 /** user needs to free this */
1466 output.bufs[idx].addr = malloc (output_tensor_size);
1468 auto func = std::bind (TrinityVision2::manipulateData, model, idx, false,
1469 std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
1470 int status = comm_.insertGenericBuffer (
1471 segt->getOutputSegment(idx)->getData() + segt->getOutputSegmentOffset(idx),
1472 &output.bufs[idx], func);
1474 logerr (TAG, "Failed to return output buffer: %d\n", status);
1478 cb (&output, req->getID(), cb_data);
1483 /** @brief implementation of TRIA's run(): WIP */
1485 TrinityAsr::run (npu_input_opmode opmode, const Model *model,
1486 const input_buffers *input, npuOutputNotify cb, void *cb_data,
1489 if (!initialized ()) {
1490 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
1494 if (opmode != NPUINPUT_HOST)
1499 /** ASR does not require model and support only a single tensor */
1500 const generic_buffer *first_buf = &input->bufs[0];
1501 if (first_buf->type == BUFFER_DMABUF) {
1502 buffer = mem_->allocBuffer (new HWmemExternal);
1503 if (buffer == nullptr)
1506 buffer->setDmabuf (first_buf->dmabuf);
1507 buffer->setOffset (first_buf->offset);
1508 buffer->setSize (first_buf->size);
1510 buffer = mem_->allocBuffer (new HWmemDevice);
1511 if (buffer == nullptr)
1514 status = buffer->alloc (first_buf->size);
1521 status = buffer->createTensors ();
1523 logerr (TAG, "Failed to create tensors: %d\n", status);
1528 if (!buffer->isExternal ()) {
1529 status = comm_.extractGenericBuffer (first_buf,
1530 buffer->getInputTensor(0)->getData(), nullptr);
1535 Request *req = new Request (opmode);
1536 req->setBuffer (buffer);
1537 req->setCallback (std::bind (&TrinityAsr::callback, this, req, cb, cb_data));
1540 *sequence = req->getID();
1542 return scheduler_->submitRequest (req);
1545 /** @brief callback of TRIA request: WIP */
1547 TrinityAsr::callback (Request *req, npuOutputNotify cb, void *cb_data)
1551 /** Implement data manipulation (each device may have different impl.) */
1555 #define do_quantized_memcpy(type) do {\
1558 while (idx < num_elems) {\
1559 val = ((type *) src)[idx];\
1560 val = val / _scale;\
1561 val += _zero_point;\
1562 val = (val > 255.0) ? 255.0 : 0.0;\
1563 ((uint8_t *) dst)[idx++] = (uint8_t) val;\
1566 while (idx < num_elems) {\
1567 val = *(uint8_t *) src;\
1568 val -= _zero_point;\
1570 ((type *) dst)[idx++] = (type) val;\
1571 dst = (void*)(((uint8_t *) dst) + data_size);\
1572 src = (void*)(((uint8_t *) src) + 1);\
1578 * @brief memcpy during quantization
1580 static void memcpy_with_quant (bool quant, data_type type, float scale, uint32_t zero_point,
1581 void *dst, const void *src, uint32_t num_elems)
1583 double _scale = (double) scale;
1584 double _zero_point = (double) zero_point;
1586 uint32_t data_size = get_data_size (type);
1590 case DATA_TYPE_INT8:
1591 do_quantized_memcpy (int8_t);
1593 case DATA_TYPE_UINT8:
1594 do_quantized_memcpy (uint8_t);
1596 case DATA_TYPE_INT16:
1597 do_quantized_memcpy (int16_t);
1599 case DATA_TYPE_UINT16:
1600 do_quantized_memcpy (uint16_t);
1602 case DATA_TYPE_INT32:
1603 do_quantized_memcpy (int32_t);
1605 case DATA_TYPE_UINT32:
1606 do_quantized_memcpy (uint32_t);
1608 case DATA_TYPE_INT64:
1609 do_quantized_memcpy (int64_t);
1611 case DATA_TYPE_UINT64:
1612 do_quantized_memcpy (uint64_t);
1614 case DATA_TYPE_FLOAT32:
1615 do_quantized_memcpy (float);
1617 case DATA_TYPE_FLOAT64:
1618 do_quantized_memcpy (double);
1621 logerr (TAG, "Unsupported datatype %d\n", type);
1626 * @brief perform data manipulation
1627 * @param[in] model model instance
1628 * @param[in] idx tensor index
1629 * @param[in] is_input indicate it's input manipulation
1630 * @param[out] dst destination buffer
1631 * @param[in] src source buffer (feature map)
1632 * @param[in] size size to be copied
1633 * @return size of memory copy if no error, otherwise zero
1635 * @note the input data format should be NHWC
1636 * @detail rules for the memory address of activations in NPU HW.
1637 * (https://code.sec.samsung.net/confluence/pages/viewpage.action?pageId=146491864)
1639 * 1) Special case (depth == 3)
1640 * - addr(x,y,z) = addr(0,0,0) + (z) + 3 * (x + width * y)
1643 * - addr(x,y,z) = addr(0,0,0) + (z % MPA_L) + MPA_L * (x + width * (y + height * (z / MPA_L)))
1645 * Thus, if depth is not a multiple of MPA_L (i.e., 64), zero padding is required
1648 TrinityVision::manipulateData (const Model *model, uint32_t idx, bool is_input,
1649 void *dst, void *src, size_t size)
1651 const Metadata *meta = model->getMetadata();
1652 const tensor_data_info* info;
1653 const uint32_t *dims;
1654 uint32_t zero_point;
1657 /** extract required information from the metadata */
1659 if (idx >= meta->getInputNum()) {
1660 logerr (TAG, "Wrong information for input tensors in metadata\n");
1664 info = model->getInputDataInfo (idx);
1665 dims = meta->getInputDims (idx);
1666 zero_point = meta->getInputQuantZero (idx);
1667 scale = meta->getInputQuantScale (idx);
1669 if (idx >= meta->getOutputNum()) {
1670 logerr (TAG, "Wrong information for output tensors in metadata\n");
1674 info = model->getOutputDataInfo (idx);
1675 dims = meta->getOutputDims (idx);
1676 zero_point = meta->getOutputQuantZero (idx);
1677 scale = meta->getOutputQuantScale (idx);
1680 if (info == nullptr) {
1681 logerr (TAG, "Unmatched tensors info\n");
1685 uint32_t batch = dims[0];
1686 uint32_t height = dims[1];
1687 uint32_t width = dims[2];
1688 uint32_t depth = dims[3];
1690 uint32_t data_size = get_data_size (info->type);
1691 if (data_size == 0) {
1692 logerr (TAG, "Invalid data size\n");
1696 bool need_quantization = false;
1698 * note that we assume DATA_TYPE_SRNPU is the smallest data type that we consider.
1699 * Also, DATA_TYPE_SRNPU and uint8_t may be regarded as the same in the view of apps.
1701 if (info->type != DATA_TYPE_SRNPU) {
1702 assert (data_size >= get_data_size (DATA_TYPE_SRNPU));
1704 if (data_size > get_data_size (DATA_TYPE_SRNPU) ||
1705 !(zero_point == default_quant_zero && scale == default_quant_scale))
1706 need_quantization = true;
1709 /** check data manipulation is required */
1710 if (depth != 3 && depth != 64 && info->layout != DATA_LAYOUT_SRNPU) {
1711 uint32_t MPA_L = DATA_GRANULARITY;
1712 uint32_t n, h, w, d;
1713 uint32_t std_offset; /* standard offset in NHWC data format */
1714 uint32_t npu_offset; /* npu offset in NPU HW data format*/
1715 uint32_t src_offset;
1716 uint32_t dst_offset;
1717 uint32_t slice_size;
1719 /* @todo we currently support only NHWC */
1720 if (info->layout != DATA_LAYOUT_NHWC) {
1721 logerr (TAG, "data manipulation is supported for NHWC only\n");
1725 for (n = 0; n < batch; n++) {
1726 for (h = 0; h < height; h++) {
1727 for (w = 0; w < width; w++) {
1728 for (d = 0; d < depth; d += MPA_L) {
1729 std_offset = d + depth * (w + width * (h + n * height));
1730 npu_offset = MPA_L * (w + width * (h + (n + d / MPA_L) * height));
1731 slice_size = (depth - d >= MPA_L) ? MPA_L : depth - d;
1734 src_offset = std_offset * data_size;
1735 dst_offset = npu_offset;
1737 src_offset = npu_offset;
1738 dst_offset = std_offset * data_size;
1741 /* if depth is not a multiple of MPA_L, add zero paddings (not exact values) */
1742 if (need_quantization) {
1743 memcpy_with_quant (is_input, info->type, scale, zero_point,
1744 static_cast<char*>(dst) + dst_offset,
1745 static_cast<char*>(src) + src_offset,
1749 static_cast<char*>(dst) + dst_offset,
1750 static_cast<char*>(src) + src_offset,
1757 } else if (need_quantization) {
1758 /** depth == 3 || depth == 64; special cases which can directly copy input tensor data */
1759 memcpy_with_quant (is_input, info->type, scale, zero_point,
1760 dst, src, is_input ? size / data_size : size);
1762 memcpy (dst, src, size);
1771 TrinityVision::manipulateData (const Model *model, uint32_t idx, bool is_input,
1772 void *dst, void *src, size_t size)
1774 memcpy (dst, src, size);
1780 /** other device types don't have data manip impl. yet */
1783 TrinityVision2::manipulateData (const Model *model, uint32_t idx, bool is_input,
1784 void *dst, void *src, size_t size)
1786 memcpy (dst, src, size);
1791 TrinityAsr::manipulateData (const Model *model, uint32_t idx, bool is_input,
1792 void *dst, void *src, size_t size)
1794 memcpy (dst, src, size);