3 * Copyright (C) 2020 Samsung Electronics
4 * Copyright (C) 2020 Dongju Chae <dongju.chae@samsung.com>
7 * @file ne-host-handler.cc
9 * @brief Implementation of APIs to access NPU from Host
10 * @see https://code.sec.samsung.net/confluence/display/ODLC/2020+Overall+Software+Stack
11 * @author Dongju Chae <dongju.chae@samsung.com>
12 * @bug No known bugs except for NYI items
15 #include "ne-handler.h"
18 #include <libnpuhost.h>
19 #include <npubinfmt.h>
20 #include <NPUdrvAPI.h>
21 #include <CommPlugin.h>
26 #include <condition_variable>
/**
 * Resolve the HostHandler behind an opaque npudev_h.
 * Declares `tdev` and `handler` in the enclosing scope and makes the
 * enclosing function return -EINVAL when either resolves to null.
 * NOTE: not do/while-wrapped on purpose — `handler` must stay visible
 * to the statements that follow the macro.
 */
#define INIT_HOST_HANDLER(handler, dev) \
  Device *tdev = static_cast <Device *> (dev); \
  if (tdev == nullptr) return -EINVAL; \
  HostHandler *handler = tdev->getHostHandler (); \
  if (handler == nullptr) return -EINVAL;
/** just for backward-compatibility; tracks the most recently opened device */
npudev_h HostHandler::latest_dev_ = nullptr;
42 /** implement libnpuhost APIs */
45 * @brief Returns the number of available NPU devices.
46 * @return @c The number of NPU devices.
47 * @retval 0 if no NPU devices available. if positive (number of NPUs) if NPU devices available. otherwise, a negative error value.
48 * @note the caller should call putNPUdevice() to release the device handle
50 int getnumNPUdeviceByType (dev_type type)
52 return HostHandler::getNumDevices (type);
56 * @brief Returns the handle of the chosen NPU devices.
57 * @param[out] dev The NPU device handle
58 * @param[in] id The NPU id to get the handle. 0 <= id < getnumNPUdeviceByType().
59 * @return @c 0 if no error. otherwise a negative error value
60 * @note the caller should call putNPUdevice() to release the device handle
62 int getNPUdeviceByType (npudev_h *dev, dev_type type, uint32_t id)
64 return HostHandler::getDevice (dev, type, id);
68 * @brief release the NPU device instance obtained by getDevice ()
69 * @param[in] dev the NPU device handle
71 void putNPUdevice (npudev_h dev)
74 delete static_cast<Device *> (dev);
78 * @brief Send the NN model to NPU.
79 * @param[in] dev The NPU device handle
80 * @param[in] modelfile The filepath to the compiled NPU NN model in any buffer_type
81 * @param[out] modelid The modelid allocated for this instance of NN model.
82 * @return @c 0 if no error. otherwise a negative error value
84 * @detail For ASR devices, which do not accept models, but have models
85 * embedded in devices, you do not need to call register and
86 * register calls for ASR are ignored.
88 * @todo Add a variation: in-memory model register.
90 int registerNPUmodel (npudev_h dev, generic_buffer *modelfile, uint32_t *modelid)
92 INIT_HOST_HANDLER (host_handler, dev);
94 return host_handler->registerModel (modelfile, modelid);
98 * @brief Remove the NN model from NPU
99 * @param[in] dev The NPU device handle
100 * @param[in] modelid The model to be removed from the NPU.
101 * @return @c 0 if no error. otherwise a negative error value
102 * @detail This may incur some latency with memory compatcion.
104 int unregisterNPUmodel(npudev_h dev, uint32_t modelid)
106 INIT_HOST_HANDLER (host_handler, dev);
108 return host_handler->unregisterModel (modelid);
112 * @brief Remove all NN models from NPU
113 * @param[in] dev The NPU device handle
114 * @return @c 0 if no error. otherwise a negative error value
116 int unregisterNPUmodel_all(npudev_h dev)
118 INIT_HOST_HANDLER (host_handler, dev);
120 return host_handler->unregisterModels ();
124 * @brief [OPTIONAL] Set the data layout for input/output tensors
125 * @param[in] dev The NPU device handle
126 * @param[in] modelid The ID of model whose layouts are set
127 * @param[in] info_in the layout/type info for input tensors
128 * @param[in] info_out the layout/type info for output tensors
129 * @return @c 0 if no error. otherwise a negative error value
130 * @note if this function is not called, default layout/type will be used.
132 int setNPU_dataInfo(npudev_h dev, uint32_t modelid,
133 tensors_data_info *info_in, tensors_data_info *info_out)
135 INIT_HOST_HANDLER (host_handler, dev);
137 return host_handler->setDataInfo (modelid, info_in, info_out);
141 * @brief [OPTIONAL] Set the inference constraint for next NPU inferences
142 * @param[in] dev The NPU device handle
143 * @param[in] modelid The target model id
144 * @param[in] constraint inference constraint (e.g., timeout, priority)
145 * @return @c 0 if no error. otherwise a negative error value
146 * @note If this function is not called, default values are used.
148 int setNPU_constraint(npudev_h dev, uint32_t modelid, npuConstraint constraint)
150 INIT_HOST_HANDLER (host_handler, dev);
152 return host_handler->setConstraint (modelid, constraint);
156 * @brief Execute inference. Wait (block) until the output is available.
157 * @param[in] dev The NPU device handle
158 * @param[in] modelid The model to be inferred.
159 * @param[in] input The input data to be inferred.
160 * @param[out] output The output result. The caller MUST allocate appropriately before calling this.
161 * @return @c 0 if no error. otherwise a negative error value
163 * @detail This is a syntactic sugar of runNPU_async().
164 * CAUTION: There is a memcpy for the output buffer.
166 int runNPU_sync(npudev_h dev, uint32_t modelid, const input_buffers *input,
167 output_buffers *output)
169 INIT_HOST_HANDLER (host_handler, dev);
171 return host_handler->runSync (modelid, input, output);
175 * @brief Invoke NPU inference. Unblocking call.
176 * @param[in] dev The NPU device handle
177 * @param[in] modelid The model to be inferred.
178 * @param[in] input The input data to be inferred.
179 * @param[in] cb The output buffer handler.
180 * @param[out] sequence The sequence number returned with runNPU_async.
181 * @param[in] data The data given as a parameter to the runNPU_async call.
182 * @param[in] mode Configures how this operation works.
183 * @return @c 0 if no error. otherwise a negative error value
185 int runNPU_async(npudev_h dev, uint32_t modelid, const input_buffers *input,
186 npuOutputNotify cb, uint64_t *sequence, void *data,
189 INIT_HOST_HANDLER (host_handler, dev);
191 return host_handler->runAsync (modelid, input, cb, data, mode, sequence);
195 * @brief Let NPU accept input frames from its internal source continuously
196 * @param[in] dev The NPU device handle
197 * @param[in] modelid The model to be inferred.
198 * @param[in] opmode NPU has different opmode with auto-inputs. Choose one.
199 * @param[in] hw_dev The target device feeding input data
200 * @return @c 0 if no error. otherwise a negative error value
202 int runNPU_internalInput(npudev_h dev, uint32_t modelid, npu_input_opmode opmode,
205 INIT_HOST_HANDLER (host_handler, dev);
207 return host_handler->runInternal (modelid, opmode, hw_dev);
211 * @brief Stop the request with the given id
212 * @param[in] dev The NPU device handle
213 * @param[in] id The request id
214 * @return @c 0 if no error. otherwise a negative error value
216 int stopNPU_internalInput(npudev_h dev, int id)
218 INIT_HOST_HANDLER (host_handler, dev);
220 return host_handler->stopInternal (id);
224 * @brief Allocate a generic buffer with the requested buffer type.
225 * @param[in] dev The NPU device handle
226 * @param[in/out] Buffer the buffer pointer where memory is allocated.
227 * @return 0 if no error, otherwise a negative errno.
229 int allocNPU_genericBuffer (npudev_h dev, generic_buffer * buffer)
231 INIT_HOST_HANDLER (host_handler, dev);
233 return host_handler->allocGenericBuffer (buffer);
237 * @brief Free the generic buffer and remove the address mapping
238 * @param[in] dev The NPU device handle
239 * @param[in] buffer the model buffer
240 * @return 0 if no error, otherwise a negative errno.
242 int cleanNPU_genericBuffer (npudev_h dev, generic_buffer * buffer)
244 INIT_HOST_HANDLER (host_handler, dev);
246 return host_handler->deallocGenericBuffer (buffer);
250 * @brief Allocate generic buffers, which have multiple instances of generic_buffer
251 * @param[in] dev The NPU device handle
252 * @param[in/out] buffers generic buffers.
253 * @return 0 if no error, otherwise a negative errno.
254 * @note it reuses allocGenericBuffer().
256 int allocNPU_genericBuffers (npudev_h dev, generic_buffers * buffers)
258 INIT_HOST_HANDLER (host_handler, dev);
260 return host_handler->allocGenericBuffer (buffers);
264 * @brief Free generic buffers allocated by allocGenericBuffers().
265 * @param[in] dev The NPU device handle
266 * @param[in/out] buffers generic buffers.
267 * @note it reuses cleanGenericbuffer().
268 * @return 0 if no error, otherwise a negative errno.
270 int cleanNPU_genericBuffers (npudev_h dev, generic_buffers * buffers)
272 INIT_HOST_HANDLER (host_handler, dev);
274 return host_handler->deallocGenericBuffer (buffers);
278 * @brief alias of allocNPU_genericBuffer for model buffer
280 int allocNPU_modelBuffer (npudev_h dev, generic_buffer * model)
282 return allocNPU_genericBuffer (dev, model);
286 * @brief alias of cleanNPU_genericBuffer for model buffer
288 int cleanNPU_modelBuffer (npudev_h dev, generic_buffer * model)
290 return cleanNPU_genericBuffer (dev, model);
294 * @brief alias of allocNPU_genericBuffer for input buffer
296 int allocNPU_inputBuffer (npudev_h dev, generic_buffer * input)
298 return allocNPU_genericBuffer (dev, input);
302 * @brief alias of cleanNPU_genericBuffer for input buffer
304 int cleanNPU_inputBuffer (npudev_h dev, generic_buffer * input)
306 return cleanNPU_genericBuffer (dev, input);
310 * @brief alias of allocNPU_genericBuffers for input buffers
312 int allocNPU_inputBuffers (npudev_h dev, input_buffers * input)
314 return allocNPU_genericBuffers (dev, input);
318 * @brief alias of cleanNPU_genericBuffers for input buffers
320 int cleanNPU_inputBuffers (npudev_h dev, input_buffers * input)
322 return cleanNPU_genericBuffers (dev, input);
326 * @brief get the current memory status for the given device
327 * @param[in] dev The NPU device handle
328 * @param[out] alloc_total The size of allocated memory until now
329 * @param[out] free_total The size of freed memory until now
330 * @return @c 0 if no error. otherwise a negatice error value
332 int getNPU_memoryStatus(npudev_h dev, size_t *alloc_total, size_t *free_total)
334 INIT_HOST_HANDLER (host_handler, dev);
336 return host_handler->getMemoryStatus (alloc_total, free_total);
340 * @brief Get the current device status to be used
341 * @param[in] dev The NPU device handle
342 * @param[out] status the device status
343 * @param[out] num_requests the number of running requests (or pending)
344 * @return 0 if no error, otherwise a negative errno.
346 int getNPU_deviceStatus(npudev_h dev, npu_status *status, uint32_t *num_requests)
348 INIT_HOST_HANDLER (host_handler, dev);
350 return host_handler->getDeviceStatus (status, num_requests);
/**
 * @brief Get metadata for NPU model
 * @param[in] model The path of model binary file
 * @param[in] need_extra whether you want to extract the extra data in metadata
 * @return the metadata structure to be filled if no error, otherwise nullptr
 * @note For most npu-engine users, the extra data is not useful because it will be
 *       used for second-party users (e.g., compiler, simulator).
 *       Also, the caller needs to free the metadata.
 * @note the caller needs to free the metadata
 */
/* NOTE(review): this listing is incomplete — local declarations (fp/meta/ret),
 * braces and the error-cleanup paths (fclose/free/returns) are elided.
 * Comments below annotate only the visible statements. */
npubin_meta * getNPUmodel_metadata (const char *model, bool need_extra)
  /* open the model binary for reading */
  fp = fopen (model, "rb");
    logerr (TAG, "Failed to open the model binary: %d\n", -errno);

  /* fixed-size base metadata header */
  meta = (npubin_meta *) malloc (NPUBIN_META_SIZE);
    logerr (TAG, "Failed to allocate metadata\n");

  ret = fread (meta, 1, NPUBIN_META_SIZE, fp);
  if (ret != NPUBIN_META_SIZE) {
    logerr (TAG, "Failed to read the metadata\n");

  /* validate the magic code before trusting any header field */
  if (!CHECK_NPUBIN (meta->magiccode)) {
    logerr (TAG, "Invalid metadata provided\n");

  /* optionally pull in the variable-length extra metadata */
  if (need_extra && NPUBIN_META_EXTRA (meta->magiccode) > 0) {
    npubin_meta *new_meta;

    new_meta = (npubin_meta *) malloc (NPUBIN_META_TOTAL_SIZE(meta->magiccode));
      logerr (TAG, "Failed to allocate extra metadata\n");

    /* NOTE(review): copying NPUBIN_META_TOTAL_SIZE bytes out of `meta`, which was
     * malloc'ed with only NPUBIN_META_SIZE, looks like an over-read — confirm
     * whether this should copy NPUBIN_META_SIZE instead */
    memcpy (new_meta, meta, NPUBIN_META_TOTAL_SIZE(meta->magiccode));

    /* append the extra section right after the base header */
    ret = fread (new_meta->reserved_extra, 1, NPUBIN_META_EXTRA_SIZE (new_meta->magiccode), fp);
    if (ret != NPUBIN_META_EXTRA_SIZE (new_meta->magiccode)) {
      logerr (TAG, "Invalid extra metadata provided\n");
431 /** implement methods of HostHandler class */
/** @brief host handler constructor */
/* NOTE(review): the listing elides the start of the initializer list (presumably
 * binding device_) and the constructor body — confirm against the full source */
HostHandler::HostHandler (Device *device)
    /* ignored as we don't use double buffering anymore, but for backward-compatibility */
    async_mode_ (NPUASYNC_WAIT)
/** @brief host handler destructor (body elided in this listing) */
HostHandler::~HostHandler ()
/**
 * @brief register model from generic buffer
 * @param[in] model_buf model buffer
 * @param[out] modelid model id
 * @return 0 if no error. otherwise a negative errno
 */
/* NOTE(review): return type, braces and error-return statements are elided in
 * this listing */
HostHandler::registerModel (generic_buffer *model_buf, uint32_t *modelid)
  /* reject null inputs up front */
  if (model_buf == nullptr || modelid == nullptr) {
    logerr (TAG, "Invalid arguments given\n");

  /* the device parses/stages the model and hands back an instance */
  Model *model = nullptr;
  int status = device_->setModel (model_buf, &model);
    logerr (TAG, "Failed to set model: %d\n", status);

  assert (model != nullptr);

  /* track the model by its ID for later lookup/unregister */
  status = models_.insert (model->getID(), model);
    logerr (TAG, "Failed to insert model id\n");

  /* hand the allocated ID back to the caller */
  *modelid = model->getID();
/**
 * @brief remove the registered model
 * @param[in] modelid model id
 * @return 0 if no error. otherwise a negative errno
 */
/* NOTE(review): return type, braces and error-return statements are elided in
 * this listing */
HostHandler::unregisterModel (uint32_t modelid)
  /* unknown id: the elided branch presumably returns an error — confirm */
  Model *model = models_.find (modelid);
  if (model == nullptr)

  /* release device-side resources first, then drop the bookkeeping entry */
  int status = device_->unsetModel (model);
    logerr (TAG, "Failed to unset model: %d\n", status);

  return models_.remove (modelid);
/**
 * @brief remove all registered models
 * (return type and body are elided in this listing)
 */
HostHandler::unregisterModels ()
/**
 * @brief Set the data layout for input/output tensors
 * @param[in] modelid The ID of model whose layouts are set
 * @param[in] in the layout/type info for input tensors
 * @param[in] out the layout/type info for output tensors
 * @return @c 0 if no error. otherwise a negative error value
 * @note if this function is not called, default layout/type will be used.
 */
/* NOTE(review): return type, braces and the not-found return are elided in this
 * listing */
HostHandler::setDataInfo (uint32_t modelid, tensors_data_info *in,
    tensors_data_info *out)
  Model *model = models_.find (modelid);
  if (model == nullptr)

  /* validation/storage of the layout info is delegated to the model */
  return model->setDataInfo (in, out);
/**
 * @brief Set the inference constraint for next NPU inferences
 * @param[in] modelid The target model id
 * @param[in] constraint inference constraint (e.g., timeout, priority)
 * @return @c 0 if no error. otherwise a negative error value
 * @note If this function is not called, default values are used.
 */
/* NOTE(review): return type, braces and return statements are elided in this
 * listing */
HostHandler::setConstraint (uint32_t modelid, npuConstraint constraint)
  Model *model = models_.find (modelid);
  if (model == nullptr)

  /* the constraint is stored on the model and applied on the next run */
  model->setConstraint (constraint);
/**
 * @brief find and return model instance
 * @param[in] modelid model id
 * @return model instance if found. otherwise nullptr
 */
/* NOTE(review): the return-type line (presumably Model *) and braces are elided
 * in this listing */
HostHandler::getModel (uint32_t modelid)
  return models_.find (modelid);
/** @brief dummy callback holder used by runSync to block until completion. */
/* NOTE(review): the class declaration line, the mutex member, the done_ update
 * and the cv_ notify side are elided in this listing — confirm against the
 * full source */
  callbackSync (output_buffers *output) : output_(output), done_(false) {}

  /* static trampoline matching npuOutputNotify; forwards to the instance */
  static void callback (output_buffers *output, uint64_t sequence, void *data) {
    callbackSync *sync = static_cast<callbackSync *>(data);
    sync->callback (output, sequence);

  void callback (output_buffers *output, uint64_t sequence) {
    if (output_ != nullptr) {
      /** just copy internal variables of output buffers */
      memcpy (output_, output, sizeof (output_buffers));

    /* blocks the waiter until the callback marks done_ */
    std::unique_lock<std::mutex> lock (m_);
    cv_.wait (lock, [this]() { return done_; });

  std::condition_variable cv_;
  output_buffers *output_;
/**
 * @brief Execute inference. Wait (block) until the output is available.
 * @param[in] modelid The model to be inferred.
 * @param[in] input The input data to be inferred.
 * @param[out] output The output result.
 * @return @c 0 if no error. otherwise a negative error value
 */
/* NOTE(review): return type, braces, the status check and the final wait/return
 * are elided in this listing */
HostHandler::runSync (uint32_t modelid, const input_buffers *input,
    output_buffers *output)
  /* stack-allocated rendezvous object; its address travels as cb_data */
  callbackSync sync (output);
  int status = runAsync (modelid, input, callbackSync::callback,
      static_cast <void*> (&sync), NPUASYNC_DROP_OLD, nullptr);
    /** sync needs to wait callback */
/**
 * @brief Invoke NPU inference. Unblocking call.
 * @param[in] modelid The model to be inferred.
 * @param[in] input The input data to be inferred.
 * @param[in] cb The output buffer handler.
 * @param[in] cb_data The data given as a parameter to the runNPU_async call.
 * @param[in] mode Configures how this operation works.
 * @param[out] sequence The sequence number returned with runNPU_async.
 * @return @c 0 if no error. otherwise a negative error value
 */
/* NOTE(review): return type, braces and error-return statements are elided in
 * this listing */
HostHandler::runAsync (uint32_t modelid, const input_buffers *input,
    npuOutputNotify cb, void *cb_data, npu_async_mode mode, uint64_t *sequence)
  Model *model = nullptr;

  /* some devices (e.g., ASR) run without a host-registered model */
  if (device_->needModel()) {
    model = getModel (modelid);
    if (model == nullptr)

  /* check the given model before running */
  if (model != nullptr && !model->finalize ()) {
    logerr (TAG, "Failed to finalize the model. Please see the log messages\n");

  device_->setAsyncMode (mode);
  return device_->run (NPUINPUT_HOST, model, input, cb, cb_data, sequence);
/**
 * @brief Let NPU accept input frames from its internal source continuously
 * @param[in] modelid The model to be inferred.
 * @param[in] opmode NPU has different opmode with auto-inputs. Choose one.
 * @param[in] hw_dev The target device feeding input data
 * @return @c 0 if no error. otherwise a negative error value
 */
/* NOTE(review): the hw_dev parameter line, return type, braces and error returns
 * are elided in this listing */
HostHandler::runInternal (uint32_t modelid, npu_input_opmode opmode,
  Model *model = nullptr;

  /* some devices (e.g., ASR) run without a host-registered model */
  if (device_->needModel()) {
    model = getModel (modelid);
    if (model == nullptr)

  /* check the given model before running */
  if (model != nullptr && !model->finalize ()) {
    logerr (TAG, "Failed to finalize the model. Please see the log messages\n");

  return device_->runInternal (opmode, model, hw_dev);
/**
 * @brief Stop the request with the given id
 * @param[in] dev The NPU device handle
 * @param[in] id The request id
 * @return @c 0 if no error. otherwise a negative error value
 */
/* NOTE(review): return type, braces and the invalid-id guard wrapping the logerr
 * are elided in this listing */
HostHandler::stopInternal (int id)
    logerr (TAG, "Unable to stop this request with id (%d)\n", id);

  /* driver API is available once the device is initialized */
  const DriverAPI * api = device_->getDriverAPI ();
  assert (api != nullptr);

  return api->stop_target (id);
694 * @brief get number of available devices
695 * @param[in] type device type
696 * @return number of devices
699 HostHandler::getNumDevices (dev_type type)
701 return DriverAPI::getNumDevices (type);
/**
 * @brief get device instance
 * @param[out] dev device instance
 * @param[in] type device type
 * @param[in] id device id
 * @return 0 if no error. otherwise a negative errno
 */
/* NOTE(review): return type, braces, error returns and the assignments of *dev
 * and latest_dev_ are elided in this listing */
HostHandler::getDevice (npudev_h *dev, dev_type type, uint32_t id)
  int num_devices = getNumDevices (type);

  /** check the validity of device id */
  if (!(num_devices > 0 && id < static_cast<uint32_t>(num_devices))) {
    logerr (TAG, "Invalid arguments provided\n");

  Device *device = Device::createInstance (type, id);
  if (device == nullptr) {
    logerr (TAG, "Failed to create a device with the given type\n");

  /** This is just for backward-compatibility; we don't guarantee its correctness */
/**
 * @brief allocate generic buffer (just for users)
 * @param[out] buffer buffer instance
 * @return 0 if no error. otherwise a negative errno
 */
/* NOTE(review): return type, braces, the null-buffer guard, error returns and
 * the remaining switch case labels are elided in this listing */
HostHandler::allocGenericBuffer (generic_buffer *buffer)
  if (buffer->size == 0) {
    logerr (TAG, "Invalid size\n");

  /* sizes above UINT32_MAX are rejected */
  if (buffer->size > UINT32_MAX) {
    logerr (TAG, "Don't support such a large size");

  switch (buffer->type) {
      /* file-backed buffers need a path to stage from */
      if (buffer->filepath == nullptr)

      /* now, npu-engine always provides dmabuf-based allocation */
      void *addr = nullptr;
      int dmabuf = device_->allocMemory (buffer->size, &addr);

      buffer->dmabuf = dmabuf;
/**
 * @brief deallocate generic buffer (just for users)
 * @param[in] buffer buffer instance
 * @return 0 if no error. otherwise a negative errno
 */
/* NOTE(review): return type, braces, the null guard and the other case labels
 * are elided in this listing */
HostHandler::deallocGenericBuffer (generic_buffer *buffer)
  switch (buffer->type) {
      /** always true cuz nothing to do */
      return device_->deallocMemory (buffer->dmabuf, buffer->size, buffer->addr);
/**
 * @brief allocate multiple generic buffers (just for users)
 * @param[out] buffers multi-buffer instance
 * @return 0 if no error. otherwise a negative errno
 */
/* NOTE(review): return type, braces, declarations of idx/status and the
 * failure-break logic are elided in this listing */
HostHandler::allocGenericBuffer (generic_buffers *buffers)
  if (buffers == NULL || buffers->num_buffers < 1)

  for (idx = 0; idx < buffers->num_buffers; idx++) {
    status = allocGenericBuffer (&buffers->bufs[idx]);

      /* on failure, roll back the buffers allocated so far */
      deallocGenericBuffer (&buffers->bufs[--idx]);
/**
 * @brief deallocate multiple generic buffers (just for users)
 * @param[in] buffers multi-buffer instance
 * @return 0 if no error. otherwise a negative errno
 */
/* NOTE(review): return type, braces and return statements are elided in this
 * listing */
HostHandler::deallocGenericBuffer (generic_buffers *buffers)
  if (buffers == NULL || buffers->num_buffers < 1)

  for (uint32_t idx = 0; idx < buffers->num_buffers; idx++)
    deallocGenericBuffer (&buffers->bufs[idx]);
  /* mark the container empty so a second dealloc is harmless */
  buffers->num_buffers = 0;
854 * @brief get the current memory status
855 * @param[out] alloc_total The size of allocated memory until now
856 * @param[out] free_total The size of freed memory until now
857 * @return 0 if no error. otherwise a negatice error value
860 HostHandler::getMemoryStatus (size_t *alloc_total, size_t *free_total)
862 /** API is always set in initialize () */
863 const DriverAPI * api = device_->getDriverAPI ();
864 assert (api != nullptr);
866 return api->getMemoryStatus (alloc_total, free_total);
/**
 * @brief Get the current device status to be used
 * @param[out] status the device status
 * @param[out] num_requests the number of running requests (or pending)
 * @return 0 if no error, otherwise a negative errno.
 */
/* NOTE(review): return type, braces, the api null-check and the *status
 * assignment branches are elided in this listing */
HostHandler::getDeviceStatus (npu_status *status, uint32_t *num_requests)
  /** API is always set in initialize () */
  const DriverAPI * api = device_->getDriverAPI ();

  device_state_t state = api->isReady ();
  if (state == device_state_t::STATE_READY) {
    *num_requests = api->numRequests ();
    if (*num_requests > 0)
899 /** implement methods of Device class */
901 /** @brief constructor of device */
902 Device::Device (dev_type type, int id, bool need_model)
903 : comm_ (CommPlugin::getCommPlugin()), type_ (type), id_ (id), need_model_ (true),
904 mode_ (NPUASYNC_WAIT), initialized_ (false), atomic_flag_ (ATOMIC_FLAG_INIT)
/**
 * @brief create device instance depending on device type and id
 * @param[in] type device type
 * @param[in] id device id
 * @return device instance
 */
/* NOTE(review): the return type (presumably Device *), braces, break statements,
 * the default case and the init-failure cleanup/return are elided in this
 * listing */
Device::createInstance (dev_type type, int id)
  Device *device = nullptr;

  /* dispatch on the device family encoded in the type bits */
  switch (type & DEVICETYPE_MASK) {
    case DEVICETYPE_TRIV:
      device = new TrinityVision (id);
    case DEVICETYPE_TRIV2:
      device = new TrinityVision2 (id);
    case DEVICETYPE_TRIA:
      device = new TrinityAsr (id);
      /* ASR devices carry embedded models; no host-side model is required */
      device->setNeedModel (false);

  /* init() failure presumably destroys the instance in the elided lines */
  if (device != nullptr && device->init () != 0) {
/**
 * @brief device initialization
 * @return 0 if no error, otherwise a negative errno
 * @note Init failures come from createDriverAPI() only.
 */
/* NOTE(review): the function signature line (presumably Device::init), braces
 * and return statements are elided in this listing */
  /** should be initialized only once */
  if (!atomic_flag_.test_and_set()) {
    /** create the corresponding driver API */
    api_ = DriverAPI::createDriverAPI (type_, id_);
    if (api_.get() == nullptr) {
      /* release the once-flag so a later init() may retry */
      atomic_flag_.clear();
      logerr (TAG, "Failed to create driver API\n");

    handler_.reset (new HostHandler (this));
    scheduler_.reset (new Scheduler (api_.get()));
    mem_ = MemAllocator::createInstance (api_.get());

    initialized_ = true; /** c++11 does not provide test() of atomic flag */
/**
 * @brief stop all requests from this device
 * @param[in] force_stop indicate the scheduler waits until to handle previous requests
 * @return 0 if no error, otherwise a negative errno
 */
/* NOTE(review): return type, braces and the error return after the logerr are
 * elided in this listing */
Device::stop (bool force_stop)
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");

  /* stopping is modeled as a special request submitted to the scheduler */
  Request *req = new Request (NPUINPUT_STOP);
  req->setForceStop (force_stop);
  return scheduler_->submitRequest (req);
/**
 * @brief allocate generic memory buffer
 * @param[in] size the size to allocate
 * @param[out] addr the mapped address
 * @return dmabuf fd if no error, otherwise a negative errno
 */
/* NOTE(review): return type, braces and error returns are elided in this
 * listing */
Device::allocMemory (size_t size, void **addr)
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");

  if (size == 0 || addr == nullptr) {
    logerr (TAG, "Invalid arguments\n");

  /* delegated to the per-device memory allocator */
  return mem_->allocMemory (size, addr);
/**
 * @brief deallocate generic memory buffer
 * @param[in] dmabuf_fd dmabuf file descriptor
 * @param[in] size buffer size
 * @param[in] addr mapped addr
 * @return 0 if no error, otherwise a negative errno
 */
/* NOTE(review): return type, braces and error returns are elided in this
 * listing */
Device::deallocMemory (int dmabuf_fd, size_t size, void * addr)
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");

  if (dmabuf_fd < 0 || size == 0 || addr == nullptr) {
    logerr (TAG, "Invalid arguments\n");

  return mem_->deallocMemory (dmabuf_fd, size, addr);
/**
 * @brief extract the buffer instance from input generic buffers
 * @param[in] meta the model metadata
 * @param[in] input the input generic buffers
 * @return the buffer instance
 */
/* NOTE(review): the return type (presumably Buffer *), braces, the declaration
 * of `buffer`, the else keyword, error returns and the final return are elided
 * in this listing */
TrinityVision::prepareInputBuffers (const Metadata *meta, const input_buffers *input)
  if (meta == nullptr || input == nullptr ||
      meta->getInputNum() != input->num_buffers) {
    logerr (TAG, "Invalid metadata info provided\n");

  /* a dmabuf-typed first buffer means the caller owns the memory (external) */
  const generic_buffer *first = &input->bufs[0];
  if (first->type == BUFFER_DMABUF) {
    buffer = mem_->allocBuffer (new HWmemExternal);
    if (buffer == nullptr)

    buffer->setDmabuf (first->dmabuf);
    buffer->setOffset (first->offset);
    buffer->setSize (meta->getBufferSize());

    /* otherwise, allocate device-owned memory sized from the metadata */
    buffer = mem_->allocBuffer (new HWmemDevice);
    if (buffer == nullptr)

    int status = buffer->alloc (meta->getBufferSize ());
      logerr (TAG, "Failed to allocate buffer: %d\n", status);

  /* carve per-tensor views out of the contiguous buffer */
  int status = buffer->createTensors (meta);
    logerr (TAG, "Failed to create tensors: %d\n", status);
/**
 * @brief implementation of TRIV's setModel ()
 * @param[in] model_buf the model generic buffer
 * @param[out] model the model instance
 * @return 0 if no error, otherwise a negative errno
 */
/* NOTE(review): return type, braces, several switch case labels, error returns
 * and the final *model_ptr assignment/return are elided in this listing */
TrinityVision::setModel (const generic_buffer *model_buf, Model ** model_ptr)
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");

  if (model_buf == nullptr || model_ptr == nullptr)

  Model *model = nullptr;
  HWmem * hwmem_prog = nullptr;
  HWmem * hwmem_weight = nullptr;

  /** In TRIV1, model data (including program/weight) should be contiguous */
  switch (model_buf->type) {
      model = mem_->allocModel (new HWmemDevice);
      if (model == nullptr) {
        logerr (TAG, "Failed to allocate model\n");

      status = model->alloc (model_buf->size);
        logerr (TAG, "Failed to allocate model: %d\n", status);

      /** extract the whole model data */
      status = comm_.extractGenericBuffer (model_buf, model->getData(), nullptr);
        logerr (TAG, "Failed to extract generic buffer: %d\n", status);

  /* parse metadata from the staged model bytes */
  status = model->setMetadata (model->getData());

  /** allocate program (optional; NOP) */
  if (model->getMetadata()->getProgramSize() > 0) {
    hwmem_prog = new HWmem (new HWmemChunk);
    model->setProgramData (hwmem_prog);

    /* program bytes live inside the model chunk, right after the metadata */
    hwmem_prog->setParent (model);
    hwmem_prog->setOffset (model->getMetadata()->getMetaSize());
    status = hwmem_prog->alloc (model->getMetadata()->getProgramSize());
      logerr (TAG, "Failed to allocate program\n");

  /** allocate weight (optional) */
  if (model->getMetadata()->getWeightSize() > 0) {
    hwmem_weight = new HWmem (new HWmemChunk);
    model->setWeightData (hwmem_weight);

    /* weights follow the program segment inside the same chunk */
    hwmem_weight->setParent (model);
    hwmem_weight->setOffset (model->getMetadata()->getMetaSize() +
        model->getMetadata()->getProgramSize());
    status = hwmem_weight->alloc (model->getMetadata()->getWeightSize());
      /* NOTE(review): this message says "program" but this is the weight path —
       * likely a copy-paste; confirm before changing the string */
      logerr (TAG, "Failed to allocate program\n");

  if (hwmem_prog != nullptr) {
    /** register this model to the driver */
    model_config_t config;
    config.dbuf_fd = hwmem_prog->getDmabuf ();
    config.program_size = hwmem_prog->getSize ();
    config.program_offset_addr = hwmem_prog->getOffset ();
    if (hwmem_weight != nullptr)
      config.weight_offset_addr = hwmem_weight->getOffset ();

    status = api_->registerModel (&config);

    /* remember the driver-assigned id for later deregister */
    model->setInternalID(config.id);
/**
 * @brief implementation of TRIV's unsetModel ()
 * @param[in] model the model instance
 * @return 0 if no error, otherwise a negative errno
 */
/* NOTE(review): return type, braces, error returns and the no-program return
 * path are elided in this listing */
TrinityVision::unsetModel (Model * model)
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");

  if (model == nullptr) {
    logerr (TAG, "Invalid model instance\n");

  /* only models registered with the driver (program present) need deregister */
  if (model->getMetadata()->getProgramSize() > 0)
    return api_->deregisterModel (model->getInternalID ());
/**
 * @brief implementation of TRIV's run()
 * @param[in] opmode input opmode
 * @param[in] model the model instance
 * @param[in] input generic buffers of input data
 * @param[in] cb the output callback
 * @param[in] cb_data the output callback data
 * @param[out] sequence The sequence number returned with runNPU_async.
 */
/* NOTE(review): return type, the trailing sequence parameter line, braces and
 * error returns are elided in this listing */
TrinityVision::run (npu_input_opmode opmode, const Model *model,
    const input_buffers *input, npuOutputNotify cb, void *cb_data,
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");

  if (opmode != NPUINPUT_HOST) {
    logerr (TAG, "TRIV supports only host inputservice\n");

  if (model == nullptr || input == nullptr) {
    logerr (TAG, "TRIV requires both model and input buffers\n");

  /* refresh layout/type info cached on the model before this run */
  const_cast<Model *>(model)->updateDataInfo ();

  Buffer *buffer = prepareInputBuffers (model->getMetadata(), input);
  if (buffer == nullptr) {
    logerr (TAG, "Failed to extract buffer instance\n");

  /* device-owned buffer: copy (and manipulate) each input tensor into place */
  if (!buffer->isExternal ()) {
    for (uint32_t idx = 0; idx < input->num_buffers; idx++) {
      auto func = std::bind (TrinityVision::manipulateData, model, idx, true,
          std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
      int status = comm_.extractGenericBuffer (&input->bufs[idx],
          buffer->getInputTensor(idx)->getData(), func);
        logerr (TAG, "Failed to feed input buffer: %d\n", status);

  /** this device uses CMA buffer */
  Request *req = new Request (opmode);
  req->setModel (model);
  req->setBuffer (buffer);

  /* completion is routed through TrinityVision::callback below */
  req->setCallback (std::bind (&TrinityVision::callback, this, req, cb, cb_data));

  if (sequence != nullptr)
    *sequence = req->getID();

  return scheduler_->submitRequest (req);
/**
 * @brief callback of TRIV request
 * @param[in] req the request instance
 * @param[in] cb callback for completion
 * @param[in] cb_data callback data
 * @note The callback invoke does not guarantee the request was successful
 * @todo Check the request failures
 */
/* NOTE(review): the original doxygen said "TRIV2" although this is
 * TrinityVision (TRIV); return type, braces, the else keyword and cleanup lines
 * are elided in this listing */
TrinityVision::callback (Request *req, npuOutputNotify cb, void *cb_data)
  const Model *model = req->getModel ();
  Buffer *buffer = req->getBuffer ();
  output_buffers output = {
    .num_buffers = buffer->getOutputNum ()

  for (uint32_t idx = 0; idx < output.num_buffers; idx++) {
    uint32_t output_tensor_size = model->getOutputTensorSize (idx);

    /* external buffers are handed back as dmabuf views; no copy is made */
    if (buffer->isExternal ()) {
      output.bufs[idx].type = BUFFER_DMABUF;
      output.bufs[idx].size = output_tensor_size;
      output.bufs[idx].addr = buffer->getOutputTensor(idx)->getData();

      /* internal buffers are copied out into freshly malloc'ed memory */
      output.bufs[idx].type = BUFFER_MAPPED;
      output.bufs[idx].size = output_tensor_size;
      /** user needs to free this */
      output.bufs[idx].addr = malloc (output_tensor_size);

      auto func = std::bind (TrinityVision::manipulateData, model, idx, false,
          std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
      int status = comm_.insertGenericBuffer (buffer->getOutputTensor(idx)->getData(),
          &output.bufs[idx], func);
        logerr (TAG, "Failed to return output buffer: %d\n", status);

  /* notify the user with the assembled output set */
  cb (&output, req->getID(), cb_data);
1323  * @brief extract the segment table instance from input/output generic buffers
1324 * @param[in] model the model instance
1325 * @param[in] input the input generic buffers
1326 * @param[in] output the output generic buffers
1327 * @return the segment table instance
1330 TrinityVision2::prepareSegmentTable (const Model *model, const input_buffers *input,
1331 const output_buffers *output)
/** sanity check: metadata must exist and, when input buffers are given,
 *  their count must match the model's declared input count */
1333 const Metadata *meta = model->getMetadata ();
1334 if (meta == nullptr || (input != nullptr &&
1335 meta->getInputNum() != input->num_buffers)) {
1336 logerr (TAG, "Invalid metadata info provided\n");
/** allocate a device-memory-backed segment table via the memory manager */
1340 SegmentTable * segt = mem_->allocSegmentTable (new HWmemDevice);
1341 int status = segt->alloc ();
1343 logerr (TAG, "Failed to allocate segment table: %d\n", status);
/** build segment entries from the model and the (optional) I/O buffers */
1347 status = segt->createSegments (model, input, output);
1349 logerr (TAG, "Failed to create segments: %d\n", status);
1361 * @brief implementation of TRIV2's setModel ()
1362 * @param[in] model_buf the model generic buffer
1363  * @param[out] model_ptr the model instance
1364 * @return 0 if no error, otherwise a negative errno
1367 TrinityVision2::setModel (const generic_buffer *model_buf, Model ** model_ptr)
/** device must be opened through the libnpuhost entry points first */
1369 if (!initialized ()) {
1370 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
/** reject null arguments */
1374 if (model_buf == nullptr || model_ptr == nullptr)
/** handling depends on how the model buffer was provided (cases elided) */
1380 switch (model_buf->type) {
1383 model = mem_->allocModel (new HWmemDevice);
1384 if (model == nullptr) {
1385 logerr (TAG, "Failed to allocate model\n");
/** first load only the npubinfmt metadata header (NPUBIN_META_SIZE bytes) */
1389 status = model->alloc (NPUBIN_META_SIZE);
1391 logerr (TAG, "Failed to allocate model: %d\n", status);
1395 status = comm_.extractGenericBuffer (model_buf, model->getData(), nullptr,
1396 0, NPUBIN_META_SIZE);
1398 logerr (TAG, "Failed to extract generic buffer: %d\n", status);
/** parse the copied header into the model's metadata */
1406 status = model->setMetadata (model->getData());
1410 /** allocate program (optional; NOP) */
1411 if (model->getMetadata()->getProgramSize() > 0) {
1412 HWmem * hwmem_prog = new HWmem (new HWmemDevice);
1413 hwmem_prog->setDriverAPI (api_.get());
/** model takes ownership of the program HWmem */
1415 model->setProgramData (hwmem_prog);
1417 status = hwmem_prog->alloc (model->getMetadata()->getProgramSize());
1419 logerr (TAG, "Failed to allocate program\n");
/** program binary sits right after the metadata in the model buffer */
1423 status = comm_.extractGenericBuffer (model_buf, hwmem_prog->getData(), nullptr,
1424 model->getMetadata()->getMetaSize(),
1425 model->getMetadata()->getProgramSize());
1427 logerr (TAG, "Failed to extract generic buffer: %d\n", status);
1431 /** register this model to the driver */
1432 model_config_t config;
1433 config.dbuf_fd = hwmem_prog->getDmabuf ();
1434 config.program_size = hwmem_prog->getSize ();
1435 config.program_offset_addr = 0;
1437 status = api_->registerModel (&config);
/** remember the driver-assigned id for later deregistration */
1441 model->setInternalID(config.id);
1444 /** allocate weight (optional) */
1445 if (model->getMetadata()->getWeightSize() > 0) {
1446 HWmem * hwmem_weight = new HWmem (new HWmemDevice);
1447 hwmem_weight->setDriverAPI (api_.get());
1449 model->setWeightData (hwmem_weight);
1451 status = hwmem_weight->alloc (model->getMetadata()->getWeightSize());
/** NOTE(review): message says "program" but this is the weight buffer —
 *  looks like a copy-paste; confirm and fix the string separately */
1453 logerr (TAG, "Failed to allocate program\n");
/** weights follow the metadata + program sections in the model buffer */
1457 status = comm_.extractGenericBuffer (model_buf, hwmem_weight->getData(), nullptr,
1458 model->getMetadata()->getMetaSize() + model->getMetadata()->getProgramSize(),
1459 model->getMetadata()->getWeightSize());
1461 logerr (TAG, "Failed to extract generic buffer: %d\n", status);
1475 * @brief implementation of TRIV2's unsetModel ()
1476 * @param[in] model the model instance
1477 * @return 0 if no error, otherwise a negative errno
1480 TrinityVision2::unsetModel (Model * model)
/** device must be opened through the libnpuhost entry points first */
1482 if (!initialized ()) {
1483 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
1487 if (model == nullptr) {
1488 logerr (TAG, "Invalid model instance\n");
/** only models that registered a program with the driver need deregistering */
1492 if (model->getMetadata()->getProgramSize() > 0)
1493 return api_->deregisterModel (model->getInternalID ());
1498 /** @brief implementation of TRIV2's run() */
1500 TrinityVision2::run (npu_input_opmode opmode, const Model *model,
1501 const input_buffers *input, npuOutputNotify cb, void *cb_data,
/** device must be opened through the libnpuhost entry points first */
1504 if (!initialized ()) {
1505 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
/** this entry point only services host-provided inputs */
1509 if (opmode != NPUINPUT_HOST)
1512 if (input == nullptr || input->num_buffers == 0 || model == nullptr)
/** refresh the model's tensor data info before buffer preparation */
1515 const_cast<Model *>(model)->updateDataInfo ();
1517 /** this device uses segment table */
/** output argument presumably defaulted — confirm against the declaration */
1518 SegmentTable * segt = prepareSegmentTable (model, input);
1519 if (segt == nullptr) {
1520 logerr (TAG, "Failed to create segment table instance\n");
1524 /** extract input data */
1525 for (uint32_t idx = 0; idx < input->num_buffers; idx++) {
/** external (dmabuf) segments need no copy; only internal ones are filled */
1526 if (!segt->getInputSegment(idx)->isExternal ()) {
1527 uint32_t seg_offset = segt->getInputSegmentOffset(idx);
/** per-tensor manipulation hook applied while copying in (is_input == true) */
1528 auto func = std::bind (TrinityVision2::manipulateData, model, idx, true,
1529 std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
1530 int status = comm_.extractGenericBuffer (
1532 segt->getInputSegment(idx)->getData() + seg_offset,
1535 logerr (TAG, "Failed to feed input segment: %d\n", status);
/** build the request and hand it to the scheduler; completion goes
 *  through TrinityVision2::callback which invokes the user callback */
1541 Request *req = new Request (opmode);
1542 req->setModel (model);
1543 req->setSegmentTable (segt);
1544 req->setCallback (std::bind (&TrinityVision2::callback, this, req, cb, cb_data));
/** report the request id back to the caller if requested */
1547 *sequence = req->getID();
1549 return scheduler_->submitRequest (req);
1552 /** @brief implementation of TRIV2's runInternal() */
1554 TrinityVision2::runInternal (npu_input_opmode opmode, const Model *model,
/** device must be opened through the libnpuhost entry points first */
1557 if (!initialized ()) {
1558 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
/** internal execution only supports the HW-recurring input mode */
1562 if (opmode != NPUINPUT_HW_RECURRING)
1565 /** this device uses segment table */
/** no host I/O buffers: segments are created from the model alone */
1566 SegmentTable * segt = prepareSegmentTable (model, nullptr, nullptr);
1567 if (segt == nullptr) {
1568 logerr (TAG, "Failed to create segment table instance\n");
/** no user callback here — request is bound to a HW device instead */
1572 Request *req = new Request (opmode);
1573 req->setModel (model);
1574 req->setSegmentTable (segt);
1575 req->setHwDevice (hw_dev);
1577 return scheduler_->submitRequest (req);
1580 /** @brief callback of TRIV2 request */
1582 TrinityVision2::callback (Request *req, npuOutputNotify cb, void *cb_data)
/** retrieve the model and segment table bound to this request */
1584 const Model *model = req->getModel ();
1585 SegmentTable *segt = req->getSegmentTable ();
1586 output_buffers output = {
1587 .num_buffers = segt->getNumOutputSegments ()
/** populate one generic buffer per output segment */
1590 for (uint32_t idx = 0; idx < output.num_buffers; idx++) {
1591 uint32_t output_tensor_size = model->getOutputTensorSize (idx);
/** TRIV2 always hands the user a zero-initialized heap copy */
1593 output.bufs[idx].type = BUFFER_MAPPED;
1594 output.bufs[idx].size = output_tensor_size;
1595 /** user needs to free this */
1596 output.bufs[idx].addr = calloc (1, output_tensor_size);
/** FPGA builds copy via dmabuf instead (call elided in this listing) */
1598 #if defined(ENABLE_FPGA_WORKAROUND)
1600 segt->getOutputSegment(idx)->getDmabuf(),
1601 segt->getOutputSegmentOffset(idx),
1602 output.bufs[idx].addr,
1603 output.bufs[idx].size);
/** normal path: copy out with the data-manipulation hook (is_input == false) */
1605 auto func = std::bind (TrinityVision2::manipulateData, model, idx, false,
1606 std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
1607 int status = comm_.insertGenericBuffer (
1608 segt->getOutputSegment(idx)->getData() + segt->getOutputSegmentOffset(idx),
1609 &output.bufs[idx], func);
1612 logerr (TAG, "Failed to return output buffer: %d\n", status);
/** finally notify the user with the assembled outputs */
1617 cb (&output, req->getID(), cb_data);
1622 /** @brief implementation of TRIA's run(): WIP */
1624 TrinityAsr::run (npu_input_opmode opmode, const Model *model,
1625 const input_buffers *input, npuOutputNotify cb, void *cb_data,
/** device must be opened through the libnpuhost entry points first */
1628 if (!initialized ()) {
1629 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
/** ASR only services host-provided inputs */
1633 if (opmode != NPUINPUT_HOST)
/** exactly one input tensor is expected */
1636 if (input == nullptr || input->num_buffers != 1)
1641 /** ASR does not require a model and supports only a single tensor */
1642 const generic_buffer *first_buf = &input->bufs[0];
/** dmabuf input: wrap the caller's buffer as external memory (no copy) */
1643 if (first_buf->type == BUFFER_DMABUF) {
1644 buffer = mem_->allocBuffer (new HWmemExternal);
1645 if (buffer == nullptr)
1648 buffer->setDmabuf (first_buf->dmabuf);
1649 buffer->setOffset (first_buf->offset);
1650 buffer->setSize (first_buf->size);
/** otherwise allocate device memory of the same size and copy in below */
1652 buffer = mem_->allocBuffer (new HWmemDevice);
1653 if (buffer == nullptr)
1656 status = buffer->alloc (first_buf->size);
1663 status = buffer->createTensors ();
1665 logerr (TAG, "Failed to create tensors: %d\n", status);
/** internal buffer: copy the user data in (no manipulation hook for ASR) */
1670 if (!buffer->isExternal ()) {
1671 status = comm_.extractGenericBuffer (first_buf,
1672 buffer->getInputTensor(0)->getData(), nullptr);
/** build the request and submit; completion goes through TrinityAsr::callback */
1677 Request *req = new Request (opmode);
1678 req->setBuffer (buffer);
1679 req->setCallback (std::bind (&TrinityAsr::callback, this, req, cb, cb_data));
1682 *sequence = req->getID();
1684 return scheduler_->submitRequest (req);
1687 /** @brief callback of TRIA request: WIP */
1689 TrinityAsr::callback (Request *req, npuOutputNotify cb, void *cb_data)
/** work-in-progress: outputs are not populated yet, only the notify fires */
1691 Buffer *buffer = req->getBuffer ();
1692 output_buffers output = {
1696 /** TODO: finalize this impl. when the ASR's working scenario is determined */
1697 cb (&output, req->getID(), cb_data);
1702 /** Implement data manipulation (each device may have different impl.) */
1707 * @brief perform data manipulation
1708 * @param[in] model model instance
1709 * @param[in] idx tensor index
1710 * @param[in] is_input indicate it's input manipulation
1711 * @param[out] dst destination buffer
1712 * @param[in] src source buffer (feature map)
1713 * @param[in] size size to be copied
1714 * @return size of memory copy if no error, otherwise zero
1716 * @note the input data format should be NHWC
1717 * @detail rules for the memory address of activations in NPU HW.
1718 * (https://code.sec.samsung.net/confluence/pages/viewpage.action?pageId=146491864)
1720 * 1) Special case (depth == 3)
1721 * - addr(x,y,z) = addr(0,0,0) + (z) + 3 * (x + width * y)
1724 * - addr(x,y,z) = addr(0,0,0) + (z % MPA_L) + MPA_L * (x + width * (y + height * (z / MPA_L)))
1726 * Thus, if depth is not a multiple of MPA_L (i.e., 64), zero padding is required
1729 TrinityVision::manipulateData (const Model *model, uint32_t idx, bool is_input,
1730 void *dst, void *src, size_t size)
/** configure a DataConverter with the direction and raw copy parameters */
1732 const Metadata *meta = model->getMetadata();
1733 DataConverter converter (is_input);
1735 converter.setData (src, dst, size);
/** input direction: convert user layout/type -> SRNPU device format */
1738 const tensor_data_info* info = model->getInputDataInfo (idx);
1739 if (info == nullptr)
1742 converter.setDataLayout (info->layout, DATA_LAYOUT_SRNPU);
1743 converter.setDataType (info->type, DATA_TYPE_SRNPU);
1744 converter.setDataDims (meta->getInputDims (idx));
1745 converter.setQuantZero (meta->getInputQuantZero (idx));
1746 converter.setQuantScale (meta->getInputQuantScale (idx));
/** output direction: convert SRNPU device format -> user layout/type */
1748 const tensor_data_info* info = model->getOutputDataInfo (idx);
1749 if (info == nullptr)
1752 converter.setDataLayout (DATA_LAYOUT_SRNPU, info->layout);
1753 converter.setDataType (DATA_TYPE_SRNPU, info->type);
1754 converter.setDataDims (meta->getOutputDims (idx));
1755 converter.setQuantZero (meta->getOutputQuantZero (idx));
1756 converter.setQuantScale (meta->getOutputQuantScale (idx));
/** returns copied byte count per the contract above (zero on error) */
1759 return converter.perform ();
1763 * @brief perform data manipulation
1764 * @param[in] model model instance
1765 * @param[in] idx tensor index
1766 * @param[in] is_input indicate it's input manipulation
1767 * @param[out] dst destination buffer
1768 * @param[in] src source buffer (feature map)
1769 * @param[in] size size to be copied
1770 * @return size of memory copy if no error, otherwise zero
1772 * @note the input data format should be NHWC
1774 * @detail Feature map data in TRIV2, (x, y, z) = (width, height, depth)
1776 * 1) Image input (depth == 1 or depth == 3)
1777 * Addr(x,y,z) = Addr(0,0,0) + z + depth * x + ymod * y
1780 * Addr(x,y,z) = Addr(0,0,0) + (z % 64) + (64 * x) + ymod * y + zmod * (z / 64)
1783 TrinityVision2::manipulateData (const Model *model, uint32_t idx, bool is_input,
1784 void *dst, void *src, size_t size)
/** configure a DataConverter with the direction and raw copy parameters */
1786 const Metadata *meta = model->getMetadata();
1787 DataConverter converter (is_input);
1789 converter.setData (src, dst, size);
/** input direction: convert user layout/type -> TRIV2 device format;
 *  target type comes from the per-tensor quantization metadata */
1792 const tensor_data_info* info = model->getInputDataInfo (idx);
1793 if (info == nullptr)
1796 converter.setDataLayout (info->layout, DATA_LAYOUT_TRIV2);
1797 converter.setDataType (info->type, meta->getInputQuantType (idx));
1798 converter.setDataDims (meta->getInputDims (idx));
1799 converter.setQuantZero (meta->getInputQuantZero (idx));
1800 converter.setQuantScale (meta->getInputQuantScale (idx));
/** output direction: convert TRIV2 device format -> user layout/type */
1802 const tensor_data_info* info = model->getOutputDataInfo (idx);
1803 if (info == nullptr)
1806 converter.setDataLayout (DATA_LAYOUT_TRIV2, info->layout);
1807 converter.setDataType (meta->getOutputQuantType (idx), info->type);
1808 converter.setDataDims (meta->getOutputDims (idx));
1809 converter.setQuantZero (meta->getOutputQuantZero (idx));
1810 converter.setQuantScale (meta->getOutputQuantScale (idx));
/** returns copied byte count per the contract above (zero on error) */
1813 return converter.perform ();
/** @brief fallback without data manipulation: plain memcpy of size bytes
 *  (presumably the build-config branch where conversion is disabled — confirm) */
1819 TrinityVision::manipulateData (const Model *model, uint32_t idx, bool is_input,
1820 void *dst, void *src, size_t size)
1822 memcpy (dst, src, size);
/** @brief fallback without data manipulation: plain memcpy of size bytes
 *  (presumably the build-config branch where conversion is disabled — confirm) */
1827 TrinityVision2::manipulateData (const Model *model, uint32_t idx, bool is_input,
1828 void *dst, void *src, size_t size)
1830 memcpy (dst, src, size);
1836 /** other device types don't have data manip impl. yet */
1839 TrinityAsr::manipulateData (const Model *model, uint32_t idx, bool is_input,
1840 void *dst, void *src, size_t size)
1842 memcpy (dst, src, size);