3 * Copyright (C) 2020 Samsung Electronics
4 * Copyright (C) 2020 Dongju Chae <dongju.chae@samsung.com>
* @brief Implementation of NPU Engine entrypoint that handles APIs from host
10 * @see https://code.sec.samsung.net/confluence/display/ODLC/2020+Overall+Software+Stack
11 * @author Dongju Chae <dongju.chae@samsung.com>
12 * @bug No known bugs except for NYI items
15 #include "ne-handler.h"
18 #include <npubinfmt.h>
19 #include <NPUdrvAPI.h>
20 #include <CommPlugin.h>
25 #include <condition_variable>
32 /** @brief host handler constructor */
/** @brief host handler constructor; keeps a non-owning pointer to the parent device */
HostHandler::HostHandler (Device *device)
  /* ignored as we don't use double buffering anymore, but kept for backward-compatibility */
  async_mode_ (NPUASYNC_WAIT)
/** @brief host handler destructor; registered models are owned by the model registry */
HostHandler::~HostHandler ()
/**
 * @brief register model from generic buffer
 * @param[in] model_buf model buffer
 * @param[out] modelid model id
 * @return 0 if no error. otherwise a negative errno
 */
HostHandler::registerModel (generic_buffer *model_buf, uint32_t *modelid)
  /** reject null arguments up front */
  if (model_buf == nullptr || modelid == nullptr) {
    logerr (TAG, "Invalid arguments given\n");
  Model *model = nullptr;
  /** the device parses the buffer and creates the Model instance */
  int status = device_->setModel (model_buf, &model);
    logerr (TAG, "Failed to set model: %d\n", status);
  assert (model != nullptr);
  /** track the model in the id-keyed registry */
  status = models_.insert (model->getID(), model);
    logerr (TAG, "Failed to insert model id\n");
  *modelid = model->getID();
/**
 * @brief remove the registered model
 * @param[in] modelid model id
 * @return 0 if no error. otherwise a negative errno
 */
HostHandler::unregisterModel (uint32_t modelid)
  Model *model = models_.find (modelid);
  /** release device-side resources before dropping the registry entry */
  int status = device_->unsetModel (model);
    logerr (TAG, "Failed to unset model: %d\n", status);
  return models_.remove (modelid);
/**
 * @brief remove all registered models
 */
HostHandler::unregisterModels ()
  /** functor applied to each registry entry; returns whether the entry may be removed */
  std::function <bool (Model *)> functor =
    [&] (Model *m) -> bool {
      bool can_remove = true;
      int status = device_->unsetModel (m);
        /** failure to unset is logged but does not abort the sweep */
        logwarn (TAG, "Failed to unset model: %d\n", status);
  models_.for_each (functor);
/**
 * @brief Get the profile information from NPU
 * @param[in] task_id The identifier for each inference
 * @param[out] profile The profile instance
 * @return 0 if no error, otherwise a negative errno.
 */
HostHandler::getProfile (int task_id, npu_profile *profile)
  if (task_id < 0 || profile == nullptr) {
    logerr (TAG, "Invalid parameter provided\n");
  const DriverAPI * api = device_->getDriverAPI ();
  assert (api != nullptr);
  /** initialize the output so callers see a sane value even on failure */
  profile->num_layers = 0;
  profile->layers = nullptr;
  int status = api->getProfile (task_id, profile);
    logerr (TAG, "Failed to get profile information: %d\n", status);
/**
 * @brief get the stats for the latest apps of the target device
 * @param[out] stat The list of app stat
 * @note The caller has the responsibility to free the resources.
 *       This API is not working on the emulated environment.
 */
HostHandler::getStatApps (npu_stat_apps *stat)
  /** driver API is always available once the device is initialized */
  const DriverAPI * api = device_->getDriverAPI ();
  assert (api != nullptr);
  return api->getStatApps (stat);
/**
 * @brief get the stats for the latest tasks of the target app
 * @param[in] appid The identifier of target app
 * @param[out] stat The list of task stat
 * @note The caller has the responsibility to free the resources.
 *       This API is not working on the emulated environment.
 */
HostHandler::getStatTasks (int appid, npu_stat_tasks *stat)
  const DriverAPI * api = device_->getDriverAPI ();
  assert (api != nullptr);
  return api->getStatTasks (appid, stat);
/**
 * @brief Get the driver API level of opened NPU device
 * @param[out] level driver API level
 * @return 0 if no error, otherwise a negative errno
 */
HostHandler::getAPILevel (uint32_t *level)
  const DriverAPI * api = device_->getDriverAPI ();
  assert (api != nullptr);
  /** level semantics are defined by the driver implementation */
  return api->getAPILevel (level);
/**
 * @brief Get the TOPS of the opened NPU device
 * @param[in] dev the NPU device handle
 * @param[out] tops npu tops
 * @return 0 if no error, otherwise a negative errno
 * @note this does not support for emulated devices
 */
HostHandler::getTops (uint32_t *tops)
  const DriverAPI * api = device_->getDriverAPI ();
  assert (api != nullptr);
  return api->getTops (tops);
/**
 * @brief Set the data layout for input/output tensors
 * @param[in] modelid The ID of model whose layouts are set
 * @param[in] in the layout/type info for input tensors
 * @param[in] out the layout/type info for output tensors
 * @return @c 0 if no error. otherwise a negative error value
 * @note if this function is not called, default layout/type will be used.
 */
HostHandler::setDataInfo (uint32_t modelid, tensors_data_info *in,
    tensors_data_info *out)
  Model *model = models_.find (modelid);
  if (model == nullptr)
  /** the model validates and stores the layout/type info */
  return model->setDataInfo (in, out);
/**
 * @brief Set the inference constraint for next NPU inferences
 * @param[in] modelid The target model id
 * @param[in] constraint inference constraint (e.g., timeout, priority)
 * @return @c 0 if no error. otherwise a negative error value
 * @note If this function is not called, default values are used.
 */
HostHandler::setConstraint (uint32_t modelid, npuConstraint constraint)
  Model *model = models_.find (modelid);
  if (model == nullptr)
  /** constraints apply to inferences submitted after this call */
  model->setConstraint (constraint);
/**
 * @brief find and return model instance
 * @param[in] modelid model id
 * @return model instance if found. otherwise nullptr
 */
HostHandler::getModel (uint32_t modelid)
  return models_.find (modelid);
/** @brief dummy callback class used to emulate blocking runSync on top of runAsync */
  callbackSync (output_buffers *output) : output_(output), done_(false) {}
  /** static trampoline matching the npuOutputNotify signature; forwards to the instance */
  static void callback (output_buffers *output, uint64_t sequence, void *data) {
    callbackSync *sync = static_cast<callbackSync *>(data);
    sync->callback (output, sequence);
  void callback (output_buffers *output, uint64_t sequence) {
    if (output_ != nullptr) {
      /** just copy internal variables of output buffers */
      memcpy (output_, output, sizeof (output_buffers));
    std::unique_lock<std::mutex> lock (m_);
    /** block until the callback marks completion */
    cv_.wait (lock, [this]() { return done_; });
  std::condition_variable cv_;
  output_buffers *output_;
/**
 * @brief Execute inference. Wait (block) until the output is available.
 * @param[in] modelid The model to be inferred.
 * @param[in] input The input data to be inferred.
 * @param[out] output The output result.
 * @return @c 0 if no error. otherwise a negative error value
 */
HostHandler::runSync (uint32_t modelid, const input_buffers *input,
    output_buffers *output)
  /** bridge the async API to a blocking call via the local sync helper */
  callbackSync sync (output);
  int status = runAsync (modelid, input, callbackSync::callback,
      static_cast <void*> (&sync), NPUASYNC_DROP_OLD, nullptr);
    /** sync needs to wait callback */
/**
 * @brief Invoke NPU inference. Unblocking call.
 * @param[in] modelid The model to be inferred.
 * @param[in] input The input data to be inferred.
 * @param[in] cb The output buffer handler.
 * @param[in] cb_data The data given as a parameter to the runNPU_async call.
 * @param[in] mode Configures how this operation works.
 * @param[out] sequence The sequence number returned with runNPU_async.
 * @return @c 0 if no error. otherwise a negative error value
 */
HostHandler::runAsync (uint32_t modelid, const input_buffers *input,
    npuOutputNotify cb, void *cb_data, npu_async_mode mode, uint64_t *sequence)
  Model *model = nullptr;
  /** some devices (e.g., ASR) run without a registered model */
  if (device_->needModel()) {
    model = getModel (modelid);
    if (model == nullptr)
  /* check the given model before running */
  if (model != nullptr && !model->finalize ()) {
    logerr (TAG, "Failed to finalize the model. Please see the log messages\n");
  device_->setAsyncMode (mode);
  return device_->run (NPUINPUT_HOST, model, input, cb, cb_data, sequence);
/**
 * @brief Let NPU accept input frames from its internal source continuously
 * @param[in] modelid The model to be inferred.
 * @param[in] opmode NPU has different opmode with auto-inputs. Choose one.
 * @param[in] hw_dev The target device feeding input data
 * @return @c 0 if no error. otherwise a negative error value
 */
HostHandler::runInternal (uint32_t modelid, npu_input_opmode opmode,
  Model *model = nullptr;
  /** mirrors runAsync(): model lookup is optional per device type */
  if (device_->needModel()) {
    model = getModel (modelid);
    if (model == nullptr)
  /* check the given model before running */
  if (model != nullptr && !model->finalize ()) {
    logerr (TAG, "Failed to finalize the model. Please see the log messages\n");
  return device_->runInternal (opmode, model, hw_dev);
/**
 * @brief Stop the request with the given id
 * @param[in] dev The NPU device handle
 * @param[in] id The request id
 * @return @c 0 if no error. otherwise a negative error value
 */
HostHandler::stopInternal (int id)
    logerr (TAG, "Unable to stop this request with id (%d)\n", id);
  const DriverAPI * api = device_->getDriverAPI ();
  assert (api != nullptr);
  /** ask the driver to stop the in-flight target request */
  return api->stop_target (id);
/**
 * @brief get number of available devices
 * @param[in] type device type
 * @return number of devices
 */
HostHandler::getNumDevices (dev_type type)
  /** static query; does not require an opened device */
  return DriverAPI::getNumDevices (type);
/**
 * @brief get device instance
 * @param[out] dev device instance
 * @param[in] type device type
 * @param[in] id device id
 * @return 0 if no error. otherwise a negative errno
 */
HostHandler::getDevice (npudev_h *dev, dev_type type, uint32_t id)
  int num_devices = getNumDevices (type);
  /** check the validity of device id */
  if (!(num_devices > 0 && id < static_cast<uint32_t>(num_devices))) {
    logerr (TAG, "Invalid arguments provided\n");
  Device *device = Device::createInstance (type, id);
  if (device == nullptr) {
    logerr (TAG, "Failed to create a device with the given type\n");
/**
 * @brief allocate generic buffer (just for users)
 * @param[out] buffer buffer instance
 * @return 0 if no error. otherwise a negative errno
 */
HostHandler::allocGenericBuffer (generic_buffer *buffer)
  if (buffer->size == 0) {
    logerr (TAG, "Invalid size\n");
  /** sizes are kept within 32 bits for device compatibility */
  if (buffer->size > UINT32_MAX) {
    logerr (TAG, "Don't support such a large size");
  switch (buffer->type) {
      if (buffer->filepath == nullptr)
    /* now, npu-engine always provides dmabuf-based allocation */
    void *addr = nullptr;
    int dmabuf = device_->allocMemory (buffer->size, &addr);
    buffer->dmabuf = dmabuf;
/**
 * @brief deallocate generic buffer (just for users)
 * @param[in] buffer buffer instance
 * @return 0 if no error. otherwise a negative errno
 */
HostHandler::deallocGenericBuffer (generic_buffer *buffer)
  switch (buffer->type) {
      /** always true because there is nothing to do for this buffer type */
      return device_->deallocMemory (buffer->dmabuf, buffer->size, buffer->addr);
/**
 * @brief allocate multiple generic buffers (just for users)
 * @param[out] buffers multi-buffer instance
 * @return 0 if no error. otherwise a negative errno
 */
HostHandler::allocGenericBuffer (generic_buffers *buffers)
  if (buffers == NULL || buffers->num_buffers < 1)
  for (idx = 0; idx < buffers->num_buffers; idx++) {
    status = allocGenericBuffer (&buffers->bufs[idx]);
      /** rollback: free everything allocated so far on failure */
      deallocGenericBuffer (&buffers->bufs[--idx]);
/**
 * @brief deallocate multiple generic buffers (just for users)
 * @param[in] buffers multi-buffer instance
 * @return 0 if no error. otherwise a negative errno
 */
HostHandler::deallocGenericBuffer (generic_buffers *buffers)
  if (buffers == NULL || buffers->num_buffers < 1)
  for (uint32_t idx = 0; idx < buffers->num_buffers; idx++)
    deallocGenericBuffer (&buffers->bufs[idx]);
  /** mark the container empty so a second dealloc is harmless */
  buffers->num_buffers = 0;
/**
 * @brief get the current memory status
 * @param[out] alloc_total The size of allocated memory until now
 * @param[out] free_total The size of freed memory until now
 * @return 0 if no error. otherwise a negative error value
 */
HostHandler::getMemoryStatus (size_t *alloc_total, size_t *free_total)
  /** API is always set in initialize () */
  const DriverAPI * api = device_->getDriverAPI ();
  assert (api != nullptr);
  return api->getMemoryStatus (alloc_total, free_total);
/**
 * @brief Get the current device status to be used
 * @param[out] status the device status
 * @param[out] num_requests the number of running requests (or pending)
 * @return 0 if no error, otherwise a negative errno.
 */
HostHandler::getDeviceStatus (npu_status *status, uint32_t *num_requests)
  /** API is always set in initialize () */
  const DriverAPI * api = device_->getDriverAPI ();
  device_state_t state = api->isReady ();
  if (state == device_state_t::STATE_READY) {
    /** a ready device may still be busy; report pending request count */
    *num_requests = api->numRequests ();
    if (*num_requests > 0)
597 /** implement methods of Device class */
599 /** @brief constructor of device */
600 Device::Device (dev_type type, int id, bool need_model)
601 : comm_ (CommPlugin::getCommPlugin()), type_ (type), id_ (id), need_model_ (true),
602 mode_ (NPUASYNC_WAIT), initialized_ (false), atomic_flag_ (ATOMIC_FLAG_INIT)
/**
 * @brief create device instance depending on device type and id
 * @param[in] type device type
 * @param[in] id device id
 * @return device instance
 */
Device::createInstance (dev_type type, int id)
  Device *device = nullptr;
  /** mask off sub-type bits; dispatch on the base device family */
  switch (type & DEVICETYPE_MASK) {
    case DEVICETYPE_TRIV:
      device = new TrinityVision (id);
    case DEVICETYPE_TRIV2:
      device = new TrinityVision2 (id);
    case DEVICETYPE_TRIA:
      device = new TrinityAsr (id);
      /** ASR devices run without a registered model */
      device->setNeedModel (false);
  /** a device that fails to init is not returned to the caller */
  if (device != nullptr && device->init () != 0) {
/**
 * @brief device initialization
 * @return 0 if no error, otherwise a negative errno
 * @note Init failures come from createDriverAPI() only.
 */
  /** should be initialized only once; the flag guards concurrent init attempts */
  if (!atomic_flag_.test_and_set()) {
    /** create the corresponding driver API */
    api_ = DriverAPI::createDriverAPI (type_, id_);
    if (api_.get() == nullptr) {
      /** clear the flag so a later init attempt can retry */
      atomic_flag_.clear();
      logerr (TAG, "Failed to create driver API\n");
    handler_.reset (new HostHandler (this));
    scheduler_.reset (new Scheduler (api_.get()));
    mem_ = MemAllocator::createInstance (api_.get());
    initialized_ = true; /** c++11 does not provide test() of atomic flag */
/**
 * @brief stop all requests from this device
 * @param[in] force_stop indicate the scheduler waits until to handle previous requests
 * @return 0 if no error, otherwise a negative errno
 */
Device::stop (bool force_stop)
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
  /** a STOP request is submitted through the scheduler like any other request */
  Request *req = new Request (NPUINPUT_STOP);
  req->setForceStop (force_stop);
  return scheduler_->submitRequest (req);
/**
 * @brief allocate generic memory buffer
 * @param[in] size the size to allocate
 * @param[out] addr the mapped address
 * @return dmabuf fd if no error, otherwise a negative errno
 */
Device::allocMemory (size_t size, void **addr)
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
  if (size == 0 || addr == nullptr) {
    logerr (TAG, "Invalid arguments\n");
  /** delegate the actual dmabuf allocation to the memory allocator */
  return mem_->allocMemory (size, addr);
/**
 * @brief deallocate generic memory buffer
 * @param[in] dmabuf_fd dmabuf file descriptor
 * @param[in] size buffer size
 * @param[in] addr mapped addr
 * @return 0 if no error, otherwise a negative errno
 */
Device::deallocMemory (int dmabuf_fd, size_t size, void * addr)
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
  if (dmabuf_fd < 0 || size == 0 || addr == nullptr) {
    logerr (TAG, "Invalid arguments\n");
  return mem_->deallocMemory (dmabuf_fd, size, addr);
/**
 * @brief extract the buffer instance from input generic buffers
 * @param[in] meta the model metadata
 * @param[in] input the input generic buffers
 * @return the buffer instance
 */
TrinityVision::prepareInputBuffers (const Metadata *meta, const input_buffers *input)
  if (meta == nullptr || input == nullptr ||
      meta->getInputNum() != input->num_buffers) {
    logerr (TAG, "Invalid metadata info provided\n");
  /** the first buffer decides the backing type for the whole input span */
  const generic_buffer *first = &input->bufs[0];
  if (first->type == BUFFER_DMABUF) {
    /** reuse the caller-provided dmabuf without copying */
    buffer = mem_->allocBuffer (new HWmemExternal);
    if (buffer == nullptr)
    buffer->setDmabuf (first->dmabuf);
    buffer->setOffset (first->offset);
    buffer->setSize (meta->getBufferSize());
    /** otherwise, allocate a device-owned buffer and copy into it later */
    buffer = mem_->allocBuffer (new HWmemDevice);
    if (buffer == nullptr)
    int status = buffer->alloc (meta->getBufferSize ());
      logerr (TAG, "Failed to allocate buffer: %d\n", status);
  /** carve per-tensor views out of the buffer using metadata */
  int status = buffer->createTensors (meta);
    logerr (TAG, "Failed to create tensors: %d\n", status);
/**
 * @brief implementation of TRIV's setModel ()
 * @param[in] model_buf the model generic buffer
 * @param[out] model the model instance
 * @return 0 if no error, otherwise a negative errno
 */
TrinityVision::setModel (const generic_buffer *model_buf, Model ** model_ptr)
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
  if (model_buf == nullptr || model_ptr == nullptr)
  Model *model = nullptr;
  HWmem * hwmem_prog = nullptr;
  HWmem * hwmem_weight = nullptr;
  /** In TRIV1, model data (including program/weight) should be contiguous */
  switch (model_buf->type) {
      model = mem_->allocModel (new HWmemDevice);
      if (model == nullptr) {
        logerr (TAG, "Failed to allocate model\n");
      status = model->alloc (model_buf->size);
        logerr (TAG, "Failed to allocate model: %d\n", status);
      /** extract the whole model data */
      status = comm_.extractGenericBuffer (model_buf, model->getData(), nullptr);
        logerr (TAG, "Failed to extract generic buffer: %d\n", status);
  /** parse the metadata header placed at the start of the model data */
  status = model->setMetadata (model->getData());
  /** allocate program (optional; NOP) */
  if (model->getMetadata()->getProgramSize() > 0) {
    hwmem_prog = new HWmem (new HWmemChunk);
    model->setProgramData (hwmem_prog);
    /** program is a sub-chunk of the model buffer, placed right after the metadata */
    hwmem_prog->setParent (model);
    hwmem_prog->setOffset (model->getMetadata()->getMetaSize());
    status = hwmem_prog->alloc (model->getMetadata()->getProgramSize());
      logerr (TAG, "Failed to allocate program\n");
  /** allocate weight (optional) */
  if (model->getMetadata()->getWeightSize() > 0) {
    hwmem_weight = new HWmem (new HWmemChunk);
    model->setWeightData (hwmem_weight);
    /** weight follows metadata and program within the contiguous model buffer */
    hwmem_weight->setParent (model);
    hwmem_weight->setOffset (model->getMetadata()->getMetaSize() +
        model->getMetadata()->getProgramSize());
    status = hwmem_weight->alloc (model->getMetadata()->getWeightSize());
      /** NOTE(review): message says "program" but this is the weight path — confirm and fix */
      logerr (TAG, "Failed to allocate program\n");
  if (hwmem_prog != nullptr) {
    /** register this model to the driver */
    model_config_t config;
    config.version = model->getMetadata()->getVersion ();
    config.dbuf_fd = hwmem_prog->getDmabuf ();
    config.program_size = hwmem_prog->getSize ();
    config.program_offset_addr = hwmem_prog->getOffset ();
    if (hwmem_weight != nullptr)
      config.weight_offset_addr = hwmem_weight->getOffset ();
    status = api_->registerModel (&config);
    /** remember the driver-assigned id for later deregistration */
    model->setInternalID(config.id);
/**
 * @brief implementation of TRIV's unsetModel ()
 * @param[in] model the model instance
 * @return 0 if no error, otherwise a negative errno
 */
TrinityVision::unsetModel (Model * model)
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
  if (model == nullptr) {
    logerr (TAG, "Invalid model instance\n");
  /** only models with a program section were registered to the driver */
  if (model->getMetadata()->getProgramSize() > 0)
    return api_->deregisterModel (model->getInternalID ());
/**
 * @brief implementation of TRIV's run()
 * @param[in] opmode input opmode
 * @param[in] model the model instance
 * @param[in] input generic buffers of input data
 * @param[in] cb the output callback
 * @param[in] cb_data the output callback data
 * @param[out] sequence The sequence number returned with runNPU_async.
 */
TrinityVision::run (npu_input_opmode opmode, const Model *model,
    const input_buffers *input, npuOutputNotify cb, void *cb_data,
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
  if (opmode != NPUINPUT_HOST) {
    logerr (TAG, "TRIV supports only host inputservice\n");
  if (model == nullptr || input == nullptr) {
    logerr (TAG, "TRIV requires both model and input buffers\n");
  /** refresh layout/type info in case setDataInfo was called after finalize */
  const_cast<Model *>(model)->updateDataInfo ();
  Buffer *buffer = prepareInputBuffers (model->getMetadata(), input);
  if (buffer == nullptr) {
    logerr (TAG, "Failed to extract buffer instance\n");
  /** device-owned buffers need the input data copied (with manipulation) */
  if (!buffer->isExternal ()) {
    for (uint32_t idx = 0; idx < input->num_buffers; idx++) {
      auto func = std::bind (TrinityVision::manipulateData, model, idx, true,
          std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
      int status = comm_.extractGenericBuffer (&input->bufs[idx],
          buffer->getInputTensor(idx)->getData(), func);
        logerr (TAG, "Failed to feed input buffer: %d\n", status);
  /** this device uses CMA buffer */
  Request *req = new Request (opmode);
  req->setModel (model);
  req->setBuffer (buffer);
  req->setCallback (std::bind (&TrinityVision::callback, this, req, cb, cb_data));
  if (sequence != nullptr)
    *sequence = req->getID();
  return scheduler_->submitRequest (req);
/**
 * @brief callback of TRIV request
 * @param[in] req the request instance
 * @param[in] cb callback for completion
 * @param[in] cb_data callback data
 * @note The callback invoke does not guarantee the request was successful
 * @todo Check the request failures
 */
TrinityVision::callback (Request *req, npuOutputNotify cb, void *cb_data)
  const Model *model = req->getModel ();
  Buffer *buffer = req->getBuffer ();
  output_buffers output = {
    .num_buffers = buffer->getOutputNum ()
  for (uint32_t idx = 0; idx < output.num_buffers; idx++) {
    uint32_t output_tensor_size = model->getOutputTensorSize (idx);
    if (buffer->isExternal ()) {
      /** external dmabuf: hand back a view; no copy is performed */
      output.bufs[idx].type = BUFFER_DMABUF;
      output.bufs[idx].size = output_tensor_size;
      output.bufs[idx].addr = buffer->getOutputTensor(idx)->getData();
      output.bufs[idx].type = BUFFER_MAPPED;
      output.bufs[idx].size = output_tensor_size;
      /** user needs to free this */
      output.bufs[idx].addr = malloc (output_tensor_size);
      /** copy out with reverse data manipulation (device layout -> user layout) */
      auto func = std::bind (TrinityVision::manipulateData, model, idx, false,
          std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
      int status = comm_.insertGenericBuffer (buffer->getOutputTensor(idx)->getData(),
          &output.bufs[idx], func);
        logerr (TAG, "Failed to return output buffer: %d\n", status);
  cb (&output, req->getID(), cb_data);
/**
 * @brief extract the segment table instance from input generic buffers
 * @param[in] model the model instance
 * @param[in] input the input generic buffers
 * @param[in] output the output generic buffers
 * @return the segment table instance
 */
TrinityVision2::prepareSegmentTable (const Model *model, const input_buffers *input,
    const output_buffers *output)
  const Metadata *meta = model->getMetadata ();
  /** input may be null for internal input service; validate only when given */
  if (meta == nullptr || (input != nullptr &&
      meta->getInputNum() != input->num_buffers)) {
    logerr (TAG, "Invalid metadata info provided\n");
  SegmentTable * segt = mem_->allocSegmentTable (new HWmemDevice);
  int status = segt->alloc ();
    logerr (TAG, "Failed to allocate segment table: %d\n", status);
  status = segt->createSegments (model, input, output);
    logerr (TAG, "Failed to create segments: %d\n", status);
/**
 * @brief implementation of TRIV2's setModel ()
 * @param[in] model_buf the model generic buffer
 * @param[out] model the model instance
 * @return 0 if no error, otherwise a negative errno
 */
TrinityVision2::setModel (const generic_buffer *model_buf, Model ** model_ptr)
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
  if (model_buf == nullptr || model_ptr == nullptr)
  switch (model_buf->type) {
      model = mem_->allocModel (new HWmemDevice);
      if (model == nullptr) {
        logerr (TAG, "Failed to allocate model\n");
      /** unlike TRIV1, only the metadata header is copied into the model buffer */
      status = model->alloc (NPUBIN_META_SIZE);
        logerr (TAG, "Failed to allocate model: %d\n", status);
      status = comm_.extractGenericBuffer (model_buf, model->getData(), nullptr,
          0, NPUBIN_META_SIZE);
        logerr (TAG, "Failed to extract generic buffer: %d\n", status);
  status = model->setMetadata (model->getData());
  /** allocate program (optional; NOP) */
  if (model->getMetadata()->getProgramSize() > 0) {
    HWmem * hwmem_prog = new HWmem (new HWmemDevice);
    hwmem_prog->setDriverAPI (api_.get());
    model->setProgramData (hwmem_prog);
    status = hwmem_prog->alloc (model->getMetadata()->getProgramSize());
      logerr (TAG, "Failed to allocate program\n");
    /** copy the program section from the source buffer into its own device memory */
    status = comm_.extractGenericBuffer (model_buf, hwmem_prog->getData(), nullptr,
        model->getMetadata()->getMetaSize(),
        model->getMetadata()->getProgramSize());
      logerr (TAG, "Failed to extract generic buffer: %d\n", status);
    /** register this model to the driver */
    model_config_t config;
    config.version = model->getMetadata()->getVersion ();
    config.dbuf_fd = hwmem_prog->getDmabuf ();
    config.program_size = hwmem_prog->getSize ();
    config.program_offset_addr = 0;
    /** for metadata extended section */
    config.metadata_dbuf_fd = model->getDmabuf ();
    config.metadata_extra_addr = NPUBIN_META_SIZE;
    config.metadata_extra_size = model->getMetadata()->getMetaExtendedSize ();
    status = api_->registerModel (&config, model->getMetadata()->getNPUVersion());
    /** remember the driver-assigned id for later deregistration */
    model->setInternalID(config.id);
  /** allocate weight (optional) */
  if (model->getMetadata()->getWeightSize() > 0) {
    HWmem * hwmem_weight = new HWmem (new HWmemDevice);
    hwmem_weight->setDriverAPI (api_.get());
    model->setWeightData (hwmem_weight);
    status = hwmem_weight->alloc (model->getMetadata()->getWeightSize());
      /** NOTE(review): message says "program" but this is the weight path — confirm and fix */
      logerr (TAG, "Failed to allocate program\n");
    status = comm_.extractGenericBuffer (model_buf, hwmem_weight->getData(), nullptr,
        model->getMetadata()->getMetaSize() + model->getMetadata()->getProgramSize(),
        model->getMetadata()->getWeightSize());
      logerr (TAG, "Failed to extract generic buffer: %d\n", status);
/**
 * @brief implementation of TRIV2's unsetModel ()
 * @param[in] model the model instance
 * @return 0 if no error, otherwise a negative errno
 */
TrinityVision2::unsetModel (Model * model)
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
  if (model == nullptr) {
    logerr (TAG, "Invalid model instance\n");
  /** only models with a program section were registered to the driver */
  if (model->getMetadata()->getProgramSize() > 0)
    return api_->deregisterModel (model->getInternalID ());
/** @brief implementation of TRIV2's run(); submits a host-input inference request */
TrinityVision2::run (npu_input_opmode opmode, const Model *model,
    const input_buffers *input, npuOutputNotify cb, void *cb_data,
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
  if (opmode != NPUINPUT_HOST)
  if (input == nullptr || input->num_buffers == 0 || model == nullptr)
  /** refresh layout/type info in case setDataInfo was called after finalize */
  const_cast<Model *>(model)->updateDataInfo ();
  /** this device uses segment table */
  SegmentTable * segt = prepareSegmentTable (model, input);
  if (segt == nullptr) {
    logerr (TAG, "Failed to create segment table instance\n");
  /** extract input data */
  for (uint32_t idx = 0; idx < input->num_buffers; idx++) {
    if (!segt->getInputSegment(idx)->isExternal ()) {
      uint32_t seg_offset = segt->getInputSegmentOffset(idx);
      auto func = std::bind (TrinityVision2::manipulateData, model, idx, true,
          std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
      int status = comm_.extractGenericBuffer (
          segt->getInputSegment(idx)->getData() + seg_offset,
        logerr (TAG, "Failed to feed input segment: %d\n", status);
  Request *req = new Request (opmode);
  req->setModel (model);
  req->setSegmentTable (segt);
  req->setCallback (std::bind (&TrinityVision2::callback, this, req, cb, cb_data));
    *sequence = req->getID();
  return scheduler_->submitRequest (req);
/** @brief implementation of TRIV2's runInternal(); input is fed by another HW device */
TrinityVision2::runInternal (npu_input_opmode opmode, const Model *model,
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
  if (opmode != NPUINPUT_HW_RECURRING)
  /** this device uses segment table; no host-side input/output buffers here */
  SegmentTable * segt = prepareSegmentTable (model, nullptr, nullptr);
  if (segt == nullptr) {
    logerr (TAG, "Failed to create segment table instance\n");
  Request *req = new Request (opmode);
  req->setModel (model);
  req->setSegmentTable (segt);
  req->setHwDevice (hw_dev);
  return scheduler_->submitRequest (req);
/** @brief callback of TRIV2 request; copies output segments back to user buffers */
TrinityVision2::callback (Request *req, npuOutputNotify cb, void *cb_data)
  const Model *model = req->getModel ();
  SegmentTable *segt = req->getSegmentTable ();
  output_buffers output = {
    .num_buffers = segt->getNumOutputSegments ()
  for (uint32_t idx = 0; idx < output.num_buffers; idx++) {
    uint32_t output_tensor_size = model->getOutputTensorSize (idx);
    output.bufs[idx].type = BUFFER_MAPPED;
    output.bufs[idx].size = output_tensor_size;
    /** user needs to free this */
    output.bufs[idx].addr = calloc (1, output_tensor_size);
#if defined(ENABLE_FPGA_WORKAROUND)
    /** FPGA path reads the output segment via its dmabuf directly */
    segt->getOutputSegment(idx)->getDmabuf(),
    segt->getOutputSegmentOffset(idx),
    output.bufs[idx].addr,
    output.bufs[idx].size);
    /** copy out with reverse data manipulation (device layout -> user layout) */
    auto func = std::bind (TrinityVision2::manipulateData, model, idx, false,
        std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
    int status = comm_.insertGenericBuffer (
        segt->getOutputSegment(idx)->getData() + segt->getOutputSegmentOffset(idx),
        &output.bufs[idx], func);
      logerr (TAG, "Failed to return output buffer: %d\n", status);
  cb (&output, req->getID(), cb_data);
/** @brief implementation of TRIA's run(): WIP */
TrinityAsr::run (npu_input_opmode opmode, const Model *model,
    const input_buffers *input, npuOutputNotify cb, void *cb_data,
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
  if (opmode != NPUINPUT_HOST)
  if (input == nullptr || input->num_buffers != 1)
  /** ASR does not require model and support only a single tensor */
  const generic_buffer *first_buf = &input->bufs[0];
  if (first_buf->type == BUFFER_DMABUF) {
    /** reuse the caller-provided dmabuf without copying */
    buffer = mem_->allocBuffer (new HWmemExternal);
    if (buffer == nullptr)
    buffer->setDmabuf (first_buf->dmabuf);
    buffer->setOffset (first_buf->offset);
    buffer->setSize (first_buf->size);
    /** otherwise, allocate device memory sized by the single input buffer */
    buffer = mem_->allocBuffer (new HWmemDevice);
    if (buffer == nullptr)
    status = buffer->alloc (first_buf->size);
  status = buffer->createTensors ();
    logerr (TAG, "Failed to create tensors: %d\n", status);
  /** device-owned buffers need the input copied in; no data manipulation for ASR */
  if (!buffer->isExternal ()) {
    status = comm_.extractGenericBuffer (first_buf,
        buffer->getInputTensor(0)->getData(), nullptr);
  Request *req = new Request (opmode);
  req->setBuffer (buffer);
  req->setCallback (std::bind (&TrinityAsr::callback, this, req, cb, cb_data));
    *sequence = req->getID();
  return scheduler_->submitRequest (req);
/** @brief callback of TRIA request: WIP */
TrinityAsr::callback (Request *req, npuOutputNotify cb, void *cb_data)
  Buffer *buffer = req->getBuffer ();
  output_buffers output = {
  /** TODO: finalize this impl. when the ASR's working scenario is determined */
  cb (&output, req->getID(), cb_data);
1407 /** Implement data manipulation (each device may have different impl.) */
1412 * @brief perform data manipulation
1413 * @param[in] model model instance
1414 * @param[in] idx tensor index
1415 * @param[in] is_input indicate it's input manipulation
1416 * @param[out] dst destination buffer
1417 * @param[in] src source buffer (feature map)
1418 * @param[in] size size to be copied
1419 * @return size of memory copy if no error, otherwise zero
1421 * @note the input data format should be NHWC
1422 * @detail rules for the memory address of activations in NPU HW.
1423 * (https://code.sec.samsung.net/confluence/pages/viewpage.action?pageId=146491864)
1425 * 1) Special case (depth == 3)
1426 * - addr(x,y,z) = addr(0,0,0) + (z) + 3 * (x + width * y)
1429 * - addr(x,y,z) = addr(0,0,0) + (z % MPA_L) + MPA_L * (x + width * (y + height * (z / MPA_L)))
1431 * Thus, if depth is not a multiple of MPA_L (i.e., 64), zero padding is required
1434 TrinityVision::manipulateData (const Model *model, uint32_t idx, bool is_input,
1435 void *dst, void *src, size_t size)
1437 const Metadata *meta = model->getMetadata();
1438 DataConverter converter (is_input);
1440 converter.setData (src, dst, size);
1443 const tensor_data_info* info = model->getInputDataInfo (idx);
1444 if (info == nullptr)
1447 converter.setDataLayout (info->layout, DATA_LAYOUT_SRNPU);
1448 converter.setDataType (info->type, DATA_TYPE_SRNPU);
1449 converter.setDataDims (meta->getInputDims (idx));
1450 converter.setQuantZero (meta->getInputQuantZero (idx));
1451 converter.setQuantScale (meta->getInputQuantScale (idx));
1453 const tensor_data_info* info = model->getOutputDataInfo (idx);
1454 if (info == nullptr)
1457 converter.setDataLayout (DATA_LAYOUT_SRNPU, info->layout);
1458 converter.setDataType (DATA_TYPE_SRNPU, info->type);
1459 converter.setDataDims (meta->getOutputDims (idx));
1460 converter.setQuantZero (meta->getOutputQuantZero (idx));
1461 converter.setQuantScale (meta->getOutputQuantScale (idx));
1464 return converter.perform ();
1468 * @brief perform data manipulation
1469 * @param[in] model model instance
1470 * @param[in] idx tensor index
1471 * @param[in] is_input indicate it's input manipulation
1472 * @param[out] dst destination buffer
1473 * @param[in] src source buffer (feature map)
1474 * @param[in] size size to be copied
1475 * @return size of memory copy if no error, otherwise zero
1477 * @note the input data format should be NHWC
1479 * @detail Feature map data in TRIV2, (x, y, z) = (width, height, depth)
1481 * 1) Image input (depth == 1 or depth == 3)
1482 * Addr(x,y,z) = Addr(0,0,0) + z + depth * x + ymod * y
1485 * Addr(x,y,z) = Addr(0,0,0) + (z % 64) + (64 * x) + ymod * y + zmod * (z / 64)
1488 TrinityVision2::manipulateData (const Model *model, uint32_t idx, bool is_input,
1489 void *dst, void *src, size_t size)
1491 const Metadata *meta = model->getMetadata ();
1492 DataConverter converter (is_input);
1494 converter.setData (src, dst, size);
1495 converter.setTops (meta->getTops ());
1497 const tensor_data_info* info = model->getInputDataInfo (idx);
1498 if (info == nullptr)
1501 converter.setDataLayout (info->layout, DATA_LAYOUT_TRIV2);
1502 converter.setDataType (info->type, meta->getInputQuantType (idx));
1503 converter.setDataDims (meta->getInputDims (idx));
1504 converter.setQuantZero (meta->getInputQuantZero (idx));
1505 converter.setQuantScale (meta->getInputQuantScale (idx));
1507 const tensor_data_info* info = model->getOutputDataInfo (idx);
1508 if (info == nullptr)
1511 converter.setDataLayout (DATA_LAYOUT_TRIV2, info->layout);
1512 converter.setDataType (meta->getOutputQuantType (idx), info->type);
1513 converter.setDataDims (meta->getOutputDims (idx));
1514 converter.setQuantZero (meta->getOutputQuantZero (idx));
1515 converter.setQuantScale (meta->getOutputQuantScale (idx));
1518 return converter.perform ();
1524 TrinityVision::manipulateData (const Model *model, uint32_t idx, bool is_input,
1525 void *dst, void *src, size_t size)
1527 memcpy (dst, src, size);
1532 TrinityVision2::manipulateData (const Model *model, uint32_t idx, bool is_input,
1533 void *dst, void *src, size_t size)
1535 memcpy (dst, src, size);
1541 /** other device types don't have data manip impl. yet */
1544 TrinityAsr::manipulateData (const Model *model, uint32_t idx, bool is_input,
1545 void *dst, void *src, size_t size)
1547 memcpy (dst, src, size);