3 * Copyright (C) 2020 Samsung Electronics
4 * Copyright (C) 2020 Dongju Chae <dongju.chae@samsung.com>
 * @brief Implementation of NPU Engine entrypoint that handles APIs from host
10 * @see https://code.sec.samsung.net/confluence/display/ODLC/2020+Overall+Software+Stack
11 * @author Dongju Chae <dongju.chae@samsung.com>
12 * @bug No known bugs except for NYI items
15 #include "ne-handler.h"
18 #include <npubinfmt.h>
19 #include <NPUdrvAPI.h>
20 #include <CommPlugin.h>
25 #include <condition_variable>
32 /** @brief host handler constructor */
HostHandler::HostHandler (Device *device)
  /* async_mode_ is ignored as we don't use double buffering anymore,
   * but it is kept for backward-compatibility */
  async_mode_ (NPUASYNC_WAIT)
40 /** @brief host handler destructor */
/* NOTE(review): destructor body not visible in this extract */
HostHandler::~HostHandler ()
46 * @brief register model from generic buffer
47 * @param[in] model_buf model buffer
48 * @param[out] modelid model id
49 * @return 0 if no error. otherwise a negative errno
HostHandler::registerModel (generic_buffer *model_buf, uint32_t *modelid)
  /* reject null in/out pointers up front */
  if (model_buf == nullptr || modelid == nullptr) {
    logerr (TAG, "Invalid arguments given\n");
  Model *model = nullptr;
  /* delegate parsing/allocation of the model to the device */
  int status = device_->setModel (model_buf, &model);
    logerr (TAG, "Failed to set model: %d\n", status);
  assert (model != nullptr);
  /* track the model by its ID so later calls can look it up */
  status = models_.insert (model->getID(), model);
    logerr (TAG, "Failed to insert model id\n");
  *modelid = model->getID();
80 * @brief remove the registered model
81 * @param[in] modelid model id
82 * @return 0 if no error. otherwise a negative errno
HostHandler::unregisterModel (uint32_t modelid)
  Model *model = models_.find (modelid);
  /* release driver-side state before dropping our bookkeeping */
  int status = device_->unsetModel (model);
    logerr (TAG, "Failed to unset model: %d\n", status);
  return models_.remove (modelid);
101 * @brief remove all registered models
HostHandler::unregisterModels ()
  /* functor applied to each registered model; its return value tells the
   * container whether the entry may be removed */
  std::function <bool (Model *)> functor =
    [&] (Model *m) -> bool {
      bool can_remove = true;
      int status = device_->unsetModel (m);
        logwarn (TAG, "Failed to unset model: %d\n", status);
  models_.for_each (functor);
123 * @brief Get the profile information from NPU
124 * @param[in] task_id The identifier for each inference
125 * @param[out] profile The profile instance
126 * @return 0 if no error, otherwise a negative errno.
HostHandler::getProfile (int task_id, npu_profile *profile)
  if (task_id < 0 || profile == nullptr) {
    logerr (TAG, "Invalid parameter provided\n");
  const DriverAPI * api = device_->getDriverAPI ();
  assert (api != nullptr);
  void *profile_buffer;
  int status = api->getProfile (task_id, &profile_buffer, &profile_size);
    logerr (TAG, "Failed to get profile information: %d\n", status);
  /* default to an empty profile until the raw buffer is parsed */
  profile->num_layers = 0;
  profile->layers = nullptr;
  if (profile_buffer != nullptr) {
    // TODO: Perform parsing
157 * @brief get the stats for the latest apps of the target device
158 * @param[out] stat The list of app stat
159 * @note The caller has the responsibility to free the resources.
 * This API is not working on the emulated environment.
HostHandler::getStatApps (npu_stat_apps *stat)
  const DriverAPI * api = device_->getDriverAPI ();
  assert (api != nullptr);
  /* forwarded to the driver; the caller frees the returned list */
  return api->getStatApps (stat);
172 * @brief get the stats for the latest tasks of the target app
173 * @param[in] appid The identifier of target app
174 * @param[out] stat The list of task stat
175 * @note The caller has the responsibility to free the resources.
 * This API is not working on the emulated environment.
HostHandler::getStatTasks (int appid, npu_stat_tasks *stat)
  const DriverAPI * api = device_->getDriverAPI ();
  assert (api != nullptr);
  /* forwarded to the driver; the caller frees the returned list */
  return api->getStatTasks (appid, stat);
188 * @brief Get the driver API level of opened NPU device
189 * @param[out] level driver API level
190 * @return 0 if no error, otherwise a negative errno
HostHandler::getAPILevel (uint32_t *level)
  /* driver API is always set once the device is initialized */
  const DriverAPI * api = device_->getDriverAPI ();
  assert (api != nullptr);
  return api->getAPILevel (level);
202 * @brief Get the TOPS of the opened NPU device
203 * @param[in] dev the NPU device handle
204 * @param[out] tops npu tops
205 * @return 0 if no error, otherwise a negative errno
206 * @note this does not support for emulated devices
HostHandler::getTops (uint32_t *tops)
  /* driver API is always set once the device is initialized */
  const DriverAPI * api = device_->getDriverAPI ();
  assert (api != nullptr);
  return api->getTops (tops);
218 * @brief Set the data layout for input/output tensors
219 * @param[in] modelid The ID of model whose layouts are set
220 * @param[in] in the layout/type info for input tensors
221 * @param[in] out the layout/type info for output tensors
222 * @return @c 0 if no error. otherwise a negative error value
223 * @note if this function is not called, default layout/type will be used.
HostHandler::setDataInfo (uint32_t modelid, tensors_data_info *in,
    tensors_data_info *out)
  Model *model = models_.find (modelid);
  if (model == nullptr)
  /* store per-tensor layout/type; consulted when buffers are (de)converted */
  return model->setDataInfo (in, out);
237 * @brief Set the inference constraint for next NPU inferences
238 * @param[in] modelid The target model id
239 * @param[in] constraint inference constraint (e.g., timeout, priority)
240 * @return @c 0 if no error. otherwise a negative error value
241 * @note If this function is not called, default values are used.
HostHandler::setConstraint (uint32_t modelid, npuConstraint constraint)
  Model *model = models_.find (modelid);
  if (model == nullptr)
  /* constraint (timeout/priority) applies to subsequent inferences */
  model->setConstraint (constraint);
256 * @brief find and return model instance
257 * @param[in] modelid model id
258 * @return model instance if found. otherwise nullptr
HostHandler::getModel (uint32_t modelid)
  /* nullptr when the id was never registered (or already removed) */
  return models_.find (modelid);
266 /** @brief dummay callback for runSync. */
  callbackSync (output_buffers *output) : output_(output), done_(false) {}
  /* static trampoline matching the npuOutputNotify signature; forwards to the
   * instance passed via the opaque data pointer */
  static void callback (output_buffers *output, uint64_t sequence, void *data) {
    callbackSync *sync = static_cast<callbackSync *>(data);
    sync->callback (output, sequence);
  void callback (output_buffers *output, uint64_t sequence) {
    if (output_ != nullptr) {
      /** just copy internal variables of output buffers */
      memcpy (output_, output, sizeof (output_buffers));
    /* block until callback() reports completion */
    std::unique_lock<std::mutex> lock (m_);
    cv_.wait (lock, [this]() { return done_; });
  std::condition_variable cv_;
  output_buffers *output_;
298 * @brief Execute inference. Wait (block) until the output is available.
299 * @param[in] modelid The model to be inferred.
300 * @param[in] input The input data to be inferred.
301 * @param[out] output The output result.
302 * @return @c 0 if no error. otherwise a negative error value
HostHandler::runSync (uint32_t modelid, const input_buffers *input,
    output_buffers *output)
  /* wrap the async path with a blocking waiter */
  callbackSync sync (output);
  int status = runAsync (modelid, input, callbackSync::callback,
      static_cast <void*> (&sync), NPUASYNC_DROP_OLD, nullptr);
  /** sync needs to wait for the callback before returning */
319 * @brief Invoke NPU inference. Unblocking call.
320 * @param[in] modelid The model to be inferred.
321 * @param[in] input The input data to be inferred.
322 * @param[in] cb The output buffer handler.
323 * @param[in] cb_data The data given as a parameter to the runNPU_async call.
324 * @param[in] mode Configures how this operation works.
325 * @param[out] sequence The sequence number returned with runNPU_async.
326 * @return @c 0 if no error. otherwise a negative error value
HostHandler::runAsync (uint32_t modelid, const input_buffers *input,
    npuOutputNotify cb, void *cb_data, npu_async_mode mode, uint64_t *sequence)
  Model *model = nullptr;
  /* some devices (e.g., ASR) run without a registered model */
  if (device_->needModel()) {
    model = getModel (modelid);
    if (model == nullptr)
  /* check the given model before running */
  if (model != nullptr && !model->finalize ()) {
    logerr (TAG, "Failed to finalize the model. Please see the log messages\n");
  device_->setAsyncMode (mode);
  return device_->run (NPUINPUT_HOST, model, input, cb, cb_data, sequence);
351 * @brief Let NPU accept input frames from its internal source continuously
352 * @param[in] modelid The model to be inferred.
353 * @param[in] opmode NPU has different opmode with auto-inputs. Choose one.
354 * @param[in] hw_dev The target device feeding input data
355 * @return @c 0 if no error. otherwise a negative error value
HostHandler::runInternal (uint32_t modelid, npu_input_opmode opmode,
  Model *model = nullptr;
  /* some devices run without a registered model */
  if (device_->needModel()) {
    model = getModel (modelid);
    if (model == nullptr)
  /* check the given model before running */
  if (model != nullptr && !model->finalize ()) {
    logerr (TAG, "Failed to finalize the model. Please see the log messages\n");
  /* input frames come from hw_dev internally, not from the host */
  return device_->runInternal (opmode, model, hw_dev);
379 * @brief Stop the request with the given id
380 * @param[in] dev The NPU device handle
381 * @param[in] id The request id
382 * @return @c 0 if no error. otherwise a negative error value
HostHandler::stopInternal (int id)
    logerr (TAG, "Unable to stop this request with id (%d)\n", id);
  const DriverAPI * api = device_->getDriverAPI ();
  assert (api != nullptr);
  /* ask the driver to abort the identified request */
  return api->stop_target (id);
399 * @brief get number of available devices
400 * @param[in] type device type
401 * @return number of devices
HostHandler::getNumDevices (dev_type type)
  /* static query; does not require an opened device */
  return DriverAPI::getNumDevices (type);
410 * @brief get device instance
411 * @param[out] dev device instance
412 * @param[in] type device type
413 * @param[in] id device id
414 * @return 0 if no error. otherwise a negative errno
HostHandler::getDevice (npudev_h *dev, dev_type type, uint32_t id)
  int num_devices = getNumDevices (type);
  /** check the validity of device id */
  if (!(num_devices > 0 && id < static_cast<uint32_t>(num_devices))) {
    logerr (TAG, "Invalid arguments provided\n");
  Device *device = Device::createInstance (type, id);
  if (device == nullptr) {
    logerr (TAG, "Failed to create a device with the given type\n");
439 * @brief allocate generic buffer (just for users)
440 * @param[out] buffer buffer instance
441 * @return 0 if no error. otherwise a negative errno
HostHandler::allocGenericBuffer (generic_buffer *buffer)
  if (buffer->size == 0) {
    logerr (TAG, "Invalid size\n");
  /* sizes beyond 32 bits are not supported by the allocation path */
  if (buffer->size > UINT32_MAX) {
    logerr (TAG, "Don't support such a large size");
  switch (buffer->type) {
      if (buffer->filepath == nullptr)
      /* now, npu-engine always provides dmabuf-based allocation */
      void *addr = nullptr;
      int dmabuf = device_->allocMemory (buffer->size, &addr);
      buffer->dmabuf = dmabuf;
485 * @brief deallocate generic buffer (just for users)
486 * @param[in] buffer buffer instance
487 * @return 0 if no error. otherwise a negative errno
HostHandler::deallocGenericBuffer (generic_buffer *buffer)
  switch (buffer->type) {
      /** always true because there is nothing to deallocate for this type */
      return device_->deallocMemory (buffer->dmabuf, buffer->size, buffer->addr);
509 * @brief allocate multiple generic buffers (just for users)
510 * @param[out] buffers multi-buffer instance
511 * @return 0 if no error. otherwise a negative errno
HostHandler::allocGenericBuffer (generic_buffers *buffers)
  if (buffers == NULL || buffers->num_buffers < 1)
  for (idx = 0; idx < buffers->num_buffers; idx++) {
    status = allocGenericBuffer (&buffers->bufs[idx]);
      /* roll back the buffers allocated so far on failure */
      deallocGenericBuffer (&buffers->bufs[--idx]);
539 * @brief deallocate multiple generic buffers (just for users)
540 * @param[in] buffers multi-buffer instance
541 * @return 0 if no error. otherwise a negative errno
HostHandler::deallocGenericBuffer (generic_buffers *buffers)
  if (buffers == NULL || buffers->num_buffers < 1)
  for (uint32_t idx = 0; idx < buffers->num_buffers; idx++)
    deallocGenericBuffer (&buffers->bufs[idx]);
  /* mark the set as empty so a repeated call is a no-op */
  buffers->num_buffers = 0;
557 * @brief get the current memory status
558 * @param[out] alloc_total The size of allocated memory until now
559 * @param[out] free_total The size of freed memory until now
 * @return 0 if no error. otherwise a negative error value
HostHandler::getMemoryStatus (size_t *alloc_total, size_t *free_total)
  /** API is always set in initialize () */
  const DriverAPI * api = device_->getDriverAPI ();
  assert (api != nullptr);
  return api->getMemoryStatus (alloc_total, free_total);
573 * @brief Get the current device status to be used
574 * @param[out] status the device status
575 * @param[out] num_requests the number of running requests (or pending)
576 * @return 0 if no error, otherwise a negative errno.
HostHandler::getDeviceStatus (npu_status *status, uint32_t *num_requests)
  /** API is always set in initialize () */
  const DriverAPI * api = device_->getDriverAPI ();
  device_state_t state = api->isReady ();
  if (state == device_state_t::STATE_READY) {
    *num_requests = api->numRequests ();
    /* any outstanding request means the device is busy rather than idle */
    if (*num_requests > 0)
602 /** implement methods of Device class */
604 /** @brief constructor of device */
605 Device::Device (dev_type type, int id, bool need_model)
606 : comm_ (CommPlugin::getCommPlugin()), type_ (type), id_ (id), need_model_ (true),
607 mode_ (NPUASYNC_WAIT), initialized_ (false), atomic_flag_ (ATOMIC_FLAG_INIT)
612 * @brief create device instance depending on device type and id
613 * @param[in] type device type
614 * @param[in] id device id
615 * @return device instance
Device::createInstance (dev_type type, int id)
  Device *device = nullptr;
  /* mask off extra bits; dispatch on the base device family */
  switch (type & DEVICETYPE_MASK) {
    case DEVICETYPE_TRIV:
      device = new TrinityVision (id);
    case DEVICETYPE_TRIV2:
      device = new TrinityVision2 (id);
    case DEVICETYPE_TRIA:
      device = new TrinityAsr (id);
      /* ASR devices run without a registered model */
      device->setNeedModel (false);
  /* init() failure (driver API creation) invalidates the new instance */
  if (device != nullptr && device->init () != 0) {
646 * @brief device initialization
647 * @return 0 if no error, otherwise a negative errno
648 * @note Init failures come from createDriverAPI() only.
  /** should be initialized only once */
  if (!atomic_flag_.test_and_set()) {
    /** create the corresponding driver API */
    api_ = DriverAPI::createDriverAPI (type_, id_);
    if (api_.get() == nullptr) {
      /* clear the flag so a later init () attempt can retry */
      atomic_flag_.clear();
      logerr (TAG, "Failed to create driver API\n");
    handler_.reset (new HostHandler (this));
    scheduler_.reset (new Scheduler (api_.get()));
    mem_ = MemAllocator::createInstance (api_.get());
    initialized_ = true; /** c++11 does not provide test() of atomic flag */
674 * @brief stop all requests from this device
 * @param[in] force_stop indicates whether the scheduler waits to handle previous requests
676 * @return 0 if no error, otherwise a negative errno
Device::stop (bool force_stop)
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
  /* a STOP request is submitted through the scheduler like any other */
  Request *req = new Request (NPUINPUT_STOP);
  req->setForceStop (force_stop);
  return scheduler_->submitRequest (req);
692 * @brief allocate generic memory buffer
693 * @param[in] size the size to allocate
694 * @param[out] addr the mapped address
695 * @return dmabuf fd if no error, otherwise a negative errno
Device::allocMemory (size_t size, void **addr)
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
  if (size == 0 || addr == nullptr) {
    logerr (TAG, "Invalid arguments\n");
  /* returns a dmabuf fd on success (see the function doc above) */
  return mem_->allocMemory (size, addr);
714 * @brief deallocate generic memory buffer
715 * @param[in] dmabuf_fd dmabuf file descriptor
716 * @param[in] size buffer size
717 * @param[in] addr mapped addr
718 * @return 0 if no error, otherwise a negative errno
Device::deallocMemory (int dmabuf_fd, size_t size, void * addr)
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
  /* all three values must describe a previously allocated mapping */
  if (dmabuf_fd < 0 || size == 0 || addr == nullptr) {
    logerr (TAG, "Invalid arguments\n");
  return mem_->deallocMemory (dmabuf_fd, size, addr);
737 * @brief extract the buffer instance from input generic buffers
738 * @param[in] meta the model metadata
739 * @param[in] input the input generic buffers
740 * @return the buffer instance
TrinityVision::prepareInputBuffers (const Metadata *meta, const input_buffers *input)
  if (meta == nullptr || input == nullptr ||
      meta->getInputNum() != input->num_buffers) {
    logerr (TAG, "Invalid metadata info provided\n");
  /* the first entry decides whether we wrap a user dmabuf or allocate */
  const generic_buffer *first = &input->bufs[0];
  if (first->type == BUFFER_DMABUF) {
    /* user-provided dmabuf: wrap it without copying */
    buffer = mem_->allocBuffer (new HWmemExternal);
    if (buffer == nullptr)
    buffer->setDmabuf (first->dmabuf);
    buffer->setOffset (first->offset);
    buffer->setSize (meta->getBufferSize());
    /* otherwise allocate device memory; data is copied in later */
    buffer = mem_->allocBuffer (new HWmemDevice);
    if (buffer == nullptr)
    int status = buffer->alloc (meta->getBufferSize ());
      logerr (TAG, "Failed to allocate buffer: %d\n", status);
  /* carve per-tensor views out of the buffer according to the metadata */
  int status = buffer->createTensors (meta);
    logerr (TAG, "Failed to create tensors: %d\n", status);
785 * @brief implementation of TRIV's setModel ()
786 * @param[in] model_buf the model generic buffer
787 * @param[out] model the model instance
788 * @return 0 if no error, otherwise a negative errno
TrinityVision::setModel (const generic_buffer *model_buf, Model ** model_ptr)
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
  if (model_buf == nullptr || model_ptr == nullptr)
  Model *model = nullptr;
  HWmem * hwmem_prog = nullptr;
  HWmem * hwmem_weight = nullptr;
  /** In TRIV1, model data (including program/weight) should be contiguous */
  switch (model_buf->type) {
      model = mem_->allocModel (new HWmemDevice);
      if (model == nullptr) {
        logerr (TAG, "Failed to allocate model\n");
      status = model->alloc (model_buf->size);
        logerr (TAG, "Failed to allocate model: %d\n", status);
      /** extract the whole model data */
      status = comm_.extractGenericBuffer (model_buf, model->getData(), nullptr);
        logerr (TAG, "Failed to extract generic buffer: %d\n", status);
  /* metadata lives at the start of the model buffer */
  status = model->setMetadata (model->getData());
  /** allocate program (optional; NOP) */
  if (model->getMetadata()->getProgramSize() > 0) {
    hwmem_prog = new HWmem (new HWmemChunk);
    model->setProgramData (hwmem_prog);
    /* program is a chunk inside the contiguous model buffer */
    hwmem_prog->setParent (model);
    hwmem_prog->setOffset (model->getMetadata()->getMetaSize());
    status = hwmem_prog->alloc (model->getMetadata()->getProgramSize());
      logerr (TAG, "Failed to allocate program\n");
  /** allocate weight (optional) */
  if (model->getMetadata()->getWeightSize() > 0) {
    hwmem_weight = new HWmem (new HWmemChunk);
    model->setWeightData (hwmem_weight);
    /* weight follows the program inside the model buffer */
    hwmem_weight->setParent (model);
    hwmem_weight->setOffset (model->getMetadata()->getMetaSize() +
        model->getMetadata()->getProgramSize());
    status = hwmem_weight->alloc (model->getMetadata()->getWeightSize());
      /* NOTE(review): message says "program" but this is the weight allocation */
      logerr (TAG, "Failed to allocate program\n");
  if (hwmem_prog != nullptr) {
    /** register this model to the driver */
    model_config_t config;
    config.dbuf_fd = hwmem_prog->getDmabuf ();
    config.program_size = hwmem_prog->getSize ();
    config.program_offset_addr = hwmem_prog->getOffset ();
    if (hwmem_weight != nullptr)
      config.weight_offset_addr = hwmem_weight->getOffset ();
    status = api_->registerModel (&config);
    /* keep the driver-assigned id for later deregistration */
    model->setInternalID(config.id);
892 * @brief implementation of TRIV's unsetModel ()
893 * @param[in] model the model instance
894 * @return 0 if no error, otherwise a negative errno
TrinityVision::unsetModel (Model * model)
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
  if (model == nullptr) {
    logerr (TAG, "Invalid model instance\n");
  /* only models with a program section were registered to the driver */
  if (model->getMetadata()->getProgramSize() > 0)
    return api_->deregisterModel (model->getInternalID ());
916 * @brief implementation of TRIV's run()
917 * @param[in] opmode input opmode
918 * @param[in] model the model instance
919 * @param[in] input generic buffers of input data
920 * @param[in] cb the output callback
921 * @param[in] cb_data the output callback data
922 * @param[out] sequence The sequence number returned with runNPU_async.
TrinityVision::run (npu_input_opmode opmode, const Model *model,
    const input_buffers *input, npuOutputNotify cb, void *cb_data,
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
  if (opmode != NPUINPUT_HOST) {
    logerr (TAG, "TRIV supports only host inputservice\n");
  if (model == nullptr || input == nullptr) {
    logerr (TAG, "TRIV requires both model and input buffers\n");
  /* refresh layout/type info that may have changed via setDataInfo () */
  const_cast<Model *>(model)->updateDataInfo ();
  Buffer *buffer = prepareInputBuffers (model->getMetadata(), input);
  if (buffer == nullptr) {
    logerr (TAG, "Failed to extract buffer instance\n");
  if (!buffer->isExternal ()) {
    /* copy (and convert) each host input tensor into the device buffer */
    for (uint32_t idx = 0; idx < input->num_buffers; idx++) {
      auto func = std::bind (TrinityVision::manipulateData, model, idx, true,
          std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
      int status = comm_.extractGenericBuffer (&input->bufs[idx],
          buffer->getInputTensor(idx)->getData(), func);
        logerr (TAG, "Failed to feed input buffer: %d\n", status);
  /** this device uses CMA buffer */
  Request *req = new Request (opmode);
  req->setModel (model);
  req->setBuffer (buffer);
  req->setCallback (std::bind (&TrinityVision::callback, this, req, cb, cb_data));
  if (sequence != nullptr)
    *sequence = req->getID();
  return scheduler_->submitRequest (req);
 * @brief callback of TRIV request
982 * @param[in] req the request instance
983 * @param[in] cb callback for completion
984 * @param[in] cb_data callback data
 * @note The callback invoke does not guarantee the request was successful
986 * @todo Check the request failures
TrinityVision::callback (Request *req, npuOutputNotify cb, void *cb_data)
  const Model *model = req->getModel ();
  Buffer *buffer = req->getBuffer ();
  output_buffers output = {
    .num_buffers = buffer->getOutputNum ()
  for (uint32_t idx = 0; idx < output.num_buffers; idx++) {
    uint32_t output_tensor_size = model->getOutputTensorSize (idx);
    if (buffer->isExternal ()) {
      /* user supplied a dmabuf; hand back the mapped pointer directly */
      output.bufs[idx].type = BUFFER_DMABUF;
      output.bufs[idx].size = output_tensor_size;
      output.bufs[idx].addr = buffer->getOutputTensor(idx)->getData();
      output.bufs[idx].type = BUFFER_MAPPED;
      output.bufs[idx].size = output_tensor_size;
      /** user needs to free this */
      output.bufs[idx].addr = malloc (output_tensor_size);
      /* convert from the device layout back to the user-requested layout/type */
      auto func = std::bind (TrinityVision::manipulateData, model, idx, false,
          std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
      int status = comm_.insertGenericBuffer (buffer->getOutputTensor(idx)->getData(),
          &output.bufs[idx], func);
        logerr (TAG, "Failed to return output buffer: %d\n", status);
  cb (&output, req->getID(), cb_data);
1026 * @brief extract the segment table instance from input generic buffers
1027 * @param[in] model the model instance
1028 * @param[in] input the input generic buffers
1029 * @param[in] output the output generic buffers
1030 * @return the segment table instance
TrinityVision2::prepareSegmentTable (const Model *model, const input_buffers *input,
    const output_buffers *output)
  const Metadata *meta = model->getMetadata ();
  /* input may be nullptr (internal/auto-input mode); only validate when given */
  if (meta == nullptr || (input != nullptr &&
      meta->getInputNum() != input->num_buffers)) {
    logerr (TAG, "Invalid metadata info provided\n");
  SegmentTable * segt = mem_->allocSegmentTable (new HWmemDevice);
  int status = segt->alloc ();
    logerr (TAG, "Failed to allocate segment table: %d\n", status);
  status = segt->createSegments (model, input, output);
    logerr (TAG, "Failed to create segments: %d\n", status);
1064 * @brief implementation of TRIV2's setModel ()
1065 * @param[in] model_buf the model generic buffer
1066 * @param[out] model the model instance
1067 * @return 0 if no error, otherwise a negative errno
TrinityVision2::setModel (const generic_buffer *model_buf, Model ** model_ptr)
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
  if (model_buf == nullptr || model_ptr == nullptr)
  switch (model_buf->type) {
      model = mem_->allocModel (new HWmemDevice);
      if (model == nullptr) {
        logerr (TAG, "Failed to allocate model\n");
      /* unlike TRIV1, only the metadata header is kept in this buffer */
      status = model->alloc (NPUBIN_META_SIZE);
        logerr (TAG, "Failed to allocate model: %d\n", status);
      status = comm_.extractGenericBuffer (model_buf, model->getData(), nullptr,
          0, NPUBIN_META_SIZE);
        logerr (TAG, "Failed to extract generic buffer: %d\n", status);
  status = model->setMetadata (model->getData());
  /** allocate program (optional; NOP) */
  if (model->getMetadata()->getProgramSize() > 0) {
    HWmem * hwmem_prog = new HWmem (new HWmemDevice);
    hwmem_prog->setDriverAPI (api_.get());
    model->setProgramData (hwmem_prog);
    status = hwmem_prog->alloc (model->getMetadata()->getProgramSize());
      logerr (TAG, "Failed to allocate program\n");
    /* program section follows the metadata in the source buffer */
    status = comm_.extractGenericBuffer (model_buf, hwmem_prog->getData(), nullptr,
        model->getMetadata()->getMetaSize(),
        model->getMetadata()->getProgramSize());
      logerr (TAG, "Failed to extract generic buffer: %d\n", status);
    /** register this model to the driver */
    model_config_t config;
    config.dbuf_fd = hwmem_prog->getDmabuf ();
    config.program_size = hwmem_prog->getSize ();
    config.program_offset_addr = 0;
    /** for metadata extra section */
    config.metadata_dbuf_fd = model->getDmabuf ();
    config.metadata_extra_addr = NPUBIN_META_SIZE;
    config.metadata_extra_size = model->getMetadata()->getMetaExtraSize ();
    status = api_->registerModel (&config, model->getMetadata()->getNPUVersion());
    /* keep the driver-assigned id for later deregistration */
    model->setInternalID(config.id);
  /** allocate weight (optional) */
  if (model->getMetadata()->getWeightSize() > 0) {
    HWmem * hwmem_weight = new HWmem (new HWmemDevice);
    hwmem_weight->setDriverAPI (api_.get());
    model->setWeightData (hwmem_weight);
    status = hwmem_weight->alloc (model->getMetadata()->getWeightSize());
      /* NOTE(review): message says "program" but this is the weight allocation */
      logerr (TAG, "Failed to allocate program\n");
    /* weight section follows metadata + program in the source buffer */
    status = comm_.extractGenericBuffer (model_buf, hwmem_weight->getData(), nullptr,
        model->getMetadata()->getMetaSize() + model->getMetadata()->getProgramSize(),
        model->getMetadata()->getWeightSize());
      logerr (TAG, "Failed to extract generic buffer: %d\n", status);
1183 * @brief implementation of TRIV2's unsetModel ()
1184 * @param[in] model the model instance
1185 * @return 0 if no error, otherwise a negative errno
TrinityVision2::unsetModel (Model * model)
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
  if (model == nullptr) {
    logerr (TAG, "Invalid model instance\n");
  /* only models with a program section were registered to the driver */
  if (model->getMetadata()->getProgramSize() > 0)
    return api_->deregisterModel (model->getInternalID ());
1206 /** @brief implementation of TRIV2's run() */
TrinityVision2::run (npu_input_opmode opmode, const Model *model,
    const input_buffers *input, npuOutputNotify cb, void *cb_data,
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
  if (opmode != NPUINPUT_HOST)
  if (input == nullptr || input->num_buffers == 0 || model == nullptr)
  /* refresh layout/type info that may have changed via setDataInfo () */
  const_cast<Model *>(model)->updateDataInfo ();
  /** this device uses segment table */
  SegmentTable * segt = prepareSegmentTable (model, input);
  if (segt == nullptr) {
    logerr (TAG, "Failed to create segment table instance\n");
  /** extract input data */
  for (uint32_t idx = 0; idx < input->num_buffers; idx++) {
    /* external (dmabuf) segments already hold the data; skip the copy */
    if (!segt->getInputSegment(idx)->isExternal ()) {
      uint32_t seg_offset = segt->getInputSegmentOffset(idx);
      auto func = std::bind (TrinityVision2::manipulateData, model, idx, true,
          std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
      int status = comm_.extractGenericBuffer (
          segt->getInputSegment(idx)->getData() + seg_offset,
        logerr (TAG, "Failed to feed input segment: %d\n", status);
  Request *req = new Request (opmode);
  req->setModel (model);
  req->setSegmentTable (segt);
  req->setCallback (std::bind (&TrinityVision2::callback, this, req, cb, cb_data));
  *sequence = req->getID();
  return scheduler_->submitRequest (req);
1260 /** @brief implementation of TRIV2's runInternal() */
TrinityVision2::runInternal (npu_input_opmode opmode, const Model *model,
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
  /* only the HW-recurring input service is supported on this path */
  if (opmode != NPUINPUT_HW_RECURRING)
  /** this device uses segment table */
  SegmentTable * segt = prepareSegmentTable (model, nullptr, nullptr);
  if (segt == nullptr) {
    logerr (TAG, "Failed to create segment table instance\n");
  Request *req = new Request (opmode);
  req->setModel (model);
  req->setSegmentTable (segt);
  req->setHwDevice (hw_dev);
  return scheduler_->submitRequest (req);
1288 /** @brief callback of TRIV2 request */
TrinityVision2::callback (Request *req, npuOutputNotify cb, void *cb_data)
  const Model *model = req->getModel ();
  SegmentTable *segt = req->getSegmentTable ();
  output_buffers output = {
    .num_buffers = segt->getNumOutputSegments ()
  for (uint32_t idx = 0; idx < output.num_buffers; idx++) {
    uint32_t output_tensor_size = model->getOutputTensorSize (idx);
    output.bufs[idx].type = BUFFER_MAPPED;
    output.bufs[idx].size = output_tensor_size;
    /** user needs to free this */
    output.bufs[idx].addr = calloc (1, output_tensor_size);
#if defined(ENABLE_FPGA_WORKAROUND)
    /* FPGA path reads back via the segment's dmabuf instead */
    segt->getOutputSegment(idx)->getDmabuf(),
    segt->getOutputSegmentOffset(idx),
    output.bufs[idx].addr,
    output.bufs[idx].size);
    /* convert from the device layout back to the user-requested layout/type */
    auto func = std::bind (TrinityVision2::manipulateData, model, idx, false,
        std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
    int status = comm_.insertGenericBuffer (
        segt->getOutputSegment(idx)->getData() + segt->getOutputSegmentOffset(idx),
        &output.bufs[idx], func);
      logerr (TAG, "Failed to return output buffer: %d\n", status);
  cb (&output, req->getID(), cb_data);
1330 /** @brief implementation of TRIA's run(): WIP */
TrinityAsr::run (npu_input_opmode opmode, const Model *model,
    const input_buffers *input, npuOutputNotify cb, void *cb_data,
  if (!initialized ()) {
    logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
  if (opmode != NPUINPUT_HOST)
  if (input == nullptr || input->num_buffers != 1)
  /** ASR does not require model and support only a single tensor */
  const generic_buffer *first_buf = &input->bufs[0];
  if (first_buf->type == BUFFER_DMABUF) {
    /* user-provided dmabuf: wrap it without copying */
    buffer = mem_->allocBuffer (new HWmemExternal);
    if (buffer == nullptr)
    buffer->setDmabuf (first_buf->dmabuf);
    buffer->setOffset (first_buf->offset);
    buffer->setSize (first_buf->size);
    /* otherwise allocate device memory and copy the data below */
    buffer = mem_->allocBuffer (new HWmemDevice);
    if (buffer == nullptr)
    status = buffer->alloc (first_buf->size);
  status = buffer->createTensors ();
    logerr (TAG, "Failed to create tensors: %d\n", status);
  if (!buffer->isExternal ()) {
    status = comm_.extractGenericBuffer (first_buf,
        buffer->getInputTensor(0)->getData(), nullptr);
  Request *req = new Request (opmode);
  req->setBuffer (buffer);
  req->setCallback (std::bind (&TrinityAsr::callback, this, req, cb, cb_data));
  *sequence = req->getID();
  return scheduler_->submitRequest (req);
1395 /** @brief callback of TRIA request: WIP */
TrinityAsr::callback (Request *req, npuOutputNotify cb, void *cb_data)
  Buffer *buffer = req->getBuffer ();
  output_buffers output = {
  /** TODO: finalize this impl. when the ASR's working scenario is determined */
  cb (&output, req->getID(), cb_data);
1410 /** Implement data manipulation (each device may have different impl.) */
1415 * @brief perform data manipulation
1416 * @param[in] model model instance
1417 * @param[in] idx tensor index
1418 * @param[in] is_input indicate it's input manipulation
1419 * @param[out] dst destination buffer
1420 * @param[in] src source buffer (feature map)
1421 * @param[in] size size to be copied
1422 * @return size of memory copy if no error, otherwise zero
1424 * @note the input data format should be NHWC
1425  * @details Rules for the memory address of activations in NPU HW.
1426 * (https://code.sec.samsung.net/confluence/pages/viewpage.action?pageId=146491864)
1428 * 1) Special case (depth == 3)
1429 * - addr(x,y,z) = addr(0,0,0) + (z) + 3 * (x + width * y)
1432 * - addr(x,y,z) = addr(0,0,0) + (z % MPA_L) + MPA_L * (x + width * (y + height * (z / MPA_L)))
1434 * Thus, if depth is not a multiple of MPA_L (i.e., 64), zero padding is required
1437 TrinityVision::manipulateData (const Model *model, uint32_t idx, bool is_input,
1438 void *dst, void *src, size_t size)
1440 const Metadata *meta = model->getMetadata();
1441 DataConverter converter (is_input);
1443 converter.setData (src, dst, size);
1446 const tensor_data_info* info = model->getInputDataInfo (idx);
1447 if (info == nullptr)
1450 converter.setDataLayout (info->layout, DATA_LAYOUT_SRNPU);
1451 converter.setDataType (info->type, DATA_TYPE_SRNPU);
1452 converter.setDataDims (meta->getInputDims (idx));
1453 converter.setQuantZero (meta->getInputQuantZero (idx));
1454 converter.setQuantScale (meta->getInputQuantScale (idx));
1456 const tensor_data_info* info = model->getOutputDataInfo (idx);
1457 if (info == nullptr)
1460 converter.setDataLayout (DATA_LAYOUT_SRNPU, info->layout);
1461 converter.setDataType (DATA_TYPE_SRNPU, info->type);
1462 converter.setDataDims (meta->getOutputDims (idx));
1463 converter.setQuantZero (meta->getOutputQuantZero (idx));
1464 converter.setQuantScale (meta->getOutputQuantScale (idx));
1467 return converter.perform ();
1471 * @brief perform data manipulation
1472 * @param[in] model model instance
1473 * @param[in] idx tensor index
1474 * @param[in] is_input indicate it's input manipulation
1475 * @param[out] dst destination buffer
1476 * @param[in] src source buffer (feature map)
1477 * @param[in] size size to be copied
1478 * @return size of memory copy if no error, otherwise zero
1480 * @note the input data format should be NHWC
1482  * @details Feature map data in TRIV2, where (x, y, z) = (width, height, depth)
1484 * 1) Image input (depth == 1 or depth == 3)
1485 * Addr(x,y,z) = Addr(0,0,0) + z + depth * x + ymod * y
1488 * Addr(x,y,z) = Addr(0,0,0) + (z % 64) + (64 * x) + ymod * y + zmod * (z / 64)
1491 TrinityVision2::manipulateData (const Model *model, uint32_t idx, bool is_input,
1492 void *dst, void *src, size_t size)
1494 const Metadata *meta = model->getMetadata();
1495 DataConverter converter (is_input);
1497 converter.setData (src, dst, size);
1500 const tensor_data_info* info = model->getInputDataInfo (idx);
1501 if (info == nullptr)
1504 converter.setDataLayout (info->layout, DATA_LAYOUT_TRIV2);
1505 converter.setDataType (info->type, meta->getInputQuantType (idx));
1506 converter.setDataDims (meta->getInputDims (idx));
1507 converter.setQuantZero (meta->getInputQuantZero (idx));
1508 converter.setQuantScale (meta->getInputQuantScale (idx));
1510 const tensor_data_info* info = model->getOutputDataInfo (idx);
1511 if (info == nullptr)
1514 converter.setDataLayout (DATA_LAYOUT_TRIV2, info->layout);
1515 converter.setDataType (meta->getOutputQuantType (idx), info->type);
1516 converter.setDataDims (meta->getOutputDims (idx));
1517 converter.setQuantZero (meta->getOutputQuantZero (idx));
1518 converter.setQuantScale (meta->getOutputQuantScale (idx));
1521 return converter.perform ();
1527 TrinityVision::manipulateData (const Model *model, uint32_t idx, bool is_input,
1528 void *dst, void *src, size_t size)
1530 memcpy (dst, src, size);
1535 TrinityVision2::manipulateData (const Model *model, uint32_t idx, bool is_input,
1536 void *dst, void *src, size_t size)
1538 memcpy (dst, src, size);
1544 /** other device types don't have data manip impl. yet */
1547 TrinityAsr::manipulateData (const Model *model, uint32_t idx, bool is_input,
1548 void *dst, void *src, size_t size)
1550 memcpy (dst, src, size);