3 * Copyright (C) 2020 Samsung Electronics
4 * Copyright (C) 2020 Dongju Chae <dongju.chae@samsung.com>
7 * @file ne-host-handler.cc
9 * @brief Implementation of APIs to access NPU from Host
10 * @see https://code.sec.samsung.net/confluence/display/ODLC/2020+Overall+Software+Stack
11 * @author Dongju Chae <dongju.chae@samsung.com>
12 * @bug No known bugs except for NYI items
15 #include "ne-handler.h"
17 #include <libnpuhost.h>
18 #include <npubinfmt.h>
19 #include <NPUdrvAPI.h>
20 #include <CommPlugin.h>
25 #include <condition_variable>
/**
 * @brief Validate an opaque device handle and bind its HostHandler.
 * Expands to declarations + early returns: bails out of the enclosing
 * function with -EINVAL when the device or its handler is null.
 */
32 #define INIT_HOST_HANDLER(handler, dev) \
33 Device *tdev = static_cast <Device *> (dev); \
34 if (tdev == nullptr) return -EINVAL; \
35 HostHandler *handler = tdev->getHostHandler (); \
36 if (handler == nullptr) return -EINVAL;
38 /** just for backward-compatibility; tracks the most recently created device (presumably set in getDevice() — see its note) */
39 npudev_h HostHandler::latest_dev_ = nullptr;
41 /** implement libnpuhost APIs */
/* Thin C API layer: each entry validates the handle via INIT_HOST_HANDLER and
 * forwards to the corresponding HostHandler method. */
44 * @brief Returns the number of available NPU devices.
45 * @return @c The number of NPU devices.
46 * @retval 0 if no NPU devices are available; positive (the number of NPUs) if NPU devices are available; otherwise, a negative error value.
47 * @note the caller should call putNPUdevice() to release the device handle
49 int getnumNPUdeviceByType (dev_type type)
51 return HostHandler::getNumDevices (type);
55 * @brief Returns the handle of the chosen NPU devices.
56 * @param[out] dev The NPU device handle
57 * @param[in] id The NPU id to get the handle. 0 <= id < getnumNPUdeviceByType().
58 * @return @c 0 if no error. otherwise a negative error value
59 * @note the caller should call putNPUdevice() to release the device handle
61 int getNPUdeviceByType (npudev_h *dev, dev_type type, uint32_t id)
63 return HostHandler::getDevice (dev, type, id);
67 * @brief release the NPU device instance obtained by getDevice ()
68 * @param[in] dev the NPU device handle
70 void putNPUdevice (npudev_h dev)
73 delete static_cast<Device *> (dev);
77 * @brief Send the NN model to NPU.
78 * @param[in] dev The NPU device handle
79 * @param[in] modelfile The filepath to the compiled NPU NN model in any buffer_type
80 * @param[out] modelid The modelid allocated for this instance of NN model.
81 * @return @c 0 if no error. otherwise a negative error value
83 * @detail For ASR devices, which do not accept models, but have models
84 * embedded in devices, you do not need to call register and
85 * register calls for ASR are ignored.
87 * @todo Add a variation: in-memory model register.
89 int registerNPUmodel (npudev_h dev, generic_buffer *modelfile, uint32_t *modelid)
91 INIT_HOST_HANDLER (host_handler, dev);
93 return host_handler->registerModel (modelfile, modelid);
97 * @brief Remove the NN model from NPU
98 * @param[in] dev The NPU device handle
99 * @param[in] modelid The model to be removed from the NPU.
100 * @return @c 0 if no error. otherwise a negative error value
101 * @detail This may incur some latency with memory compaction.
103 int unregisterNPUmodel(npudev_h dev, uint32_t modelid)
105 INIT_HOST_HANDLER (host_handler, dev);
107 return host_handler->unregisterModel (modelid);
111 * @brief Remove all NN models from NPU
112 * @param[in] dev The NPU device handle
113 * @return @c 0 if no error. otherwise a negative error value
115 int unregisterNPUmodel_all(npudev_h dev)
117 INIT_HOST_HANDLER (host_handler, dev);
119 return host_handler->unregisterModels ();
123 * @brief [OPTIONAL] Set the data layout for input/output tensors
124 * @param[in] dev The NPU device handle
125 * @param[in] modelid The ID of model whose layouts are set
126 * @param[in] info_in the layout/type info for input tensors
127 * @param[in] info_out the layout/type info for output tensors
128 * @return @c 0 if no error. otherwise a negative error value
129 * @note if this function is not called, default layout/type will be used.
131 int setNPU_dataInfo(npudev_h dev, uint32_t modelid,
132 tensors_data_info *info_in, tensors_data_info *info_out)
134 INIT_HOST_HANDLER (host_handler, dev);
136 return host_handler->setDataInfo (modelid, info_in, info_out);
140 * @brief [OPTIONAL] Set the inference constraint for next NPU inferences
141 * @param[in] dev The NPU device handle
142 * @param[in] modelid The target model id
143 * @param[in] constraint inference constraint (e.g., timeout, priority)
144 * @return @c 0 if no error. otherwise a negative error value
145 * @note If this function is not called, default values are used.
147 int setNPU_constraint(npudev_h dev, uint32_t modelid, npuConstraint constraint)
149 INIT_HOST_HANDLER (host_handler, dev);
151 return host_handler->setConstraint (modelid, constraint);
155 * @brief Execute inference. Wait (block) until the output is available.
156 * @param[in] dev The NPU device handle
157 * @param[in] modelid The model to be inferred.
158 * @param[in] input The input data to be inferred.
159 * @param[out] output The output result. The caller MUST allocate appropriately before calling this.
160 * @return @c 0 if no error. otherwise a negative error value
162 * @detail This is a syntactic sugar of runNPU_async().
163 * CAUTION: There is a memcpy for the output buffer.
165 int runNPU_sync(npudev_h dev, uint32_t modelid, const input_buffers *input,
166 output_buffers *output)
168 INIT_HOST_HANDLER (host_handler, dev);
170 return host_handler->runSync (modelid, input, output);
174 * @brief Invoke NPU inference. Unblocking call.
175 * @param[in] dev The NPU device handle
176 * @param[in] modelid The model to be inferred.
177 * @param[in] input The input data to be inferred.
178 * @param[in] cb The output buffer handler.
179 * @param[out] sequence The sequence number returned with runNPU_async.
180 * @param[in] data The data given as a parameter to the runNPU_async call.
181 * @param[in] mode Configures how this operation works.
182 * @return @c 0 if no error. otherwise a negative error value
184 int runNPU_async(npudev_h dev, uint32_t modelid, const input_buffers *input,
185 npuOutputNotify cb, uint64_t *sequence, void *data,
188 INIT_HOST_HANDLER (host_handler, dev);
/* note the argument reorder: the C API takes (cb, sequence, data, mode) while
 * HostHandler::runAsync takes (cb, data, mode, sequence) */
190 return host_handler->runAsync (modelid, input, cb, data, mode, sequence);
194 * @brief Let NPU accept input frames from its internal source continuously
195 * @param[in] dev The NPU device handle
196 * @param[in] modelid The model to be inferred.
197 * @param[in] opmode NPU has different opmode with auto-inputs. Choose one.
198 * @param[in] hw_dev The target device feeding input data
199 * @return @c 0 if no error. otherwise a negative error value
201 int runNPU_internalInput(npudev_h dev, uint32_t modelid, npu_input_opmode opmode,
204 INIT_HOST_HANDLER (host_handler, dev);
206 return host_handler->runInternal (modelid, opmode, hw_dev);
210 * @brief Stop the request with the given id
211 * @param[in] dev The NPU device handle
212 * @param[in] id The request id
213 * @return @c 0 if no error. otherwise a negative error value
215 int stopNPU_internalInput(npudev_h dev, int id)
217 INIT_HOST_HANDLER (host_handler, dev);
219 return host_handler->stopInternal (id);
223 * @brief Allocate a generic buffer with the requested buffer type.
224 * @param[in] dev The NPU device handle
225 * @param[in/out] Buffer the buffer pointer where memory is allocated.
226 * @return 0 if no error, otherwise a negative errno.
228 int allocNPU_genericBuffer (npudev_h dev, generic_buffer * buffer)
230 INIT_HOST_HANDLER (host_handler, dev);
232 return host_handler->allocGenericBuffer (buffer);
236 * @brief Free the generic buffer and remove the address mapping
237 * @param[in] dev The NPU device handle
238 * @param[in] buffer the model buffer
239 * @return 0 if no error, otherwise a negative errno.
241 int cleanNPU_genericBuffer (npudev_h dev, generic_buffer * buffer)
243 INIT_HOST_HANDLER (host_handler, dev);
245 return host_handler->deallocGenericBuffer (buffer);
249 * @brief Allocate generic buffers, which have multiple instances of generic_buffer
250 * @param[in] dev The NPU device handle
251 * @param[in/out] buffers generic buffers.
252 * @return 0 if no error, otherwise a negative errno.
253 * @note it reuses allocGenericBuffer().
255 int allocNPU_genericBuffers (npudev_h dev, generic_buffers * buffers)
257 INIT_HOST_HANDLER (host_handler, dev);
259 return host_handler->allocGenericBuffer (buffers);
263 * @brief Free generic buffers allocated by allocGenericBuffers().
264 * @param[in] dev The NPU device handle
265 * @param[in/out] buffers generic buffers.
266 * @note it reuses cleanGenericbuffer().
267 * @return 0 if no error, otherwise a negative errno.
269 int cleanNPU_genericBuffers (npudev_h dev, generic_buffers * buffers)
271 INIT_HOST_HANDLER (host_handler, dev);
273 return host_handler->deallocGenericBuffer (buffers);
/* The remaining entries are pure aliases kept for API symmetry: model/input
 * buffers are plain generic buffers underneath. */
277 * @brief alias of allocNPU_genericBuffer for model buffer
279 int allocNPU_modelBuffer (npudev_h dev, generic_buffer * model)
281 return allocNPU_genericBuffer (dev, model);
285 * @brief alias of cleanNPU_genericBuffer for model buffer
287 int cleanNPU_modelBuffer (npudev_h dev, generic_buffer * model)
289 return cleanNPU_genericBuffer (dev, model);
293 * @brief alias of allocNPU_genericBuffer for input buffer
295 int allocNPU_inputBuffer (npudev_h dev, generic_buffer * input)
297 return allocNPU_genericBuffer (dev, input);
301 * @brief alias of cleanNPU_genericBuffer for input buffer
303 int cleanNPU_inputBuffer (npudev_h dev, generic_buffer * input)
305 return cleanNPU_genericBuffer (dev, input);
309 * @brief alias of allocNPU_genericBuffers for input buffers
311 int allocNPU_inputBuffers (npudev_h dev, input_buffers * input)
313 return allocNPU_genericBuffers (dev, input);
317 * @brief alias of cleanNPU_genericBuffers for input buffers
319 int cleanNPU_inputBuffers (npudev_h dev, input_buffers * input)
321 return cleanNPU_genericBuffers (dev, input);
325 * @brief get the current memory status for the given device
326 * @param[in] dev The NPU device handle
327 * @param[out] alloc_total The size of allocated memory until now
328 * @param[out] free_total The size of freed memory until now
329 * @return @c 0 if no error. otherwise a negative error value
331 int getNPU_memoryStatus(npudev_h dev, size_t *alloc_total, size_t *free_total)
333 INIT_HOST_HANDLER (host_handler, dev);
335 return host_handler->getMemoryStatus (alloc_total, free_total);
339 * @brief Get the current device status to be used
340 * @param[in] dev The NPU device handle
341 * @param[out] status the device status
342 * @param[out] num_requests the number of running requests (or pending)
343 * @return 0 if no error, otherwise a negative errno.
345 int getNPU_deviceStatus(npudev_h dev, npu_status *status, uint32_t *num_requests)
347 INIT_HOST_HANDLER (host_handler, dev);
349 return host_handler->getDeviceStatus (status, num_requests);
353 * @brief Get metadata for NPU model
354 * @param[in] model The path of model binary file
355 * @param[in] need_extra whether you want to extract the extra data in metadata
356 * @return the metadata structure to be filled if no error, otherwise nullptr
358 * @note For most npu-engine users, the extra data is not useful because it will be
359 * used for second-party users (e.g., compiler, simulator).
360 * Also, the caller needs to free the metadata.
362 * @note the caller needs to free the metadata
364 npubin_meta * getNPUmodel_metadata (const char *model, bool need_extra)
/* Reads the fixed-size metadata header first, validates the magic code, then
 * optionally re-reads into a larger allocation including the extra section.
 * NOTE(review): the error paths presumably fclose(fp)/free(meta) on lines
 * elided from this listing — verify there are no leaks. */
373 fp = fopen (model, "rb");
375 logerr (TAG, "Failed to open the model binary: %d\n", -errno);
379 meta = (npubin_meta *) malloc (NPUBIN_META_SIZE);
381 logerr (TAG, "Failed to allocate metadata\n");
385 ret = fread (meta, 1, NPUBIN_META_SIZE, fp);
386 if (ret != NPUBIN_META_SIZE) {
387 logerr (TAG, "Failed to read the metadata\n");
391 if (!CHECK_NPUBIN (meta->magiccode)) {
392 logerr (TAG, "Invalid metadata provided\n");
396 if (need_extra && NPUBIN_META_EXTRA (meta->magiccode) > 0) {
397 npubin_meta *new_meta;
399 new_meta = (npubin_meta *) malloc (NPUBIN_META_TOTAL_SIZE(meta->magiccode));
401 logerr (TAG, "Failed to allocate extra metadata\n");
/* copy the already-read base header, then append the extra section from the file */
405 memcpy (new_meta, meta, NPUBIN_META_TOTAL_SIZE(meta->magiccode));
408 ret = fread (new_meta->reserved_extra, 1, NPUBIN_META_EXTRA_SIZE (new_meta->magiccode), fp);
409 if (ret != NPUBIN_META_EXTRA_SIZE (new_meta->magiccode)) {
410 logerr (TAG, "Invalid extra metadata provided\n");
430 /** implement methods of HostHandler class */
432 /** @brief host handler constructor */
433 HostHandler::HostHandler (Device *device)
435 /* ignored as we don't use double buffering anymore, but kept for backward-compatibility */
436 async_mode_ (NPUASYNC_WAIT)
440 /** @brief host handler destructor */
441 HostHandler::~HostHandler ()
446 * @brief register model from generic buffer
447 * @param[in] model_buf model buffer
448 * @param[out] modelid model id
449 * @return 0 if no error. otherwise a negative errno
452 HostHandler::registerModel (generic_buffer *model_buf, uint32_t *modelid)
454 if (model_buf == nullptr || modelid == nullptr) {
455 logerr (TAG, "Invalid arguments given\n");
459 Model *model = nullptr;
460 int status = device_->setModel (model_buf, &model);
462 logerr (TAG, "Failed to set model: %d\n", status);
466 assert (model != nullptr);
/* track the model by its device-assigned ID */
468 status = models_.insert (model->getID(), model);
470 logerr (TAG, "Failed to insert model id\n");
475 *modelid = model->getID();
480 * @brief remove the registered model
481 * @param[in] modelid model id
482 * @return 0 if no error. otherwise a negative errno
485 HostHandler::unregisterModel (uint32_t modelid)
487 Model *model = models_.find (modelid);
488 if (model == nullptr)
/* unset on the device first; keep the map entry if the device refuses */
491 int status = device_->unsetModel (model);
493 logerr (TAG, "Failed to unset model: %d\n", status);
497 return models_.remove (modelid);
501 * @brief remove all registered models
505 HostHandler::unregisterModels ()
512 * @brief Set the data layout for input/output tensors
513 * @param[in] modelid The ID of model whose layouts are set
514 * @param[in] in the layout/type info for input tensors
515 * @param[in] out the layout/type info for output tensors
516 * @return @c 0 if no error. otherwise a negative error value
517 * @note if this function is not called, default layout/type will be used.
520 HostHandler::setDataInfo (uint32_t modelid, tensors_data_info *in,
521 tensors_data_info *out)
523 Model *model = models_.find (modelid);
524 if (model == nullptr)
527 return model->setDataInfo (in, out);
531 * @brief Set the inference constraint for next NPU inferences
532 * @param[in] modelid The target model id
533 * @param[in] constraint inference constraint (e.g., timeout, priority)
534 * @return @c 0 if no error. otherwise a negative error value
535 * @note If this function is not called, default values are used.
538 HostHandler::setConstraint (uint32_t modelid, npuConstraint constraint)
540 Model *model = models_.find (modelid);
541 if (model == nullptr)
544 model->setConstraint (constraint);
550 * @brief find and return model instance
551 * @param[in] modelid model id
552 * @return model instance if found. otherwise nullptr
555 HostHandler::getModel (uint32_t modelid)
557 return models_.find (modelid);
560 /** @brief dummay callback for runSync. */
563 callbackSync (output_buffers *output) : output_(output), done_(false) {}
565 static void callback (output_buffers *output, uint64_t sequence, void *data) {
566 callbackSync *sync = static_cast<callbackSync *>(data);
567 sync->callback (output, sequence);
570 void callback (output_buffers *output, uint64_t sequence) {
571 if (output_ != nullptr) {
572 /** just copy internal variables of output buffers */
573 memcpy (output_, output, sizeof (output_buffers));
580 std::unique_lock<std::mutex> lock (m_);
581 cv_.wait (lock, [this]() { return done_; });
586 std::condition_variable cv_;
587 output_buffers *output_;
592 * @brief Execute inference. Wait (block) until the output is available.
593 * @param[in] modelid The model to be inferred.
594 * @param[in] input The input data to be inferred.
595 * @param[out] output The output result.
596 * @return @c 0 if no error. otherwise a negative error value
599 HostHandler::runSync (uint32_t modelid, const input_buffers *input,
600 output_buffers *output)
602 callbackSync sync (output);
603 int status = runAsync (modelid, input, callbackSync::callback,
604 static_cast <void*> (&sync), NPUASYNC_DROP_OLD, nullptr);
606 /** sync needs to wait callback */
613 * @brief Invoke NPU inference. Unblocking call.
614 * @param[in] modelid The model to be inferred.
615 * @param[in] input The input data to be inferred.
616 * @param[in] cb The output buffer handler.
617 * @param[in] cb_data The data given as a parameter to the runNPU_async call.
618 * @param[in] mode Configures how this operation works.
619 * @param[out] sequence The sequence number returned with runNPU_async.
620 * @return @c 0 if no error. otherwise a negative error value
623 HostHandler::runAsync (uint32_t modelid, const input_buffers *input,
624 npuOutputNotify cb, void *cb_data, npu_async_mode mode, uint64_t *sequence)
626 Model *model = nullptr;
/* ASR-style devices run without a model; only look it up when required */
628 if (device_->needModel()) {
629 model = getModel (modelid);
630 if (model == nullptr)
634 /* check the given model before running */
635 if (model != nullptr && !model->finalize ()) {
636 logerr (TAG, "Failed to finalize the model. Please see the log messages\n");
640 device_->setAsyncMode (mode);
641 return device_->run (NPUINPUT_HOST, model, input, cb, cb_data, sequence);
645 * @brief Let NPU accept input frames from its internal source continuously
646 * @param[in] modelid The model to be inferred.
647 * @param[in] opmode NPU has different opmode with auto-inputs. Choose one.
648 * @param[in] hw_dev The target device feeding input data
649 * @return @c 0 if no error. otherwise a negative error value
652 HostHandler::runInternal (uint32_t modelid, npu_input_opmode opmode,
655 Model *model = nullptr;
657 if (device_->needModel()) {
658 model = getModel (modelid);
659 if (model == nullptr)
663 /* check the given model before running */
664 if (model != nullptr && !model->finalize ()) {
665 logerr (TAG, "Failed to finalize the model. Please see the log messages\n");
669 return device_->runInternal (opmode, model, hw_dev);
673 * @brief Stop the request with the given id
674 * @param[in] dev The NPU device handle
675 * @param[in] id The request id
676 * @return @c 0 if no error. otherwise a negative error value
679 HostHandler::stopInternal (int id)
682 logerr (TAG, "Unable to stop this request with id (%d)\n", id);
/* API is always set once the device is initialized */
686 const DriverAPI * api = device_->getDriverAPI ();
687 assert (api != nullptr);
689 return api->stop_target (id);
693 * @brief get number of available devices
694 * @param[in] type device type
695 * @return number of devices
698 HostHandler::getNumDevices (dev_type type)
700 return DriverAPI::getNumDevices (type);
704 * @brief get device instance
705 * @param[out] dev device instance
706 * @param[in] type device type
707 * @param[in] id device id
708 * @return 0 if no error. otherwise a negative errno
711 HostHandler::getDevice (npudev_h *dev, dev_type type, uint32_t id)
713 int num_devices = getNumDevices (type);
715 /** check the validity of device id */
716 if (!(num_devices > 0 && id < static_cast<uint32_t>(num_devices))) {
717 logerr (TAG, "Invalid arguments provided\n");
721 Device *device = Device::createInstance (type, id);
722 if (device == nullptr) {
723 logerr (TAG, "Failed to create a device with the given type\n");
728 /** This is just for backward-compatibility; we don't guarantee its correctness */
735 * @brief allocate generic buffer (just for users)
736 * @param[out] buffer buffer instance
737 * @return 0 if no error. otherwise a negative errno
740 HostHandler::allocGenericBuffer (generic_buffer *buffer)
745 if (buffer->size == 0) {
746 logerr (TAG, "Invalid size\n");
/* dmabuf sizes are 32-bit on this stack; reject anything larger */
750 if (buffer->size > UINT32_MAX) {
751 logerr (TAG, "Don't support such a large size");
755 switch (buffer->type) {
758 if (buffer->filepath == nullptr)
763 /* now, npu-engine always provides dmabuf-based allocation */
764 void *addr = nullptr;
765 int dmabuf = device_->allocMemory (buffer->size, &addr);
769 buffer->dmabuf = dmabuf;
781 * @brief deallocate generic buffer (just for users)
782 * @param[in] buffer buffer instance
783 * @return 0 if no error. otherwise a negative errno
786 HostHandler::deallocGenericBuffer (generic_buffer *buffer)
791 switch (buffer->type) {
793 /** always true because there is nothing to do for this type */
796 return device_->deallocMemory (buffer->dmabuf, buffer->size, buffer->addr);
805 * @brief allocate multiple generic buffers (just for users)
806 * @param[out] buffers multi-buffer instance
807 * @return 0 if no error. otherwise a negative errno
810 HostHandler::allocGenericBuffer (generic_buffers *buffers)
815 if (buffers == NULL || buffers->num_buffers < 1)
818 for (idx = 0; idx < buffers->num_buffers; idx++) {
819 status = allocGenericBuffer (&buffers->bufs[idx]);
/* on failure, roll back the buffers allocated so far */
828 deallocGenericBuffer (&buffers->bufs[--idx]);
835 * @brief deallocate multiple generic buffers (just for users)
836 * @param[in] buffers multi-buffer instance
837 * @return 0 if no error. otherwise a negative errno
840 HostHandler::deallocGenericBuffer (generic_buffers *buffers)
842 if (buffers == NULL || buffers->num_buffers < 1)
845 for (uint32_t idx = 0; idx < buffers->num_buffers; idx++)
846 deallocGenericBuffer (&buffers->bufs[idx]);
847 buffers->num_buffers = 0;
853 * @brief get the current memory status
854 * @param[out] alloc_total The size of allocated memory until now
855 * @param[out] free_total The size of freed memory until now
856 * @return 0 if no error. otherwise a negative error value
859 HostHandler::getMemoryStatus (size_t *alloc_total, size_t *free_total)
861 /** API is always set in initialize () */
862 const DriverAPI * api = device_->getDriverAPI ();
863 assert (api != nullptr);
865 return api->getMemoryStatus (alloc_total, free_total);
869 * @brief Get the current device status to be used
870 * @param[out] status the device status
871 * @param[out] num_requests the number of running requests (or pending)
872 * @return 0 if no error, otherwise a negative errno.
875 HostHandler::getDeviceStatus (npu_status *status, uint32_t *num_requests)
877 /** API is always set in initialize () */
878 const DriverAPI * api = device_->getDriverAPI ();
883 device_state_t state = api->isReady ();
884 if (state == device_state_t::STATE_READY) {
885 *num_requests = api->numRequests ();
886 if (*num_requests > 0)
898 /** implement methods of Device class */
900 /** @brief constructor of device */
/* NOTE(review): the need_model parameter is ignored — need_model_ is hard-wired
 * to true here. TrinityAsr compensates via setNeedModel(false) in
 * createInstance(), but the dead parameter is misleading; confirm and fix. */
901 Device::Device (dev_type type, int id, bool need_model)
902 : comm_ (CommPlugin::getCommPlugin()), type_ (type), id_ (id), need_model_ (true),
903 mode_ (NPUASYNC_WAIT), initialized_ (false), atomic_flag_ (ATOMIC_FLAG_INIT)
908 * @brief create device instance depending on device type and id
909 * @param[in] type device type
910 * @param[in] id device id
911 * @return device instance
914 Device::createInstance (dev_type type, int id)
916 Device *device = nullptr;
918 switch (type & DEVICETYPE_MASK) {
919 case DEVICETYPE_TRIV:
920 device = new TrinityVision (id);
922 case DEVICETYPE_TRIV2:
923 device = new TrinityVision2 (id);
925 case DEVICETYPE_TRIA:
926 device = new TrinityAsr (id);
/* ASR devices carry embedded models; no host-side model registration needed */
927 device->setNeedModel (false);
933 if (device != nullptr && device->init () != 0) {
942 * @brief device initialization
943 * @return 0 if no error, otherwise a negative errno
944 * @note Init failures come from createDriverAPI() only.
949 /** should be initialized only once */
950 if (!atomic_flag_.test_and_set()) {
951 /** create the corresponding driver API */
952 api_ = DriverAPI::createDriverAPI (type_, id_);
953 if (api_.get() == nullptr) {
/* clear the flag so a later init() attempt can retry */
954 atomic_flag_.clear();
955 logerr (TAG, "Failed to create driver API\n");
959 handler_.reset (new HostHandler (this));
960 scheduler_.reset (new Scheduler (api_.get()));
961 mem_ = MemAllocator::createInstance (api_.get());
963 initialized_ = true; /** c++11 does not provide test() of atomic flag */
970 * @brief stop all requests from this device
971 * @param[in] force_stop indicates whether the scheduler waits until previous requests are handled
972 * @return 0 if no error, otherwise a negative errno
975 Device::stop (bool force_stop)
977 if (!initialized ()) {
978 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
/* stopping is modeled as a special request submitted to the scheduler */
982 Request *req = new Request (NPUINPUT_STOP);
983 req->setForceStop (force_stop);
984 return scheduler_->submitRequest (req);
988 * @brief allocate generic memory buffer
989 * @param[in] size the size to allocate
990 * @param[out] addr the mapped address
991 * @return dmabuf fd if no error, otherwise a negative errno
994 Device::allocMemory (size_t size, void **addr)
996 if (!initialized ()) {
997 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
1001 if (size == 0 || addr == nullptr) {
1002 logerr (TAG, "Invalid arguments\n");
1006 return mem_->allocMemory (size, addr);
1010 * @brief deallocate generic memory buffer
1011 * @param[in] dmabuf_fd dmabuf file descriptor
1012 * @param[in] size buffer size
1013 * @param[in] addr mapped addr
1014 * @return 0 if no error, otherwise a negative errno
1017 Device::deallocMemory (int dmabuf_fd, size_t size, void * addr)
1019 if (!initialized ()) {
1020 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
1024 if (dmabuf_fd < 0 || size == 0 || addr == nullptr) {
1025 logerr (TAG, "Invalid arguments\n");
1029 return mem_->deallocMemory (dmabuf_fd, size, addr);
1033 * @brief extract the buffer instance from input generic buffers
1034 * @param[in] meta the model metadata
1035 * @param[in] input the input generic buffers
1036 * @return the buffer instance
1039 TrinityVision::prepareInputBuffers (const Metadata *meta, const input_buffers *input)
1041 if (meta == nullptr || input == nullptr ||
1042 meta->getInputNum() != input->num_buffers) {
1043 logerr (TAG, "Invalid metadata info provided\n");
/* the first buffer's type decides the path: external dmabuf vs. device-owned memory */
1048 const generic_buffer *first = &input->bufs[0];
1049 if (first->type == BUFFER_DMABUF) {
1050 buffer = mem_->allocBuffer (new HWmemExternal);
1051 if (buffer == nullptr)
1054 buffer->setDmabuf (first->dmabuf);
1055 buffer->setOffset (first->offset);
1056 buffer->setSize (meta->getBufferSize());
1058 buffer = mem_->allocBuffer (new HWmemDevice);
1059 if (buffer == nullptr)
1062 int status = buffer->alloc (meta->getBufferSize ());
1064 logerr (TAG, "Failed to allocate buffer: %d\n", status);
1070 int status = buffer->createTensors (meta);
1072 logerr (TAG, "Failed to create tensors: %d\n", status);
1081 * @brief implementation of TRIV's setModel ()
1082 * @param[in] model_buf the model generic buffer
1083 * @param[out] model the model instance
1084 * @return 0 if no error, otherwise a negative errno
1087 TrinityVision::setModel (const generic_buffer *model_buf, Model ** model_ptr)
1089 if (!initialized ()) {
1090 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
1094 if (model_buf == nullptr || model_ptr == nullptr)
1097 Model *model = nullptr;
1098 HWmem * hwmem_prog = nullptr;
1099 HWmem * hwmem_weight = nullptr;
1102 /** In TRIV1, model data (including program/weight) should be contiguous */
1104 switch (model_buf->type) {
1107 model = mem_->allocModel (new HWmemDevice);
1108 if (model == nullptr) {
1109 logerr (TAG, "Failed to allocate model\n");
1113 status = model->alloc (model_buf->size);
1115 logerr (TAG, "Failed to allocate model: %d\n", status);
1119 /** extract the whole model data */
1120 status = comm_.extractGenericBuffer (model_buf, model->getData(), nullptr);
1122 logerr (TAG, "Failed to extract generic buffer: %d\n", status);
/* parse the metadata header embedded at the start of the model data */
1130 status = model->setMetadata (model->getData());
1134 /** allocate program (optional; NOP) */
1135 if (model->getMetadata()->getProgramSize() > 0) {
1136 hwmem_prog = new HWmem (new HWmemChunk);
1137 model->setProgramData (hwmem_prog);
/* program chunk is carved out of the model allocation at the metadata offset */
1139 hwmem_prog->setParent (model);
1140 hwmem_prog->setOffset (model->getMetadata()->getMetaSize());
1141 status = hwmem_prog->alloc (model->getMetadata()->getProgramSize());
1143 logerr (TAG, "Failed to allocate program\n");
1148 /** allocate weight (optional) */
1149 if (model->getMetadata()->getWeightSize() > 0) {
1150 hwmem_weight = new HWmem (new HWmemChunk);
1151 model->setWeightData (hwmem_weight);
1153 hwmem_weight->setParent (model);
1154 hwmem_weight->setOffset (model->getMetadata()->getMetaSize() +
1155 model->getMetadata()->getProgramSize());
1156 status = hwmem_weight->alloc (model->getMetadata()->getWeightSize());
/* NOTE(review): copy-paste — this failure is about the weight allocation, yet
 * the message says "program"; the literal should read "Failed to allocate weight". */
1158 logerr (TAG, "Failed to allocate program\n");
1163 if (hwmem_prog != nullptr) {
1164 /** register this model to the driver */
1165 model_config_t config;
1166 config.dbuf_fd = hwmem_prog->getDmabuf ();
1167 config.program_size = hwmem_prog->getSize ();
1168 config.program_offset_addr = hwmem_prog->getOffset ();
1169 if (hwmem_weight != nullptr)
1170 config.weight_offset_addr = hwmem_weight->getOffset ();
1172 status = api_->registerModel (&config);
1176 model->setInternalID(config.id);
1188 * @brief implementation of TRIV's unsetModel ()
1189 * @param[in] model the model instance
1190 * @return 0 if no error, otherwise a negative errno
1193 TrinityVision::unsetModel (Model * model)
1195 if (!initialized ()) {
1196 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
1200 if (model == nullptr) {
1201 logerr (TAG, "Invalid model instance\n");
/* only models that registered a program with the driver need deregistration */
1205 if (model->getMetadata()->getProgramSize() > 0)
1206 return api_->deregisterModel (model->getInternalID ());
1212 * @brief implementation of TRIV's run()
1213 * @param[in] opmode input opmode
1214 * @param[in] model the model instance
1215 * @param[in] input generic buffers of input data
1216 * @param[in] cb the output callback
1217 * @param[in] cb_data the output callback data
1218 * @param[out] sequence The sequence number returned with runNPU_async.
1221 TrinityVision::run (npu_input_opmode opmode, const Model *model,
1222 const input_buffers *input, npuOutputNotify cb, void *cb_data,
1225 if (!initialized ()) {
1226 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
1230 if (opmode != NPUINPUT_HOST) {
1231 logerr (TAG, "TRIV supports only host inputservice\n");
1235 if (model == nullptr || input == nullptr) {
1236 logerr (TAG, "TRIV requires both model and input buffers\n");
1240 Buffer *buffer = prepareInputBuffers (model->getMetadata(), input);
1241 if (buffer == nullptr) {
1242 logerr (TAG, "Failed to extract buffer instance\n");
/* device-owned buffers need the user payload copied in (with optional
 * per-tensor data manipulation); external dmabuf buffers are used as-is */
1246 if (!buffer->isExternal ()) {
1247 for (uint32_t idx = 0; idx < input->num_buffers; idx++) {
1248 auto func = std::bind (TrinityVision::manipulateData, model, idx, true,
1249 std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
1250 int status = comm_.extractGenericBuffer (&input->bufs[idx],
1251 buffer->getInputTensor(idx)->getData(), func);
1253 logerr (TAG, "Failed to feed input buffer: %d\n", status);
1259 /** this device uses CMA buffer */
1261 Request *req = new Request (opmode);
1262 req->setModel (model);
1263 req->setBuffer (buffer);
/* completion path goes through TrinityVision::callback, which forwards to cb */
1266 req->setCallback (std::bind (&TrinityVision::callback, this, req, cb, cb_data));
1268 if (sequence != nullptr)
1269 *sequence = req->getID();
1271 return scheduler_->submitRequest (req);
1275 * @brief callback of TRIV2 request
1276 * @param[in] req the request instance
1277 * @param[in] cb callback for completion
1278 * @param[in] cb_data callback data
1279 * @note The callback invoke does not guarantee the request was successful
1280 * @todo Check the request failures
1283 TrinityVision::callback (Request *req, npuOutputNotify cb, void *cb_data)
1285 const Model *model = req->getModel ();
1286 Buffer *buffer = req->getBuffer ();
1287 output_buffers output = {
1288 .num_buffers = buffer->getOutputNum ()
1291 for (uint32_t idx = 0; idx < output.num_buffers; idx++) {
1292 uint32_t output_tensor_size = model->getOutputTensorSize (idx);
/* external buffers expose the device data directly; otherwise copy out into
 * a fresh heap buffer the user must free */
1294 if (buffer->isExternal ()) {
1295 output.bufs[idx].type = BUFFER_DMABUF;
1296 output.bufs[idx].size = output_tensor_size;
1297 output.bufs[idx].addr = buffer->getOutputTensor(idx)->getData();
1299 output.bufs[idx].type = BUFFER_MAPPED;
1300 output.bufs[idx].size = output_tensor_size;
1301 /** user needs to free this */
1302 output.bufs[idx].addr = malloc (output_tensor_size);
1304 auto func = std::bind (TrinityVision::manipulateData, model, idx, false,
1305 std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
1306 int status = comm_.insertGenericBuffer (buffer->getOutputTensor(idx)->getData(),
1307 &output.bufs[idx], func);
1309 logerr (TAG, "Failed to return output buffer: %d\n", status);
1314 cb (&output, req->getID(), cb_data);
1320 * @brief extract the segment table instance from input generic buffers
1321 * @param[in] model the model instance
1322 * @param[in] input the input generic buffers
1323 * @param[in] output the output generic buffers
1324 * @return the segment table instance
/**
 * Allocate a device-memory segment table and populate its segments from the
 * model plus (optional) input/output generic buffers. input and/or output may
 * be null (e.g. for internal recurring runs — see runInternal()). Returns the
 * table on success; error paths (not fully visible here) log and bail out.
 */
1327 TrinityVision2::prepareSegmentTable (const Model *model, const input_buffers *input,
1328 const output_buffers *output)
1330 if (model == nullptr) {
1331 logerr (TAG, "Invalid arguments provided\n");
1335 const Metadata *meta = model->getMetadata ();
/** when input buffers are supplied, their count must match the metadata */
1336 if (meta == nullptr || (input != nullptr &&
1337 meta->getInputNum() != input->num_buffers)) {
1338 logerr (TAG, "Invalid metadata info provided\n");
/** segment table itself lives in device memory (HWmemDevice backing) */
1342 SegmentTable * segt = mem_->allocSegmentTable (new HWmemDevice);
1343 int status = segt->alloc ();
1345 logerr (TAG, "Failed to allocate segment table: %d\n", status);
1349 status = segt->createSegments (model, input, output);
1351 logerr (TAG, "Failed to create segments: %d\n", status);
1363 * @brief implementation of TRIV2's setModel ()
1364 * @param[in] model_buf the model generic buffer
1365 * @param[out] model the model instance
1366 * @return 0 if no error, otherwise a negative errno
/**
 * Parse a TRIV2 model from a generic buffer: allocate device memory for the
 * metadata, then (optionally) for the program and weight sections, extract
 * each section at its offset within the model image, and register the program
 * with the NPU driver. On success *model_ptr receives the new Model.
 * NOTE(review): intermediate lines (status checks, case labels, cleanup) are
 * not visible in this extract.
 */
1369 TrinityVision2::setModel (const generic_buffer *model_buf, Model ** model_ptr)
1371 if (!initialized ()) {
1372 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
1376 if (model_buf == nullptr || model_ptr == nullptr)
/** handling depends on how the model image is delivered (file/mapped/dmabuf) */
1382 switch (model_buf->type) {
1385 model = mem_->allocModel (new HWmemDevice);
1386 if (model == nullptr) {
1387 logerr (TAG, "Failed to allocate model\n");
/** first extract only the npubinfmt metadata header to learn section sizes */
1391 status = model->alloc (NPUBIN_META_SIZE);
1393 logerr (TAG, "Failed to allocate model: %d\n", status);
1397 status = comm_.extractGenericBuffer (model_buf, model->getData(), nullptr,
1398 0, NPUBIN_META_SIZE);
1400 logerr (TAG, "Failed to extract generic buffer: %d\n", status);
1408 status = model->setMetadata (model->getData());
1412 /** allocate program (optional; NOP) */
1413 if (model->getMetadata()->getProgramSize() > 0) {
1414 HWmem * hwmem_prog = new HWmem (new HWmemDevice);
1415 hwmem_prog->setDriverAPI (api_.get());
/** model takes ownership of the program hwmem */
1417 model->setProgramData (hwmem_prog);
1419 status = hwmem_prog->alloc (model->getMetadata()->getProgramSize());
1421 logerr (TAG, "Failed to allocate program\n");
/** program section follows the metadata within the model image */
1425 status = comm_.extractGenericBuffer (model_buf, hwmem_prog->getData(), nullptr,
1426 model->getMetadata()->getMetaSize(),
1427 model->getMetadata()->getProgramSize());
1429 logerr (TAG, "Failed to extract generic buffer: %d\n", status);
1433 /** register this model to the driver */
1434 model_config_t config;
1435 config.dbuf_fd = hwmem_prog->getDmabuf ();
1436 config.program_size = hwmem_prog->getSize ();
1437 config.program_offset_addr = 0;
1439 status = api_->registerModel (&config);
/** driver-assigned id is needed later by unsetModel()/deregisterModel() */
1443 model->setInternalID(config.id);
1446 /** allocate weight (optional) */
1447 if (model->getMetadata()->getWeightSize() > 0) {
1448 HWmem * hwmem_weight = new HWmem (new HWmemDevice);
1449 hwmem_weight->setDriverAPI (api_.get());
1451 model->setWeightData (hwmem_weight);
1453 status = hwmem_weight->alloc (model->getMetadata()->getWeightSize());
/** NOTE(review): message says "program" but this is the weight allocation —
 *  looks like a copy/paste slip; should read "Failed to allocate weight" */
1455 logerr (TAG, "Failed to allocate program\n");
/** weight section follows metadata + program within the model image */
1459 status = comm_.extractGenericBuffer (model_buf, hwmem_weight->getData(), nullptr,
1460 model->getMetadata()->getMetaSize() + model->getMetadata()->getProgramSize(),
1461 model->getMetadata()->getWeightSize());
1463 logerr (TAG, "Failed to extract generic buffer: %d\n", status);
1477 * @brief implementation of TRIV2's unsetModel ()
1478 * @param[in] model the model instance
1479 * @return 0 if no error, otherwise a negative errno
/**
 * Release a model previously installed via setModel(): if the model carried a
 * program section (i.e. it was registered with the driver), deregister it
 * using the driver-assigned internal id. Returns 0 or a negative errno.
 */
1482 TrinityVision2::unsetModel (Model * model)
1484 if (!initialized ()) {
1485 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
1489 if (model == nullptr) {
1490 logerr (TAG, "Invalid model instance\n");
/** only program-carrying models were registered in setModel() */
1494 if (model->getMetadata()->getProgramSize() > 0)
1495 return api_->deregisterModel (model->getInternalID ());
1500 /** @brief implementation of TRIV2's run() */
/**
 * Submit an inference request on TRIV2 with host-provided inputs: build a
 * segment table for the model/inputs, copy each input into its segment (with
 * per-tensor data manipulation unless the segment is external), then enqueue
 * a Request on the scheduler. *sequence (if non-null) receives the request id.
 * NOTE(review): some lines (error returns, closing braces, extract args) are
 * not visible in this extract.
 */
1502 TrinityVision2::run (npu_input_opmode opmode, const Model *model,
1503 const input_buffers *input, npuOutputNotify cb, void *cb_data,
1506 if (!initialized ()) {
1507 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
/** this entry point only serves host-fed inputs (cf. runInternal) */
1511 if (opmode != NPUINPUT_HOST)
1514 if (input == nullptr || input->num_buffers == 0)
1517 /** this device uses segment table */
1518 SegmentTable * segt = prepareSegmentTable (model, input);
1519 if (segt == nullptr) {
1520 logerr (TAG, "Failed to create segment table instance\n");
1524 /** extract input data */
1525 for (uint32_t idx = 0; idx < input->num_buffers; idx++) {
1526 size_t max_seg_size = segt->getInputSegment(idx)->getSize();
1527 uint32_t seg_offset = segt->getInputSegmentOffset(idx);
/** reject inputs that would overflow their target segment */
1529 if (input->bufs[idx].size + seg_offset > max_seg_size) {
1530 logerr (TAG, "Too large input data provided: max segment size (%zu)\n",
/** external segments already hold the data; only internal ones need a copy */
1535 if (!segt->getInputSegment(idx)->isExternal ()) {
/** func applies TRIV2 input-side manipulation (is_input == true) on copy-in */
1536 auto func = std::bind (TrinityVision2::manipulateData, model, idx, true,
1537 std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
1538 int status = comm_.extractGenericBuffer (
1540 segt->getInputSegment(idx)->getData() + seg_offset,
1543 logerr (TAG, "Failed to feed input segment: %d\n", status);
1549 Request *req = new Request (opmode);
1550 req->setModel (model);
1551 req->setSegmentTable (segt);
1552 req->setCallback (std::bind (&TrinityVision2::callback, this, req, cb, cb_data));
1555 *sequence = req->getID();
1557 return scheduler_->submitRequest (req);
1560 /** @brief implementation of TRIV2's runInternal() */
/**
 * Submit a hardware-recurring request on TRIV2: inputs/outputs are produced
 * by a HW device rather than the host, so the segment table is built without
 * input/output buffers and the request records the source HW device.
 */
1562 TrinityVision2::runInternal (npu_input_opmode opmode, const Model *model,
1565 if (!initialized ()) {
1566 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
/** this entry point only serves HW-recurring mode (cf. run() for host input) */
1570 if (opmode != NPUINPUT_HW_RECURRING)
1573 /** this device uses segment table */
1574 SegmentTable * segt = prepareSegmentTable (model, nullptr, nullptr);
1575 if (segt == nullptr) {
1576 logerr (TAG, "Failed to create segment table instance\n");
1580 Request *req = new Request (opmode);
1581 req->setModel (model);
1582 req->setSegmentTable (segt);
1583 req->setHwDevice (hw_dev);
/** no completion callback is set for internal (recurring) requests */
1585 return scheduler_->submitRequest (req);
1588 /** @brief callback of TRIV2 request */
/**
 * Completion handler for a finished TRIV2 request: copies each output segment
 * (at its recorded offset) into freshly allocated host memory, applying
 * output-side data manipulation, and delivers the result to the user callback.
 */
1590 TrinityVision2::callback (Request *req, npuOutputNotify cb, void *cb_data)
1592 const Model *model = req->getModel ();
1593 SegmentTable *segt = req->getSegmentTable ();
1594 output_buffers output = {
1595 .num_buffers = segt->getNumOutputSegments ()
1598 for (uint32_t idx = 0; idx < output.num_buffers; idx++) {
1599 uint32_t output_tensor_size = model->getOutputTensorSize (idx);
/** unlike TRIV, outputs are always copied out (zero-initialized heap memory) */
1601 output.bufs[idx].type = BUFFER_MAPPED;
1602 output.bufs[idx].size = output_tensor_size;
1603 /** user needs to free this */
1604 output.bufs[idx].addr = calloc (1, output_tensor_size);
/** func applies TRIV2 output-side manipulation (is_input == false) on copy-out */
1606 auto func = std::bind (TrinityVision2::manipulateData, model, idx, false,
1607 std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
1608 int status = comm_.insertGenericBuffer (
/** source is the output segment's data plus its per-tensor offset */
1609 segt->getOutputSegment(idx)->getData() + segt->getOutputSegmentOffset(idx),
1610 &output.bufs[idx], func);
1613 logerr (TAG, "Failed to return output buffer: %d\n", status);
1617 cb (&output, req->getID(), cb_data);
1622 /** @brief implementation of TRIA's run(): WIP */
/**
 * Work-in-progress run() for the ASR device. ASR takes no model and exactly
 * one input tensor: wrap the single generic buffer in a Buffer (external
 * reference for dmabuf input, device allocation + copy otherwise) and submit
 * a Request. *sequence (if non-null) receives the request id.
 * NOTE(review): several lines (error returns, else keyword, braces) are not
 * visible in this extract.
 */
1624 TrinityAsr::run (npu_input_opmode opmode, const Model *model,
1625 const input_buffers *input, npuOutputNotify cb, void *cb_data,
1628 if (!initialized ()) {
1629 logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
1633 if (opmode != NPUINPUT_HOST)
/** exactly one input tensor is accepted (see note below) */
1636 if (input == nullptr || input->num_buffers != 1)
1641 /** ASR does not require model and support only a single tensor */
1642 const generic_buffer *first_buf = &input->bufs[0];
/** dmabuf input: reference the caller's buffer instead of copying */
1643 if (first_buf->type == BUFFER_DMABUF) {
1644 buffer = mem_->allocBuffer (new HWmemExternal);
1645 if (buffer == nullptr)
1648 buffer->setDmabuf (first_buf->dmabuf);
1649 buffer->setOffset (first_buf->offset);
1650 buffer->setSize (first_buf->size);
/** otherwise allocate device memory sized to the input */
1652 buffer = mem_->allocBuffer (new HWmemDevice);
1653 if (buffer == nullptr)
1656 status = buffer->alloc (first_buf->size);
1663 status = buffer->createTensors ();
1665 logerr (TAG, "Failed to create tensors: %d\n", status);
/** only non-external buffers need the input data copied in (no manipulation) */
1670 if (!buffer->isExternal ()) {
1671 status = comm_.extractGenericBuffer (first_buf,
1672 buffer->getInputTensor(0)->getData(), nullptr);
1677 Request *req = new Request (opmode);
1678 req->setBuffer (buffer);
1679 req->setCallback (std::bind (&TrinityAsr::callback, this, req, cb, cb_data));
1682 *sequence = req->getID();
1684 return scheduler_->submitRequest (req);
1687 /** @brief callback of TRIA request: WIP */
/**
 * Work-in-progress completion handler for an ASR request: currently delivers
 * an (effectively empty) output_buffers struct to the user callback.
 */
1689 TrinityAsr::callback (Request *req, npuOutputNotify cb, void *cb_data)
1691 Buffer *buffer = req->getBuffer ();
1692 output_buffers output = {
1696 /** TODO: finalize this impl. when the ASR's working scenario is determined */
1697 cb (&output, req->getID(), cb_data);
1702 /** Implement data manipulation (each device may have different impl.) */
/**
 * Copy num_elems elements between src and dst, converting on the fly.
 * Relies on locals of the enclosing function (memcpy_with_quant): quant
 * selection flag (on a line not visible in this extract), idx, val, _scale,
 * _zero_point, data_size.
 *  - quantize path  : read 'type' elements, apply val/_scale + _zero_point,
 *    clamp, and narrow to uint8_t.
 *  - dequantize path: read uint8_t bytes, subtract _zero_point (rescale line
 *    not visible here), widen to 'type', advancing src by 1 and dst by
 *    data_size per element.
 * NOTE(review): 'val = (val > 255.0) ? 255.0 : 0.0;' forces EVERY value
 * <= 255 to 0.0 rather than saturating — a clamp to [0, 255] would be
 * 'val = (val > 255.0) ? 255.0 : ((val < 0.0) ? 0.0 : val);'. Confirm intent;
 * as written the quantize path discards all in-range data.
 */
1706 #define do_quantized_memcpy(type) do {\
1709 while (idx < num_elems) {\
1710 val = ((type *) src)[idx];\
1711 val = val / _scale;\
1712 val += _zero_point;\
1713 val = (val > 255.0) ? 255.0 : 0.0;\
1714 ((uint8_t *) dst)[idx++] = (uint8_t) val;\
1717 while (idx < num_elems) {\
1718 val = *(uint8_t *) src;\
1719 val -= _zero_point;\
1721 ((type *) dst)[idx++] = (type) val;\
1722 dst = (void*)(((uint8_t *) dst) + data_size);\
1723 src = (void*)(((uint8_t *) src) + 1);\
1729 * @brief memcpy during quantization
/**
 * Copy num_elems elements between dst and src with (de)quantization.
 * @param quant true: quantize 'type' -> uint8; false: dequantize uint8 -> 'type'
 * @param type element data type of the non-uint8 side
 * @param scale quantization scale factor
 * @param zero_point quantization zero point
 * Dispatches to the do_quantized_memcpy macro per data type; the switch
 * statement line itself and the break statements are not visible in this
 * extract.
 */
1731 static void memcpy_with_quant (bool quant, data_type type, float scale, uint32_t zero_point,
1732 void *dst, const void *src, uint32_t num_elems)
/** widen to double so the per-element arithmetic is done in double precision */
1734 double _scale = (double) scale;
1735 double _zero_point = (double) zero_point;
1737 uint32_t data_size = get_data_size (type);
1741 case DATA_TYPE_INT8:
1742 do_quantized_memcpy (int8_t);
1744 case DATA_TYPE_UINT8:
1745 do_quantized_memcpy (uint8_t);
1747 case DATA_TYPE_INT16:
1748 do_quantized_memcpy (int16_t);
1750 case DATA_TYPE_UINT16:
1751 do_quantized_memcpy (uint16_t);
1753 case DATA_TYPE_INT32:
1754 do_quantized_memcpy (int32_t);
1756 case DATA_TYPE_UINT32:
1757 do_quantized_memcpy (uint32_t);
1759 case DATA_TYPE_INT64:
1760 do_quantized_memcpy (int64_t);
1762 case DATA_TYPE_UINT64:
1763 do_quantized_memcpy (uint64_t);
1765 case DATA_TYPE_FLOAT32:
1766 do_quantized_memcpy (float);
1768 case DATA_TYPE_FLOAT64:
1769 do_quantized_memcpy (double);
1772 logerr (TAG, "Unsupported datatype %d\n", type);
1777 * @brief perform data manipulation
1778 * @param[in] model model instance
1779 * @param[in] idx tensor index
1780 * @param[in] is_input indicate it's input manipulation
1781 * @param[out] dst destination buffer
1782 * @param[in] src source buffer (feature map)
1783 * @param[in] size size to be copied
1784 * @return size of memory copy if no error, otherwise zero
1786 * @note the input data format should be NHWC
1787 * @detail rules for the memory address of activations in NPU HW.
1788 * (https://code.sec.samsung.net/confluence/pages/viewpage.action?pageId=146491864)
1790 * 1) Special case (depth == 3)
1791 * - addr(x,y,z) = addr(0,0,0) + (z) + 3 * (x + width * y)
1794 * - addr(x,y,z) = addr(0,0,0) + (z % MPA_L) + MPA_L * (x + width * (y + height * (z / MPA_L)))
1796 * Thus, if depth is not a multiple of MPA_L (i.e., 64), zero padding is required
/**
 * Convert tensor data between the app's NHWC layout and the TRIV NPU HW
 * layout (and (de)quantize when the data type / quant params require it).
 * Direction is chosen by is_input: true = app -> NPU, false = NPU -> app.
 * See the doxygen block above this function for the HW address rules.
 * NOTE(review): intermediate lines (returns, if/else keywords, braces, the
 * plain-memcpy call arguments around line 1899) are not visible here.
 */
1799 TrinityVision::manipulateData (const Model *model, uint32_t idx, bool is_input,
1800 void *dst, void *src, size_t size)
1802 const Metadata *meta = model->getMetadata();
1803 const tensor_data_info* info;
1804 const uint32_t *dims;
1805 uint32_t zero_point;
1808 /** extract required information from the metadata */
/** pick per-tensor info/dims/quant params for the input or output side */
1810 if (idx >= meta->getInputNum()) {
1811 logerr (TAG, "Wrong information for input tensors in metadata\n");
1815 info = model->getInputDataInfo (idx);
1816 dims = meta->getInputDims (idx);
1817 zero_point = meta->getInputQuantZero (idx);
1818 scale = meta->getInputQuantScale (idx);
1820 if (idx >= meta->getOutputNum()) {
1821 logerr (TAG, "Wrong information for output tensors in metadata\n");
1825 info = model->getOutputDataInfo (idx);
1826 dims = meta->getOutputDims (idx);
1827 zero_point = meta->getOutputQuantZero (idx);
1828 scale = meta->getOutputQuantScale (idx);
1831 if (info == nullptr) {
1832 logerr (TAG, "Unmatched tensors info\n");
/** dims[] is assumed NBHWC-ordered: [batch, height, width, depth] */
1836 uint32_t batch = dims[0];
1837 uint32_t height = dims[1];
1838 uint32_t width = dims[2];
1839 uint32_t depth = dims[3];
1841 uint32_t data_size = get_data_size (info->type);
1842 if (data_size == 0) {
1843 logerr (TAG, "Invalid data size\n");
1847 bool need_quantization = false;
1849 * note that we assume DATA_TYPE_SRNPU is the smallest data type that we consider.
1850 * Also, DATA_TYPE_SRNPU and uint8_t may be regarded as the same in the view of apps.
/** quantization is needed when the type is wider than SRNPU's (uint8-like)
 *  type, or when the tensor's quant params differ from the defaults */
1852 if (info->type != DATA_TYPE_SRNPU) {
1853 assert (data_size >= get_data_size (DATA_TYPE_SRNPU));
1855 if (data_size > get_data_size (DATA_TYPE_SRNPU) ||
1856 !(zero_point == default_quant_zero && scale == default_quant_scale))
1857 need_quantization = true;
1860 /** check data manipulation is required */
/** general case: depth not 3 nor 64 and data not already in SRNPU layout —
 *  walk every (n,h,w,d) slice and remap offsets per the HW address rules */
1861 if (depth != 3 && depth != 64 && info->layout != DATA_LAYOUT_SRNPU) {
1862 uint32_t MPA_L = DATA_GRANULARITY;
1863 uint32_t n, h, w, d;
1864 uint32_t std_offset; /* standard offset in NHWC data format */
1865 uint32_t npu_offset; /* npu offset in NPU HW data format*/
1866 uint32_t src_offset;
1867 uint32_t dst_offset;
1868 uint32_t slice_size;
1870 /* @todo we currently support only NHWC */
1871 if (info->layout != DATA_LAYOUT_NHWC) {
1872 logerr (TAG, "data manipulation is supported for NHWC only\n");
1876 for (n = 0; n < batch; n++) {
1877 for (h = 0; h < height; h++) {
1878 for (w = 0; w < width; w++) {
/** step depth in MPA_L-wide slices; last slice may be partial */
1879 for (d = 0; d < depth; d += MPA_L) {
1880 std_offset = d + depth * (w + width * (h + n * height));
1881 npu_offset = MPA_L * (w + width * (h + (n + d / MPA_L) * height));
1882 slice_size = (depth - d >= MPA_L) ? MPA_L : depth - d;
/** is_input: NHWC (elementwise, scaled by data_size) -> NPU byte offsets;
 *  output direction swaps source and destination roles */
1885 src_offset = std_offset * data_size;
1886 dst_offset = npu_offset;
1888 src_offset = npu_offset;
1889 dst_offset = std_offset * data_size;
1892 /* if depth is not a multiple of MPA_L, add zero paddings (not exact values) */
1893 if (need_quantization) {
1894 memcpy_with_quant (is_input, info->type, scale, zero_point,
1895 static_cast<char*>(dst) + dst_offset,
1896 static_cast<char*>(src) + src_offset,
/** non-quantized slice copy (call head on a line not visible here) */
1900 static_cast<char*>(dst) + dst_offset,
1901 static_cast<char*>(src) + src_offset,
1908 } else if (need_quantization) {
1909 /** depth == 3 || depth == 64; special cases which can directly copy input tensor data */
/** for input, size is in bytes of the app type, so convert to element count */
1910 memcpy_with_quant (is_input, info->type, scale, zero_point,
1911 dst, src, is_input ? size / data_size : size);
/** no layout change and no quantization: plain byte copy */
1913 memcpy (dst, src, size);
/**
 * Pass-through variant of TrinityVision::manipulateData — a plain byte copy
 * with no layout conversion or quantization. Presumably the #else branch of
 * a data-manipulation feature guard (the #if/#else directives are not visible
 * in this extract) — TODO confirm.
 */
1922 TrinityVision::manipulateData (const Model *model, uint32_t idx, bool is_input,
1923 void *dst, void *src, size_t size)
1925 memcpy (dst, src, size);
1931 /** other device types don't have data manip impl. yet */
/**
 * TRIV2 data manipulation: no device-specific transform is implemented yet
 * (see the note above), so this is a plain byte copy of size bytes.
 */
1934 TrinityVision2::manipulateData (const Model *model, uint32_t idx, bool is_input,
1935 void *dst, void *src, size_t size)
1937 memcpy (dst, src, size);
1942 TrinityAsr::manipulateData (const Model *model, uint32_t idx, bool is_input,
1943 void *dst, void *src, size_t size)
1945 memcpy (dst, src, size);