* Copyright (C) 2020 Dongju Chae <dongju.chae@samsung.com>
*/
/**
- * @file ne-host-handler.cc
+ * @file ne-handler.cc
* @date 03 Apr 2020
- * @brief Implementation of APIs to access NPU from Host
+ * @brief Implementation of NPU Engine entrypoint that handles APIs from host
* @see https://code.sec.samsung.net/confluence/display/ODLC/2020+Overall+Software+Stack
* @author Dongju Chae <dongju.chae@samsung.com>
* @bug No known bugs except for NYI items
*/
#include "ne-handler.h"
+#include "ne-data.h"
-#include <libnpuhost.h>
#include <npubinfmt.h>
#include <NPUdrvAPI.h>
#include <CommPlugin.h>
#define TAG _N2
-#define INIT_HOST_HANDLER(handler, dev) \
- Device *tdev = static_cast <Device *> (dev); \
- if (tdev == nullptr) return -EINVAL; \
- HostHandler *handler = tdev->getHostHandler (); \
- if (handler == nullptr) return -EINVAL;
-
-/** just for backward-compatability */
-npudev_h HostHandler::latest_dev_ = nullptr;
-
-/** implement libnpuhost APIs */
-
-/**
- * @brief Returns the number of available NPU devices.
- * @return @c The number of NPU devices.
- * @retval 0 if no NPU devices available. if positive (number of NPUs) if NPU devices available. otherwise, a negative error value.
- * @note the caller should call putNPUdevice() to release the device handle
- */
-int getnumNPUdeviceByType (dev_type type)
-{
- return HostHandler::getNumDevices (type);
-}
-
-/**
- * @brief Returns the handle of the chosen NPU devices.
- * @param[out] dev The NPU device handle
- * @param[in] id The NPU id to get the handle. 0 <= id < getnumNPUdeviceByType().
- * @return @c 0 if no error. otherwise a negative error value
- * @note the caller should call putNPUdevice() to release the device handle
- */
-int getNPUdeviceByType (npudev_h *dev, dev_type type, uint32_t id)
-{
- return HostHandler::getDevice (dev, type, id);
-}
-
-/**
- * @brief release the NPU device instance obtained by getDevice ()
- * @param[in] dev the NPU device handle
- */
-void putNPUdevice (npudev_h dev)
-{
- if (dev != nullptr)
- delete static_cast<Device *> (dev);
-}
-
-/**
- * @brief Send the NN model to NPU.
- * @param[in] dev The NPU device handle
- * @param[in] modelfile The filepath to the compiled NPU NN model in any buffer_type
- * @param[out] modelid The modelid allocated for this instance of NN model.
- * @return @c 0 if no error. otherwise a negative error value
- *
- * @detail For ASR devices, which do not accept models, but have models
- * embedded in devices, you do not need to call register and
- * register calls for ASR are ignored.
- *
- * @todo Add a variation: in-memory model register.
- */
-int registerNPUmodel (npudev_h dev, generic_buffer *modelfile, uint32_t *modelid)
-{
- INIT_HOST_HANDLER (host_handler, dev);
-
- return host_handler->registerModel (modelfile, modelid);
-}
-
-/**
- * @brief Remove the NN model from NPU
- * @param[in] dev The NPU device handle
- * @param[in] modelid The model to be removed from the NPU.
- * @return @c 0 if no error. otherwise a negative error value
- * @detail This may incur some latency with memory compatcion.
- */
-int unregisterNPUmodel(npudev_h dev, uint32_t modelid)
-{
- INIT_HOST_HANDLER (host_handler, dev);
-
- return host_handler->unregisterModel (modelid);
-}
-
-/**
- * @brief Remove all NN models from NPU
- * @param[in] dev The NPU device handle
- * @return @c 0 if no error. otherwise a negative error value
- */
-int unregisterNPUmodel_all(npudev_h dev)
-{
- INIT_HOST_HANDLER (host_handler, dev);
-
- return host_handler->unregisterModels ();
-}
-
-/**
- * @brief [OPTIONAL] Set the data layout for input/output tensors
- * @param[in] dev The NPU device handle
- * @param[in] modelid The ID of model whose layouts are set
- * @param[in] info_in the layout/type info for input tensors
- * @param[in] info_out the layout/type info for output tensors
- * @return @c 0 if no error. otherwise a negative error value
- * @note if this function is not called, default layout/type will be used.
- */
-int setNPU_dataInfo(npudev_h dev, uint32_t modelid,
- tensors_data_info *info_in, tensors_data_info *info_out)
-{
- INIT_HOST_HANDLER (host_handler, dev);
-
- return host_handler->setDataInfo (modelid, info_in, info_out);
-}
-
-/**
- * @brief [OPTIONAL] Set the inference constraint for next NPU inferences
- * @param[in] dev The NPU device handle
- * @param[in] modelid The target model id
- * @param[in] constraint inference constraint (e.g., timeout, priority)
- * @return @c 0 if no error. otherwise a negative error value
- * @note If this function is not called, default values are used.
- */
-int setNPU_constraint(npudev_h dev, uint32_t modelid, npuConstraint constraint)
-{
- INIT_HOST_HANDLER (host_handler, dev);
-
- return host_handler->setConstraint (modelid, constraint);
-}
-
-/**
- * @brief Execute inference. Wait (block) until the output is available.
- * @param[in] dev The NPU device handle
- * @param[in] modelid The model to be inferred.
- * @param[in] input The input data to be inferred.
- * @param[out] output The output result. The caller MUST allocate appropriately before calling this.
- * @return @c 0 if no error. otherwise a negative error value
- *
- * @detail This is a syntactic sugar of runNPU_async().
- * CAUTION: There is a memcpy for the output buffer.
- */
-int runNPU_sync(npudev_h dev, uint32_t modelid, const input_buffers *input,
- output_buffers *output)
-{
- INIT_HOST_HANDLER (host_handler, dev);
-
- return host_handler->runSync (modelid, input, output);
-}
-
-/**
- * @brief Invoke NPU inference. Unblocking call.
- * @param[in] dev The NPU device handle
- * @param[in] modelid The model to be inferred.
- * @param[in] input The input data to be inferred.
- * @param[in] cb The output buffer handler.
- * @param[out] sequence The sequence number returned with runNPU_async.
- * @param[in] data The data given as a parameter to the runNPU_async call.
- * @param[in] mode Configures how this operation works.
- * @return @c 0 if no error. otherwise a negative error value
- */
-int runNPU_async(npudev_h dev, uint32_t modelid, const input_buffers *input,
- npuOutputNotify cb, uint64_t *sequence, void *data,
- npu_async_mode mode)
-{
- INIT_HOST_HANDLER (host_handler, dev);
-
- return host_handler->runAsync (modelid, input, cb, data, mode, sequence);
-}
-
-/**
- * @brief Let NPU accept input frames from its internal source continuously
- * @param[in] dev The NPU device handle
- * @param[in] modelid The model to be inferred.
- * @param[in] opmode NPU has different opmode with auto-inputs. Choose one.
- * @param[in] input The input buffer where input data comes.
- * @param[in] output The output buffer where output data is filled.
- * @return @c 0 if no error. otherwise a negative error value
- */
-int runNPU_internalInput(npudev_h dev, uint32_t modelid, npu_input_opmode opmode,
- const input_buffers *input, const output_buffers *output)
-{
- INIT_HOST_HANDLER (host_handler, dev);
-
- return host_handler->runInternal (modelid, opmode, input, output);
-}
-
-/**
- * @brief Stop the request with the given id
- * @param[in] dev The NPU device handle
- * @param[in] id The request id
- * @return @c 0 if no error. otherwise a negative error value
- */
-int stopNPU_internalInput(npudev_h dev, int id)
-{
- INIT_HOST_HANDLER (host_handler, dev);
-
- return host_handler->stopInternal (id);
-}
-
-/**
- * @brief Allocate a generic buffer with the requested buffer type.
- * @param[in] dev The NPU device handle
- * @param[in/out] Buffer the buffer pointer where memory is allocated.
- * @return 0 if no error, otherwise a negative errno.
- */
-int allocNPU_genericBuffer (npudev_h dev, generic_buffer * buffer)
-{
- INIT_HOST_HANDLER (host_handler, dev);
-
- return host_handler->allocGenericBuffer (buffer);
-}
-
-/**
- * @brief Free the generic buffer and remove the address mapping
- * @param[in] dev The NPU device handle
- * @param[in] buffer the model buffer
- * @return 0 if no error, otherwise a negative errno.
- */
-int cleanNPU_genericBuffer (npudev_h dev, generic_buffer * buffer)
-{
- INIT_HOST_HANDLER (host_handler, dev);
-
- return host_handler->deallocGenericBuffer (buffer);
-}
-
-/**
- * @brief Allocate generic buffers, which have multiple instances of generic_buffer
- * @param[in] dev The NPU device handle
- * @param[in/out] buffers generic buffers.
- * @return 0 if no error, otherwise a negative errno.
- * @note it reuses allocGenericBuffer().
- */
-int allocNPU_genericBuffers (npudev_h dev, generic_buffers * buffers)
-{
- INIT_HOST_HANDLER (host_handler, dev);
-
- return host_handler->allocGenericBuffer (buffers);
-}
-
-/**
- * @brief Free generic buffers allocated by allocGenericBuffers().
- * @param[in] dev The NPU device handle
- * @param[in/out] buffers generic buffers.
- * @note it reuses cleanGenericbuffer().
- * @return 0 if no error, otherwise a negative errno.
- */
-int cleanNPU_genericBuffers (npudev_h dev, generic_buffers * buffers)
-{
- INIT_HOST_HANDLER (host_handler, dev);
-
- return host_handler->deallocGenericBuffer (buffers);
-}
-
-/**
- * @brief alias of allocNPU_genericBuffer for model buffer
- */
-int allocNPU_modelBuffer (npudev_h dev, generic_buffer * model)
-{
- return allocNPU_genericBuffer (dev, model);
-}
-
-/**
- * @brief alias of cleanNPU_genericBuffer for model buffer
- */
-int cleanNPU_modelBuffer (npudev_h dev, generic_buffer * model)
-{
- return cleanNPU_genericBuffer (dev, model);
-}
-
-/**
- * @brief alias of allocNPU_genericBuffer for input buffer
- */
-int allocNPU_inputBuffer (npudev_h dev, generic_buffer * input)
-{
- return allocNPU_genericBuffer (dev, input);
-}
-
-/**
- * @brief alias of cleanNPU_genericBuffer for input buffer
- */
-int cleanNPU_inputBuffer (npudev_h dev, generic_buffer * input)
-{
- return cleanNPU_genericBuffer (dev, input);
-}
-
-/**
- * @brief alias of allocNPU_genericBuffers for input buffers
- */
-int allocNPU_inputBuffers (npudev_h dev, input_buffers * input)
-{
- return allocNPU_genericBuffers (dev, input);
-}
-
-/**
- * @brief alias of cleanNPU_genericBuffers for input buffers
- */
-int cleanNPU_inputBuffers (npudev_h dev, input_buffers * input)
-{
- return cleanNPU_genericBuffers (dev, input);
-}
-
-/**
- * @brief get the current memory status for the given device
- * @param[in] dev The NPU device handle
- * @param[out] alloc_total The size of allocated memory until now
- * @param[out] free_total The size of freed memory until now
- * @return @c 0 if no error. otherwise a negatice error value
- */
-int getNPU_memoryStatus(npudev_h dev, size_t *alloc_total, size_t *free_total)
-{
- INIT_HOST_HANDLER (host_handler, dev);
-
- return host_handler->getMemoryStatus (alloc_total, free_total);
-}
-
-/**
- * @brief Get the current device status to be used
- * @param[in] dev The NPU device handle
- * @param[out] status the device status
- * @param[out] num_requests the number of running requests (or pending)
- * @return 0 if no error, otherwise a negative errno.
- */
-int getNPU_deviceStatus(npudev_h dev, npu_status *status, uint32_t *num_requests)
-{
- INIT_HOST_HANDLER (host_handler, dev);
-
- return host_handler->getDeviceStatus (status, num_requests);
-}
-
-/**
- * @brief Get metadata for NPU model
- * @param[in] model The path of model binary file
- * @param[in] need_extra whether you want to extract the extra data in metadata
- * @return the metadata structure to be filled if no error, otherwise nullptr
- *
- * @note For most npu-engine users, the extra data is not useful because it will be
- * used for second-party users (e.g., compiler, simulator).
- * Also, the caller needs to free the metadata.
- *
- * @note the caller needs to free the metadata
- */
-npubin_meta * getNPUmodel_metadata (const char *model, bool need_extra)
-{
- npubin_meta *meta;
- FILE *fp;
- size_t ret;
-
- if (!model)
- return nullptr;
-
- fp = fopen (model, "rb");
- if (!fp) {
- logerr (TAG, "Failed to open the model binary: %d\n", -errno);
- return nullptr;
- }
-
- meta = (npubin_meta *) malloc (NPUBIN_META_SIZE);
- if (!meta) {
- logerr (TAG, "Failed to allocate metadata\n");
- goto exit_err;
- }
-
- ret = fread (meta, 1, NPUBIN_META_SIZE, fp);
- if (ret != NPUBIN_META_SIZE) {
- logerr (TAG, "Failed to read the metadata\n");
- goto exit_free;
- }
-
- if (!CHECK_NPUBIN (meta->magiccode)) {
- logerr (TAG, "Invalid metadata provided\n");
- goto exit_free;
- }
-
- if (need_extra && NPUBIN_META_EXTRA (meta->magiccode) > 0) {
- npubin_meta *new_meta;
-
- new_meta = (npubin_meta *) realloc (meta, NPUBIN_META_TOTAL_SIZE(meta->magiccode));
- if (!new_meta) {
- logerr (TAG, "Failed to allocate extra metadata\n");
- goto exit_free;
- }
-
- ret = fread (new_meta->reserved_extra, 1, NPUBIN_META_EXTRA_SIZE (meta->magiccode), fp);
- if (ret != NPUBIN_META_EXTRA_SIZE (meta->magiccode)) {
- logerr (TAG, "Invalid extra metadata provided\n");
- free (new_meta);
- goto exit_err;
- }
-
- meta = new_meta;
- }
-
- fclose (fp);
-
- return meta;
-
-exit_free:
- free (meta);
-exit_err:
- fclose (fp);
-
- return nullptr;
-}
-
-/** implement methods of HostHandler class */
-
/** @brief host handler constructor */
HostHandler::HostHandler (Device *device)
: device_(device),
int
HostHandler::unregisterModels ()
{
- models_.clear ();
+ std::function <bool (Model *)> functor =
+ [&] (Model *m) -> bool {
+ bool can_remove = true;
+ int status = device_->unsetModel (m);
+ if (status != 0) {
+ logwarn (TAG, "Failed to unset model: %d\n", status);
+ can_remove = false;
+ }
+ return can_remove;
+ };
+
+ models_.for_each (functor);
return 0;
}
/**
+ * @brief Get the profile information from NPU
+ * @param[in] task_id The identifier for each inference
+ * @param[out] profile The profile instance
+ * @return 0 if no error, otherwise a negative errno.
+ */
+int
+HostHandler::getProfile (int task_id, npu_profile *profile)
+{
+ if (task_id < 0 || profile == nullptr) {
+ logerr (TAG, "Invalid parameter provided\n");
+ return -EINVAL;
+ }
+
+ const DriverAPI * api = device_->getDriverAPI ();
+ assert (api != nullptr);
+
+ profile->num_layers = 0;
+ profile->layers = nullptr;
+
+ int status = api->getProfile (task_id, profile);
+ if (status != 0) {
+ logerr (TAG, "Failed to get profile information: %d\n", status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * @brief get the stats for the latest apps of the target device
+ * @param[out] stat The list of app stat
+ * @note The caller has the responsibility to free the resources.
+ *        This API is not working on the emulated environment.
+ */
+int
+HostHandler::getStatApps (npu_stat_apps *stat)
+{
+ const DriverAPI * api = device_->getDriverAPI ();
+ assert (api != nullptr);
+
+ return api->getStatApps (stat);
+}
+
+/**
+ * @brief get the stats for the latest tasks of the target app
+ * @param[in] appid The identifier of target app
+ * @param[out] stat The list of task stat
+ * @note The caller has the responsibility to free the resources.
+ *        This API is not working on the emulated environment.
+ */
+int
+HostHandler::getStatTasks (int appid, npu_stat_tasks *stat)
+{
+ const DriverAPI * api = device_->getDriverAPI ();
+ assert (api != nullptr);
+
+ return api->getStatTasks (appid, stat);
+}
+
+/**
+ * @brief Get the driver API level of opened NPU device
+ * @param[out] level driver API level
+ * @return 0 if no error, otherwise a negative errno
+ */
+int
+HostHandler::getAPILevel (uint32_t *level)
+{
+ const DriverAPI * api = device_->getDriverAPI ();
+ assert (api != nullptr);
+
+ return api->getAPILevel (level);
+}
+
+/**
+ * @brief Get the TOPS of the opened NPU device
+ * @param[in] dev the NPU device handle
+ * @param[out] tops npu tops
+ * @return 0 if no error, otherwise a negative errno
+ * @note this is not supported for emulated devices
+ */
+int
+HostHandler::getTops (uint32_t *tops)
+{
+ const DriverAPI * api = device_->getDriverAPI ();
+ assert (api != nullptr);
+
+ return api->getTops (tops);
+}
+
+/**
+ * @brief Get the DSP DSPM size of the opened NPU device
+ * @param[in] dev the NPU device handle
+ * @param[out] dspm dspm size
+ * @return 0 if no error, otherwise a negative errno
+ * @note this is not supported for emulated devices
+ */
+int
+HostHandler::getDspmSize (uint32_t *dspm)
+{
+ const DriverAPI * api = device_->getDriverAPI ();
+ assert (api != nullptr);
+
+ return api->getDspmSize (dspm);
+}
+/**
* @brief Set the data layout for input/output tensors
* @param[in] modelid The ID of model whose layouts are set
* @param[in] in the layout/type info for input tensors
}
void callback (output_buffers *output, uint64_t sequence) {
- if (output_ != nullptr) {
+ if (output_ != nullptr && output != nullptr) {
/** just copy internal variables of output buffers */
memcpy (output_, output, sizeof (output_buffers));
}
* @param[in] modelid The model to be inferred.
* @param[in] input The input data to be inferred.
* @param[out] output The output result.
- * @return @c 0 if no error. otherwise a negative error value
+ * @return @c positive id if no error. otherwise a negative error value
*/
int
HostHandler::runSync (uint32_t modelid, const input_buffers *input,
callbackSync sync (output);
int status = runAsync (modelid, input, callbackSync::callback,
static_cast <void*> (&sync), NPUASYNC_DROP_OLD, nullptr);
- if (status == 0) {
+ if (status > 0) {
/** sync needs to wait callback */
sync.wait ();
}
* @param[in] cb_data The data given as a parameter to the runNPU_async call.
* @param[in] mode Configures how this operation works.
* @param[out] sequence The sequence number returned with runNPU_async.
- * @return @c 0 if no error. otherwise a negative error value
+ * @return @c positive id if no error. otherwise a negative error value
*/
int
HostHandler::runAsync (uint32_t modelid, const input_buffers *input,
* @brief Let NPU accept input frames from its internal source continuously
* @param[in] modelid The model to be inferred.
* @param[in] opmode NPU has different opmode with auto-inputs. Choose one.
- * @param[in] input The input buffer where input data comes.
- * @param[in] output The output buffer where output data is filled.
+ * @param[in] hw_dev The target device feeding input data
* @return @c 0 if no error. otherwise a negative error value
*/
int
HostHandler::runInternal (uint32_t modelid, npu_input_opmode opmode,
- const input_buffers *input, const output_buffers *output)
+ std::string hw_dev)
{
Model *model = nullptr;
return -EINVAL;
}
- return device_->runInternal (opmode, model, input, output);
+ return device_->runInternal (opmode, model, hw_dev);
}
/**
}
*dev = device;
- /** This is just for backward-compatility; we don't guarantee its corresness */
- latest_dev_ = *dev;
return 0;
}
{
/** API is always set in initialize () */
const DriverAPI * api = device_->getDriverAPI ();
- assert (api != nullptr);
+
+ if (!api)
+ return -EINVAL;
device_state_t state = api->isReady ();
if (state == device_state_t::STATE_READY) {
Device *device = nullptr;
switch (type & DEVICETYPE_MASK) {
- case DEVICETYPE_TRIV:
- device = new TrinityVision (id);
- break;
case DEVICETYPE_TRIV2:
device = new TrinityVision2 (id);
break;
- case DEVICETYPE_TRIA:
- device = new TrinityAsr (id);
+ case DEVICETYPE_DEPR:
+ logwarn (TAG, "You're trying to open deprecated devices..\n");
break;
default:
break;
- }
-
- if (device != nullptr && device->init () != 0) {
- delete device;
- device = nullptr;
- }
-
- return device;
-}
-
-/**
- * @brief device initialization
- * @return 0 if no error, otherwise a negative errno
- * @note Init failures come from createDriverAPI() only.
- */
-int
-Device::init ()
-{
- /** should be initilizaed only once */
- if (!atomic_flag_.test_and_set()) {
- /** create the corresponding driver API */
- api_ = DriverAPI::createDriverAPI (type_, id_);
- if (api_.get() == nullptr) {
- atomic_flag_.clear();
- logerr (TAG, "Failed to create driver API\n");
- return -EINVAL;
- }
-
- handler_.reset (new HostHandler (this));
- scheduler_.reset (new Scheduler (api_.get()));
- mem_ = MemAllocator::createInstance (api_.get());
-
- initialized_ = true; /** c++11 does not provide test() of atomic flag */
- }
-
- return 0;
-}
-
-/**
- * @brief stop all requests from this device
- * @param[in] force_stop indicate the schedduler waits until to handle previous requests
- * @return 0 if no error, otherwise a negative errno
- */
-int
-Device::stop (bool force_stop)
-{
- if (!initialized ()) {
- logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
- return -EPERM;
- }
-
- Request *req = new Request (NPUINPUT_STOP);
- req->setForceStop (force_stop);
- return scheduler_->submitRequest (req);
-}
-
-/**
- * @brief allocate generic memory buffer
- * @param[in] size the size to allocate
- * @param[out] addr the mapped address
- * @return dmabuf fd if no error, otherwise a negative errno
- */
-int
-Device::allocMemory (size_t size, void **addr)
-{
- if (!initialized ()) {
- logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
- return -EPERM;
- }
-
- if (size == 0 || addr == nullptr) {
- logerr (TAG, "Invalid arguments\n");
- return -EINVAL;
- }
-
- return mem_->allocMemory (size, addr);
-}
-
-/**
- * @brief deallocate generic memory buffer
- * @param[in] dmabuf_fd dmabuf file descriptor
- * @param[in] size buffer size
- * @param[in] addr mapped addr
- * @return 0 if no error, otherwise a negative errno
- */
-int
-Device::deallocMemory (int dmabuf_fd, size_t size, void * addr)
-{
- if (!initialized ()) {
- logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
- return -EPERM;
- }
-
- if (dmabuf_fd < 0 || size == 0 || addr == nullptr) {
- logerr (TAG, "Invalid arguments\n");
- return -EINVAL;
- }
-
- return mem_->deallocMemory (dmabuf_fd, size, addr);
-}
-
-/**
- * @brief extract the buffer instance from input generic buffers
- * @param[in] meta the model metadata
- * @param[in] input the input generic buffers
- * @return the buffer instance
- */
-Buffer *
-TrinityVision::prepareInputBuffers (const Metadata *meta, const input_buffers *input)
-{
- if (meta == nullptr || input == nullptr ||
- meta->getInputNum() != input->num_buffers) {
- logerr (TAG, "Invalid metadata info provided\n");
- return nullptr;
- }
-
- Buffer * buffer;
- const generic_buffer *first = &input->bufs[0];
- if (first->type == BUFFER_DMABUF) {
- buffer = mem_->allocBuffer (new HWmemExternal);
- if (buffer == nullptr)
- return nullptr;
-
- buffer->setDmabuf (first->dmabuf);
- buffer->setOffset (first->offset);
- buffer->setSize (meta->getBufferSize());
- } else {
- buffer = mem_->allocBuffer (new HWmemDevice);
- if (buffer == nullptr)
- return nullptr;
-
- int status = buffer->alloc (meta->getBufferSize ());
- if (status != 0) {
- logerr (TAG, "Failed to allocate buffer: %d\n", status);
- delete buffer;
- return nullptr;
- }
- }
-
- int status = buffer->createTensors (meta);
- if (status != 0) {
- logerr (TAG, "Failed to create tensors: %d\n", status);
- delete buffer;
- buffer = nullptr;
- }
-
- return buffer;
-}
-
-/**
- * @brief implementation of TRIV's setModel ()
- * @param[in] model_buf the model generic buffer
- * @param[out] model the model instance
- * @return 0 if no error, otherwise a negative errno
- */
-int
-TrinityVision::setModel (const generic_buffer *model_buf, Model ** model_ptr)
-{
- if (!initialized ()) {
- logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
- return -EPERM;
- }
-
- if (model_buf == nullptr || model_ptr == nullptr)
- return -EINVAL;
-
- Model *model = nullptr;
- HWmem * hwmem_prog = nullptr;
- HWmem * hwmem_weight = nullptr;
- int status;
-
- /** In TRIV1, model data (including program/weight) should be contiguous */
-
- switch (model_buf->type) {
- case BUFFER_FILE:
- case BUFFER_MAPPED:
- model = mem_->allocModel (new HWmemDevice);
- if (model == nullptr) {
- logerr (TAG, "Failed to allocate model\n");
- return -ENOMEM;
- }
-
- status = model->alloc (model_buf->size);
- if (status != 0) {
- logerr (TAG, "Failed to allocate model: %d\n", status);
- goto delete_exit;
- }
-
- /** extract the whole model data */
- status = comm_.extractGenericBuffer (model_buf, model->getData(), nullptr);
- if (status != 0) {
- logerr (TAG, "Failed to extract generic buffer: %d\n", status);
- goto delete_exit;
- }
- break;
- default:
- return -EINVAL;
- }
-
- status = model->setMetadata (model->getData());
- if (status != 0)
- goto delete_exit;
-
- /** allocate program (optional; NOP) */
- if (model->getMetadata()->getProgramSize() > 0) {
- hwmem_prog = new HWmem (new HWmemChunk);
- model->setProgramData (hwmem_prog);
+ }
- hwmem_prog->setParent (model);
- hwmem_prog->setOffset (model->getMetadata()->getMetaSize());
- status = hwmem_prog->alloc (model->getMetadata()->getProgramSize());
- if (status != 0) {
- logerr (TAG, "Failed to allocate program\n");
- goto delete_exit;
- }
+ if (device != nullptr && device->init () != 0) {
+ delete device;
+ device = nullptr;
}
- /** allocate weight (optional) */
- if (model->getMetadata()->getWeightSize() > 0) {
- hwmem_weight = new HWmem (new HWmemChunk);
- model->setWeightData (hwmem_weight);
+ return device;
+}
- hwmem_weight->setParent (model);
- hwmem_weight->setOffset (model->getMetadata()->getMetaSize() +
- model->getMetadata()->getProgramSize());
- status = hwmem_weight->alloc (model->getMetadata()->getWeightSize());
- if (status != 0) {
- logerr (TAG, "Failed to allocate program\n");
- goto delete_exit;
+/**
+ * @brief device initialization
+ * @return 0 if no error, otherwise a negative errno
+ * @note Init failures come from createDriverAPI() only.
+ */
+int
+Device::init ()
+{
+  /** should be initialized only once */
+ if (!atomic_flag_.test_and_set()) {
+ /** create the corresponding driver API */
+ api_ = DriverAPI::createDriverAPI (type_, id_);
+ if (api_.get() == nullptr) {
+ atomic_flag_.clear();
+ logerr (TAG, "Failed to create driver API\n");
+ return -EINVAL;
}
- }
-
- if (hwmem_prog != nullptr) {
- /** register this model to the driver */
- model_config_t config;
- config.dbuf_fd = hwmem_prog->getDmabuf ();
- config.program_size = hwmem_prog->getSize ();
- config.program_offset_addr = hwmem_prog->getOffset ();
- if (hwmem_weight != nullptr)
- config.weight_offset_addr = hwmem_weight->getOffset ();
- status = api_->registerModel (&config);
- if (status != 0)
- goto delete_exit;
+ handler_.reset (new HostHandler (this));
+ scheduler_.reset (new Scheduler (api_.get()));
+ mem_ = MemAllocator::createInstance (api_.get());
- model->setInternalID(config.id);
+ initialized_ = true; /** c++11 does not provide test() of atomic flag */
}
- *model_ptr = model;
- return status;
-
-delete_exit:
- delete model;
- return status;
+ return 0;
}
/**
- * @brief implementation of TRIV's unsetModel ()
- * @param[in] model the model instance
+ * @brief stop all requests from this device
+ * @param[in] force_stop indicates whether the scheduler waits until previous requests are handled
* @return 0 if no error, otherwise a negative errno
*/
int
-TrinityVision::unsetModel (Model * model)
+Device::stop (bool force_stop)
{
if (!initialized ()) {
logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
return -EPERM;
}
- if (model == nullptr) {
- logerr (TAG, "Invalid model instance\n");
- return -EINVAL;
- }
-
- if (model->getMetadata()->getProgramSize() > 0)
- return api_->deregisterModel (model->getInternalID ());
-
- return 0;
+ Request *req = new Request (NPUINPUT_STOP);
+ req->setForceStop (force_stop);
+ return scheduler_->submitRequest (req);
}
/**
- * @brief implementation of TRIV's run()
- * @param[in] opmode input opmode
- * @param[in] model the model instance
- * @param[in] input generic buffers of input data
- * @param[in] cb the output callback
- * @param[in] cb_data the output callback data
- * @param[out] sequence The sequence number returned with runNPU_async.
+ * @brief allocate generic memory buffer
+ * @param[in] size the size to allocate
+ * @param[out] addr the mapped address
+ * @return dmabuf fd if no error, otherwise a negative errno
*/
int
-TrinityVision::run (npu_input_opmode opmode, const Model *model,
- const input_buffers *input, npuOutputNotify cb, void *cb_data,
- uint64_t *sequence)
+Device::allocMemory (size_t size, void **addr)
{
if (!initialized ()) {
logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
return -EPERM;
}
- if (opmode != NPUINPUT_HOST) {
- logerr (TAG, "TRIV supports only host inputservice\n");
- return -EINVAL;
- }
-
- if (model == nullptr || input == nullptr) {
- logerr (TAG, "TRIV requires both model and input buffers\n");
- return -EINVAL;
- }
-
- Buffer *buffer = prepareInputBuffers (model->getMetadata(), input);
- if (buffer == nullptr) {
- logerr (TAG, "Failed to extract buffer instance\n");
+ if (size == 0 || addr == nullptr) {
+ logerr (TAG, "Invalid arguments\n");
return -EINVAL;
}
- if (!buffer->isExternal ()) {
- for (uint32_t idx = 0; idx < input->num_buffers; idx++) {
- auto func = std::bind (TrinityVision::manipulateData, model, idx, true,
- std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
- int status = comm_.extractGenericBuffer (&input->bufs[idx],
- buffer->getInputTensor(idx)->getData(), func);
- if (status != 0) {
- logerr (TAG, "Failed to feed input buffer: %d\n", status);
- return status;
- }
- }
- }
-
- /** this device uses CMA buffer */
-
- Request *req = new Request (opmode);
- req->setModel (model);
- req->setBuffer (buffer);
-
- if (cb != nullptr)
- req->setCallback (std::bind (&TrinityVision::callback, this, req, cb, cb_data));
-
- if (sequence != nullptr)
- *sequence = req->getID();
-
- return scheduler_->submitRequest (req);
+ return mem_->allocMemory (size, addr);
}
/**
- * @brief callback of TRIV2 request
- * @param[in] req the request instance
- * @param[in] cb callback for completion
- * @param[in] cb_data callback data
- * @note The callback invoke does not gurantee the request was successful
- * @todo Check the request failures
+ * @brief deallocate generic memory buffer
+ * @param[in] dmabuf_fd dmabuf file descriptor
+ * @param[in] size buffer size
+ * @param[in] addr mapped addr
+ * @return 0 if no error, otherwise a negative errno
*/
-void
-TrinityVision::callback (Request *req, npuOutputNotify cb, void *cb_data)
+int
+Device::deallocMemory (int dmabuf_fd, size_t size, void * addr)
{
- const Model *model = req->getModel ();
- Buffer *buffer = req->getBuffer ();
- output_buffers output = {
- .num_buffers = buffer->getOutputNum ()
- };
-
- for (uint32_t idx = 0; idx < output.num_buffers; idx++) {
- uint32_t output_tensor_size = model->getOutputTensorSize (idx);
-
- if (buffer->isExternal ()) {
- output.bufs[idx].type = BUFFER_DMABUF;
- output.bufs[idx].size = output_tensor_size;
- output.bufs[idx].addr = buffer->getOutputTensor(idx)->getData();
- } else {
- output.bufs[idx].type = BUFFER_MAPPED;
- output.bufs[idx].size = output_tensor_size;
- /** user needs to free this */
- output.bufs[idx].addr = malloc (output_tensor_size);
-
- auto func = std::bind (TrinityVision::manipulateData, model, idx, false,
- std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
- int status = comm_.insertGenericBuffer (buffer->getOutputTensor(idx)->getData(),
- &output.bufs[idx], func);
- if (status != 0) {
- logerr (TAG, "Failed to return output buffer: %d\n", status);
- }
- }
+ if (!initialized ()) {
+ logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
+ return -EPERM;
}
- cb (&output, req->getID(), cb_data);
+ if (dmabuf_fd < 0 || size == 0 || addr == nullptr) {
+ logerr (TAG, "Invalid arguments\n");
+ return -EINVAL;
+ }
- delete buffer;
+ return mem_->deallocMemory (dmabuf_fd, size, addr);
}
/**
TrinityVision2::prepareSegmentTable (const Model *model, const input_buffers *input,
const output_buffers *output)
{
- if (model == nullptr || input == nullptr) {
- logerr (TAG, "Invalid arguments provided\n");
- return nullptr;
- }
-
const Metadata *meta = model->getMetadata ();
- if (meta == nullptr ||
- meta->getInputNum() != input->num_buffers) {
+ if (meta == nullptr || (input != nullptr &&
+ meta->getInputNum() != input->num_buffers)) {
logerr (TAG, "Invalid metadata info provided\n");
return nullptr;
}
if (model->getMetadata()->getProgramSize() > 0) {
HWmem * hwmem_prog = new HWmem (new HWmemDevice);
hwmem_prog->setDriverAPI (api_.get());
+ hwmem_prog->setContiguous (true);
model->setProgramData (hwmem_prog);
/** register this model to the driver */
model_config_t config;
+ config.version = model->getMetadata()->getVersion ();
config.dbuf_fd = hwmem_prog->getDmabuf ();
config.program_size = hwmem_prog->getSize ();
config.program_offset_addr = 0;
+ config.metadata_dbuf_fd = model->getDmabuf ();
+
+ /** for metadata extended section */
+ size_t extended_size = model->getMetadata()->getMetaExtendedSize();
+ if (extended_size > 0) {
+ HWmem * hwmem_extended = new HWmem (new HWmemDevice);
+ hwmem_extended->setDriverAPI (api_.get ());
+
+ model->setExtendedMetadata (hwmem_extended);
+
+ status = hwmem_extended->alloc (extended_size);
+ if (status != 0) {
+ logerr (TAG, "Failed to allocate extended metadata: %d\n", status);
+ goto delete_exit;
+ }
+
+ config.metadata_ext_dbuf_fd = hwmem_extended->getDmabuf ();
+ config.metadata_ext_size = extended_size;
+
+ status = comm_.extractGenericBuffer (model_buf, hwmem_extended->getData (),
+ nullptr, NPUBIN_META_SIZE, extended_size);
+ if (status != 0) {
+ logerr (TAG, "Failed to extract generic buffer: %d\n", status);
+ goto delete_exit;
+ }
+ } else {
+ config.metadata_ext_dbuf_fd = -1;
+ config.metadata_ext_size = 0;
+ }
- status = api_->registerModel (&config);
+ status = api_->registerModel (&config, model->getMetadata()->getNPUVersion());
if (status != 0)
goto delete_exit;
if (opmode != NPUINPUT_HOST)
return -EINVAL;
+ if (input == nullptr || input->num_buffers == 0 || model == nullptr)
+ return -EINVAL;
+
+ const_cast<Model *>(model)->updateDataInfo ();
+
/** this device uses segment table */
SegmentTable * segt = prepareSegmentTable (model, input);
if (segt == nullptr) {
/** extract input data */
for (uint32_t idx = 0; idx < input->num_buffers; idx++) {
- size_t max_seg_size = segt->getInputSegment(idx)->getSize();
- uint32_t seg_offset = segt->getInputSegmentOffset(idx);
-
- if (input->bufs[idx].size + seg_offset > max_seg_size) {
- logerr (TAG, "Too large input data provided: max segment size (%zu)\n",
- max_seg_size);
- return -ERANGE;
- }
-
if (!segt->getInputSegment(idx)->isExternal ()) {
+ uint32_t seg_offset = segt->getInputSegmentOffset(idx);
auto func = std::bind (TrinityVision2::manipulateData, model, idx, true,
std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
int status = comm_.extractGenericBuffer (
Request *req = new Request (opmode);
req->setModel (model);
- req->setSegmentTable (segt);
+ req->setInferData (segt);
req->setCallback (std::bind (&TrinityVision2::callback, this, req, cb, cb_data));
- if (sequence)
- *sequence = req->getID();
+ if (sequence && req->getID () > 0) {
+ *sequence = (uint32_t) req->getID ();
+ }
return scheduler_->submitRequest (req);
}
/** @brief implementation of TRIV2's runInternal() */
int
TrinityVision2::runInternal (npu_input_opmode opmode, const Model *model,
- const input_buffers *input, const output_buffers *output)
+ std::string hw_dev)
{
if (!initialized ()) {
logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
return -EINVAL;
/** this device uses segment table */
- SegmentTable * segt = prepareSegmentTable (model, input, output);
+ SegmentTable * segt = prepareSegmentTable (model, nullptr, nullptr);
if (segt == nullptr) {
logerr (TAG, "Failed to create segment table instance\n");
return -EINVAL;
Request *req = new Request (opmode);
req->setModel (model);
- req->setSegmentTable (segt);
+ req->setInferData (segt);
+ req->setHwDevice (hw_dev);
return scheduler_->submitRequest (req);
}
void
TrinityVision2::callback (Request *req, npuOutputNotify cb, void *cb_data)
{
+ if (cb == nullptr)
+ return;
+
const Model *model = req->getModel ();
- SegmentTable *segt = req->getSegmentTable ();
+ SegmentTable *segt = dynamic_cast<SegmentTable *> (req->getInferData ());
+ /** internal logic error */
+ assert (segt != nullptr);
+
output_buffers output = {
.num_buffers = segt->getNumOutputSegments ()
};
/** user needs to free this */
output.bufs[idx].addr = calloc (1, output_tensor_size);
+#if defined(ENABLE_FPGA_WORKAROUND)
+ api_->fpga_memcpy (
+ segt->getOutputSegment(idx)->getDmabuf(),
+ segt->getOutputSegmentOffset(idx),
+ output.bufs[idx].addr,
+ output.bufs[idx].size);
+#else
auto func = std::bind (TrinityVision2::manipulateData, model, idx, false,
std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
int status = comm_.insertGenericBuffer (
if (status != 0) {
logerr (TAG, "Failed to return output buffer: %d\n", status);
}
+#endif
}
cb (&output, req->getID(), cb_data);
delete segt;
}
-/** @brief implementation of TRIA's run(): WIP */
-int
-TrinityAsr::run (npu_input_opmode opmode, const Model *model,
- const input_buffers *input, npuOutputNotify cb, void *cb_data,
- uint64_t *sequence)
-{
- if (!initialized ()) {
- logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
- return -EPERM;
- }
-
- if (opmode != NPUINPUT_HOST)
- return -EINVAL;
-
- Buffer * buffer;
- int status;
- /** ASR does not require model and support only a single tensor */
- const generic_buffer *first_buf = &input->bufs[0];
- if (first_buf->type == BUFFER_DMABUF) {
- buffer = mem_->allocBuffer (new HWmemExternal);
- if (buffer == nullptr)
- return -ENOMEM;
-
- buffer->setDmabuf (first_buf->dmabuf);
- buffer->setOffset (first_buf->offset);
- buffer->setSize (first_buf->size);
- } else {
- buffer = mem_->allocBuffer (new HWmemDevice);
- if (buffer == nullptr)
- return -ENOMEM;
-
- status = buffer->alloc (first_buf->size);
- if (status != 0) {
- delete buffer;
- return status;
- }
- }
-
- status = buffer->createTensors ();
- if (status != 0) {
- logerr (TAG, "Failed to create tensors: %d\n", status);
- delete buffer;
- return status;
- }
-
- if (!buffer->isExternal ()) {
- status = comm_.extractGenericBuffer (first_buf,
- buffer->getInputTensor(0)->getData(), nullptr);
- if (status != 0)
- return status;
- }
-
- Request *req = new Request (opmode);
- req->setBuffer (buffer);
- req->setCallback (std::bind (&TrinityAsr::callback, this, req, cb, cb_data));
-
- if (sequence)
- *sequence = req->getID();
-
- return scheduler_->submitRequest (req);
-}
-
-/** @brief callback of TRIA request: WIP */
-void
-TrinityAsr::callback (Request *req, npuOutputNotify cb, void *cb_data)
-{
-}
-
/** Implement data manipulation (each device may have different impl.) */
#ifdef ENABLE_MANIP
-
-#define do_quantized_memcpy(type) do {\
- idx = 0;\
- if (quant) {\
- while (idx < num_elems) {\
- val = ((type *) src)[idx];\
- val = val / _scale;\
- val += _zero_point;\
- val = (val > 255.0) ? 255.0 : 0.0;\
- ((uint8_t *) dst)[idx++] = (uint8_t) val;\
- }\
- } else {\
- while (idx < num_elems) {\
- val = *(uint8_t *) src;\
- val -= _zero_point;\
- val *= _scale;\
- ((type *) dst)[idx++] = (type) val;\
- dst = (void*)(((uint8_t *) dst) + data_size);\
- src = (void*)(((uint8_t *) src) + 1);\
- }\
- }\
- } while (0)
-
-/**
- * @brief memcpy during quantization
- */
-static void memcpy_with_quant (bool quant, data_type type, float scale, uint32_t zero_point,
- void *dst, const void *src, uint32_t num_elems)
-{
- double _scale = (double) scale;
- double _zero_point = (double) zero_point;
- double val;
- uint32_t data_size = get_data_size (type);
- uint32_t idx;
-
- switch (type) {
- case DATA_TYPE_INT8:
- do_quantized_memcpy (int8_t);
- break;
- case DATA_TYPE_UINT8:
- do_quantized_memcpy (uint8_t);
- break;
- case DATA_TYPE_INT16:
- do_quantized_memcpy (int16_t);
- break;
- case DATA_TYPE_UINT16:
- do_quantized_memcpy (uint16_t);
- break;
- case DATA_TYPE_INT32:
- do_quantized_memcpy (int32_t);
- break;
- case DATA_TYPE_UINT32:
- do_quantized_memcpy (uint32_t);
- break;
- case DATA_TYPE_INT64:
- do_quantized_memcpy (int64_t);
- break;
- case DATA_TYPE_UINT64:
- do_quantized_memcpy (uint64_t);
- break;
- case DATA_TYPE_FLOAT32:
- do_quantized_memcpy (float);
- break;
- case DATA_TYPE_FLOAT64:
- do_quantized_memcpy (double);
- break;
- default:
- logerr (TAG, "Unsupported datatype %d\n", type);
- }
-}
-
/**
* @brief perform data manipulation
* @param[in] model model instance
* @return size of memory copy if no error, otherwise zero
*
* @note the input data format should be NHWC
- * @detail rules for the memory address of activations in NPU HW.
- * (https://code.sec.samsung.net/confluence/pages/viewpage.action?pageId=146491864)
*
- * 1) Special case (depth == 3)
- * - addr(x,y,z) = addr(0,0,0) + (z) + 3 * (x + width * y)
+ * @detail Feature map data in TRIV2, (x, y, z) = (width, height, depth)
*
- * 2) Common case
- * - addr(x,y,z) = addr(0,0,0) + (z % MPA_L) + MPA_L * (x + width * (y + height * (z / MPA_L)))
+ * 1) Image input (depth == 1 or depth == 3)
+ * Addr(x,y,z) = Addr(0,0,0) + z + depth * x + ymod * y
*
- * Thus, if depth is not a multiple of MPA_L (i.e., 64), zero padding is required
+ * 2) Common cases
+ * Addr(x,y,z) = Addr(0,0,0) + (z % 64) + (64 * x) + ymod * y + zmod * (z / 64)
*/
size_t
-TrinityVision::manipulateData (const Model *model, uint32_t idx, bool is_input,
+TrinityVision2::manipulateData (const Model *model, uint32_t idx, bool is_input,
void *dst, void *src, size_t size)
{
- const Metadata *meta = model->getMetadata();
- const tensor_data_info* info;
- const uint32_t *dims;
- uint32_t zero_point;
- float scale;
+ const Metadata *meta = model->getMetadata ();
+ DataConverter converter (is_input);
- /** extract required information from the metadata */
+ converter.setData (src, dst, size);
+ converter.setTops (meta->getTops ());
if (is_input) {
- if (idx >= meta->getInputNum()) {
- logerr (TAG, "Wrong information for input tensors in metadata\n");
+ const tensor_data_info* info = model->getInputDataInfo (idx);
+ if (info == nullptr)
return 0;
- }
- info = model->getInputDataInfo (idx);
- dims = meta->getInputDims (idx);
- zero_point = meta->getInputQuantZero (idx);
- scale = meta->getInputQuantScale (idx);
+ converter.setDataLayout (info->layout, DATA_LAYOUT_TRIV2);
+ converter.setDataType (info->type, meta->getInputQuantType (idx));
+ converter.setDataDims (meta->getInputDims (idx));
+ converter.setQuantZero (meta->getInputQuantZero (idx));
+ converter.setQuantScale (meta->getInputQuantScale (idx));
} else {
- if (idx >= meta->getOutputNum()) {
- logerr (TAG, "Wrong information for output tensors in metadata\n");
- return 0;
- }
-
- info = model->getOutputDataInfo (idx);
- dims = meta->getOutputDims (idx);
- zero_point = meta->getOutputQuantZero (idx);
- scale = meta->getOutputQuantScale (idx);
- }
-
- if (info == nullptr) {
- logerr (TAG, "Unmatched tensors info\n");
- return 0;
- }
-
- uint32_t batch = dims[0];
- uint32_t height = dims[1];
- uint32_t width = dims[2];
- uint32_t depth = dims[3];
-
- uint32_t data_size = get_data_size (info->type);
- if (data_size == 0) {
- logerr (TAG, "Invalid data size\n");
- return 0;
- }
-
- bool need_quantization = false;
- /**
- * note that we assume DATA_TYPE_SRNPU is the smallest data type that we consider.
- * Also, DATA_TYPE_SRNPU and uint8_t may be regarded as the same in the view of apps.
- */
- if (info->type != DATA_TYPE_SRNPU) {
- assert (data_size >= get_data_size (DATA_TYPE_SRNPU));
-
- if (data_size > get_data_size (DATA_TYPE_SRNPU) ||
- !(zero_point == default_quant_zero && scale == default_quant_scale))
- need_quantization = true;
- }
-
- /** check data manipulation is required */
- if (depth != 3 && depth != 64 && info->layout != DATA_LAYOUT_SRNPU) {
- uint32_t MPA_L = DATA_GRANULARITY;
- uint32_t n, h, w, d;
- uint32_t std_offset; /* standard offset in NHWC data format */
- uint32_t npu_offset; /* npu offset in NPU HW data format*/
- uint32_t src_offset;
- uint32_t dst_offset;
- uint32_t slice_size;
-
- /* @todo we currently support only NHWC */
- if (info->layout != DATA_LAYOUT_NHWC) {
- logerr (TAG, "data manipulation is supported for NHWC only\n");
+ const tensor_data_info* info = model->getOutputDataInfo (idx);
+ if (info == nullptr)
return 0;
- }
- for (n = 0; n < batch; n++) {
- for (h = 0; h < height; h++) {
- for (w = 0; w < width; w++) {
- for (d = 0; d < depth; d += MPA_L) {
- std_offset = d + depth * (w + width * (h + n * height));
- npu_offset = MPA_L * (w + width * (h + (n + d / MPA_L) * height));
- slice_size = (depth - d >= MPA_L) ? MPA_L : depth - d;
-
- if (is_input) {
- src_offset = std_offset * data_size;
- dst_offset = npu_offset;
- } else {
- src_offset = npu_offset;
- dst_offset = std_offset * data_size;
- }
-
- /* if depth is not a multiple of MPA_L, add zero paddings (not exact values) */
- if (need_quantization) {
- memcpy_with_quant (is_input, info->type, scale, zero_point,
- static_cast<char*>(dst) + dst_offset,
- static_cast<char*>(src) + src_offset,
- slice_size);
- } else {
- memcpy (
- static_cast<char*>(dst) + dst_offset,
- static_cast<char*>(src) + src_offset,
- slice_size);
- }
- }
- }
- }
- }
- } else if (need_quantization) {
- /** depth == 3 || depth == 64; special cases which can directly copy input tensor data */
- memcpy_with_quant (is_input, info->type, scale, zero_point,
- dst, src, is_input ? size / data_size : size);
- } else {
- memcpy (dst, src, size);
+ converter.setDataLayout (DATA_LAYOUT_TRIV2, info->layout);
+ converter.setDataType (meta->getOutputQuantType (idx), info->type);
+ converter.setDataDims (meta->getOutputDims (idx));
+ converter.setQuantZero (meta->getOutputQuantZero (idx));
+ converter.setQuantScale (meta->getOutputQuantScale (idx));
}
- return size;
+ return converter.perform ();
}
#else
size_t
-TrinityVision::manipulateData (const Model *model, uint32_t idx, bool is_input,
- void *dst, void *src, size_t size)
-{
- memcpy (dst, src, size);
- return size;
-}
-
-#endif
-
-/** other device types don't have data manip impl. yet */
-
-size_t
TrinityVision2::manipulateData (const Model *model, uint32_t idx, bool is_input,
void *dst, void *src, size_t size)
{
return size;
}
-size_t
-TrinityAsr::manipulateData (const Model *model, uint32_t idx, bool is_input,
- void *dst, void *src, size_t size)
-{
- memcpy (dst, src, size);
- return size;
-}
+#endif