* @bug No known bugs except for NYI items
*/
+#include "ne-handler.h"
+
+#include <libnpuhost.h>
#include <npubinfmt.h>
#include <NPUdrvAPI.h>
#include <CommPlugin.h>
-#include "ne-utils.h"
-#include "ne-mem.h"
-#include "ne-scheduler.h"
-#include "ne-handler.h"
-
#include <string.h>
#include <assert.h>
HostHandler *handler = tdev->getHostHandler (); \
if (handler == nullptr) return -EINVAL;
-/** @brief device class. it contains all related instances */
-class Device {
- public:
- /** @brief Factory method to create a trinity device dependong on dev type */
- static Device *createInstance (dev_type device_type, int device_id);
-
- /** @brief constructor of device */
- Device (dev_type type, int id, bool need_model = true)
- : comm_(CommPlugin::getCommPlugin()), type_ (type), id_ (id),
- need_model_ (true), mode_ (NPUASYNC_WAIT), initialized_ (ATOMIC_FLAG_INIT) {}
-
- /** @brief destructor of device */
- virtual ~Device () {}
-
- /** @brief initialization */
- int init () {
- if (!initialized_.test_and_set()) {
- /** create the corresponding driver API */
- api_ = DriverAPI::createDriverAPI (type_, id_);
- if (api_.get() == nullptr) {
- initialized_.clear();
- logerr (TAG, "Failed to create driver API\n");
- return -EINVAL;
- }
-
- handler_.reset (new HostHandler (this));
- scheduler_.reset (new Scheduler (api_.get()));
- mem_ = MemAllocator::createInstance (api_.get());
- }
-
- return 0;
- }
+/** just for backward compatibility */
+npudev_h HostHandler::latest_dev_ = nullptr;
- HostHandler *getHostHandler () { return handler_.get(); }
- dev_type getType () { return type_; }
- int getID () { return id_; }
- bool needModel () { return need_model_; }
- void setAsyncMode (npu_async_mode mode) { mode_ = mode; }
+/** implement libnpuhost APIs */
- HWmem * allocMemory () { return mem_->allocMemory (); }
- void deallocMemory (int dmabuf_fd) { mem_->deallocMemory (dmabuf_fd); }
+/**
+ * @brief Returns the number of available NPU devices.
+ * @return @c The number of NPU devices.
+ * @retval 0 if no NPU devices are available; a positive value (the number of NPUs) if devices are available; otherwise, a negative error value.
+ * @note the caller should call putNPUdevice() to release the device handle
+ */
+int getnumNPUdeviceByType (dev_type type)
+{
+ return HostHandler::getNumDevices (type);
+}
- /** it stops all requests in this device (choose wait or force) */
- int stop (bool force_stop) {
- Request *req = new Request (NPUINPUT_STOP);
- req->setForceStop (force_stop);
- return scheduler_->submitRequest (req);
- }
+/**
+ * @brief Returns the handle of the chosen NPU devices.
+ * @param[out] dev The NPU device handle
+ * @param[in] id The NPU id to get the handle. 0 <= id < getnumNPUdeviceByType().
+ * @return @c 0 if no error. otherwise a negative error value
+ * @note the caller should call putNPUdevice() to release the device handle
+ */
+int getNPUdeviceByType (npudev_h *dev, dev_type type, uint32_t id)
+{
+ return HostHandler::getDevice (dev, type, id);
+}
- virtual Model * registerModel (const generic_buffer *model) = 0;
- virtual int run (npu_input_opmode opmode, const Model *model,
- const input_buffers *input, npuOutputNotify cb, void *cb_data,
- uint64_t *sequence) = 0;
+/**
+ * @brief Returns the handle of an NPU device meeting the condition
+ * @param[out] dev The NPU device handle
+ * @param[in] cond The condition for device search.
+ * @return @c 0 if no error. otherwise a negative error value
+ * @note the caller should call putNPUdevice() to release the device handle
+ * @note it's not supported yet
+ */
+int getNPUdeviceByCondition(npudev_h *dev, const npucondition *cond)
+{
+ /** not implemented yet */
+ return getNPUdeviceByType (dev, NPUCOND_TRIV_CONN_SOCIP, 0);
+}
- protected:
- /** the device instance has ownership of all related components */
- std::unique_ptr<DriverAPI> api_; /**< device api */
- std::unique_ptr<MemAllocator> mem_; /**< memory allocator */
- std::unique_ptr<HostHandler> handler_; /**< host handler */
- std::unique_ptr<Scheduler> scheduler_; /**< scheduler */
+/**
+ * @brief release the NPU device instance obtained by getDevice ()
+ * @param[in] dev the NPU device handle
+ */
+void putNPUdevice (npudev_h dev)
+{
+ if (dev != nullptr)
+ delete static_cast<Device *> (dev);
+}
- CommPlugin& comm_; /**< plugin communicator */
+/**
+ * @brief Send the NN model to NPU.
+ * @param[in] dev The NPU device handle
+ * @param[in] modelfile The filepath to the compiled NPU NN model in any buffer_type
+ * @param[out] modelid The modelid allocated for this instance of NN model.
+ * @return @c 0 if no error. otherwise a negative error value
+ *
+ * @detail For ASR devices, which do not accept models, but have models
+ * embedded in devices, you do not need to call register and
+ * register calls for ASR are ignored.
+ *
+ * @todo Add a variation: in-memory model register.
+ */
+int registerNPUmodel (npudev_h dev, generic_buffer *modelfile, uint32_t *modelid)
+{
+ INIT_HOST_HANDLER (host_handler, dev);
- dev_type type_; /**< device type */
- int id_; /**< device id */
- bool need_model_; /**< indicates whether the device needs model */
- npu_async_mode mode_; /**< async run mode */
+ return host_handler->registerModel (modelfile, modelid);
+}
- private:
- std::atomic_flag initialized_;
-};
+/**
+ * @brief Remove the NN model from NPU
+ * @param[in] dev The NPU device handle
+ * @param[in] modelid The model to be removed from the NPU.
+ * @return @c 0 if no error. otherwise a negative error value
+ * @detail This may incur some latency with memory compaction.
+ */
+int unregisterNPUmodel(npudev_h dev, uint32_t modelid)
+{
+ INIT_HOST_HANDLER (host_handler, dev);
-/** @brief Trinity Vision (TRIV) classs */
-class TrinityVision : public Device {
- public:
- TrinityVision (int id) : Device (NPUCOND_TRIV_CONN_SOCIP, id) {}
- ~TrinityVision () {}
+ return host_handler->unregisterModel (modelid);
+}
- static size_t manipulateData (const Model *model, uint32_t idx, bool is_input,
- void *dst, void *src, size_t size);
+/**
+ * @brief Remove all NN models from NPU
+ * @param[in] dev The NPU device handle
+ * @return @c 0 if no error. otherwise a negative error value
+ */
+int unregisterNPUmodel_all(npudev_h dev)
+{
+ INIT_HOST_HANDLER (host_handler, dev);
- Model * registerModel (const generic_buffer *model_buf) {
- Model *model = mem_->allocModel ();
- if (model == nullptr) {
- logerr (TAG, "Failed to allocate model\n");
- return nullptr;
- }
+ return host_handler->unregisterModels ();
+}
- int status;
- if (model_buf->type == BUFFER_DMABUF) {
- model->setDmabuf (model_buf->dmabuf);
- model->setOffset (model_buf->offset);
- model->setSize (model_buf->size);
- } else {
- status = model->alloc (model_buf->size);
- if (status != 0) {
- logerr (TAG, "Failed to allocate model: %d\n", status);
- goto delete_exit;
- }
+/**
+ * @brief [OPTIONAL] Set the data layout for input/output tensors
+ * @param[in] dev The NPU device handle
+ * @param[in] modelid The ID of model whose layouts are set
+ * @param[in] info_in the layout/type info for input tensors
+ * @param[in] info_out the layout/type info for output tensors
+ * @return @c 0 if no error. otherwise a negative error value
+ * @note if this function is not called, default layout/type will be used.
+ */
+int setNPU_dataInfo(npudev_h dev, uint32_t modelid,
+ tensors_data_info *info_in, tensors_data_info *info_out)
+{
+ INIT_HOST_HANDLER (host_handler, dev);
- status = comm_.extractGenericBuffer (model_buf, model->getData(), nullptr);
- if (status != 0) {
- logerr (TAG, "Failed to extract generic buffer: %d\n", status);
- goto delete_exit;
- }
- }
+ return host_handler->setDataInfo (modelid, info_in, info_out);
+}
- status = model->setMetadata (model->getData());
- if (status != 0)
- goto delete_exit;
+/**
+ * @brief [OPTIONAL] Set the inference constraint for next NPU inferences
+ * @param[in] dev The NPU device handle
+ * @param[in] modelid The target model id
+ * @param[in] constraint inference constraint (e.g., timeout, priority)
+ * @return @c 0 if no error. otherwise a negative error value
+ * @note If this function is not called, default values are used.
+ */
+int setNPU_constraint(npudev_h dev, uint32_t modelid, npuConstraint constraint)
+{
+ INIT_HOST_HANDLER (host_handler, dev);
- model_config_t config;
- config.dmabuf_id = model->getDmabuf();
- config.program_size = model->getMetadata()->getProgramSize();
- config.program_offset_addr = model->getOffset() + model->getMetadata()->getMetaSize();
- config.weight_offset_addr = config.program_offset_addr + config.program_size;
+ return host_handler->setConstraint (modelid, constraint);
+}
- status = api_->setModel (&config);
- if (status != 0)
- goto delete_exit;
+/**
+ * @brief Execute inference. Wait (block) until the output is available.
+ * @param[in] dev The NPU device handle
+ * @param[in] modelid The model to be inferred.
+ * @param[in] input The input data to be inferred.
+ * @param[out] output The output result. The caller MUST allocate appropriately before calling this.
+ * @return @c 0 if no error. otherwise a negative error value
+ *
+ * @detail This is a syntactic sugar of runNPU_async().
+ * CAUTION: There is a memcpy for the output buffer.
+ */
+int runNPU_sync(npudev_h dev, uint32_t modelid, const input_buffers *input,
+ output_buffers *output)
+{
+ INIT_HOST_HANDLER (host_handler, dev);
- return model;
+ return host_handler->runSync (modelid, input, output);
+}
-delete_exit:
- delete model;
- return nullptr;
- }
+/**
+ * @brief Invoke NPU inference. Unblocking call.
+ * @param[in] dev The NPU device handle
+ * @param[in] modelid The model to be inferred.
+ * @param[in] input The input data to be inferred.
+ * @param[in] cb The output buffer handler.
+ * @param[out] sequence The sequence number returned with runNPU_async.
+ * @param[in] data The data given as a parameter to the runNPU_async call.
+ * @param[in] mode Configures how this operation works.
+ * @return @c 0 if no error. otherwise a negative error value
+ */
+int runNPU_async(npudev_h dev, uint32_t modelid, const input_buffers *input,
+ npuOutputNotify cb, uint64_t *sequence, void *data,
+ npu_async_mode mode)
+{
+ INIT_HOST_HANDLER (host_handler, dev);
- Buffer * prepareInputBuffers (const Model *model, const input_buffers *input) {
- const Metadata *meta = model->getMetadata();
- const generic_buffer *first = &input->bufs[0];
+ return host_handler->runAsync (modelid, input, cb, data, mode, sequence);
+}
- if (meta->getInputNum() != input->num_buffers)
- return nullptr;
+/**
+ * @brief Allocate a buffer for NPU model with the requested buffer type.
+ * @param[in] dev The NPU device handle
+ * @param[in/out] Buffer the buffer pointer where memory is allocated.
+ * @return 0 if no error, otherwise a negative errno.
+ */
+int allocNPU_modelBuffer (npudev_h dev, generic_buffer *buffer)
+{
+ INIT_HOST_HANDLER (host_handler, dev);
- Buffer * buffer = mem_->allocBuffer ();
- if (buffer != nullptr) {
- int status;
-
- if (first->type == BUFFER_DMABUF) {
- buffer->setDmabuf (first->dmabuf);
- buffer->setOffset (first->offset);
- buffer->setSize (meta->getBufferSize());
- } else {
- status = buffer->alloc (meta->getBufferSize ());
- if (status != 0) {
- logerr (TAG, "Failed to allocate buffer: %d\n", status);
- goto delete_buffer;
- }
- }
+ return host_handler->allocGenericBuffer (buffer);
+}
- status = buffer->createTensors (meta);
- if (status != 0) {
- logerr (TAG, "Failed to allocate tensors: %s\n", status);
- goto delete_buffer;
- }
- }
+/**
+ * @brief Free the buffer and remove the address mapping.
+ * @param[in] dev The NPU device handle
+ * @param[in] buffer the model buffer
+ * @return 0 if no error, otherwise a negative errno.
+ */
+int cleanNPU_modelBuffer (npudev_h dev, generic_buffer *buffer)
+{
+ INIT_HOST_HANDLER (host_handler, dev);
- return buffer;
+ return host_handler->deallocGenericBuffer (buffer);
+}
-delete_buffer:
- delete buffer;
- return nullptr;
- }
+/**
+ * @brief Allocate a buffer for NPU input with the requested buffer type.
+ * @param[in] dev The NPU device handle
+ * @param[in/out] Buffer the buffer pointer where memory is allocated.
+ * @return 0 if no error, otherwise a negative errno.
+ * @note please utilize allocInputBuffers() for multiple input tensors because subsequent
+ *       calls of allocInputBuffer() don't guarantee contiguous allocations between them.
+ */
+int allocNPU_inputBuffer (npudev_h dev, generic_buffer *buffer)
+{
+ INIT_HOST_HANDLER (host_handler, dev);
- int run (npu_input_opmode opmode, const Model *model,
- const input_buffers *input, npuOutputNotify cb, void *cb_data,
- uint64_t *sequence) {
- if (opmode != NPUINPUT_HOST)
- return -EINVAL;
+ return host_handler->allocGenericBuffer (buffer);
+}
- Buffer *buffer = prepareInputBuffers (model, input);
- if (buffer == nullptr)
- return -EINVAL;
+/**
+ * @brief Free the buffer and remove the address mapping.
+ * @param[in] dev The NPU device handle
+ * @param[in] buffer the input buffer
+ * @return 0 if no error, otherwise a negative errno.
+ */
+int cleanNPU_inputBuffer (npudev_h dev, generic_buffer *buffer)
+{
+ INIT_HOST_HANDLER (host_handler, dev);
- for (uint32_t idx = 0; idx < input->num_buffers; idx++) {
- auto func = std::bind (TrinityVision::manipulateData, model, idx, true,
- std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
- int status = comm_.extractGenericBuffer (&input->bufs[idx],
- buffer->getInputTensor(idx)->getData(), func);
- if (status != 0) {
- logerr (TAG, "Failed to feed input buffer: %d\n", status);
- return status;
- }
- }
+ return host_handler->deallocGenericBuffer (buffer);
+}
- /** this device uses CMA buffer */
+/**
+ * @brief Allocate input buffers, which have multiple instances of generic_buffer
+ * @param[in] dev The NPU device handle
+ * @param[in/out] input input buffers.
+ * @return 0 if no error, otherwise a negative errno.
+ * @note it reuses allocInputBuffer().
+ * @details in case of BUFFER_DMABUF, this function can be used to guarantee physically-contiguous
+ * memory mapping for multiple tensors (in a single inference, not batch size).
+ */
+int allocNPU_inputBuffers (npudev_h dev, input_buffers * input)
+{
+ INIT_HOST_HANDLER (host_handler, dev);
- Request *req = new Request (opmode);
- req->setModel (model);
- req->setBuffer (buffer);
- req->setCallback (std::bind (&TrinityVision::callback, this, req, cb, cb_data));
+ return host_handler->allocGenericBuffer (input);
+}
- if (sequence)
- *sequence = req->getID();
+/**
+ * @brief Free input buffers allocated by allocInputBuffers().
+ * @param[in] dev The NPU device handle
+ * @param[in/out] input input buffers.
+ * @note it reuses cleanInputbuffer().
+ * @return 0 if no error, otherwise a negative errno.
+ */
+int cleanNPU_inputBuffers (npudev_h dev, input_buffers * input)
+{
+ INIT_HOST_HANDLER (host_handler, dev);
- return scheduler_->submitRequest (req);
- }
+ return host_handler->deallocGenericBuffer (input);
+}
- void callback (Request *req, npuOutputNotify cb, void *cb_data) {
- const Model *model = req->getModel ();
- Buffer *buffer = req->getBuffer ();
- output_buffers output = {
- .num_buffers = buffer->getOutputNum ()
- };
-
- for (uint32_t idx = 0; idx < output.num_buffers; idx++) {
- uint32_t output_tensor_size = model->getOutputTensorSize (idx);
-
- output.bufs[idx].type = BUFFER_MAPPED;
- output.bufs[idx].size = output_tensor_size;
- /** user needs to free this */
- output.bufs[idx].addr = malloc (output_tensor_size);
-
- auto func = std::bind (TrinityVision::manipulateData, model, idx, false,
- std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
- int status = comm_.insertGenericBuffer (buffer->getOutputTensor(idx)->getData(),
- &output.bufs[idx], func);
- if (status != 0) {
- logerr (TAG, "Failed to return output buffer: %d\n", status);
- }
- }
-
- cb (&output, req->getID(), cb_data);
- }
-};
-
-/** @brief Trinity Vision2 (TRIV2) classs */
-class TrinityVision2 : public Device {
- public:
- TrinityVision2 (int id) : Device (NPUCOND_TRIV2_CONN_SOCIP, id) {}
- ~TrinityVision2 () {}
-
- static size_t manipulateData (const Model *model, uint32_t idx, bool is_input,
- void *dst, void *src, size_t size) {
- memcpy (dst, src, size);
- return size;
- }
+/**
+ * @brief Get metadata for NPU model
+ * @param[in] model The path of model binary file
+ * @param[in] need_extra whether you want to extract the extra data in metadata
+ * @return the metadata structure to be filled if no error, otherwise nullptr
+ *
+ * @note For most npu-engine users, the extra data is not useful because it will be
+ * used for second-party users (e.g., compiler, simulator).
+ * Also, the caller needs to free the metadata.
+ *
+ * @note the caller needs to free the metadata
+ */
+npubin_meta * getNPUmodel_metadata (const char *model, bool need_extra)
+{
+ npubin_meta *meta;
+ FILE *fp;
+ size_t ret;
- Model * registerModel (const generic_buffer *model_buf) {
- /** TODO: model's weight values are stored in segments */
- return nullptr;
- }
+ if (!model)
+ return nullptr;
- int run (npu_input_opmode opmode, const Model *model,
- const input_buffers *input, npuOutputNotify cb, void *cb_data,
- uint64_t *sequence) {
- if (opmode != NPUINPUT_HOST && opmode != NPUINPUT_HW_RECURRING)
- return -EINVAL;
+ fp = fopen (model, "rb");
+ if (!fp) {
+ logerr (TAG, "Failed to open the model binary: %d\n", -errno);
+ return nullptr;
+ }
- /** this device uses segment table */
+ meta = (npubin_meta *) malloc (NPUBIN_META_SIZE);
+ if (!meta) {
+ logerr (TAG, "Failed to allocate metadata\n");
+ goto exit_err;
+ }
- Request *req = new Request (opmode);
- req->setModel (model);
-#if 0
- req->setSegmentTable (segt);
-#endif
- req->setCallback (std::bind (&TrinityVision2::callback, this, req, cb, cb_data));
+ ret = fread (meta, 1, NPUBIN_META_SIZE, fp);
+ if (ret != NPUBIN_META_SIZE) {
+ logerr (TAG, "Failed to read the metadata\n");
+ goto exit_free;
+ }
- if (sequence)
- *sequence = req->getID();
+ if (!CHECK_NPUBIN (meta->magiccode)) {
+ logerr (TAG, "Invalid metadata provided\n");
+ goto exit_free;
+ }
- return scheduler_->submitRequest (req);
- }
+ if (need_extra && NPUBIN_META_EXTRA (meta->magiccode) > 0) {
+ npubin_meta *new_meta;
- void callback (Request *req, npuOutputNotify cb, void *cb_data) {
+ new_meta = (npubin_meta *) realloc (meta, NPUBIN_META_TOTAL_SIZE(meta->magiccode));
+ if (!new_meta) {
+ logerr (TAG, "Failed to allocate extra metadata\n");
+ goto exit_free;
}
-};
-/** @brief Trinity Asr (TRIA) classs */
-class TrinityAsr : public Device {
- public:
- TrinityAsr (int id) : Device (NPUCOND_TRIA_CONN_SOCIP, id, false) {}
- ~TrinityAsr () {}
-
- static size_t manipulateData (const Model *model, uint32_t idx, bool is_input,
- void *dst, void *src, size_t size) {
- memcpy (dst, src, size);
- return size;
+ /* read via new_meta only: realloc() may have freed/moved the old 'meta' */
+ ret = fread (new_meta->reserved_extra, 1, NPUBIN_META_EXTRA_SIZE (new_meta->magiccode), fp);
+ if (ret != NPUBIN_META_EXTRA_SIZE (new_meta->magiccode)) {
+ logerr (TAG, "Invalid extra metadata provided\n");
+ free (new_meta);
+ goto exit_err;
 }
- Model * registerModel (const generic_buffer *model_buf) { return nullptr; }
-
- int run (npu_input_opmode opmode, const Model *model,
- const input_buffers *input, npuOutputNotify cb, void *cb_data,
- uint64_t *sequence) {
- if (opmode != NPUINPUT_HOST)
- return -EINVAL;
-
- /** ASR does not require model and support only a single tensor */
- const generic_buffer *first_buf = &input->bufs[0];
- Buffer * buffer = mem_->allocBuffer ();
- int status;
- if (first_buf->type == BUFFER_DMABUF) {
- buffer->setDmabuf (first_buf->dmabuf);
- buffer->setOffset (first_buf->offset);
- buffer->setSize (first_buf->size);
- } else {
- status = buffer->alloc (first_buf->size);
- if (status != 0) {
- delete buffer;
- return status;
- }
- }
- buffer->createTensors ();
-
- status = comm_.extractGenericBuffer (first_buf,
- buffer->getInputTensor(0)->getData(), nullptr);
- if (status != 0)
- return status;
-
- Request *req = new Request (opmode);
- req->setBuffer (buffer);
- req->setCallback (std::bind (&TrinityAsr::callback, this, req, cb, cb_data));
+ meta = new_meta;
+ }
- if (sequence)
- *sequence = req->getID();
+ fclose (fp);
- return scheduler_->submitRequest (req);
- }
+ return meta;
- void callback (Request *req, npuOutputNotify cb, void *cb_data) {
- }
-};
+exit_free:
+ free (meta);
+exit_err:
+ fclose (fp);
-#ifdef ENABLE_MANIP
+ return nullptr;
+}
-#define do_quantized_memcpy(type) do {\
- idx = 0;\
- if (quant) {\
- while (idx < num_elems) {\
- val = ((type *) src)[idx];\
- val = val / _scale;\
- val += _zero_point;\
- val = (val > 255.0) ? 255.0 : 0.0;\
- ((uint8_t *) dst)[idx++] = (uint8_t) val;\
- }\
- } else {\
- while (idx < num_elems) {\
- val = *(uint8_t *) src;\
- val -= _zero_point;\
- val *= _scale;\
- ((type *) dst)[idx++] = (type) val;\
- dst = (void*)(((uint8_t *) dst) + data_size);\
- src = (void*)(((uint8_t *) src) + 1);\
- }\
- }\
- } while (0)
+/** deprecated buffer APIs; please use the above APIs */
/**
- * @brief memcpy during quantization
+ * @brief Returns the number of NPU devices (TRIV).
*/
-static void memcpy_with_quant (bool quant, data_type type, float scale, uint32_t zero_point,
- void *dst, const void *src, uint32_t num_elems)
+int getnumNPUdevice (void)
{
- double _scale = (double) scale;
- double _zero_point = (double) zero_point;
- double val;
- uint32_t data_size = get_data_size (type);
- uint32_t idx;
-
- switch (type) {
- case DATA_TYPE_INT8:
- do_quantized_memcpy (int8_t);
- break;
- case DATA_TYPE_UINT8:
- do_quantized_memcpy (uint8_t);
- break;
- case DATA_TYPE_INT16:
- do_quantized_memcpy (int16_t);
- break;
- case DATA_TYPE_UINT16:
- do_quantized_memcpy (uint16_t);
- break;
- case DATA_TYPE_INT32:
- do_quantized_memcpy (int32_t);
- break;
- case DATA_TYPE_UINT32:
- do_quantized_memcpy (uint32_t);
- break;
- case DATA_TYPE_INT64:
- do_quantized_memcpy (int64_t);
- break;
- case DATA_TYPE_UINT64:
- do_quantized_memcpy (uint64_t);
- break;
- case DATA_TYPE_FLOAT32:
- do_quantized_memcpy (float);
- break;
- case DATA_TYPE_FLOAT64:
- do_quantized_memcpy (double);
- break;
- default:
- logerr (TAG, "Unsupported datatype %d\n", type);
- }
+ logwarn (TAG, "deprecated. Please use getnumNPUdeviceByType ()\n");
+ return getnumNPUdeviceByType (NPUCOND_TRIV_CONN_SOCIP);
}
/**
- * @brief perform data manipulation
- * @param[in] model model instance
- * @param[in] idx tensor index
- * @param[in] is_input indicate it's input manipulation
- * @param[out] dst destination buffer
- * @param[in] src source buffer (feature map)
- * @param[in] size size to be copied
- * @return size of memory copy if no error, otherwise zero
- *
- * @note the input data format should be NHWC
- * @detail rules for the memory address of activations in NPU HW.
- * (https://code.sec.samsung.net/confluence/pages/viewpage.action?pageId=146491864)
- *
- * 1) Special case (depth == 3)
- * - addr(x,y,z) = addr(0,0,0) + (z) + 3 * (x + width * y)
- *
- * 2) Common case
- * - addr(x,y,z) = addr(0,0,0) + (z % MPA_L) + MPA_L * (x + width * (y + height * (z / MPA_L)))
- *
- * Thus, if depth is not a multiple of MPA_L (i.e., 64), zero padding is required
+ * @brief Returns the list of ASR devices (TRIA)
*/
-size_t
-TrinityVision::manipulateData (const Model *model, uint32_t idx, bool is_input,
- void *dst, void *src, size_t size)
+int getnumASRdevice (void)
{
- const Metadata *meta = model->getMetadata();
- const tensor_data_info* info;
- const uint32_t *dims;
- uint32_t zero_point;
- float scale;
-
- /** extract required information from the metadata */
- if (is_input) {
- if (idx >= meta->getInputNum()) {
- logerr (TAG, "Wrong information for input tensors in metadata\n");
- return 0;
- }
-
- info = model->getInputDataInfo (idx);
- dims = meta->getInputDims (idx);
- zero_point = meta->getInputQuantZero (idx);
- scale = meta->getInputQuantScale (idx);
- } else {
- if (idx >= meta->getOutputNum()) {
- logerr (TAG, "Wrong information for output tensors in metadata\n");
- return 0;
- }
-
- info = model->getOutputDataInfo (idx);
- dims = meta->getOutputDims (idx);
- zero_point = meta->getOutputQuantZero (idx);
- scale = meta->getOutputQuantScale (idx);
- }
-
- if (info == nullptr) {
- logerr (TAG, "Unmatched tensors info\n");
- return 0;
- }
-
- uint32_t batch = dims[0];
- uint32_t height = dims[1];
- uint32_t width = dims[2];
- uint32_t depth = dims[3];
-
- uint32_t data_size = get_data_size (info->type);
- if (data_size == 0) {
- logerr (TAG, "Invalid data size\n");
- return 0;
- }
-
- bool need_quantization = false;
- /**
- * note that we assume DATA_TYPE_SRNPU is the smallest data type that we consider.
- * Also, DATA_TYPE_SRNPU and uint8_t may be regarded as the same in the view of apps.
- */
- if (info->type != DATA_TYPE_SRNPU) {
- assert (data_size >= get_data_size (DATA_TYPE_SRNPU));
-
- if (data_size > get_data_size (DATA_TYPE_SRNPU) ||
- !(zero_point == DEFAULT_ZERO_POINT && scale == DEFAULT_SCALE))
- need_quantization = true;
- }
-
- /** check data manipulation is required */
- if (depth != 3 && depth != 64 && info->layout != DATA_LAYOUT_SRNPU) {
- uint32_t MPA_L = DATA_GRANULARITY;
- uint32_t n, h, w, d;
- uint32_t std_offset; /* standard offset in NHWC data format */
- uint32_t npu_offset; /* npu offset in NPU HW data format*/
- uint32_t src_offset;
- uint32_t dst_offset;
- uint32_t slice_size;
-
- /* @todo we currently support only NHWC */
- if (info->layout != DATA_LAYOUT_NHWC) {
- logerr (TAG, "data manipulation is supported for NHWC only\n");
- return -EINVAL;
- }
-
- for (n = 0; n < batch; n++) {
- for (h = 0; h < height; h++) {
- for (w = 0; w < width; w++) {
- for (d = 0; d < depth; d += MPA_L) {
- std_offset = d + depth * (w + width * (h + n * height));
- npu_offset = MPA_L * (w + width * (h + (n + d / MPA_L) * height));
- slice_size = (depth - d >= MPA_L) ? MPA_L : depth - d;
-
- if (is_input) {
- src_offset = std_offset * data_size;
- dst_offset = npu_offset;
- } else {
- src_offset = npu_offset;
- dst_offset = std_offset * data_size;
- }
-
- /* if depth is not a multiple of MPA_L, add zero paddings (not exact values) */
- if (need_quantization) {
- memcpy_with_quant (is_input, info->type, scale, zero_point,
- static_cast<char*>(dst) + dst_offset,
- static_cast<char*>(src) + src_offset,
- slice_size);
- } else {
- memcpy (
- static_cast<char*>(dst) + dst_offset,
- static_cast<char*>(src) + src_offset,
- slice_size);
- }
- }
- }
- }
- }
- } else if (need_quantization) {
- /** depth == 3 || depth == 64; special cases which can directly copy input tensor data */
- if (is_input)
- size = size / data_size;
+ logwarn (TAG, "deprecated. Please use getnumNPUdeviceByType ()\n");
+ return getnumNPUdeviceByType (NPUCOND_TRIA_CONN_SOCIP);
+}
- memcpy_with_quant (is_input, info->type, scale, zero_point,
- dst, src, size);
- } else {
- memcpy (dst, src, size);
- }
+/**
+ * @brief Returns the handle of the chosen TRIV device.
+ */
+int getNPUdevice (npudev_h *dev, uint32_t id)
+{
+ logwarn (TAG, "deprecated. Please use getNPUdeviceByType ()\n");
+ return getNPUdeviceByType (dev, NPUCOND_TRIV_CONN_SOCIP, id);
+}
- return 0;
+/**
+ * @brief Returns the handle of the chosen TRIA device.
+ */
+int getASRdevice (npudev_h *dev, uint32_t id)
+{
+ logwarn (TAG, "deprecated. Please use getNPUdeviceByType ()\n");
+ return getNPUdeviceByType (dev, NPUCOND_TRIA_CONN_SOCIP, id);
}
-#else
+/** @brief deprecated */
+int allocModelBuffer (generic_buffer *buffer)
+{
+ logwarn (TAG, "deprecated. Please use allocNPU_modelBuffer\n");
+ return allocNPU_modelBuffer (HostHandler::getLatestDevice(), buffer);
+}
-size_t
-TrinityVision::manipulateData (const Model *model, uint32_t idx, bool is_input,
- void *dst, void *src, size_t size)
+/** @brief deprecated; releases a model buffer on the latest device handle */
+int cleanModelBuffer (generic_buffer *buffer)
{
- memcpy (dst, src, size);
- return size;
+ logwarn (TAG, "deprecated. Please use cleanNPU_modelBuffer\n");
+ /* bugfix: must deallocate, not allocate (was allocNPU_modelBuffer) */
+ return cleanNPU_modelBuffer (HostHandler::getLatestDevice(), buffer);
}
-#endif
+/** @brief deprecated */
+int allocInputBuffer (generic_buffer *buffer)
+{
+ logwarn (TAG, "deprecated. Please use allocNPU_inputBuffer\n");
+ return allocNPU_inputBuffer (HostHandler::getLatestDevice(), buffer);
+}
-/**
- * @brief create device instance depending on device type and id
- * @param[in] type device type
- * @param[in] id device id
- * @return device instance
- */
-Device *
-Device::createInstance (dev_type type, int id)
+/** @brief deprecated */
+int cleanInputBuffer (generic_buffer *buffer)
{
- Device *device = nullptr;
+ logwarn (TAG, "deprecated. Please use cleanNPU_inputBuffer\n");
+ return cleanNPU_inputBuffer (HostHandler::getLatestDevice(), buffer);
+}
- switch (type & DEVICETYPE_MASK) {
- case DEVICETYPE_TRIV:
- device = new TrinityVision (id);
- break;
- case DEVICETYPE_TRIV2:
- device = new TrinityVision2 (id);
- break;
- case DEVICETYPE_TRIA:
- device = new TrinityAsr (id);
- break;
- default:
- break;
- }
+/** @brief deprecated */
+int allocInputBuffers (input_buffers * input)
+{
+ logwarn (TAG, "deprecated. Please use allocNPU_inputBuffers\n");
+ return allocNPU_inputBuffers (HostHandler::getLatestDevice(), input);
+}
- if (device != nullptr && device->init () != 0) {
- delete device;
- device = nullptr;
+/** @brief deprecated */
+int cleanInputBuffers (input_buffers * input)
+{
+ logwarn (TAG, "deprecated. Please use cleanNPU_inputBuffers\n");
+ return cleanNPU_inputBuffers (HostHandler::getLatestDevice(), input);
+}
+
+/** @brief deprecated */
+int allocNPUBuffer (uint64_t size, buffer_types type,
+ const char * filepath, generic_buffer *buffer)
+{
+ if (buffer) {
+ buffer->size = size;
+ buffer->type = type;
+ buffer->filepath = filepath;
}
- return device;
+ logwarn (TAG, "deprecated. Please use allocNPU_* APIs\n");
+ return allocModelBuffer (buffer);
+}
+
+/** @brief deprecated */
+int cleanNPUBuffer (generic_buffer * buffer)
+{
+ logwarn (TAG, "deprecated. Please use cleanNPU_* APIs\n");
+ return cleanModelBuffer (buffer);
}
+/** implement methods of HostHandler class */
+
/** @brief host handler constructor */
HostHandler::HostHandler (Device *device)
: device_(device),
int
HostHandler::registerModel (generic_buffer *model_buf, uint32_t *modelid)
{
- Model *model = device_->registerModel (model_buf);
- if (model == nullptr) {
- logerr (TAG, "Failed to register model\n");
+ if (model_buf == nullptr || modelid == nullptr) {
+ logerr (TAG, "Invalid arguments given\n");
return -EINVAL;
}
- int status = models_.insert (model->getID(), model);
+ Model *model = nullptr;
+ int status = device_->setModel (model_buf, &model);
+ if (status != 0) {
+ logerr (TAG, "Failed to set model: %d\n", status);
+ return status;
+ }
+
+ assert (model != nullptr);
+
+ status = models_.insert (model->getID(), model);
if (status != 0) {
logerr (TAG, "Failed to insert model id\n");
delete model;
}
void callback (output_buffers *output, uint64_t sequence) {
- /** just copy internal variables of output buffers */
- memcpy (output_, output, sizeof (output_buffers));
+ if (output_ != nullptr) {
+ /** just copy internal variables of output buffers */
+ memcpy (output_, output, sizeof (output_buffers));
+ }
done_ = true;
cv_.notify_one ();
}
if (buffer == NULL)
return -EINVAL;
+ if (buffer->size == 0) {
+ logerr (TAG, "Invalid size\n");
+ return -EINVAL;
+ }
+
if (buffer->size > UINT32_MAX) {
logerr (TAG, "Don't support such a large size");
return -ENOMEM;
}
- if (buffer->type == BUFFER_FILE) {
- /* nothing to do */
- if (buffer->filepath == nullptr)
+ switch (buffer->type) {
+ case BUFFER_FILE:
+ /* nothing to do */
+ if (buffer->filepath == nullptr)
+ return -EINVAL;
+ break;
+ case BUFFER_MAPPED:
+ case BUFFER_DMABUF:
+ {
+ /* now, npu-engine always provides dmabuf-based allocation */
+ HWmem *hwmem;
+ int status = device_->allocMemory (buffer->size, &hwmem);
+ if (status != 0)
+ return status;
+
+ buffer->dmabuf = hwmem->getDmabuf();
+ buffer->offset = hwmem->getOffset();
+ buffer->addr = hwmem->getData();
+ } break;
+ default:
return -EINVAL;
- } else {
- /* now, npu-engine always provides dmabuf-based allocation */
- HWmem *hwmem = device_->allocMemory ();
- if (hwmem == nullptr || hwmem->alloc (buffer->size) < 0)
- return -ENOMEM;
-
- buffer->dmabuf = hwmem->getDmabuf();
- buffer->offset = hwmem->getOffset();
- buffer->addr = hwmem->getData();
}
+
return 0;
}
if (buffer == NULL)
return -EINVAL;
- if (buffer->type != BUFFER_FILE)
- device_->deallocMemory (buffer->dmabuf);
+ int status;
+ switch (buffer->type) {
+ case BUFFER_FILE:
+ status = 0; /** always succeeds because there is nothing to do */
+ break;
+ case BUFFER_MAPPED:
+ case BUFFER_DMABUF:
+ status = device_->deallocMemory (buffer->dmabuf);
+ break;
+ default:
+ status = -EINVAL;
+ break;
+ }
- return 0;
+ return status;
}
/**
for (uint32_t idx = 1; idx < buffers->num_buffers; idx++) {
buffers->bufs[idx].dmabuf = buffers->bufs[0].dmabuf;
buffers->bufs[idx].offset = buffers->bufs[0].offset + offset;
+ buffers->bufs[idx].addr = static_cast<char*>(buffers->bufs[0].addr) + offset;
buffers->bufs[idx].type = type;
offset += buffers->bufs[idx].size;
}
+ buffers->bufs[0].size = first_size;
+
return 0;
}
return deallocGenericBuffer (&buffers->bufs[0]);
}
-/** just for backward-compatability */
-npudev_h HostHandler::latest_dev_ = nullptr;
-
-/** implementation of libnpuhost APIs */
+/** implement methods of Device class */
-/**
- * @brief Returns the number of available NPU devices.
- * @return @c The number of NPU devices.
- * @retval 0 if no NPU devices available. if positive (number of NPUs) if NPU devices available. otherwise, a negative error value.
- * @note the caller should call putNPUdevice() to release the device handle
- */
-int getnumNPUdeviceByType (dev_type type)
+/** @brief constructor of device */
+Device::Device (dev_type type, int id, bool need_model)
+ : comm_ (CommPlugin::getCommPlugin()), type_ (type), id_ (id), need_model_ (need_model),
+ mode_ (NPUASYNC_WAIT), initialized_ (false), atomic_flag_ (ATOMIC_FLAG_INIT)
{
- return HostHandler::getNumDevices (type);
}
/**
- * @brief Returns the handle of the chosen NPU devices.
- * @param[out] dev The NPU device handle
- * @param[in] id The NPU id to get the handle. 0 <= id < getnumNPUdeviceByType().
- * @return @c 0 if no error. otherwise a negative error value
- * @note the caller should call putNPUdevice() to release the device handle
+ * @brief create device instance depending on device type and id
+ * @param[in] type device type
+ * @param[in] id device id
+ * @return device instance
*/
-int getNPUdeviceByType (npudev_h *dev, dev_type type, uint32_t id)
+Device *
+Device::createInstance (dev_type type, int id)
{
- return HostHandler::getDevice (dev, type, id);
-}
+ Device *device = nullptr;
-/**
- * @brief Returns the handle of an NPU device meeting the condition
- * @param[out] dev The NPU device handle
- * @param[in] cond The condition for device search.
- * @return @c 0 if no error. otherwise a negative error value
- * @note the caller should call putNPUdevice() to release the device handle
- * @note it's not supported yet
- */
-int getNPUdeviceByCondition(npudev_h *dev, const npucondition *cond)
-{
- /** not implmeneted yet */
- return getNPUdeviceByType (dev, NPUCOND_TRIV_CONN_SOCIP, 0);
-}
+ switch (type & DEVICETYPE_MASK) {
+ case DEVICETYPE_TRIV:
+ device = new TrinityVision (id);
+ break;
+ case DEVICETYPE_TRIV2:
+ device = new TrinityVision2 (id);
+ break;
+ case DEVICETYPE_TRIA:
+ device = new TrinityAsr (id);
+ break;
+ default:
+ break;
+ }
-/**
- * @brief release the NPU device instance obtained by getDevice ()
- * @param[in] dev the NPU device handle
- */
-void putNPUdevice (npudev_h dev)
-{
- if (dev != nullptr)
- delete static_cast<Device *> (dev);
+ if (device != nullptr && device->init () != 0) {
+ delete device;
+ device = nullptr;
+ }
+
+ return device;
}
/**
- * @brief Send the NN model to NPU.
- * @param[in] dev The NPU device handle
- * @param[in] modelfile The filepath to the compiled NPU NN model in any buffer_type
- * @param[out] modelid The modelid allocated for this instance of NN model.
- * @return @c 0 if no error. otherwise a negative error value
- *
- * @detail For ASR devices, which do not accept models, but have models
- * embedded in devices, you do not need to call register and
- * register calls for ASR are ignored.
- *
- * @todo Add a variation: in-memory model register.
+ * @brief device initialization
+ * @return 0 if no error, otherwise a negative errno
+ * @note Init failures come from createDriverAPI() only.
*/
-int registerNPUmodel (npudev_h dev, generic_buffer *modelfile, uint32_t *modelid)
+int
+Device::init ()
{
- INIT_HOST_HANDLER (host_handler, dev);
+ /** should be initialized only once */
+ if (!atomic_flag_.test_and_set()) {
+ /** create the corresponding driver API */
+ api_ = DriverAPI::createDriverAPI (type_, id_);
+ if (api_.get() == nullptr) {
+ atomic_flag_.clear();
+ logerr (TAG, "Failed to create driver API\n");
+ return -EINVAL;
+ }
- return host_handler->registerModel (modelfile, modelid);
+ handler_.reset (new HostHandler (this));
+ scheduler_.reset (new Scheduler (api_.get()));
+ mem_ = MemAllocator::createInstance (api_.get());
+
+ initialized_ = true; /** c++11 does not provide test() of atomic flag */
+ }
+
+ return 0;
}
/**
- * @brief Remove the NN model from NPU
- * @param[in] dev The NPU device handle
- * @param[in] modelid The model to be removed from the NPU.
- * @return @c 0 if no error. otherwise a negative error value
- * @detail This may incur some latency with memory compatcion.
+ * @brief stop all requests from this device
+ * @param[in] force_stop indicate whether the scheduler waits until previous requests are handled
+ * @return 0 if no error, otherwise a negative errno
*/
-int unregisterNPUmodel(npudev_h dev, uint32_t modelid)
+int
+Device::stop (bool force_stop)
{
- INIT_HOST_HANDLER (host_handler, dev);
+ if (!initialized ()) {
+ logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
+ return -EPERM;
+ }
- return host_handler->unregisterModel (modelid);
+ Request *req = new Request (NPUINPUT_STOP);
+ req->setForceStop (force_stop);
+ return scheduler_->submitRequest (req);
}
/**
- * @brief Remove all NN models from NPU
- * @param[in] dev The NPU device handle
- * @return @c 0 if no error. otherwise a negative error value
+ * @brief allocate generic memory buffer
+ * @param[out] hwmem_ptr hwmem instance pointer
+ * @return 0 if no error, otherwise a negative errno
*/
-int unregisterNPUmodel_all(npudev_h dev)
+int
+Device::allocMemory (size_t size, HWmem ** hwmem_ptr)
{
- INIT_HOST_HANDLER (host_handler, dev);
+ if (!initialized ()) {
+ logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
+ return -EPERM;
+ }
- return host_handler->unregisterModels ();
+ if (size == 0 || hwmem_ptr == nullptr)
+ return -EINVAL;
+
+ HWmem *hwmem = mem_->allocMemory (size);
+ if (hwmem == nullptr)
+ return -ENOMEM;
+
+ *hwmem_ptr = hwmem;
+ return 0;
}
/**
- * @brief [OPTIONAL] Set the data layout for input/output tensors
- * @param[in] dev The NPU device handle
- * @param[in] modelid The ID of model whose layouts are set
- * @param[in] info_in the layout/type info for input tensors
- * @param[in] info_out the layout/type info for output tensors
- * @return @c 0 if no error. otherwise a negative error value
- * @note if this function is not called, default layout/type will be used.
+ * @brief deallocate generic memory buffer
+ * @param[in] dmabuf_fd dmabuf file descriptor
+ * @return 0 if no error, otherwise a negative errno
*/
-int setNPU_dataInfo(npudev_h dev, uint32_t modelid,
- tensors_data_info *info_in, tensors_data_info *info_out)
+int
+Device::deallocMemory (int dmabuf_fd)
{
- INIT_HOST_HANDLER (host_handler, dev);
+ if (!initialized ()) {
+ logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
+ return -EPERM;
+ }
- return host_handler->setDataInfo (modelid, info_in, info_out);
+ return mem_->deallocMemory (dmabuf_fd);
}
/**
- * @brief [OPTIONAL] Set the inference constraint for next NPU inferences
- * @param[in] dev The NPU device handle
- * @param[in] modelid The target model id
- * @param[in] constraint inference constraint (e.g., timeout, priority)
- * @return @c 0 if no error. otherwise a negative error value
- * @note If this function is not called, default values are used.
+ * @brief extract the buffer instance from input generic buffers
+ * @param[in] meta the model metadata
+ * @param[in] input the input generic buffers
+ * @return the buffer instance
*/
-int setNPU_constraint(npudev_h dev, uint32_t modelid, npuConstraint constraint)
+Buffer *
+TrinityVision::prepareInputBuffers (const Metadata *meta, const input_buffers *input)
{
- INIT_HOST_HANDLER (host_handler, dev);
+ if (meta == nullptr ||
+ meta->getInputNum() != input->num_buffers) {
+ logerr (TAG, "Invalid metadata info provided\n");
+ return nullptr;
+ }
- return host_handler->setConstraint (modelid, constraint);
+ Buffer * buffer = mem_->allocBuffer ();
+ if (buffer != nullptr) {
+ const generic_buffer *first = &input->bufs[0];
+ if (first->type == BUFFER_DMABUF) {
+ buffer->setDmabuf (first->dmabuf);
+ buffer->setOffset (first->offset);
+ buffer->setSize (meta->getBufferSize());
+ } else {
+ int status = buffer->alloc (meta->getBufferSize ());
+ if (status != 0) {
+ logerr (TAG, "Failed to allocate buffer: %d\n", status);
+ delete buffer;
+ return nullptr;
+ }
+ }
+ }
+
+ try {
+ buffer->createTensors (meta);
+ } catch (std::bad_alloc& bad) {
+ logerr (TAG, "Failed to allocate buffer: Not enough memory\n");
+ delete buffer;
+ buffer = nullptr;
+ } catch (std::exception& exp) {
+ logerr (TAG, "Failed to allocate buffer: %s\n", exp.what());
+ delete buffer;
+ buffer = nullptr;
+ }
+ return buffer;
}
/**
- * @brief Execute inference. Wait (block) until the output is available.
- * @param[in] dev The NPU device handle
- * @param[in] modelid The model to be inferred.
- * @param[in] input The input data to be inferred.
- * @param[out] output The output result. The caller MUST allocate appropriately before calling this.
- * @return @c 0 if no error. otherwise a negative error value
- *
- * @detail This is a syntactic sugar of runNPU_async().
- * CAUTION: There is a memcpy for the output buffer.
+ * @brief implementation of TRIV's setModel ()
+ * @param[in] model_buf the model generic buffer
+ * @param[out] model the model instance
+ * @return 0 if no error, otherwise a negative errno
*/
-int runNPU_sync(npudev_h dev, uint32_t modelid, const input_buffers *input,
- output_buffers *output)
+int
+TrinityVision::setModel (const generic_buffer *model_buf, Model ** model_ptr)
{
- INIT_HOST_HANDLER (host_handler, dev);
+ if (!initialized ()) {
+ logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
+ return -EPERM;
+ }
- return host_handler->runSync (modelid, input, output);
+ if (model_buf == nullptr || model_ptr == nullptr)
+ return -EINVAL;
+
+ Model *model = mem_->allocModel ();
+ if (model == nullptr) {
+ logerr (TAG, "Failed to allocate model\n");
+ return -ENOMEM;
+ }
+
+ int status = 0;
+ switch (model_buf->type) {
+ case BUFFER_DMABUF:
+ model->setDmabuf (model_buf->dmabuf);
+ model->setOffset (model_buf->offset);
+ model->setSize (model_buf->size);
+ break;
+ case BUFFER_FILE:
+ case BUFFER_MAPPED:
+ status = model->alloc (model_buf->size);
+ if (status != 0) {
+ logerr (TAG, "Failed to allocate model: %d\n", status);
+ goto delete_exit;
+ }
+
+ status = comm_.extractGenericBuffer (model_buf, model->getData(), nullptr);
+ if (status != 0) {
+ logerr (TAG, "Failed to extract generic buffer: %d\n", status);
+ goto delete_exit;
+ }
+ break;
+ default:
+ status = -EINVAL;
+ goto delete_exit;
+ }
+
+ status = model->setMetadata (model->getData());
+ if (status != 0)
+ goto delete_exit;
+
+ model_config_t config;
+ config.dmabuf_id = model->getDmabuf();
+ config.program_size = model->getMetadata()->getProgramSize();
+ config.program_offset_addr = model->getOffset() + model->getMetadata()->getMetaSize();
+ config.weight_offset_addr = config.program_offset_addr + config.program_size;
+
+ status = api_->setModel (&config);
+ if (status != 0)
+ goto delete_exit;
+
+ *model_ptr = model;
+ return status;
+
+delete_exit:
+ delete model;
+ return status;
}
+
/**
- * @brief Invoke NPU inference. Unblocking call.
- * @param[in] dev The NPU device handle
- * @param[in] modelid The model to be inferred.
- * @param[in] input The input data to be inferred.
- * @param[in] cb The output buffer handler.
+ * @brief implementation of TRIV's run()
+ * @param[in] opmode input opmode
+ * @param[in] model the model instance
+ * @param[in] input generic buffers of input data
+ * @param[in] cb the output callback
+ * @param[in] cb_data the output callback data
* @param[out] sequence The sequence number returned with runNPU_async.
- * @param[in] data The data given as a parameter to the runNPU_async call.
- * @param[in] mode Configures how this operation works.
- * @return @c 0 if no error. otherwise a negative error value
*/
-int runNPU_async(npudev_h dev, uint32_t modelid, const input_buffers *input,
- npuOutputNotify cb, uint64_t *sequence, void *data,
- npu_async_mode mode)
+int
+TrinityVision::run (npu_input_opmode opmode, const Model *model,
+ const input_buffers *input, npuOutputNotify cb, void *cb_data,
+ uint64_t *sequence)
{
- INIT_HOST_HANDLER (host_handler, dev);
+ if (!initialized ()) {
+ logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
+ return -EPERM;
+ }
- return host_handler->runAsync (modelid, input, cb, data, mode, sequence);
+ if (opmode != NPUINPUT_HOST) {
+ logerr (TAG, "TRIV supports only host inputservice\n");
+ return -EINVAL;
+ }
+
+ if (model == nullptr || input == nullptr) {
+ logerr (TAG, "TRIV requires both model and input buffers\n");
+ return -EINVAL;
+ }
+
+ Buffer *buffer = prepareInputBuffers (model->getMetadata(), input);
+ if (buffer == nullptr) {
+ logerr (TAG, "Failed to extract buffer instance\n");
+ return -EINVAL;
+ }
+
+ for (uint32_t idx = 0; idx < input->num_buffers; idx++) {
+ auto func = std::bind (TrinityVision::manipulateData, model, idx, true,
+ std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
+ int status = comm_.extractGenericBuffer (&input->bufs[idx],
+ buffer->getInputTensor(idx)->getData(), func);
+ if (status != 0) {
+ logerr (TAG, "Failed to feed input buffer: %d\n", status);
+ return status;
+ }
+ }
+
+ /** this device uses CMA buffer */
+
+ Request *req = new Request (opmode);
+ req->setModel (model);
+ req->setBuffer (buffer);
+
+ if (cb != nullptr)
+ req->setCallback (std::bind (&TrinityVision::callback, this, req, cb, cb_data));
+
+ if (sequence != nullptr)
+ *sequence = req->getID();
+
+ return scheduler_->submitRequest (req);
}
/**
- * @brief Allocate a buffer for NPU model with the requested buffer type.
- * @param[in] dev The NPU device handle
- * @param[in/out] Buffer the buffer pointer where memory is allocated.
- * @return 0 if no error, otherwise a negative errno.
+ * @brief callback of TRIV request
+ * @param[in] req the request instance
+ * @param[in] cb callback for completion
+ * @param[in] cb_data callback data
+ * @note The callback invocation does not guarantee the request was successful
+ * @todo Check the request failures
*/
-int allocNPU_modelBuffer (npudev_h dev, generic_buffer *buffer)
+void
+TrinityVision::callback (Request *req, npuOutputNotify cb, void *cb_data)
{
- INIT_HOST_HANDLER (host_handler, dev);
+ const Model *model = req->getModel ();
+ Buffer *buffer = req->getBuffer ();
+ output_buffers output = {
+ .num_buffers = buffer->getOutputNum ()
+ };
+
+ for (uint32_t idx = 0; idx < output.num_buffers; idx++) {
+ uint32_t output_tensor_size = model->getOutputTensorSize (idx);
+
+ output.bufs[idx].type = BUFFER_MAPPED;
+ output.bufs[idx].size = output_tensor_size;
+ /** user needs to free this */
+ output.bufs[idx].addr = malloc (output_tensor_size);
+
+ auto func = std::bind (TrinityVision::manipulateData, model, idx, false,
+ std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
+ int status = comm_.insertGenericBuffer (buffer->getOutputTensor(idx)->getData(),
+ &output.bufs[idx], func);
+ if (status != 0) {
+ logerr (TAG, "Failed to return output buffer: %d\n", status);
+ }
+ }
- return host_handler->allocGenericBuffer (buffer);
+ cb (&output, req->getID(), cb_data);
}
-/**
- * @brief Free the buffer and remove the address mapping.
- * @param[in] dev The NPU device handle
- * @param[in] buffer the model buffer
- * @return 0 if no error, otherwise a negative errno.
- */
-int cleanNPU_modelBuffer (npudev_h dev, generic_buffer *buffer)
+/** @brief implementation of TRIV2's setModel (): WIP */
+int
+TrinityVision2::setModel (const generic_buffer *model_buf, Model ** model_ptr)
{
- INIT_HOST_HANDLER (host_handler, dev);
+ if (!initialized ()) {
+ logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
+ return -EPERM;
+ }
- return host_handler->deallocGenericBuffer (buffer);
+ /** TODO: model's weight values are stored in segments */
+ *model_ptr = nullptr;
+ return -EINVAL;
}
-/**
- * @brief Allocate a buffer for NPU input with the requested buffer type.
- * @param[in] dev The NPU device handle
- * @param[in/out] Buffer the buffer pointer where memory is allocated.
- * @return 0 if no error, otherwise a negative errno.
- * @note please utilize allocInputBuffers() for multiple input tensors because subsequent
- * calls of allocInputBuffer() don't gurantee contiguous allocations between them.
- */
-int allocNPU_inputBuffer (npudev_h dev, generic_buffer *buffer)
+/** @brief implementation of TRIV2's run(): WIP */
+int
+TrinityVision2::run (npu_input_opmode opmode, const Model *model,
+ const input_buffers *input, npuOutputNotify cb, void *cb_data,
+ uint64_t *sequence)
{
- INIT_HOST_HANDLER (host_handler, dev);
+ if (!initialized ()) {
+ logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
+ return -EPERM;
+ }
- return host_handler->allocGenericBuffer (buffer);
+ if (opmode != NPUINPUT_HOST && opmode != NPUINPUT_HW_RECURRING)
+ return -EINVAL;
+
+ /** this device uses segment table */
+
+ Request *req = new Request (opmode);
+ req->setModel (model);
+#if 0
+ req->setSegmentTable (segt);
+#endif
+ req->setCallback (std::bind (&TrinityVision2::callback, this, req, cb, cb_data));
+
+ if (sequence)
+ *sequence = req->getID();
+
+ return scheduler_->submitRequest (req);
}
-/**
- * @brief Free the buffer and remove the address mapping.
- * @param[in] dev The NPU device handle
- * @param[in] buffer the input buffer
- * @return 0 if no error, otherwise a negative errno.
- */
-int cleanNPU_inputBuffer (npudev_h dev, generic_buffer *buffer)
+/** @brief callback of TRIV2 request: WIP */
+void
+TrinityVision2::callback (Request *req, npuOutputNotify cb, void *cb_data)
{
- INIT_HOST_HANDLER (host_handler, dev);
-
- return host_handler->deallocGenericBuffer (buffer);
}
-/**
- * @brief Allocate input buffers, which have multiple instances of generic_buffer
- * @param[in] dev The NPU device handle
- * @param[in/out] input input buffers.
- * @return 0 if no error, otherwise a negative errno.
- * @note it reuses allocInputBuffer().
- * @details in case of BUFFER_DMABUF, this function can be used to gurantee physically-contiguous
- * memory mapping for multiple tensors (in a single inference, not batch size).
- */
-int allocNPU_inputBuffers (npudev_h dev, input_buffers * input)
+/** @brief implementation of TRIA's run(): WIP */
+int
+TrinityAsr::run (npu_input_opmode opmode, const Model *model,
+ const input_buffers *input, npuOutputNotify cb, void *cb_data,
+ uint64_t *sequence)
{
- INIT_HOST_HANDLER (host_handler, dev);
+ if (!initialized ()) {
+ logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
+ return -EPERM;
+ }
- return host_handler->allocGenericBuffer (input);
+ if (opmode != NPUINPUT_HOST)
+ return -EINVAL;
+
+ /** ASR does not require model and support only a single tensor */
+ const generic_buffer *first_buf = &input->bufs[0];
+ Buffer * buffer = mem_->allocBuffer ();
+ int status;
+ if (first_buf->type == BUFFER_DMABUF) {
+ buffer->setDmabuf (first_buf->dmabuf);
+ buffer->setOffset (first_buf->offset);
+ buffer->setSize (first_buf->size);
+ } else {
+ status = buffer->alloc (first_buf->size);
+ if (status != 0) {
+ delete buffer;
+ return status;
+ }
+ }
+ buffer->createTensors ();
+
+ status = comm_.extractGenericBuffer (first_buf,
+ buffer->getInputTensor(0)->getData(), nullptr);
+ if (status != 0)
+ return status;
+
+ Request *req = new Request (opmode);
+ req->setBuffer (buffer);
+ req->setCallback (std::bind (&TrinityAsr::callback, this, req, cb, cb_data));
+
+ if (sequence)
+ *sequence = req->getID();
+
+ return scheduler_->submitRequest (req);
+}
+
+/** @brief callback of TRIA request: WIP */
+void
+TrinityAsr::callback (Request *req, npuOutputNotify cb, void *cb_data)
+{
}
+/** Implement data manipulation (each device may have different impl.) */
+
+#ifdef ENABLE_MANIP
+
+#define do_quantized_memcpy(type) do {\
+ idx = 0;\
+ if (quant) {\
+ while (idx < num_elems) {\
+ val = ((type *) src)[idx];\
+ val = val / _scale;\
+ val += _zero_point;\
+ val = (val > 255.0) ? 255.0 : (val < 0.0) ? 0.0 : val;\
+ ((uint8_t *) dst)[idx++] = (uint8_t) val;\
+ }\
+ } else {\
+ while (idx < num_elems) {\
+ val = *(uint8_t *) src;\
+ val -= _zero_point;\
+ val *= _scale;\
+ ((type *) dst)[idx++] = (type) val;\
+ dst = (void*)(((uint8_t *) dst) + data_size);\
+ src = (void*)(((uint8_t *) src) + 1);\
+ }\
+ }\
+ } while (0)
+
/**
- * @brief Free input buffers allocated by allocInputBuffers().
- * @param[in] dev The NPU device handle
- * @param[in/out] input input buffers.
- * @note it reuses cleanInputbuffer().
- * @return 0 if no error, otherwise a negative errno.
+ * @brief memcpy during quantization
*/
-int cleanNPU_inputBuffers (npudev_h dev, input_buffers * input)
+static void memcpy_with_quant (bool quant, data_type type, float scale, uint32_t zero_point,
+ void *dst, const void *src, uint32_t num_elems)
{
- INIT_HOST_HANDLER (host_handler, dev);
+ double _scale = (double) scale;
+ double _zero_point = (double) zero_point;
+ double val;
+ uint32_t data_size = get_data_size (type);
+ uint32_t idx;
- return host_handler->deallocGenericBuffer (input);
+ switch (type) {
+ case DATA_TYPE_INT8:
+ do_quantized_memcpy (int8_t);
+ break;
+ case DATA_TYPE_UINT8:
+ do_quantized_memcpy (uint8_t);
+ break;
+ case DATA_TYPE_INT16:
+ do_quantized_memcpy (int16_t);
+ break;
+ case DATA_TYPE_UINT16:
+ do_quantized_memcpy (uint16_t);
+ break;
+ case DATA_TYPE_INT32:
+ do_quantized_memcpy (int32_t);
+ break;
+ case DATA_TYPE_UINT32:
+ do_quantized_memcpy (uint32_t);
+ break;
+ case DATA_TYPE_INT64:
+ do_quantized_memcpy (int64_t);
+ break;
+ case DATA_TYPE_UINT64:
+ do_quantized_memcpy (uint64_t);
+ break;
+ case DATA_TYPE_FLOAT32:
+ do_quantized_memcpy (float);
+ break;
+ case DATA_TYPE_FLOAT64:
+ do_quantized_memcpy (double);
+ break;
+ default:
+ logerr (TAG, "Unsupported datatype %d\n", type);
+ }
}
/**
- * @brief Get metadata for NPU model
- * @param[in] model The path of model binary file
- * @param[in] need_extra whether you want to extract the extra data in metadata
- * @return the metadata structure to be filled if no error, otherwise nullptr
+ * @brief perform data manipulation
+ * @param[in] model model instance
+ * @param[in] idx tensor index
+ * @param[in] is_input indicate it's input manipulation
+ * @param[out] dst destination buffer
+ * @param[in] src source buffer (feature map)
+ * @param[in] size size to be copied
+ * @return size of memory copy if no error, otherwise zero
+ *
+ * @note the input data format should be NHWC
+ * @detail rules for the memory address of activations in NPU HW.
+ * (https://code.sec.samsung.net/confluence/pages/viewpage.action?pageId=146491864)
+ *
+ * 1) Special case (depth == 3)
+ * - addr(x,y,z) = addr(0,0,0) + (z) + 3 * (x + width * y)
*
- * @note For most npu-engine users, the extra data is not useful because it will be
- * used for second-party users (e.g., compiler, simulator).
- * Also, the caller needs to free the metadata.
+ * 2) Common case
+ * - addr(x,y,z) = addr(0,0,0) + (z % MPA_L) + MPA_L * (x + width * (y + height * (z / MPA_L)))
*
- * @note the caller needs to free the metadata
+ * Thus, if depth is not a multiple of MPA_L (i.e., 64), zero padding is required
*/
-npubin_meta * getNPUmodel_metadata (const char *model, bool need_extra)
+size_t
+TrinityVision::manipulateData (const Model *model, uint32_t idx, bool is_input,
+ void *dst, void *src, size_t size)
{
- npubin_meta *meta;
- FILE *fp;
- size_t ret;
-
- if (!model)
- return nullptr;
-
- fp = fopen (model, "rb");
- if (!fp) {
- logerr (TAG, "Failed to open the model binary: %d\n", -errno);
- return nullptr;
- }
-
- meta = (npubin_meta *) malloc (NPUBIN_META_SIZE);
- if (!meta) {
- logerr (TAG, "Failed to allocate metadata\n");
- goto exit_err;
- }
-
- ret = fread (meta, 1, NPUBIN_META_SIZE, fp);
- if (ret != NPUBIN_META_SIZE) {
- logerr (TAG, "Failed to read the metadata\n");
- goto exit_free;
- }
-
- if (!CHECK_NPUBIN (meta->magiccode)) {
- logerr (TAG, "Invalid metadata provided\n");
- goto exit_free;
- }
-
- if (need_extra && NPUBIN_META_EXTRA (meta->magiccode) > 0) {
- npubin_meta *new_meta;
+ const Metadata *meta = model->getMetadata();
+ const tensor_data_info* info;
+ const uint32_t *dims;
+ uint32_t zero_point;
+ float scale;
- new_meta = (npubin_meta *) realloc (meta, NPUBIN_META_TOTAL_SIZE(meta->magiccode));
- if (!new_meta) {
- logerr (TAG, "Failed to allocate extra metadata\n");
- goto exit_free;
+ /** extract required information from the metadata */
+ if (is_input) {
+ if (idx >= meta->getInputNum()) {
+ logerr (TAG, "Wrong information for input tensors in metadata\n");
+ return 0;
}
- ret = fread (new_meta->reserved_extra, 1, NPUBIN_META_EXTRA_SIZE (meta->magiccode), fp);
- if (ret != NPUBIN_META_EXTRA_SIZE (meta->magiccode)) {
- logerr (TAG, "Invalid extra metadata provided\n");
- free (new_meta);
- goto exit_err;
+ info = model->getInputDataInfo (idx);
+ dims = meta->getInputDims (idx);
+ zero_point = meta->getInputQuantZero (idx);
+ scale = meta->getInputQuantScale (idx);
+ } else {
+ if (idx >= meta->getOutputNum()) {
+ logerr (TAG, "Wrong information for output tensors in metadata\n");
+ return 0;
}
- meta = new_meta;
+ info = model->getOutputDataInfo (idx);
+ dims = meta->getOutputDims (idx);
+ zero_point = meta->getOutputQuantZero (idx);
+ scale = meta->getOutputQuantScale (idx);
}
- fclose (fp);
+ if (info == nullptr) {
+ logerr (TAG, "Unmatched tensors info\n");
+ return 0;
+ }
- return meta;
+ uint32_t batch = dims[0];
+ uint32_t height = dims[1];
+ uint32_t width = dims[2];
+ uint32_t depth = dims[3];
-exit_free:
- free (meta);
-exit_err:
- fclose (fp);
+ uint32_t data_size = get_data_size (info->type);
+ if (data_size == 0) {
+ logerr (TAG, "Invalid data size\n");
+ return 0;
+ }
- return nullptr;
-}
+ bool need_quantization = false;
+ /**
+ * note that we assume DATA_TYPE_SRNPU is the smallest data type that we consider.
+ * Also, DATA_TYPE_SRNPU and uint8_t may be regarded as the same in the view of apps.
+ */
+ if (info->type != DATA_TYPE_SRNPU) {
+ assert (data_size >= get_data_size (DATA_TYPE_SRNPU));
-/** deprecated buffer APIs; please use the above APIs */
+ if (data_size > get_data_size (DATA_TYPE_SRNPU) ||
+ !(zero_point == DEFAULT_ZERO_POINT && scale == DEFAULT_SCALE))
+ need_quantization = true;
+ }
-/**
- * @brief Returns the number of NPU devices (TRIV).
- */
-int getnumNPUdevice (void)
-{
- logwarn (TAG, "deprecated. Please use getnumNPUdeviceByType ()\n");
- return getnumNPUdeviceByType (NPUCOND_TRIV_CONN_SOCIP);
-}
+ /** check data manipulation is required */
+ if (depth != 3 && depth != 64 && info->layout != DATA_LAYOUT_SRNPU) {
+ uint32_t MPA_L = DATA_GRANULARITY;
+ uint32_t n, h, w, d;
+ uint32_t std_offset; /* standard offset in NHWC data format */
+ uint32_t npu_offset; /* npu offset in NPU HW data format*/
+ uint32_t src_offset;
+ uint32_t dst_offset;
+ uint32_t slice_size;
-/**
- * @brief Returns the list of ASR devices (TRIA)
- */
-int getnumASRdevice (void)
-{
- logwarn (TAG, "deprecated. Please use getnumNPUdeviceByType ()\n");
- return getnumNPUdeviceByType (NPUCOND_TRIA_CONN_SOCIP);
-}
+ /* @todo we currently support only NHWC */
+ if (info->layout != DATA_LAYOUT_NHWC) {
+ logerr (TAG, "data manipulation is supported for NHWC only\n");
+ return 0;
+ }
-/**
- * @brief Returns the handle of the chosen TRIV device.
- */
-int getNPUdevice (npudev_h *dev, uint32_t id)
-{
- logwarn (TAG, "deprecated. Please use getNPUdeviceByType ()\n");
- return getNPUdeviceByType (dev, NPUCOND_TRIV_CONN_SOCIP, id);
-}
+ for (n = 0; n < batch; n++) {
+ for (h = 0; h < height; h++) {
+ for (w = 0; w < width; w++) {
+ for (d = 0; d < depth; d += MPA_L) {
+ std_offset = d + depth * (w + width * (h + n * height));
+ npu_offset = MPA_L * (w + width * (h + (n + d / MPA_L) * height));
+ slice_size = (depth - d >= MPA_L) ? MPA_L : depth - d;
-/**
- * @brief Returns the handle of the chosen TRIA device.
- */
-int getASRdevice (npudev_h *dev, uint32_t id)
-{
- logwarn (TAG, "deprecated. Please use getNPUdeviceByType ()\n");
- return getNPUdeviceByType (dev, NPUCOND_TRIA_CONN_SOCIP, id);
-}
+ if (is_input) {
+ src_offset = std_offset * data_size;
+ dst_offset = npu_offset;
+ } else {
+ src_offset = npu_offset;
+ dst_offset = std_offset * data_size;
+ }
-/** @brief deprecated */
-int allocModelBuffer (generic_buffer *buffer)
-{
- logwarn (TAG, "deprecated. Please use allocNPU_modelBuffer\n");
- return allocNPU_modelBuffer (HostHandler::getLatestDevice(), buffer);
-}
+ /* if depth is not a multiple of MPA_L, add zero paddings (not exact values) */
+ if (need_quantization) {
+ memcpy_with_quant (is_input, info->type, scale, zero_point,
+ static_cast<char*>(dst) + dst_offset,
+ static_cast<char*>(src) + src_offset,
+ slice_size);
+ } else {
+ memcpy (
+ static_cast<char*>(dst) + dst_offset,
+ static_cast<char*>(src) + src_offset,
+ slice_size);
+ }
+ }
+ }
+ }
+ }
+ } else if (need_quantization) {
+ /** depth == 3 || depth == 64; special cases which can directly copy input tensor data */
+ memcpy_with_quant (is_input, info->type, scale, zero_point,
+ dst, src, is_input ? size / data_size : size);
+ } else {
+ memcpy (dst, src, size);
+ }
-/** @brief deprecated */
-int cleanModelBuffer (generic_buffer *buffer)
-{
- logwarn (TAG, "deprecated. Please use cleanNPU_modelBuffer\n");
- return allocNPU_modelBuffer (HostHandler::getLatestDevice(), buffer);
+ return size;
}
-/** @brief deprecated */
-int allocInputBuffer (generic_buffer *buffer)
-{
- logwarn (TAG, "deprecated. Please use allocNPU_inputBuffer\n");
- return allocNPU_inputBuffer (HostHandler::getLatestDevice(), buffer);
-}
+#else
-/** @brief deprecated */
-int cleanInputBuffer (generic_buffer *buffer)
+size_t
+TrinityVision::manipulateData (const Model *model, uint32_t idx, bool is_input,
+ void *dst, void *src, size_t size)
{
- logwarn (TAG, "deprecated. Please use cleanNPU_inputBuffer\n");
- return cleanNPU_inputBuffer (HostHandler::getLatestDevice(), buffer);
+ memcpy (dst, src, size);
+ return size;
}
-/** @brief deprecated */
-int allocInputBuffers (input_buffers * input)
-{
- logwarn (TAG, "deprecated. Please use allocNPU_inputBuffers\n");
- return allocNPU_inputBuffers (HostHandler::getLatestDevice(), input);
-}
+#endif
-/** @brief deprecated */
-int cleanInputBuffers (input_buffers * input)
-{
- logwarn (TAG, "deprecated. Please use cleanNPU_inputBuffers\n");
- return cleanNPU_inputBuffers (HostHandler::getLatestDevice(), input);
-}
+/** other device types don't have data manip impl. yet */
-/** @brief deprecated */
-int allocNPUBuffer (uint64_t size, buffer_types type,
- const char * filepath, generic_buffer *buffer)
+size_t
+TrinityVision2::manipulateData (const Model *model, uint32_t idx, bool is_input,
+ void *dst, void *src, size_t size)
{
- if (buffer) {
- buffer->size = size;
- buffer->type = type;
- buffer->filepath = filepath;
- }
-
- logwarn (TAG, "deprecated. Please use allocNPU_* APIs\n");
- return allocModelBuffer (buffer);
+ memcpy (dst, src, size);
+ return size;
}
-/** @brief deprecated */
-int cleanNPUBuffer (generic_buffer * buffer)
+size_t
+TrinityAsr::manipulateData (const Model *model, uint32_t idx, bool is_input,
+ void *dst, void *src, size_t size)
{
- logwarn (TAG, "deprecated. Please use cleanNPU_* APIs\n");
- return cleanModelBuffer (buffer);
+ memcpy (dst, src, size);
+ return size;
}