}
/**
+ * @brief Get the DSP DSPM size of the opened NPU device
+ * @param[out] dspm the resolved DSPM size
+ * @return 0 if no error, otherwise a negative errno
+ * @note this is not supported for emulated devices
+ */
+int
+HostHandler::getDspmSize (uint32_t *dspm)
+{
+ const DriverAPI * api = device_->getDriverAPI ();
+ assert (api != nullptr);
+
+ return api->getDspmSize (dspm);
+}
+/**
* @brief Set the data layout for input/output tensors
* @param[in] modelid The ID of model whose layouts are set
* @param[in] in the layout/type info for input tensors
}
void callback (output_buffers *output, uint64_t sequence) {
- if (output_ != nullptr) {
+ if (output_ != nullptr && output != nullptr) {
/** just copy internal variables of output buffers */
memcpy (output_, output, sizeof (output_buffers));
}
* @param[in] modelid The model to be inferred.
* @param[in] input The input data to be inferred.
* @param[out] output The output result.
- * @return @c 0 if no error. otherwise a negative error value
+ * @return @c positive id if no error, otherwise a negative error value
*/
int
HostHandler::runSync (uint32_t modelid, const input_buffers *input,
callbackSync sync (output);
int status = runAsync (modelid, input, callbackSync::callback,
static_cast <void*> (&sync), NPUASYNC_DROP_OLD, nullptr);
- if (status == 0) {
+ if (status > 0) {
/** sync needs to wait callback */
sync.wait ();
}
* @param[in] cb_data The data given as a parameter to the runNPU_async call.
* @param[in] mode Configures how this operation works.
* @param[out] sequence The sequence number returned with runNPU_async.
- * @return @c 0 if no error. otherwise a negative error value
+ * @return @c positive id if no error, otherwise a negative error value
*/
int
HostHandler::runAsync (uint32_t modelid, const input_buffers *input,
Device *device = nullptr;
switch (type & DEVICETYPE_MASK) {
- case DEVICETYPE_TRIV:
- device = new TrinityVision (id);
- break;
case DEVICETYPE_TRIV2:
device = new TrinityVision2 (id);
break;
- case DEVICETYPE_TRIA:
- device = new TrinityAsr (id);
- device->setNeedModel (false);
+ case DEVICETYPE_DEPR:
+ logwarn (TAG, "You're trying to open deprecated devices..\n");
break;
default:
break;
}
/**
- * @brief extract the buffer instance from input generic buffers
- * @param[in] meta the model metadata
- * @param[in] input the input generic buffers
- * @return the buffer instance
- */
-Buffer *
-TrinityVision::prepareInputBuffers (const Metadata *meta, const input_buffers *input)
-{
- if (meta == nullptr || input == nullptr ||
- meta->getInputNum() != input->num_buffers) {
- logerr (TAG, "Invalid metadata info provided\n");
- return nullptr;
- }
-
- Buffer * buffer;
- const generic_buffer *first = &input->bufs[0];
- if (first->type == BUFFER_DMABUF) {
- buffer = mem_->allocBuffer (new HWmemExternal);
- if (buffer == nullptr)
- return nullptr;
-
- buffer->setDmabuf (first->dmabuf);
- buffer->setOffset (first->offset);
- buffer->setSize (meta->getBufferSize());
- } else {
- buffer = mem_->allocBuffer (new HWmemDevice);
- if (buffer == nullptr)
- return nullptr;
-
- int status = buffer->alloc (meta->getBufferSize ());
- if (status != 0) {
- logerr (TAG, "Failed to allocate buffer: %d\n", status);
- delete buffer;
- return nullptr;
- }
- }
-
- int status = buffer->createTensors (meta);
- if (status != 0) {
- logerr (TAG, "Failed to create tensors: %d\n", status);
- delete buffer;
- buffer = nullptr;
- }
-
- return buffer;
-}
-
-/**
- * @brief implementation of TRIV's setModel ()
- * @param[in] model_buf the model generic buffer
- * @param[out] model the model instance
- * @return 0 if no error, otherwise a negative errno
- */
-int
-TrinityVision::setModel (const generic_buffer *model_buf, Model ** model_ptr)
-{
- if (!initialized ()) {
- logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
- return -EPERM;
- }
-
- if (model_buf == nullptr || model_ptr == nullptr)
- return -EINVAL;
-
- Model *model = nullptr;
- HWmem * hwmem_prog = nullptr;
- HWmem * hwmem_weight = nullptr;
- int status;
-
- /** In TRIV1, model data (including program/weight) should be contiguous */
-
- switch (model_buf->type) {
- case BUFFER_FILE:
- case BUFFER_MAPPED:
- model = mem_->allocModel (new HWmemDevice);
- if (model == nullptr) {
- logerr (TAG, "Failed to allocate model\n");
- return -ENOMEM;
- }
-
- status = model->alloc (model_buf->size);
- if (status != 0) {
- logerr (TAG, "Failed to allocate model: %d\n", status);
- goto delete_exit;
- }
-
- /** extract the whole model data */
- status = comm_.extractGenericBuffer (model_buf, model->getData(), nullptr);
- if (status != 0) {
- logerr (TAG, "Failed to extract generic buffer: %d\n", status);
- goto delete_exit;
- }
- break;
- default:
- return -EINVAL;
- }
-
- status = model->setMetadata (model->getData());
- if (status != 0)
- goto delete_exit;
-
- /** allocate program (optional; NOP) */
- if (model->getMetadata()->getProgramSize() > 0) {
- hwmem_prog = new HWmem (new HWmemChunk);
- model->setProgramData (hwmem_prog);
-
- hwmem_prog->setParent (model);
- hwmem_prog->setOffset (model->getMetadata()->getMetaSize());
- status = hwmem_prog->alloc (model->getMetadata()->getProgramSize());
- if (status != 0) {
- logerr (TAG, "Failed to allocate program\n");
- goto delete_exit;
- }
- }
-
- /** allocate weight (optional) */
- if (model->getMetadata()->getWeightSize() > 0) {
- hwmem_weight = new HWmem (new HWmemChunk);
- model->setWeightData (hwmem_weight);
-
- hwmem_weight->setParent (model);
- hwmem_weight->setOffset (model->getMetadata()->getMetaSize() +
- model->getMetadata()->getProgramSize());
- status = hwmem_weight->alloc (model->getMetadata()->getWeightSize());
- if (status != 0) {
- logerr (TAG, "Failed to allocate program\n");
- goto delete_exit;
- }
- }
-
- if (hwmem_prog != nullptr) {
- /** register this model to the driver */
- model_config_t config;
- config.version = model->getMetadata()->getVersion ();
- config.dbuf_fd = hwmem_prog->getDmabuf ();
- config.program_size = hwmem_prog->getSize ();
- config.program_offset_addr = hwmem_prog->getOffset ();
- if (hwmem_weight != nullptr)
- config.weight_offset_addr = hwmem_weight->getOffset ();
-
- status = api_->registerModel (&config);
- if (status != 0)
- goto delete_exit;
-
- model->setInternalID(config.id);
- }
-
- *model_ptr = model;
- return status;
-
-delete_exit:
- delete model;
- return status;
-}
-
-/**
- * @brief implementation of TRIV's unsetModel ()
- * @param[in] model the model instance
- * @return 0 if no error, otherwise a negative errno
- */
-int
-TrinityVision::unsetModel (Model * model)
-{
- if (!initialized ()) {
- logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
- return -EPERM;
- }
-
- if (model == nullptr) {
- logerr (TAG, "Invalid model instance\n");
- return -EINVAL;
- }
-
- if (model->getMetadata()->getProgramSize() > 0)
- return api_->deregisterModel (model->getInternalID ());
-
- return 0;
-}
-
-/**
- * @brief implementation of TRIV's run()
- * @param[in] opmode input opmode
- * @param[in] model the model instance
- * @param[in] input generic buffers of input data
- * @param[in] cb the output callback
- * @param[in] cb_data the output callback data
- * @param[out] sequence The sequence number returned with runNPU_async.
- */
-int
-TrinityVision::run (npu_input_opmode opmode, const Model *model,
- const input_buffers *input, npuOutputNotify cb, void *cb_data,
- uint64_t *sequence)
-{
- if (!initialized ()) {
- logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
- return -EPERM;
- }
-
- if (opmode != NPUINPUT_HOST) {
- logerr (TAG, "TRIV supports only host inputservice\n");
- return -EINVAL;
- }
-
- if (model == nullptr || input == nullptr) {
- logerr (TAG, "TRIV requires both model and input buffers\n");
- return -EINVAL;
- }
-
- const_cast<Model *>(model)->updateDataInfo ();
-
- Buffer *buffer = prepareInputBuffers (model->getMetadata(), input);
- if (buffer == nullptr) {
- logerr (TAG, "Failed to extract buffer instance\n");
- return -EINVAL;
- }
-
- if (!buffer->isExternal ()) {
- for (uint32_t idx = 0; idx < input->num_buffers; idx++) {
- auto func = std::bind (TrinityVision::manipulateData, model, idx, true,
- std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
- int status = comm_.extractGenericBuffer (&input->bufs[idx],
- buffer->getInputTensor(idx)->getData(), func);
- if (status != 0) {
- logerr (TAG, "Failed to feed input buffer: %d\n", status);
- return status;
- }
- }
- }
-
- /** this device uses CMA buffer */
-
- Request *req = new Request (opmode);
- req->setModel (model);
- req->setBuffer (buffer);
-
- if (cb != nullptr)
- req->setCallback (std::bind (&TrinityVision::callback, this, req, cb, cb_data));
-
- if (sequence != nullptr)
- *sequence = req->getID();
-
- return scheduler_->submitRequest (req);
-}
-
-/**
- * @brief callback of TRIV2 request
- * @param[in] req the request instance
- * @param[in] cb callback for completion
- * @param[in] cb_data callback data
- * @note The callback invoke does not gurantee the request was successful
- * @todo Check the request failures
- */
-void
-TrinityVision::callback (Request *req, npuOutputNotify cb, void *cb_data)
-{
- const Model *model = req->getModel ();
- Buffer *buffer = req->getBuffer ();
- output_buffers output = {
- .num_buffers = buffer->getOutputNum ()
- };
-
- for (uint32_t idx = 0; idx < output.num_buffers; idx++) {
- uint32_t output_tensor_size = model->getOutputTensorSize (idx);
-
- if (buffer->isExternal ()) {
- output.bufs[idx].type = BUFFER_DMABUF;
- output.bufs[idx].size = output_tensor_size;
- output.bufs[idx].addr = buffer->getOutputTensor(idx)->getData();
- } else {
- output.bufs[idx].type = BUFFER_MAPPED;
- output.bufs[idx].size = output_tensor_size;
- /** user needs to free this */
- output.bufs[idx].addr = malloc (output_tensor_size);
-
- auto func = std::bind (TrinityVision::manipulateData, model, idx, false,
- std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
- int status = comm_.insertGenericBuffer (buffer->getOutputTensor(idx)->getData(),
- &output.bufs[idx], func);
- if (status != 0) {
- logerr (TAG, "Failed to return output buffer: %d\n", status);
- }
- }
- }
-
- cb (&output, req->getID(), cb_data);
-
- delete buffer;
-}
-
-/**
* @brief extract the segment table instance from input generic buffers
* @param[in] model the model instance
* @param[in] input the input generic buffers
if (model->getMetadata()->getProgramSize() > 0) {
HWmem * hwmem_prog = new HWmem (new HWmemDevice);
hwmem_prog->setDriverAPI (api_.get());
+ hwmem_prog->setContiguous (true);
model->setProgramData (hwmem_prog);
config.dbuf_fd = hwmem_prog->getDmabuf ();
config.program_size = hwmem_prog->getSize ();
config.program_offset_addr = 0;
+ config.metadata_dbuf_fd = model->getDmabuf ();
/** for metadata extended section */
- config.metadata_dbuf_fd = model->getDmabuf ();
- config.metadata_extra_addr = NPUBIN_META_SIZE;
- config.metadata_extra_size = model->getMetadata()->getMetaExtendedSize ();
+ size_t extended_size = model->getMetadata()->getMetaExtendedSize();
+ if (extended_size > 0) {
+ HWmem * hwmem_extended = new HWmem (new HWmemDevice);
+ hwmem_extended->setDriverAPI (api_.get ());
+
+ model->setExtendedMetadata (hwmem_extended);
+
+ status = hwmem_extended->alloc (extended_size);
+ if (status != 0) {
+ logerr (TAG, "Failed to allocate extended metadata: %d\n", status);
+ goto delete_exit;
+ }
+
+ config.metadata_ext_dbuf_fd = hwmem_extended->getDmabuf ();
+ config.metadata_ext_size = extended_size;
+
+ status = comm_.extractGenericBuffer (model_buf, hwmem_extended->getData (),
+ nullptr, NPUBIN_META_SIZE, extended_size);
+ if (status != 0) {
+ logerr (TAG, "Failed to extract generic buffer: %d\n", status);
+ goto delete_exit;
+ }
+ } else {
+ config.metadata_ext_dbuf_fd = -1;
+ config.metadata_ext_size = 0;
+ }
status = api_->registerModel (&config, model->getMetadata()->getNPUVersion());
if (status != 0)
Request *req = new Request (opmode);
req->setModel (model);
- req->setSegmentTable (segt);
+ req->setInferData (segt);
req->setCallback (std::bind (&TrinityVision2::callback, this, req, cb, cb_data));
- if (sequence)
- *sequence = req->getID();
+ if (sequence && req->getID () > 0) {
+ *sequence = (uint32_t) req->getID ();
+ }
return scheduler_->submitRequest (req);
}
Request *req = new Request (opmode);
req->setModel (model);
- req->setSegmentTable (segt);
+ req->setInferData (segt);
req->setHwDevice (hw_dev);
return scheduler_->submitRequest (req);
void
TrinityVision2::callback (Request *req, npuOutputNotify cb, void *cb_data)
{
+ if (cb == nullptr)
+ return;
+
const Model *model = req->getModel ();
- SegmentTable *segt = req->getSegmentTable ();
+ SegmentTable *segt = dynamic_cast<SegmentTable *> (req->getInferData ());
+ /** internal logic error */
+ assert (segt != nullptr);
+
output_buffers output = {
.num_buffers = segt->getNumOutputSegments ()
};
delete segt;
}
-/** @brief implementation of TRIA's run(): WIP */
-int
-TrinityAsr::run (npu_input_opmode opmode, const Model *model,
- const input_buffers *input, npuOutputNotify cb, void *cb_data,
- uint64_t *sequence)
-{
- if (!initialized ()) {
- logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
- return -EPERM;
- }
-
- if (opmode != NPUINPUT_HOST)
- return -EINVAL;
-
- if (input == nullptr || input->num_buffers != 1)
- return -EINVAL;
-
- Buffer * buffer;
- int status;
- /** ASR does not require model and support only a single tensor */
- const generic_buffer *first_buf = &input->bufs[0];
- if (first_buf->type == BUFFER_DMABUF) {
- buffer = mem_->allocBuffer (new HWmemExternal);
- if (buffer == nullptr)
- return -ENOMEM;
-
- buffer->setDmabuf (first_buf->dmabuf);
- buffer->setOffset (first_buf->offset);
- buffer->setSize (first_buf->size);
- } else {
- buffer = mem_->allocBuffer (new HWmemDevice);
- if (buffer == nullptr)
- return -ENOMEM;
-
- status = buffer->alloc (first_buf->size);
- if (status != 0) {
- delete buffer;
- return status;
- }
- }
-
- status = buffer->createTensors ();
- if (status != 0) {
- logerr (TAG, "Failed to create tensors: %d\n", status);
- delete buffer;
- return status;
- }
-
- if (!buffer->isExternal ()) {
- status = comm_.extractGenericBuffer (first_buf,
- buffer->getInputTensor(0)->getData(), nullptr);
- if (status != 0)
- return status;
- }
-
- Request *req = new Request (opmode);
- req->setBuffer (buffer);
- req->setCallback (std::bind (&TrinityAsr::callback, this, req, cb, cb_data));
-
- if (sequence)
- *sequence = req->getID();
-
- return scheduler_->submitRequest (req);
-}
-
-/** @brief callback of TRIA request: WIP */
-void
-TrinityAsr::callback (Request *req, npuOutputNotify cb, void *cb_data)
-{
- Buffer *buffer = req->getBuffer ();
- output_buffers output = {
- .num_buffers = 0
- };
-
- /** TODO: finalize this impl. when the ASR's working scenario is determined */
- cb (&output, req->getID(), cb_data);
-
- delete buffer;
-}
-
/** Implement data manipulation (each device may have different impl.) */
#ifdef ENABLE_MANIP
-
-/**
- * @brief perform data manipulation
- * @param[in] model model instance
- * @param[in] idx tensor index
- * @param[in] is_input indicate it's input manipulation
- * @param[out] dst destination buffer
- * @param[in] src source buffer (feature map)
- * @param[in] size size to be copied
- * @return size of memory copy if no error, otherwise zero
- *
- * @note the input data format should be NHWC
- * @detail rules for the memory address of activations in NPU HW.
- * (https://code.sec.samsung.net/confluence/pages/viewpage.action?pageId=146491864)
- *
- * 1) Special case (depth == 3)
- * - addr(x,y,z) = addr(0,0,0) + (z) + 3 * (x + width * y)
- *
- * 2) Common case
- * - addr(x,y,z) = addr(0,0,0) + (z % MPA_L) + MPA_L * (x + width * (y + height * (z / MPA_L)))
- *
- * Thus, if depth is not a multiple of MPA_L (i.e., 64), zero padding is required
- */
-size_t
-TrinityVision::manipulateData (const Model *model, uint32_t idx, bool is_input,
- void *dst, void *src, size_t size)
-{
- const Metadata *meta = model->getMetadata();
- DataConverter converter (is_input);
-
- converter.setData (src, dst, size);
-
- if (is_input) {
- const tensor_data_info* info = model->getInputDataInfo (idx);
- if (info == nullptr)
- return 0;
-
- converter.setDataLayout (info->layout, DATA_LAYOUT_SRNPU);
- converter.setDataType (info->type, DATA_TYPE_SRNPU);
- converter.setDataDims (meta->getInputDims (idx));
- converter.setQuantZero (meta->getInputQuantZero (idx));
- converter.setQuantScale (meta->getInputQuantScale (idx));
- } else {
- const tensor_data_info* info = model->getOutputDataInfo (idx);
- if (info == nullptr)
- return 0;
-
- converter.setDataLayout (DATA_LAYOUT_SRNPU, info->layout);
- converter.setDataType (DATA_TYPE_SRNPU, info->type);
- converter.setDataDims (meta->getOutputDims (idx));
- converter.setQuantZero (meta->getOutputQuantZero (idx));
- converter.setQuantScale (meta->getOutputQuantScale (idx));
- }
-
- return converter.perform ();
-}
-
/**
* @brief perform data manipulation
* @param[in] model model instance
#else
size_t
-TrinityVision::manipulateData (const Model *model, uint32_t idx, bool is_input,
- void *dst, void *src, size_t size)
-{
- memcpy (dst, src, size);
- return size;
-}
-
-size_t
TrinityVision2::manipulateData (const Model *model, uint32_t idx, bool is_input,
void *dst, void *src, size_t size)
{
}
#endif
-
-/** other device types don't have data manip impl. yet */
-
-size_t
-TrinityAsr::manipulateData (const Model *model, uint32_t idx, bool is_input,
- void *dst, void *src, size_t size)
-{
- memcpy (dst, src, size);
- return size;
-}