In order to do this, you have to call an additional API, `getNPU_profile()`, after running models.
It collects the profile information from an NPU device or simulator.
-But, please make sure that you should register models with `NPU_PRIORITY_PROFILE` to use such profiling APIs.
Please refer to the example program in [here](https://github.sec.samsung.net/AIP/NPU_SystemService/blob/tizen/tests/apptests/tvn_triv2_profile.cc).
```console
NPU_PRIORITY_LOW = 0, /**< Low priority: tasks could be delayed or canceled */
NPU_PRIORITY_MID = 1, /**< Mid priority: tasks could be slightly delayed */
NPU_PRIORITY_HIGH = 2, /**< High priority: tasks should be issued immediately */
- NPU_PRIORITY_PROFILE = 3, /**< Special priority for profiling */
+ NPU_PRIORITY_PROFILE = NPU_PRIORITY_HIGH /**< Deprecated */
} npu_priority;
/**
* @param[in] modelid The model to be inferred.
* @param[in] input The input data to be inferred.
* @param[out] output The output result. The caller MUST allocate appropriately before calling this.
- * @return @c 0 or positive id if no error. otherwise a negative error value
+ * @return @c positive id if no error. otherwise a negative error value
*
* @detail This is a syntactic sugar of runNPU_async().
* CAUTION: There is a memcpy for the output buffer.
* @param[in] modelid The model to be inferred.
* @param[in] opmode NPU has different opmode with auto-inputs. Choose one.
* @param[in] hw_dev The target device feeding input data
- * @return @c 0 or positive id if no error. otherwise a negative error value
+ * @return @c positive id if no error. otherwise a negative error value
* @note input and output are handled internally by third-party HW (e.g., DSP)
*/
int runNPU_internalInput(npudev_h dev, uint32_t modelid, npu_input_opmode opmode,
* @param[out] sequence The sequence number returned with runNPU_async.
* @param[in] data The data given as a parameter to the runNPU_async call.
* @param[in] mode Configures how this operation works.
- * @return @c 0 or positive id if no error. otherwise a negative error value
+ * @return @c positive id if no error. otherwise a negative error value
*/
int runNPU_async(npudev_h dev, uint32_t modelid, const input_buffers *input,
npuOutputNotify cb, uint64_t *sequence, void *data,
* @param[in] task_id Identifier for each inference (obtained by runNPU_*)
* @param[out] profile Profile instance
* @return 0 if no error, otherwise a negative errno.
- * @note This API supports the models with NPU_PRIORITY_PROFILE only.
*/
int getNPU_profile (npudev_h dev, int task_id, npu_profile *profile);
'ne-data.cc',
'ne-handler.cc',
'ne-scheduler.cc',
- 'ne-inf.cc',
'ne-host-input-service.cc',
'ne-hw-input-service.cc',
'ne-thread-pool.cc'
}
void callback (output_buffers *output, uint64_t sequence) {
- if (output_ != nullptr) {
+ if (output_ != nullptr && output != nullptr) {
/** just copy internal variables of output buffers */
memcpy (output_, output, sizeof (output_buffers));
}
* @param[in] modelid The model to be inferred.
* @param[in] input The input data to be inferred.
* @param[out] output The output result.
- * @return @c 0 if no error. otherwise a negative error value
+ * @return @c positive id if no error. otherwise a negative error value
*/
int
HostHandler::runSync (uint32_t modelid, const input_buffers *input,
callbackSync sync (output);
int status = runAsync (modelid, input, callbackSync::callback,
static_cast <void*> (&sync), NPUASYNC_DROP_OLD, nullptr);
- if (status == 0) {
+ if (status > 0) {
/** sync needs to wait callback */
sync.wait ();
}
* @param[in] cb_data The data given as a parameter to the runNPU_async call.
* @param[in] mode Configures how this operation works.
* @param[out] sequence The sequence number returned with runNPU_async.
- * @return @c 0 if no error. otherwise a negative error value
+ * @return @c positive id if no error. otherwise a negative error value
*/
int
HostHandler::runAsync (uint32_t modelid, const input_buffers *input,
logerr (TAG, "Failed to extract generic buffer: %d\n", status);
goto delete_exit;
}
+ } else {
+ config.metadata_ext_dbuf_fd = -1;
+ config.metadata_ext_size = 0;
}
status = api_->registerModel (&config, model->getMetadata()->getNPUVersion());
Request *req = new Request (opmode);
req->setModel (model);
- req->setSegmentTable (segt);
+ req->setInferData (segt);
req->setCallback (std::bind (&TrinityVision2::callback, this, req, cb, cb_data));
- if (sequence)
- *sequence = req->getID();
+ if (sequence && req->getID () > 0) {
+ *sequence = (uint32_t) req->getID ();
+ }
return scheduler_->submitRequest (req);
}
Request *req = new Request (opmode);
req->setModel (model);
- req->setSegmentTable (segt);
+ req->setInferData (segt);
req->setHwDevice (hw_dev);
return scheduler_->submitRequest (req);
return;
const Model *model = req->getModel ();
- SegmentTable *segt = req->getSegmentTable ();
+ SegmentTable *segt = dynamic_cast<SegmentTable *> (req->getInferData ());
+ /** internal logic error */
+ assert (segt != nullptr);
+
output_buffers output = {
.num_buffers = segt->getNumOutputSegments ()
};
return *(instance_.get ());
}
+/**
+ * @brief submit a request, dispatching on the runtime type of the inference data
+ * @param[in] api the driver API (must not be null)
+ * @param[in] id the request id (also used as the task id)
+ * @param[in] model the target model; may be null for Buffer submissions
+ * @param[in] data the inference data; must be a Buffer or a SegmentTable
+ * @param[in] callback output callback
+ * @return task id if no error, otherwise a negative errno
+ */
+int
+HostInputService::submit (const DriverAPI *api, int id,
+    const Model *model, HWmem *data, outputCallback callback)
+{
+  if (api == nullptr)
+    return -EINVAL;
+
+  if (dynamic_cast<Buffer *> (data)) {
+    /** a null model is accepted for buffers; invoke_buffer () handles that case (e.g., NOP) */
+    return submit_buffer (api, id, model, dynamic_cast<Buffer *> (data), callback);
+  } else if (dynamic_cast<SegmentTable *> (data)) {
+    /** segment-table submissions always require a model */
+    if (model == nullptr)
+      return -EINVAL;
+    return submit_segt (api, id, model, dynamic_cast<SegmentTable *> (data), callback);
+  } else {
+    /** unknown HWmem subtype (or null data): reject */
+    return -EINVAL;
+  }
+}
+
/**
* @brief submit the request to the thread pool
* @param[in] api the driver API
* @param[in] model the target model
* @param[in] buffer the target buffer
* @param[in] callback output callback
- * @return 0 if no error, otherwise a negative errno.
+ * @return task id if no error, otherwise a negative errno.
*/
int
-HostInputService::submit (const DriverAPI *api, uint32_t id,
+HostInputService::submit_buffer (const DriverAPI *api, int id,
const Model *model, Buffer *buffer, outputCallback callback)
{
- if (api == nullptr)
- return -EINVAL;
-
- if (model != nullptr) {
- npuConstraint constraint = model->getConstraint ();
- if (constraint.priority == NPU_PRIORITY_PROFILE)
- return invoke_buffer (api, model, buffer, callback);
- }
-
taskFunc func = std::bind (&HostInputService::invoke_buffer, this,
- api, model, buffer, callback);
+ api, model, buffer, callback, id);
ThreadTask *task = new ThreadTask (id, func);
return ThreadPool::getInstance().enqueueTask (task);
* @param[in] model the target model
* @param[in] segt the target segment table
* @param[in] callback output callback
- * @return 0 if no error, otherwise a negative errno.
+ * @return task id if no error, otherwise a negative errno.
*/
int
-HostInputService::submit (const DriverAPI *api, uint32_t id,
+HostInputService::submit_segt (const DriverAPI *api, int id,
const Model *model, SegmentTable *segt, outputCallback callback)
{
- if (api == nullptr || model == nullptr)
- return -EINVAL;
-
- npuConstraint constraint = model->getConstraint ();
- if (constraint.priority == NPU_PRIORITY_PROFILE)
- return invoke_segt (api, model, segt, callback);
-
taskFunc func = std::bind (&HostInputService::invoke_segt, this,
- api, model, segt, callback);
+ api, model, segt, callback, id);
ThreadTask *task = new ThreadTask (id, func);
return ThreadPool::getInstance().enqueueTask (task);
* @return 0 if no erorr. otherwise a negative errno
*/
int
-HostInputService::remove (uint32_t id)
+HostInputService::remove (int id)
{
return ThreadPool::getInstance().removeTask (id);
}
*/
int
HostInputService::invoke_buffer (const DriverAPI *api, const Model *model,
- Buffer *buffer, outputCallback callback)
+ Buffer *buffer, outputCallback callback, int task_id)
{
input_config_t input_config;
device_state_t state;
goto handle_callback;
}
+ /** internal logic error */
+ assert (buffer != nullptr);
+
if (model != nullptr) {
/** consider NOP cases */
- if (model->getProgramData() == nullptr)
+ if (model->getProgramData() == nullptr) {
+ ret = 0;
goto handle_callback;
+ }
input_config.model_id = model->getInternalID();
} else {
input_config.model_id = 0;
}
- if (buffer != nullptr) {
- input_config.dbuf_fd = buffer->getDmabuf ();
- input_config.activation_offset_addr0 = buffer->getOffset ();
- input_config.activation_offset_addr1 = buffer->getOffset ();
- } else {
- /** some instructions do not require the buffer (e.g., nop) */
- goto handle_callback;
- }
+ input_config.dbuf_fd = buffer->getDmabuf ();
+ input_config.activation_offset_addr0 = buffer->getOffset ();
+ input_config.activation_offset_addr1 = buffer->getOffset ();
+ input_config.task_id = task_id;
/** run the inference with the input */
ret = api->runInput (&input_config);
*/
int
HostInputService::invoke_segt (const DriverAPI *api, const Model *model,
- SegmentTable *segt, outputCallback callback)
+ SegmentTable *segt, outputCallback callback, int task_id)
{
input_config_t input_config;
device_state_t state;
goto handle_callback;
}
- if (model == nullptr) {
- logerr (TAG, "No valid model provided\n");
- goto handle_callback;
- }
+ /** internal logic error */
+ assert (model != nullptr);
+ assert (segt != nullptr);
/** consider NOP cases */
- if (model->getProgramData() == nullptr)
+ if (model->getProgramData() == nullptr) {
+ ret = 0;
goto handle_callback;
+ }
input_config.model_id = model->getInternalID();
- if (segt != nullptr) {
- input_config.dbuf_fd = segt->getDmabuf ();
- input_config.num_segments = segt->getNumTotalSegments ();
- } else {
- /** some instructions do not require the segment table (e.g., nop) */
- goto handle_callback;
- }
+ input_config.dbuf_fd = segt->getDmabuf ();
+ input_config.num_segments = segt->getNumTotalSegments ();
/** set constraints */
constraint = model->getConstraint ();
input_config.output_mode = OUTPUT_CPU_INTR;
}
+ input_config.task_id = task_id;
/** run the inference with the input */
ret = api->runInput (&input_config);
if (ret < 0 && ret != -ECANCELED)
* @param[in] api the driver API
* @param[in] id the request id
* @param[in] model the target model
- * @param[in] segt the target segment table
+ * @param[in] data the inference data
* @param[in] callback output callback
- * @return 0 if no error, otherwise a negative errno.
+ * @return task_id if no error, otherwise a negative errno.
*/
int
-HwInputService::submit (const DriverAPI *api, uint32_t id,
- const Model *model, SegmentTable *segt, outputCallback callback)
+HwInputService::submit (const DriverAPI *api, int id,
+ const Model *model, HWmem *data, outputCallback callback)
{
if (api == nullptr || model == nullptr)
return -EINVAL;
- return invoke (api, model, segt, callback);
+ SegmentTable *segt = dynamic_cast<SegmentTable *> (data);
+ if (segt == nullptr)
+ return -EINVAL;
+
+ return invoke (api, model, segt, callback, id);
}
/**
* @param[in] model the target model
* @param[in] segt the target segment table
* @param[in] callback output callback
- * @return 0 if no error, otherwise a negative errno
+ * @return task_id if no error, otherwise a negative errno
*/
int
HwInputService::invoke (const DriverAPI *api, const Model *model,
- SegmentTable *segt, outputCallback callback)
+ SegmentTable *segt, outputCallback callback, int task_id)
{
input_config_t input_config;
device_state_t state;
input_config.hw_rdev = devnode_stat.st_rdev;
#else
+ input_config.task_id = task_id;
input_config.hw_rdev = 0; /* don't care */
#endif
+++ /dev/null
-/**
- * Proprietary
- * Copyright (C) 2020 Samsung Electronics
- * Copyright (C) 2020 Dongju Chae <dongju.chae@samsung.com>
- */
-/**
- * @file ne-inf.cc
- * @date 03 Apr 2020
- * @brief Implementation of internal APIs to manage inferences.
- * @author Dongju Chae <dongju.chae@samsung.com>
- * @bug No known bugs except for NYI items
- */
-
-#include "ne-inf.h"
-#include "ne-inputservice.h"
-
-/**
- * @brief get inputservice instance (singleton) based on opmode
- * @param[in] opmode input opmode to decide the input service type
- * @return input service if no error. otherwise nullptr;
- */
-static InputService *
-getInputService (npu_input_opmode opmode)
-{
- switch (opmode) {
- case NPUINPUT_HOST:
- return &HostInputService::getInstance ();
- case NPUINPUT_HW_RECURRING:
- return &HwInputService::getInstance ();
- default:
- return nullptr;
- }
-}
-
-/**
- * @brief invoke the input service with the request
- * @param[in] api driver api
- * @param[in] opmode npu input opmode
- * @param[in] requets_id the id of request
- * @param[in] model the model instance
- * @param[in] buffer the buffer instance
- * @param[in] callback the output callback
- * @return 0 if no error. otherwise a negative errno
- */
-int
-InferenceEngine::invokeInputService (const DriverAPI *api, npu_input_opmode opmode,
- uint32_t request_id, const Model *model, Buffer *buffer, outputCallback callback)
-{
- InputService *service = getInputService (opmode);
-
- if (service == nullptr || api == nullptr)
- return -EINVAL;
-
- return service->submit (api, request_id, model, buffer, callback);
-}
-
-/**
- * @brief invoke the input service with the request
- * @param[in] api driver api
- * @param[in] opmode npu input opmode
- * @param[in] requets_id the id of request
- * @param[in] model the model instance
- * @param[in] segt the segment table instance
- * @param[in] callback the output callback
- * @return 0 if no error. otherwise a negative errno
- */
-int
-InferenceEngine::invokeInputService (const DriverAPI *api, npu_input_opmode opmode,
- uint32_t request_id, const Model *model, SegmentTable *segt, outputCallback callback)
-{
- InputService *service = getInputService (opmode);
-
- if (service == nullptr || api == nullptr)
- return -EINVAL;
-
- return service->submit (api, request_id, model, segt, callback);
-}
-
-/**
- * @brief stop the request
- * @param[in] opmode npu input opmode
- * @param[in] request_id the id of submitted request
- * @return 0 if no error. otherwise a negative errno
- * @note if the request was already served, it cannot stop this
- */
-int
-InferenceEngine::stopRequest (npu_input_opmode opmode, uint32_t request_id)
-{
- InputService *service = getInputService (opmode);
-
- if (service == nullptr)
- return -EINVAL;
-
- return service->remove (request_id);
-}
+++ /dev/null
-/**
- * Proprietary
- * Copyright (C) 2019 Samsung Electronics
- * Copyright (C) 2019 MyungJoo Ham <myungjoo.ham@samsung.com>
- * Copyright (C) 2019 Dongju Chae <dongju.chae@samsung.com>
- */
-/**
- * @file ne-inf.h
- * @date 20 Jun 2019
- * @brief Header of internal APIs to manage inferences.
- * @see https://code.sec.samsung.net/confluence/display/ODLC/2020+Overall+Software+Stack
- * @author MyungJoo Ham <myungjoo.ham@samsung.com>
- * @author Dongju Chae <dongju.chae@samsung.com>
- * @bug No known bugs except for NYI items
- */
-
-#ifndef __NPU_ENGINE_N4_INFERENCE_H__
-#define __NPU_ENGINE_N4_INFERENCE_H__
-
-#include <stdint.h>
-#include <typedef.h>
-#include <NPUdrvAPI.h>
-
-#include "ne-common.h"
-#include "ne-model.h" /* represent model, with hwmem */
-#include "ne-buffer.h"
-#include "ne-segment-table.h"
-
-/**
- * @brief Designate how to stop tasks started by n4_start().
- * @todo Use the same enum with host library. Make this enum "common" with "npu_async_mode"
- * TODO: is this still valid ??
- */
-typedef enum {
- STOP_PREEMPT = 0, /**< Preemptive stop. Cancel the current job and stop the thread. */
- STOP_WAIT = 1, /**< Non-preemptive stop. Wait for the current job and stop the thread. */
- STOP_TRY = 2, /**< If there is a current job, do not stop the thread and return "RETRY" */
-} stop_condition;
-
-/** @brief class def. of inference engine */
-class InferenceEngine {
- public:
- static int invokeInputService (const DriverAPI *api, npu_input_opmode opmode,
- uint32_t request_id, const Model *model, Buffer *buffer,
- outputCallback callback = nullptr);
- static int invokeInputService (const DriverAPI *api, npu_input_opmode opmode,
- uint32_t request_id, const Model *model, SegmentTable *segt,
- outputCallback callback = nullptr);
- static int stopRequest (npu_input_opmode opmode, uint32_t request_id);
- /** TODO add extra features */
-};
-
-#endif /* __NPU_ENGINE_N4_INFERENCE_H__ */
class InputService {
public:
/** @brief submit request to somewhere (depends on each impl.) */
- virtual int submit (const DriverAPI *api, uint32_t request_id, const Model *model,
- Buffer *buffer, outputCallback callback = nullptr) { return -EPERM; }
- virtual int submit (const DriverAPI *api, uint32_t request_id, const Model *model,
- SegmentTable *segt, outputCallback callback = nullptr) { return -EPERM; }
+ virtual int submit (const DriverAPI *api, int request_id, const Model *model,
+ HWmem *data, outputCallback callback = nullptr) { return -EPERM; }
/** @brief [OPTIONAL] remove the submitted request (if possible) */
- virtual int remove (uint32_t request_id) { return -EINVAL; }
+ virtual int remove (int request_id) { return -EINVAL; }
virtual ~InputService () {}
public:
static HostInputService & getInstance ();
- int submit (const DriverAPI *api, uint32_t request_id, const Model *model,
+ int submit (const DriverAPI *api, int request_id, const Model *model,
+ HWmem *data, outputCallback callback = nullptr);
+ int remove (int request_id);
+
+ private:
+ int submit_buffer (const DriverAPI *api, int request_id, const Model *model,
Buffer *buffer, outputCallback callback = nullptr);
- int submit (const DriverAPI *api, uint32_t request_id, const Model *model,
+ int submit_segt (const DriverAPI *api, int request_id, const Model *model,
SegmentTable *segt, outputCallback callback = nullptr);
- int remove (uint32_t request_id);
-
- private:
/** do not allow to directly call invoke () */
int invoke_buffer (const DriverAPI *api, const Model *model, Buffer *buffer,
- outputCallback callback);
+ outputCallback callback, int task_id);
int invoke_segt (const DriverAPI *api, const Model *model, SegmentTable *segt,
- outputCallback callback);
+ outputCallback callback, int task_id);
static std::unique_ptr<HostInputService> instance_;
static std::once_flag once_flag_;
public:
static HwInputService & getInstance ();
- int submit (const DriverAPI *api, uint32_t request_id, const Model *model,
- SegmentTable *segt, outputCallback callback = nullptr);
+ int submit (const DriverAPI *api, int request_id, const Model *model,
+ HWmem *data, outputCallback callback = nullptr);
private:
/** do not allow to directly call invoke () */
int invoke (const DriverAPI *api, const Model *model, SegmentTable *segt,
- outputCallback callback);
+ outputCallback callback, int task_id);
static std::unique_ptr<HwInputService> instance_;
static std::once_flag once_flag_;
*/
#include "ne-scheduler.h"
-#include "ne-inf.h"
+#include "ne-inputservice.h"
+
#include <assert.h>
#define TAG _N3
-std::atomic<uint32_t> Request::global_request_id_ (1);
+std::atomic<int> Request::global_request_id_ (1);
/** @brief constructor of request class */
Request::Request (npu_input_opmode opmode)
: opmode_ (opmode), force_stop_ (false), stopped_ (false),
- model_ (nullptr), buffer_ (nullptr), segt_ (nullptr),
- cb_ (nullptr)
+ model_ (nullptr), data_ (nullptr), cb_ (nullptr)
{
request_id_ = Request::global_request_id_.fetch_add(1);
}
}
/**
- * @brief submit request to inference engine
+ * @brief get inputservice instance (singleton) based on opmode
+ * @param[in] opmode input opmode to decide the input service type
+ * @return input service if no error. otherwise nullptr
+ */
+static InputService *
+getInputService (npu_input_opmode opmode)
+{
+ switch (opmode) {
+ case NPUINPUT_HOST:
+ return &HostInputService::getInstance ();
+ case NPUINPUT_HW_RECURRING:
+ return &HwInputService::getInstance ();
+ default:
+ return nullptr;
+ }
+}
+
+/**
+ * @brief handle inference stop request
* @param[in] req the request instance
* @return 0 if no error. otherwise a negative errno.
*/
+int Scheduler::handleStop (Request *req)
+{
+  if (req->getForceStop ()) {
+    /** try to cancel every outstanding request that has not been scheduled yet */
+    std::function <bool (Request *)> functor =
+      [] (Request *r) -> bool {
+        InputService * service = getInputService (r->getOpmode ());
+
+        /* remove a request if it's not scheduled */
+        if (service->remove (r->getID ()) != 0) {
+          /* In case of already-served requests, let's mark it as stopped */
+          r->setStopped ();
+          return false;
+        }
+
+        return true;
+      };
+    /** NOTE(review): assumes for_each () drops entries whose functor returns true —
+     *  confirm against ThreadSafeMap::for_each semantics */
+    request_map_.for_each (functor);
+
+    /* send the stop signal to the device driver */
+    int status = api_->stop ();
+    if (status != 0)
+      return status;
+  }
+
+  /* wait until all requests are handled */
+  request_map_.wait_empty ();
+
+  /** the stop request itself is never inserted into request_map_, so free it here */
+  delete req;
+
+  return 0;
+}
+
+/**
+ * @brief handle inference request with input data from host
+ * @param[in] req the request instance
+ * @param[in] service the input service
+ * @return positive id if no error. otherwise a negative errno.
+ */
+int Scheduler::handleHostInput (Request *req, InputService *service)
+{
+  int req_id = req->getID ();
+  int status = request_map_.insert (req_id, req);
+  assert (status == 0); /** request ids come from an atomic counter, so they are unique and insertion must succeed */
+
+  const Model * model = req->getModel ();
+  HWmem * data = req->getInferData ();
+  /** bind the per-request completion handler; invoked when the inference finishes */
+  auto callback = std::bind (&Scheduler::handleCallback, this, req);
+
+  status = service->submit (api_, req_id, model, data, callback);
+  if (status < 0)
+    return status;
+
+  /* request id == task id */
+  return req_id;
+}
+
+/**
+ * @brief handle inference request with input data from hw device
+ * @param[in] req the request instance
+ * @param[in] service the input service
+ * @return positive id if no error. otherwise a negative errno.
+ */
+int Scheduler::handleHwInput (Request *req, InputService *service)
+{
+  int req_id = req->getID ();
+  int status = request_map_.insert (req_id, req);
+  assert (status == 0); /** request ids come from an atomic counter, so they are unique and insertion must succeed */
+
+  const Model * model = req->getModel ();
+  HWmem * data = req->getInferData ();
+  /** bind the per-request completion handler; invoked when the inference finishes */
+  auto callback = std::bind (&Scheduler::handleCallback, this, req);
+
+  /** HW-recurring input requires a segment table */
+  SegmentTable *segt = dynamic_cast<SegmentTable *> (data);
+  if (segt == nullptr)
+    /** NOTE(review): req was already inserted into request_map_ above; on this -EINVAL
+     *  the entry remains — confirm the caller's handleCallback () path removes it */
+    return -EINVAL;
+
+  segt->setHwDevice (req->getHwDevice ());
+
+  /* task id is obtained from a device driver */
+  return service->submit (api_, req_id, model, data, callback);
+}
+
+/**
+ * @brief submit request to inference engine
+ * @param[in] req the request instance
+ * @return 0 or positive value if no error. otherwise a negative errno.
+ */
int
Scheduler::submitRequest (Request *req)
{
}
npu_input_opmode opmode = req->getOpmode();
+ InputService * service = getInputService (opmode);
int status = 0;
- if (opmode == NPUINPUT_STOP) {
- if (req->getForceStop ()) {
- std::function <bool (Request *)> functor =
- [] (Request *r) -> bool {
- bool can_remove = true;
-
- /* remove a request if it's not scheduled */
- if (InferenceEngine::stopRequest (r->getOpmode (), r->getID ()) != 0) {
- /* In case of already-served requests, let's mark it as stopped */
- r->setStopped ();
- can_remove = false;
- }
-
- return can_remove;
- };
- request_map_.for_each (functor);
-
- /* send the stop signal to the device driver */
- status = api_->stop ();
- if (status != 0)
- return status;
- }
-
- /* wait until all requests are handled */
- request_map_.wait_empty ();
-
- delete req;
- } else {
- status = request_map_.insert (req->getID(), req);
- assert (status == 0); /** request ID is atomic value. So, should be successful */
-
- auto callback = std::bind (&Scheduler::handleCallback, this, req);
-
- /** consider NOP cases */
- if (req->getBuffer () != nullptr) {
- status = InferenceEngine::invokeInputService (api_, req->getOpmode(),
- req->getID(), req->getModel(), req->getBuffer(), callback);
- } else if (req->getSegmentTable () != nullptr) {
- req->getSegmentTable()->setHwDevice (req->getHwDevice ());
-
- status = InferenceEngine::invokeInputService (api_, req->getOpmode(),
- req->getID(), req->getModel(), req->getSegmentTable(), callback);
- } else
- status = -EINVAL;
-
- if (status < 0) {
- /** if failed to invoke input service, directly handle callback (if exists) */
- handleCallback (req);
- }
+ switch (opmode) {
+ case NPUINPUT_STOP:
+ return handleStop (req);
+ case NPUINPUT_HOST:
+ status = handleHostInput (req, service);
+ break;
+ case NPUINPUT_HW_RECURRING:
+ status = handleHwInput (req, service);
+ break;
+ default:
+ return -EINVAL;
}
+ /** if failed to invoke input service, directly handle callback (if exists) */
+ if (status < 0)
+ handleCallback (req);
+
return status;
}
#include <NPUdrvAPI.h>
#include "ne-model.h"
-#include "ne-buffer.h"
-#include "ne-segment-table.h"
+#include "ne-hwmem.h"
#include "ne-utils.h"
#include "ne-common.h"
+#include "ne-inputservice.h"
#include <atomic>
void setModel (const Model *model) { model_ = model; }
const Model *getModel () { return model_; }
- void setBuffer (Buffer *buffer) { buffer_ = buffer; }
- Buffer *getBuffer () { return buffer_; }
-
- void setSegmentTable (SegmentTable *segt) { segt_ = segt; }
- SegmentTable *getSegmentTable () { return segt_; }
+ void setInferData (HWmem *data) { data_ = data; }
+ HWmem *getInferData () { return data_; }
void setCallback (outputCallback cb) { cb_ = cb; }
outputCallback getCallback () { return cb_; }
bool getForceStop () { return force_stop_; }
npu_input_opmode getOpmode () { return opmode_; }
- uint32_t getID () { return request_id_; }
+ int getID () { return request_id_; }
void setStopped () { stopped_ = true; }
bool isStopped () { return stopped_; }
std::string getHwDevice () { return hw_dev_; }
private:
- static std::atomic<uint32_t> global_request_id_;
- uint32_t request_id_; /**< request id */
+ static std::atomic<int> global_request_id_;
+ int request_id_; /**< request id */
npu_input_opmode opmode_; /**< opmode of the request */
bool force_stop_; /**< indicates force stop */
bool stopped_; /**< stopped request */
const Model *model_; /**< model of the request */
- Buffer *buffer_; /**< buffer of the request */
- SegmentTable *segt_; /**< segment table of the request */
+ HWmem *data_; /**< inference data of the request */
outputCallback cb_; /**< request callback */
std::string hw_dev_; /**< HW device path */
* even if an user does not specify it.
*/
void handleCallback (Request *req);
+ int handleStop (Request *req);
+ int handleHostInput (Request *req, InputService *service);
+ int handleHwInput (Request *req, InputService *service);
- ThreadSafeMap<uint32_t, Request> request_map_;
+ ThreadSafeMap<int, Request> request_map_;
/**< request map */
const DriverAPI *api_; /**< driver api */
};
else
cmd_path += "/mRPsim/triv2.cmd";
- int taskid = global_fd_.fetch_add (1);
+ int taskid = input_config->task_id;
EmulTask *task = new EmulTask (taskid);
status = task_map_.insert (taskid, task);
for (uint32_t i = 0; i < repeat_; i++) {
status = runNPU_async (dev_, model_id_, &input_, callback,
NULL, this, NPUASYNC_WAIT);
- if (status != 0)
+ if (status < 0)
return status;
}
}
private:
- const uint64_t program_size = 0x1000;
- const uint64_t weight_size = 0x1000;
const uint32_t segment_size = 0x1000;
+ const uint64_t weight_size = 0x1000;
int prepare_model () {
generic_buffer model;
memset (&meta, '\x00', sizeof(npubin_meta));
meta.magiccode = NPUBIN_MAGICCODE | 0x3; /** npubinfmt v3 for TRIV2 */
- meta.program_size = program_size;
+ meta.program_size = 0;
meta.weight_size = weight_size;
- meta.size = NPUBIN_META_SIZE + program_size + weight_size;
+ meta.size = NPUBIN_META_SIZE + weight_size;
meta.type = SMODEL_OPS_NPU;
/** TRIV2 requires segment table */
output_buffers output;
status = runNPU_sync (dev_, model_id_, &input_, &output);
- if (status != 0)
+ if (status < 0)
return status;
return compare_result (output);
}
private:
- const uint64_t program_size = 0x1000;
const uint64_t weight_size = 0x1000;
const uint32_t segment_size = 0x1000;
memset (&meta, '\x00', sizeof(npubin_meta));
meta.magiccode = NPUBIN_MAGICCODE | 0x3; /** npubinfmt v3 for TRIV2 */
- meta.program_size = program_size;
+ meta.program_size = 0;
meta.weight_size = weight_size;
- meta.size = NPUBIN_META_SIZE + program_size + weight_size;
+ meta.size = NPUBIN_META_SIZE + weight_size;
meta.type = SMODEL_OPS_NPU;
/** TRIV2 requires segment table */
output_buffers output;
status = runNPU_sync (dev_, model_id_, &input_, &output);
- if (status != 0)
+ if (status < 0)
return status;
return compare_result (output);
input.bufs[0].size = filesize (inputpath);
output_buffers output;
- if (runNPU_sync(dev, modelid, &input, &output)) {
+ if (runNPU_sync (dev, modelid, &input, &output) < 0) {
cerr << "Fail to run inference\n";
unregisterNPUmodel (dev, modelid);
putNPUdevice (dev);
}
return UtilTRIV2::loadModel (
- model_dir, &model_id_, NPU_PRIORITY_PROFILE, NPU_TIMEOUT_MS);
+ model_dir, &model_id_, NPU_PRIORITY_MID, NPU_TIMEOUT_MS);
}
/** @brief run the inference */
install_rpath : ne_libdir,
install_dir : join_paths(ne_bindir, 'unittests')
)
- test('unittest_ne_core_inf', unittest_ne_core_sched, env: testenv)
-
- unittest_ne_core_inf = executable('unittest_ne_core_inf',
- ['ne_core_inf_test.cc'],
- include_directories: ne_host_inc,
- dependencies: [gtest_dep, ne_core_dep],
- install : true,
- install_rpath : ne_libdir,
- install_dir : join_paths(ne_bindir, 'unittests')
- )
- test('unittest_ne_core_inf', unittest_ne_core_inf, env: testenv)
+ test('unittest_ne_core_sched', unittest_ne_core_sched, env: testenv)
unittest_ne_core_thread_pool = executable('unittest_ne_core_thread_pool',
['ne_core_thread_pool_test.cc'],
npu_input_opmode opmode = NPUINPUT_HOST;
- EXPECT_EQ (device->run (opmode, model, &input_buf), 0);
- EXPECT_EQ (device->run (opmode, model, &input_buf), 0);
- EXPECT_EQ (device->run (opmode, model, &input_buf), 0);
+ EXPECT_GT (device->run (opmode, model, &input_buf), 0);
+ EXPECT_GT (device->run (opmode, model, &input_buf), 0);
+ EXPECT_GT (device->run (opmode, model, &input_buf), 0);
usleep (TEST_SLEEP_MS);
user_cb_data data;
data.num_called = 0;
- EXPECT_EQ (device->run (opmode, model, &input_buf, user_cb, &data), 0);
- EXPECT_EQ (device->run (opmode, model, &input_buf, user_cb, &data), 0);
- EXPECT_EQ (device->run (opmode, model, &input_buf, user_cb, &data), 0);
+ EXPECT_GT (device->run (opmode, model, &input_buf, user_cb, &data), 0);
+ EXPECT_GT (device->run (opmode, model, &input_buf, user_cb, &data), 0);
+ EXPECT_GT (device->run (opmode, model, &input_buf, user_cb, &data), 0);
wait_callbacks (data.num_called, max_called, data.m, data.cv);
EXPECT_EQ (data.num_called, max_called);
uint64_t sequence;
data.num_called = 0;
- EXPECT_EQ (device->run (opmode, model, &input_buf, user_cb, &data, &sequence), 0);
+ EXPECT_GT (device->run (opmode, model, &input_buf, user_cb, &data, &sequence), 0);
EXPECT_EQ (sequence, (uint64_t) 7); /* 7th run request */
- EXPECT_EQ (device->run (opmode, model, &input_buf, user_cb, &data, &sequence), 0);
+ EXPECT_GT (device->run (opmode, model, &input_buf, user_cb, &data, &sequence), 0);
EXPECT_EQ (sequence, (uint64_t) 8);
- EXPECT_EQ (device->run (opmode, model, &input_buf, user_cb, &data, &sequence), 0);
+ EXPECT_GT (device->run (opmode, model, &input_buf, user_cb, &data, &sequence), 0);
EXPECT_EQ (sequence, (uint64_t) 9);
wait_callbacks (data.num_called, max_called, data.m, data.cv);
npu_input_opmode opmode = NPUINPUT_HOST;
- EXPECT_EQ (device->run (opmode, model, &input_buf), 0);
- EXPECT_EQ (device->run (opmode, model, &input_buf), 0);
- EXPECT_EQ (device->run (opmode, model, &input_buf), 0);
+ EXPECT_GT (device->run (opmode, model, &input_buf), 0);
+ EXPECT_GT (device->run (opmode, model, &input_buf), 0);
+ EXPECT_GT (device->run (opmode, model, &input_buf), 0);
EXPECT_EQ (device->stop (true), 0);
usleep (TEST_SLEEP_MS); /** TODO remove this when stop() is implemented */
user_cb_data data;
data.num_called = 0;
- EXPECT_EQ (device->run (opmode, model, &input_buf, user_cb, &data), 0);
- EXPECT_EQ (device->run (opmode, model, &input_buf, user_cb, &data), 0);
- EXPECT_EQ (device->run (opmode, model, &input_buf, user_cb, &data), 0);
+ EXPECT_GT (device->run (opmode, model, &input_buf, user_cb, &data), 0);
+ EXPECT_GT (device->run (opmode, model, &input_buf, user_cb, &data), 0);
+ EXPECT_GT (device->run (opmode, model, &input_buf, user_cb, &data), 0);
EXPECT_EQ (device->stop (false), 0); /* wait until all requests are resolved */
usleep (TEST_SLEEP_MS); /** TODO remove this when stop() is implemented */
EXPECT_EQ (data.num_called, max_called); /** callbacks are called successfully */
/** runSync from host handler */
output_buffers output;
- EXPECT_EQ (handler->runSync (modelid, &input), 0);
- EXPECT_EQ (handler->runSync (modelid, &input, &output), 0);
+ output.num_buffers = 0;
+
+ EXPECT_GT (handler->runSync (modelid, &input), 0);
+ EXPECT_GT (handler->runSync (modelid, &input, &output), 0);
EXPECT_EQ (handler->unregisterModels (), 0);
EXPECT_EQ (handler->deallocGenericBuffer (&input), 0);
EXPECT_EQ (handler->allocGenericBuffer (&input), 0);
output_buffers output;
+ output.num_buffers = 0;
/** TRIV2 always requires model and input buffers */
- EXPECT_NE (handler->runSync (modelid, nullptr), 0);
- EXPECT_NE (handler->runSync (modelid + 1, &input), 0);
- EXPECT_NE (handler->runSync (modelid + 1, &input, &output), 0);
+ EXPECT_LT (handler->runSync (modelid, nullptr), 0);
+ EXPECT_LT (handler->runSync (modelid + 1, &input), 0);
+ EXPECT_LT (handler->runSync (modelid + 1, &input, &output), 0);
EXPECT_EQ (handler->unregisterModels (), 0);
EXPECT_EQ (handler->deallocGenericBuffer (&input), 0);
EXPECT_EQ (handler->allocGenericBuffer (&input), 0);
/** runAsync from host handler */
- EXPECT_EQ (handler->runAsync (modelid, &input), 0);
- EXPECT_EQ (handler->runAsync (modelid, &input), 0);
- EXPECT_EQ (handler->runAsync (modelid, &input), 0);
+ EXPECT_GT (handler->runAsync (modelid, &input), 0);
+ EXPECT_GT (handler->runAsync (modelid, &input), 0);
+ EXPECT_GT (handler->runAsync (modelid, &input), 0);
usleep (TEST_SLEEP_MS);
EXPECT_EQ (handler->allocGenericBuffer (&input), 0);
/** TRIV2 always requires model and input buffers */
- EXPECT_NE (handler->runAsync (modelid, nullptr), 0);
- EXPECT_NE (handler->runAsync (modelid + 1, &input), 0);
+ EXPECT_LT (handler->runAsync (modelid, nullptr), 0);
+ EXPECT_LT (handler->runAsync (modelid + 1, &input), 0);
EXPECT_EQ (handler->unregisterModels (), 0);
EXPECT_EQ (handler->deallocGenericBuffer (&input), 0);
+++ /dev/null
-/**
- * Proprietary
- * Copyright (C) 2019 Samsung Electronics
- * Copyright (C) 2019 Parichay Kapoor <pk.kapoor@samsung.com>
- * Copyright (C) 2019 Dongju Chae <dongju.chae@samsung.com>
- */
-/**
- * @file ne_core_inf_test.cc
- * @date 12 Aug 2019
- * @brief UnitTests to test functions in inference engine for NPU Engine
- * @author Parichay Kapoor <pk.kapoor@samsung.com>
- * @author Dongju Chae <dongju.chae@samsung.com>
- * @bug No known bugs except for NYI items
- */
-
-#include <ne-inf.h>
-#include "ne_unittest_utils.h"
-
-/**
- * @brief test invokeInputService ()
- */
-TEST (ne_core_inf_test, invoke)
-{
- std::unique_ptr<DriverAPI> api;
- api = DriverAPI::createDriverAPI (NPUCOND_TRIV2_CONN_SOCIP, 0);
-
- /** create dummy model & buffer */
- std::unique_ptr<Model> model (new Model (new HWmemDevice));
- model->setDriverAPI (api.get());
- EXPECT_EQ (model->alloc (4096), 0);
-
- std::unique_ptr<Buffer> buffer (new Buffer (new HWmemDevice));
- buffer->setDriverAPI (api.get());
- EXPECT_EQ (buffer->alloc (4096), 0);
-
- /** dummy segt */
- std::unique_ptr<SegmentTable> segt (new SegmentTable (new HWmemDevice));
- segt->setDriverAPI (api.get());
- EXPECT_EQ (segt->alloc (), 0);
-
- EXPECT_EQ (InferenceEngine::invokeInputService (
- api.get(), NPUINPUT_HOST, 0, model.get(), buffer.get()), 0);
- EXPECT_EQ (InferenceEngine::invokeInputService (
- api.get(), NPUINPUT_HOST, 1, model.get(), segt.get()), 0);
-
- usleep (TEST_SLEEP_MS);
-}
-
-/**
- * @brief test invokeInputService () with TRIV2
- */
-TEST (ne_core_inf_test, invoke_triv2)
-{
- std::unique_ptr<DriverAPI> api;
- api = DriverAPI::createDriverAPI (NPUCOND_TRIV2_CONN_SOCIP, 0);
-
- /** create dummy model & segt */
- std::unique_ptr<Model> model (new Model (new HWmemDevice));
- model->setDriverAPI (api.get());
- EXPECT_EQ (model->alloc (4096), 0);
-
- std::unique_ptr<SegmentTable> segt (new SegmentTable (new HWmemDevice));
- segt->setDriverAPI (api.get());
- EXPECT_EQ (segt->alloc (), 0);
-
- EXPECT_EQ (InferenceEngine::invokeInputService (
- api.get(), NPUINPUT_HOST, 2, model.get(), segt.get()), 0);
- EXPECT_EQ (InferenceEngine::invokeInputService (
- api.get(), NPUINPUT_HW_RECURRING, 3, model.get(), segt.get()), 0);
-
- usleep (TEST_SLEEP_MS);
-}
-
-/**
- * @brief test invokeInputService () with error handling
- */
-TEST (ne_core_inf_test, invoke_opmode_n)
-{
- std::unique_ptr<DriverAPI> api;
- api = DriverAPI::createDriverAPI (NPUCOND_TRIV2_CONN_SOCIP, 0);
-
- /** create dummy model & buffer */
- std::unique_ptr<Model> model (new Model (new HWmemDevice));
- model->setDriverAPI (api.get());
- EXPECT_EQ (model->alloc (4096), 0);
-
- std::unique_ptr<Buffer> buffer (new Buffer (new HWmemDevice));
- buffer->setDriverAPI (api.get());
- EXPECT_EQ (buffer->alloc (4096), 0);
-
- std::unique_ptr<SegmentTable> segt (new SegmentTable (new HWmemDevice));
- segt->setDriverAPI (api.get());
- EXPECT_EQ (segt->alloc (), 0);
-
- /** unsupported opmode */
- EXPECT_NE (InferenceEngine::invokeInputService (
- api.get(), NPUINPUT_STOP, 0, model.get(), buffer.get()), 0);
- EXPECT_NE (InferenceEngine::invokeInputService (
- api.get(), NPUINPUT_INTERNAL_CAM, 0, model.get(), buffer.get()), 0);
- EXPECT_NE (InferenceEngine::invokeInputService (
- api.get(), NPUINPUT_I2S_MIC, 0, model.get(), buffer.get()), 0);
-
- EXPECT_NE (InferenceEngine::invokeInputService (
- api.get(), NPUINPUT_STOP, 0, model.get(), segt.get()), 0);
- EXPECT_NE (InferenceEngine::invokeInputService (
- api.get(), NPUINPUT_INTERNAL_CAM, 0, model.get(), segt.get()), 0);
- EXPECT_NE (InferenceEngine::invokeInputService (
- api.get(), NPUINPUT_I2S_MIC, 0, model.get(), segt.get()), 0);
-
- /** HW_RECURRING works only with segment table */
- EXPECT_NE (InferenceEngine::invokeInputService (
- api.get(), NPUINPUT_HW_RECURRING, 0, model.get(), buffer.get()), 0);
-
- usleep (TEST_SLEEP_MS);
-}
-
-/**
- * @brief test invokeInputService () with error handling
- */
-TEST (ne_core_inf_test, invoke_args_n)
-{
- std::unique_ptr<DriverAPI> api;
- api = DriverAPI::createDriverAPI (NPUCOND_TRIV2_CONN_SOCIP, 0);
-
- /** create dummy model & buffer */
- std::unique_ptr<Model> model (new Model (new HWmemDevice));
- model->setDriverAPI (api.get());
- EXPECT_EQ (model->alloc (4096), 0);
-
- std::unique_ptr<Buffer> buffer (new Buffer (new HWmemDevice));
- buffer->setDriverAPI (api.get());
- EXPECT_EQ (buffer->alloc (4096), 0);
-
- std::unique_ptr<SegmentTable> segt (new SegmentTable (new HWmemDevice));
- segt->setDriverAPI (api.get());
- EXPECT_EQ (segt->alloc (), 0);
-
- /** invalid args (api should be valid) */
- EXPECT_NE (InferenceEngine::invokeInputService (
- nullptr, NPUINPUT_HOST, 0, nullptr, (Buffer *) nullptr), 0);
- EXPECT_NE (InferenceEngine::invokeInputService (
- nullptr, NPUINPUT_HOST, 0, model.get(), (Buffer *) nullptr), 0);
- EXPECT_NE (InferenceEngine::invokeInputService (
- nullptr, NPUINPUT_HOST, 0, nullptr, buffer.get()), 0);
- EXPECT_NE (InferenceEngine::invokeInputService (
- nullptr, NPUINPUT_HOST, 0, model.get(), buffer.get()), 0);
-
- EXPECT_NE (InferenceEngine::invokeInputService (
- nullptr, NPUINPUT_HOST, 0, nullptr, (SegmentTable *) nullptr), 0);
- EXPECT_NE (InferenceEngine::invokeInputService (
- nullptr, NPUINPUT_HOST, 0, model.get(), (SegmentTable *) nullptr), 0);
- EXPECT_NE (InferenceEngine::invokeInputService (
- nullptr, NPUINPUT_HOST, 0, nullptr, segt.get()), 0);
- EXPECT_NE (InferenceEngine::invokeInputService (
- nullptr, NPUINPUT_HOST, 0, model.get(), segt.get()), 0);
-}
-
-/**
- * @brief test StopRequest ()
- */
-TEST (ne_core_inf_test, stop)
-{
- std::unique_ptr<DriverAPI> api;
- api = DriverAPI::createDriverAPI (NPUCOND_TRIV2_CONN_SOCIP, 0);
-
- /** create dummy model & buffer */
- std::unique_ptr<Model> model (new Model (new HWmemDevice));
- model->setDriverAPI (api.get());
- EXPECT_EQ (model->alloc (4096), 0);
-
- std::unique_ptr<Buffer> buffer (new Buffer (new HWmemDevice));
- buffer->setDriverAPI (api.get());
- EXPECT_EQ (buffer->alloc (4096), 0);
-
- int max_called = 16; /** enough num to have pending requests */
- int num_called = 0;
- std::condition_variable cv;
- std::mutex m;
-
- auto callback = std::bind (test_callback_sleep, &num_called, &m, &cv);
- for (int i = 0; i < max_called; i++)
- EXPECT_EQ (InferenceEngine::invokeInputService (
- api.get(), NPUINPUT_HOST, i, model.get(), buffer.get(), callback), 0);
-
- EXPECT_EQ (InferenceEngine::stopRequest (NPUINPUT_HOST, max_called - 1), 0);
- max_called--;
-
- wait_callbacks (num_called, max_called, m, cv);
- EXPECT_EQ (num_called, max_called);
-}
-
-/**
- * @brief test StopRequest () with error handling
- */
-TEST (ne_core_inf_test, stop_opmode_n)
-{
- EXPECT_NE (InferenceEngine::stopRequest (NPUINPUT_STOP, 0), 0);
- EXPECT_NE (InferenceEngine::stopRequest (NPUINPUT_INTERNAL_CAM, 0), 0);
- EXPECT_NE (InferenceEngine::stopRequest (NPUINPUT_I2S_MIC, 0), 0);
-}
-
-/**
- * @brief test StopRequest () with error handling
- */
-TEST (ne_core_inf_test, stop_not_started_n)
-{
- std::unique_ptr<DriverAPI> api;
- api = DriverAPI::createDriverAPI (NPUCOND_TRIV2_CONN_SOCIP, 0);
-
- /** create dummy model & buffer */
- std::unique_ptr<Model> model (new Model (new HWmemDevice));
- model->setDriverAPI (api.get());
- EXPECT_EQ (model->alloc (4096), 0);
-
- std::unique_ptr<Buffer> buffer (new Buffer (new HWmemDevice));
- buffer->setDriverAPI (api.get());
- EXPECT_EQ (buffer->alloc (4096), 0);
-
- EXPECT_EQ (InferenceEngine::invokeInputService (
- api.get(), NPUINPUT_HOST, 0, model.get(), buffer.get()), 0);
- EXPECT_NE (InferenceEngine::stopRequest (NPUINPUT_HOST, 1), 0);
- EXPECT_NE (InferenceEngine::stopRequest (NPUINPUT_HOST, 2), 0);
- EXPECT_NE (InferenceEngine::stopRequest (NPUINPUT_HOST, 3), 0);
-
- usleep (TEST_SLEEP_MS);
-
- /** already handled */
- EXPECT_NE (InferenceEngine::stopRequest (NPUINPUT_HOST, 0), 0);
-}
-
-/**
- * @brief main function for unit test
- */
-int
-main (int argc, char **argv)
-{
- return start_gtest (argc, argv);
-}
EXPECT_EQ (buffer->alloc (4096), 0);
HostInputService & service = HostInputService::getInstance ();
- /** run without buffer */
- EXPECT_EQ (service.submit (api.get(), 0, model, (Buffer *) nullptr), 0);
- /** run with buffer */
EXPECT_EQ (service.submit (api.get(), 1, model, buffer), 0);
usleep (TEST_SLEEP_MS);
HostInputService & service = HostInputService::getInstance ();
- Buffer * buffer = nullptr;
+ Buffer * buffer = new Buffer (new HWmemDevice);
+ buffer->setDriverAPI (api.get());
+ EXPECT_EQ (buffer->alloc (4096), 0);
+
EXPECT_NE (service.submit (nullptr, 0, nullptr, buffer), 0);
EXPECT_NE (service.submit (nullptr, 0, model, buffer), 0);
EXPECT_EQ (service.submit (api.get(), 0, nullptr, buffer), 0);
EXPECT_EQ (service.submit (api.get(), 1, model, buffer), 0);
- SegmentTable * segt = nullptr;
+ SegmentTable * segt = new SegmentTable (new HWmemDevice);
+ segt->setDriverAPI (api.get());
+ EXPECT_EQ (segt->alloc (4096), 0);
+
EXPECT_NE (service.submit (nullptr, 2, nullptr, segt), 0);
EXPECT_NE (service.submit (nullptr, 2, model, segt), 0);
EXPECT_NE (service.submit (api.get(), 2, nullptr, segt), 0);
usleep (TEST_SLEEP_MS);
delete model;
+ delete buffer;
+ delete segt;
}
class TestInputService : public InputService
/** saturate requests up to the maximum (in thread pool) */
uint32_t num_threads = ThreadPool::getInstance().getNumThreads();
- Buffer * buffer = nullptr;
+ Buffer * buffer = new Buffer (new HWmemDevice);
+ buffer->setDriverAPI (api.get());
+ EXPECT_EQ (buffer->alloc (4096), 0);
+
for (uint32_t i = 0; i < num_threads + 1; i++)
EXPECT_EQ (service.submit (api.get(), i, model, buffer, callback), 0);
wait_callbacks (num_called, num_threads + 1, m, cv);
delete model;
+ delete buffer;
}
/**
/** saturate requests up to the maximum (in thread pool) */
uint32_t num_threads = ThreadPool::getInstance().getNumThreads();
- Buffer * buffer = nullptr;
+ Buffer * buffer = new Buffer (new HWmemDevice);
+ buffer->setDriverAPI (api.get());
+ EXPECT_EQ (buffer->alloc (4096), 0);
+
for (uint32_t i = 0; i < num_threads + 1; i++)
EXPECT_EQ (service.submit (api.get(), i, model, buffer, callback), 0);
wait_callbacks (num_called, num_threads, m, cv);
delete model;
+ delete buffer;
}
/**
std::mutex m;
auto callback = std::bind (test_callback_sleep, &num_called, &m, &cv);
- SegmentTable * segt = nullptr;
+
+ SegmentTable * segt = new SegmentTable (new HWmemDevice);
+ segt->setDriverAPI (api.get());
+ EXPECT_EQ (segt->alloc (4096), 0);
EXPECT_EQ (service.submit (api.get(), 0, model, segt, callback), 0);
EXPECT_EQ (service.submit (api.get(), 1, model, segt, callback), 0);
wait_callbacks (num_called, 2, m, cv);
delete model;
+ delete segt;
}
/**
std::mutex m;
auto callback = std::bind (test_callback_sleep, &num_called, &m, &cv);
- SegmentTable * segt = nullptr;
+
+ SegmentTable * segt = new SegmentTable (new HWmemDevice);
+ segt->setDriverAPI (api.get());
+ EXPECT_EQ (segt->alloc (4096), 0);
EXPECT_NE (service.submit (nullptr, 0, nullptr, segt, nullptr), 0);
EXPECT_NE (service.submit (api.get(), 0, nullptr, segt, callback), 0);
wait_callbacks (num_called, 1, m, cv);
delete model;
+ delete segt;
}
/**
EXPECT_GT (req->getID (), (uint32_t) 0);
req->setModel (model.get ());
- req->setBuffer (buffer.get ());
+ req->setInferData (buffer.get ());
req->setCallback (callback);
req->setForceStop (force_stop);
EXPECT_EQ (req->getModel (), model.get ());
- EXPECT_EQ (req->getBuffer (), buffer.get ());
+ EXPECT_EQ (req->getInferData (), buffer.get ());
EXPECT_NE (req->getCallback (), nullptr); /** cannot check the binded func */
EXPECT_EQ (req->getForceStop (), force_stop);
EXPECT_EQ (req->getOpmode (), NPUINPUT_HOST);
EXPECT_GT (req->getID (), (uint32_t) 0);
- /** set nullptr for model, buffer, segmentTable, maybe duplicate with constructor*/
req->setModel (nullptr);
- req->setBuffer (nullptr);
- req->setSegmentTable (nullptr);
+ req->setInferData (nullptr);
req->setForceStop (force_stop);
req->setHwDevice (hwDevice);
req->setStopped();
-
EXPECT_EQ (req->getModel (), nullptr);
- EXPECT_EQ (req->getBuffer (), nullptr);
- EXPECT_EQ (req->getSegmentTable (), nullptr);
+ EXPECT_EQ (req->getInferData (), nullptr);
EXPECT_EQ (req->getCallback (), nullptr);
EXPECT_EQ (req->getForceStop (), force_stop);
EXPECT_EQ (req->getHwDevice (), hwDevice);
Request * req = new Request (NPUINPUT_HOST);
req->setModel (model.get ());
- req->setBuffer (buffer.get ());
+ req->setInferData (buffer.get ());
req->setCallback (callback);
Request * req2 = new Request (NPUINPUT_HOST);
req2->setModel (model.get ());
- req2->setBuffer (buffer.get ());
+ req2->setInferData (buffer.get ());
req2->setCallback (callback);
Request * req3 = new Request (NPUINPUT_HOST);
req3->setModel (model.get ());
- req3->setBuffer (buffer.get ());
+ req3->setInferData (buffer.get ());
/** it's fine to have no callback */
Scheduler * sched;
sched = new Scheduler (api.get ());
- EXPECT_EQ (sched->submitRequest (req), 0);
- EXPECT_EQ (sched->submitRequest (req2), 0);
- EXPECT_EQ (sched->submitRequest (req3), 0);
+ EXPECT_EQ (sched->submitRequest (req), req->getID ());
+ EXPECT_EQ (sched->submitRequest (req2), req2->getID ());
+ EXPECT_EQ (sched->submitRequest (req3), req3->getID ());
/** requests are automatically destroyed */
wait_callbacks (num_called, 2, m, cv);
Request * req = new Request (NPUINPUT_HOST);
req->setModel (model.get ());
- req->setBuffer (buffer.get ());
+ req->setInferData (buffer.get ());
req->setCallback (callback);
Scheduler * sched;
/** no request provided */
sched = new Scheduler (api.get ());
EXPECT_NE (sched->submitRequest (nullptr), 0);
- EXPECT_EQ (sched->submitRequest (req), 0);
+ EXPECT_EQ (sched->submitRequest (req), req->getID ());
/** it waits until all requests are finished */
delete sched;
/** requests with not-supported opmode */
req = new Request (NPUINPUT_INTERNAL_CAM);
req->setModel (model.get());
- req->setBuffer (buffer.get());
+ req->setInferData (buffer.get());
EXPECT_NE (sched->submitRequest (req), 0);
req = new Request (NPUINPUT_I2S_MIC);
req->setModel (model.get());
- req->setBuffer (buffer.get());
+ req->setInferData (buffer.get());
EXPECT_NE (sched->submitRequest (req), 0);
/* skip */
return;
- EXPECT_EQ (tester.run (model_id, true), 0);
- EXPECT_EQ (tester.run (model_id, false), 0);
+ EXPECT_GT (tester.run (model_id, true), 0);
+ EXPECT_GT (tester.run (model_id, false), 0);
EXPECT_EQ (tester.runAll (true), 0);
EXPECT_EQ (tester.runAll (false), 0);