This patch adds a common API to cover runNPU_sync/async.
It also allows users to provide pre-allocated input/output buffers.
Signed-off-by: Dongju Chae <dongju.chae@samsung.com>
/**
* @brief Receives NPU results.
* @param[in] output NPU results. callback() should free this.
- * @param[in] sequence The sequence number returned with runNPU_async.
- * @param[in] data The data given as a parameter to the runNPU_async call.
+ * @param[in] req_id The request id returned with runNPU_*.
+ * @param[in] data The data given as a parameter to the runNPU_* call.
*
* @detail The callback function has the responsibility to free the given
- * output buffer (output).
- *
- * Values of sequence grow monotonically with a given model.
- * Therefore, if a sequence value is skipped, it means that
- * the inference request has been preemptied.
- *
- * The API implementation should be aware of bottlenecks.
- * Be prepared to handle if this callback takes too much time.
- * Approach 1: timeout.
- * Approach 2: thread pooling for callback invocation.
- * Approach 3: 1 + 2.
- */
-typedef void (*npuOutputNotify) (output_buffers *output, uint64_t sequence,
+ * output buffer (output) when its type is BUFFER_MAPPED.
+ */
+typedef void (*npuOutputNotify) (output_buffers *output, int req_id,
void *data);
+/**
+ * @brief Deprecated asynchronous mode (used only by the deprecated runNPU_async()).
+ */
typedef enum {
NPUASYNC_DROP_OLD, /**< If there is an unprocessed input data
in the double buffer, overwrite it */
} npu_async_mode;
/**
+ * @brief NPU model inference mode (used in runNPU_model)
+ */
+typedef enum {
+ NPU_INFER_BLOCKING = 0, /**< Blocking. Wait for NPU to finish inference */
+ NPU_INFER_NON_BLOCKING, /**< Non-blocking. Invoke a callback when available */
+} npu_infer_mode;
+
+/**
* @brief Description of priority for NPU inference requests
* @details NPU Engine currently supports three priorities; low, mid, and high.
* requests with higher priority are always handled preferentially than
npuConstraint constraint);
/**
- * @brief Execute inference. Wait (block) until the output is available.
+ * @brief Execute inference.
* @param[in] dev The NPU device handle
- * @param[in] model_id The model to be inferred.
+ * @param[in] model_id The model id to be inferred
+ * @param[in] mode Configures how this inference works.
+ * @param[in] input The input data to be inferred.
+ * @param[in,out] [nullable] output The output data to be filled in.
+ * @param[in] [nullable] cb The output callback handler
+ * @param[in] [nullable] data The data to pass to callback handler
+ * @return @c positive id if no error. otherwise a negative error value
+ * @note This API allows users to use pre-allocated (dmabuf) input/output buffers
+ * to avoid unnecessary memcpy. Make sure that they have 'BUFFER_DMABUF' types.
+ */
+int runNPU_model (npudev_h dev, uint32_t model_id, npu_infer_mode mode,
+ const input_buffers *input, output_buffers *output,
+ npuOutputNotify cb, void *data);
+
+/**
+ * @brief Execute inference. Blocking call (wait until output is available).
+ * @param[in] dev The NPU device handle
+ * @param[in] model_id The model id to be inferred
* @param[in] input The input data to be inferred.
- * @param[out] output The output result. The caller MUST allocate appropriately before calling this.
+ * @param[out] output The output result to be filled.
* @return @c positive id if no error. otherwise a negative error value
*
- * @detail This is a syntactic sugar of runNPU_async().
- * CAUTION: There is a memcpy for the output buffer.
+ * @note This is a syntactic sugar of runNPU_model() but deprecated.
+ * Please use runNPU_model().
+ * @detail There is a memcpy for the output buffer.
*/
int runNPU_sync (npudev_h dev, uint32_t model_id, const input_buffers *input,
output_buffers *output);
/**
+ * @brief Invoke NPU inference. Non-blocking call.
+ * @param[in] dev The NPU device handle
+ * @param[in] model_id The model id to be inferred
+ * @param[in] input The input data to be inferred.
+ * @param[in] [nullable] cb The output callback handler
+ * @param[out] [nullable] sequence The sequence number (deprecated; no longer populated).
+ * @param[in] [nullable] data The data to pass to callback handler
+ * @param[in] mode Configures how this operation works (deprecated).
+ * @return @c positive id if no error. otherwise a negative error value
+ *
+ * @note This is a syntactic sugar of runNPU_model() but deprecated.
+ * Please use runNPU_model().
+ * @detail There is a memcpy for the output buffer.
+ */
+int runNPU_async (npudev_h dev, uint32_t model_id, const input_buffers *input,
+ npuOutputNotify cb, uint64_t *sequence, void *data,
+ npu_async_mode mode);
+
+/**
* @brief Let NPU accept input frames from its internal source continuously
* @param[in] dev The NPU device handle
* @param[in] model_id The model to be inferred.
int stopNPU_internalInput (npudev_h dev, int id);
/**
- * @brief Invoke NPU inference. Unblocking call.
- * @param[in] dev The NPU device handle
- * @param[in] model_id The model to be inferred.
- * @param[in] input The input data to be inferred.
- * @param[in] cb The output buffer handler.
- * @param[out] sequence The sequence number returned with runNPU_async.
- * @param[in] data The data given as a parameter to the runNPU_async call.
- * @param[in] mode Configures how this operation works.
- * @return @c positive id if no error. otherwise a negative error value
- */
-int runNPU_async (npudev_h dev, uint32_t model_id, const input_buffers *input,
- npuOutputNotify cb, uint64_t *sequence, void *data,
- npu_async_mode mode);
-
-/**
* @brief get the current memory status for the given device
* @param[in] dev The NPU device handle
* @param[out] alloc_total The size of allocated memory until now
public:
callbackSync (output_buffers *output) : output_ (output), done_ (false) {}
- static void callback (output_buffers *output, uint64_t sequence, void *data) {
+ static void callback (output_buffers *output, int req_id, void *data) {
callbackSync *sync = static_cast<callbackSync *> (data);
- sync->callback (output, sequence);
+ sync->callback (output, req_id);
}
- void callback (output_buffers *output, uint64_t sequence) {
+ void callback (output_buffers *output, int req_id) {
if (output_ != nullptr && output != nullptr) {
/** just copy internal variables of output buffers */
memcpy (output_, output, sizeof (output_buffers));
};
/**
- * @brief Execute inference. Wait (block) until the output is available.
- * @param[in] modelid The model to be inferred.
- * @param[in] input The input data to be inferred.
- * @param[out] output The output result.
- * @return @c positive id if no error. otherwise a negative error value
- */
-int
-HostHandler::runSync (uint32_t modelid, const input_buffers *input,
- output_buffers *output) {
- callbackSync sync (output);
- int status =
- runAsync (modelid, input, callbackSync::callback,
- static_cast<void *> (&sync), NPUASYNC_DROP_OLD, nullptr);
- if (status > 0) {
- /** sync needs to wait callback */
- sync.wait ();
- }
- return status;
-}
-
-/**
- * @brief Invoke NPU inference. Unblocking call.
- * @param[in] modelid The model to be inferred.
+ * @brief Execute inference.
+ * @param[in] model_id The model id to be inferred
+ * @param[in] mode Configures how this inference works.
* @param[in] input The input data to be inferred.
- * @param[in] cb The output buffer handler.
- * @param[in] cb_data The data given as a parameter to the runNPU_async call.
- * @param[in] mode Configures how this operation works.
- * @param[out] sequence The sequence number returned with runNPU_async.
+ * @param[in,out] [nullable] output The output data to be filled in.
+ * @param[in] [nullable] cb The output callback handler
+ * @param[in] [nullable] data The data to pass to callback handler
* @return @c positive id if no error. otherwise a negative error value
*/
int
-HostHandler::runAsync (uint32_t modelid, const input_buffers *input,
- npuOutputNotify cb, void *cb_data, npu_async_mode mode,
- uint64_t *sequence) {
+HostHandler::runModel (uint32_t modelid, npu_infer_mode mode,
+ const input_buffers *input, output_buffers *output,
+ npuOutputNotify cb, void *data) {
Model *model = nullptr;
+ int req_id;
if (device_->needModel ()) {
model = getModel (modelid);
return -EINVAL;
}
- device_->setAsyncMode (mode);
+ switch (mode) {
+ case NPU_INFER_BLOCKING: {
+ callbackSync sync (output);
+ req_id =
+ device_->run (NPUINPUT_HOST, model, input, output,
+ callbackSync::callback, static_cast<void *> (&sync));
+ if (req_id > 0)
+ sync.wait ();
+ } break;
+ case NPU_INFER_NON_BLOCKING:
+ req_id = device_->run (NPUINPUT_HOST, model, input, output, cb, data);
+ break;
+ default:
+ return -EINVAL;
+ }
- int req_id =
- device_->run (NPUINPUT_HOST, model, input, cb, cb_data, sequence);
if (req_id > 0)
profiler_->appendRequest (req_id, model);
/** @brief implementation of TRIV2's run() */
int
TrinityVision2::run (npu_input_opmode opmode, const Model *model,
- const input_buffers *input, npuOutputNotify cb,
- void *cb_data, uint64_t *sequence) {
+ const input_buffers *input, output_buffers *output,
+ npuOutputNotify cb, void *cb_data) {
if (!initialized ()) {
logerr (TAG, "Uninitialized device; should use libnpuhost APIs\n");
return -EPERM;
const_cast<Model *> (model)->updateDataInfo ();
/** this device uses segment table */
- SegmentTable *segt = prepareSegmentTable (model, input);
+ SegmentTable *segt = prepareSegmentTable (model, input, output);
if (segt == nullptr) {
logerr (TAG, "Failed to create segment table instance\n");
return -EINVAL;
req->setCallback (
std::bind (&TrinityVision2::callback, this, req, cb, cb_data));
- if (sequence && req->getID () > 0) {
- *sequence = (uint32_t) req->getID ();
- }
-
return scheduler_->submitRequest (req);
}
for (uint32_t idx = 0; idx < output.num_buffers; idx++) {
uint32_t output_tensor_size = model->getOutputTensorSize (idx);
-
- output.bufs[idx].type = BUFFER_MAPPED;
- output.bufs[idx].size = output_tensor_size;
- /** user needs to free this */
- output.bufs[idx].addr = calloc (1, output_tensor_size);
+ HWmem *output_segment = segt->getOutputSegment (idx);
+
+ if (output_segment->isExternal ()) {
+ output.bufs[idx].type = BUFFER_DMABUF;
+ output.bufs[idx].size = output_segment->getSize ();
+ output.bufs[idx].addr = output_segment->getData ();
+ output.bufs[idx].dmabuf = output_segment->getDmabuf ();
+ output.bufs[idx].offset = output_segment->getOffset ();
+ } else {
+ output.bufs[idx].type = BUFFER_MAPPED;
+ output.bufs[idx].size = output_tensor_size;
+ /** user needs to free this */
+ output.bufs[idx].addr = calloc (1, output_tensor_size);
#if defined(ENABLE_FPGA_WORKAROUND)
- api_->fpga_memcpy (segt->getOutputSegment (idx)->getDmabuf (),
- segt->getOutputSegmentOffset (idx),
- output.bufs[idx].addr, output.bufs[idx].size);
+ api_->fpga_memcpy (output_segment->getDmabuf (),
+ segt->getOutputSegmentOffset (idx),
+ output.bufs[idx].addr, output.bufs[idx].size);
#else
- auto func = std::bind (TrinityVision2::manipulateData, model, idx, false,
- std::placeholders::_1, std::placeholders::_2,
- std::placeholders::_3);
- int status =
- comm_.insertGenericBuffer (segt->getOutputSegment (idx)->getData () +
- segt->getOutputSegmentOffset (idx),
- &output.bufs[idx], func);
+ auto func = std::bind (TrinityVision2::manipulateData, model, idx, false,
+ std::placeholders::_1, std::placeholders::_2,
+ std::placeholders::_3);
+ int status =
+ comm_.insertGenericBuffer (segt->getOutputSegment (idx)->getData () +
+ segt->getOutputSegmentOffset (idx),
+ &output.bufs[idx], func);
- if (status != 0) {
- logerr (TAG, "Failed to return output buffer: %d\n", status);
- }
+ if (status != 0) {
+ logerr (TAG, "Failed to return output buffer: %d\n", status);
+ }
#endif
+ }
}
cb (&output, req->getID (), cb_data);
int deallocGenericBuffer (generic_buffer *buffer);
int deallocGenericBuffer (generic_buffers *buffers);
- int runSync (uint32_t modelid, const input_buffers *input,
- output_buffers *output = nullptr);
- int runAsync (uint32_t modelid, const input_buffers *input,
- npuOutputNotify cb = nullptr, void *cb_data = nullptr,
- npu_async_mode mode = NPUASYNC_WAIT,
- uint64_t *sequence = nullptr);
+ int runModel (uint32_t modelid, npu_infer_mode mode,
+ const input_buffers *input, output_buffers *output = nullptr,
+ npuOutputNotify cb = nullptr, void *data = nullptr);
int runInternal (uint32_t modelid, npu_input_opmode opmode,
std::string hw_dev);
int stopInternal (int id);
}
virtual int run (npu_input_opmode opmode, const Model *model,
- const input_buffers *input, npuOutputNotify cb = nullptr,
- void *cb_data = nullptr, uint64_t *sequence = nullptr) = 0;
+ const input_buffers *input, output_buffers *output = nullptr,
+ npuOutputNotify cb = nullptr, void *cb_data = nullptr) = 0;
virtual int runInternal (npu_input_opmode opmode, const Model *model,
std::string hw_dev) {
uint32_t *size);
int run (npu_input_opmode opmode, const Model *model,
- const input_buffers *input, npuOutputNotify cb = nullptr,
- void *cb_data = nullptr, uint64_t *sequence = nullptr);
+ const input_buffers *input, output_buffers *output = nullptr,
+ npuOutputNotify cb = nullptr, void *cb_data = nullptr);
int runInternal (npu_input_opmode opmode, const Model *model,
std::string hw_dev);
const output_buffers *output) {
const Metadata *meta = model->getMetadata ();
- if (output == nullptr)
+ if (output == nullptr || output->num_buffers == 0)
return 0;
for (uint32_t i = 0; i < meta->getOutputNum (); i++) {
}
/**
- * @brief Execute inference. Wait (block) until the output is available.
+ * @brief Execute inference.
* @param[in] dev The NPU device handle
- * @param[in] modelid The model to be inferred.
+ * @param[in] model_id The model id to be inferred
+ * @param[in] mode Configures how this inference works.
* @param[in] input The input data to be inferred.
- * @param[out] output The output result. The caller MUST allocate appropriately before calling this.
- * @return @c 0 if no error. otherwise a negative error value
+ * @param[in,out] [nullable] output The output data to be filled in.
+ * @param[in] [nullable] cb The output callback handler
+ * @param[in] [nullable] data The data to pass to callback handler
+ * @return @c positive id if no error. otherwise a negative error value
+ * @note This API allows users to use pre-allocated (dmabuf) input/output buffers
+ * to avoid unnecessary memcpy. Make sure that they have 'BUFFER_DMABUF' types.
+ */
+int
+runNPU_model (npudev_h dev, uint32_t modelid, npu_infer_mode mode,
+ const input_buffers *input, output_buffers *output,
+ npuOutputNotify cb, void *data) {
+ INIT_HOST_HANDLER (host_handler, dev);
+
+ return host_handler->runModel (modelid, mode, input, output, cb, data);
+}
+
+/**
+ * @brief Execute inference. Blocking call (wait until output is available).
+ * @param[in] dev The NPU device handle
+ * @param[in] model_id The model id to be inferred
+ * @param[in] input The input data to be inferred.
+ * @param[out] output The output result to be filled.
+ * @return @c positive id if no error. otherwise a negative error value
*
- * @detail This is a syntactic sugar of runNPU_async().
- * CAUTION: There is a memcpy for the output buffer.
+ * @note This is a syntactic sugar of runNPU_model() but deprecated.
+ * Please use runNPU_model().
+ * @detail There is a memcpy for the output buffer.
*/
int
runNPU_sync (npudev_h dev, uint32_t modelid, const input_buffers *input,
output_buffers *output) {
- INIT_HOST_HANDLER (host_handler, dev);
-
- return host_handler->runSync (modelid, input, output);
+ /* runNPU_sync() assumes that output_buffers is filled by npu-engine */
+ memset (output, '\x00', sizeof (output_buffers));
+ return runNPU_model (dev, modelid, NPU_INFER_BLOCKING, input, output, nullptr,
+ nullptr);
}
/**
* @brief Invoke NPU inference. Unblocking call.
* @param[in] dev The NPU device handle
- * @param[in] modelid The model to be inferred.
+ * @param[in] model_id The model id to be inferred
* @param[in] input The input data to be inferred.
- * @param[in] cb The output buffer handler.
- * @param[out] sequence The sequence number returned with runNPU_async.
- * @param[in] data The data given as a parameter to the runNPU_async call.
- * @param[in] mode Configures how this operation works.
- * @return @c 0 if no error. otherwise a negative error value
+ * @param[in] [nullable] cb The output callback handler
+ * @param[out] [nullable] sequence The sequence number (deprecated; no longer populated).
+ * @param[in] [nullable] data The data to pass to callback handler
+ * @param[in] mode Configures how this operation works (deprecated).
+ * @return @c positive id if no error. otherwise a negative error value
+ *
+ * @note This is a syntactic sugar of runNPU_model() but deprecated.
+ * Please use runNPU_model().
+ * @detail There is a memcpy for the output buffer.
*/
int
runNPU_async (npudev_h dev, uint32_t modelid, const input_buffers *input,
npuOutputNotify cb, uint64_t *sequence, void *data,
npu_async_mode mode) {
- INIT_HOST_HANDLER (host_handler, dev);
-
- return host_handler->runAsync (modelid, input, cb, data, mode, sequence);
+ return runNPU_model (dev, modelid, NPU_INFER_NON_BLOCKING, input, nullptr, cb,
+ data);
}
/**
return 0;
}
- static void callback (output_buffers *output, uint64_t sequence, void *data) {
+ static void callback (output_buffers *output, int req_id, void *data) {
TesterTRIV2 *tester = static_cast<TesterTRIV2 *> (data);
tester->compare_result (output);
} user_cb_data;
static void
-user_cb (output_buffers *output, uint64_t sequence, void *data) {
+user_cb (output_buffers *output, int req_id, void *data) {
user_cb_data *cb_data = static_cast<user_cb_data *> (data);
std::unique_lock<std::mutex> lock (cb_data->m);
cb_data->num_called++;
user_cb_data data;
data.num_called = 0;
- EXPECT_GT (device->run (opmode, model, &input_buf, user_cb, &data), 0);
- EXPECT_GT (device->run (opmode, model, &input_buf, user_cb, &data), 0);
- EXPECT_GT (device->run (opmode, model, &input_buf, user_cb, &data), 0);
-
- wait_callbacks (data.num_called, max_called, data.m, data.cv);
- EXPECT_EQ (data.num_called, max_called);
-
- /** with sequence */
- uint64_t sequence;
- data.num_called = 0;
-
- EXPECT_GT (device->run (opmode, model, &input_buf, user_cb, &data, &sequence),
+ EXPECT_GT (device->run (opmode, model, &input_buf, nullptr, user_cb, &data),
0);
- EXPECT_EQ (sequence, (uint64_t) 7); /* 7th run request */
- EXPECT_GT (device->run (opmode, model, &input_buf, user_cb, &data, &sequence),
+ EXPECT_GT (device->run (opmode, model, &input_buf, nullptr, user_cb, &data),
0);
- EXPECT_EQ (sequence, (uint64_t) 8);
- EXPECT_GT (device->run (opmode, model, &input_buf, user_cb, &data, &sequence),
+ EXPECT_GT (device->run (opmode, model, &input_buf, nullptr, user_cb, &data),
0);
- EXPECT_EQ (sequence, (uint64_t) 9);
wait_callbacks (data.num_called, max_called, data.m, data.cv);
EXPECT_EQ (data.num_called, max_called);
user_cb_data data;
data.num_called = 0;
- EXPECT_GT (device->run (opmode, model, &input_buf, user_cb, &data), 0);
- EXPECT_GT (device->run (opmode, model, &input_buf, user_cb, &data), 0);
- EXPECT_GT (device->run (opmode, model, &input_buf, user_cb, &data), 0);
+ EXPECT_GT (device->run (opmode, model, &input_buf, nullptr, user_cb, &data),
+ 0);
+ EXPECT_GT (device->run (opmode, model, &input_buf, nullptr, user_cb, &data),
+ 0);
+ EXPECT_GT (device->run (opmode, model, &input_buf, nullptr, user_cb, &data),
+ 0);
EXPECT_EQ (device->stop (false),
0); /* wait until all requests are resolved */
usleep (TEST_SLEEP_MS); /** TODO remove this when stop() is implemented */
}
/**
- * @brief test HostHandler's runSync ()
+ * @brief test HostHandler's runModel (sync)
*/
TEST (ne_core_handler_test, handler_triv2_run_sync) {
std::unique_ptr<Device> device (
EXPECT_EQ (handler->allocGenericBuffer (&input), 0);
- /** runSync from host handler */
+ /** runModel (sync) from host handler */
output_buffers output;
output.num_buffers = 0;
- EXPECT_GT (handler->runSync (modelid, &input), 0);
- EXPECT_GT (handler->runSync (modelid, &input, &output), 0);
+ npu_infer_mode mode = NPU_INFER_BLOCKING;
+ EXPECT_GE (handler->runModel (modelid, mode, &input), 0);
+ EXPECT_GE (handler->runModel (modelid, mode, &input, &output), 0);
EXPECT_EQ (handler->unregisterModels (), 0);
EXPECT_EQ (handler->deallocGenericBuffer (&input), 0);
}
/**
- * @brief test HostHandler's runSync () with error handling
+ * @brief test HostHandler's runModel (sync) with error handling
*/
-TEST (ne_core_handler_test, handler_triv2_run_sync_n) {
+TEST (ne_core_handler_test, handler_triv2_run_model_n) {
std::unique_ptr<Device> device (
Device::createInstance (NPUCOND_TRIV2_CONN_SOCIP, 0));
ASSERT_NE (device.get (), nullptr);
output.num_buffers = 0;
/** TRIV2 always requires model and input buffers */
- EXPECT_LT (handler->runSync (modelid, nullptr), 0);
- EXPECT_LT (handler->runSync (modelid + 1, &input), 0);
- EXPECT_LT (handler->runSync (modelid + 1, &input, &output), 0);
+ npu_infer_mode mode = NPU_INFER_BLOCKING;
+ EXPECT_LT (handler->runModel (modelid, mode, nullptr), 0);
+ EXPECT_LT (handler->runModel (modelid + 1, mode, &input), 0);
+ EXPECT_LT (handler->runModel (modelid + 1, mode, &input, &output), 0);
EXPECT_EQ (handler->unregisterModels (), 0);
EXPECT_EQ (handler->deallocGenericBuffer (&input), 0);
}
/**
- * @brief test HostHandler's runAsync ()
+ * @brief test HostHandler's runModel (async)
*/
TEST (ne_core_handler_test, handler_triv2_run_async) {
std::unique_ptr<Device> device (
EXPECT_EQ (handler->allocGenericBuffer (&input), 0);
- /** runAsync from host handler */
- EXPECT_GT (handler->runAsync (modelid, &input), 0);
- EXPECT_GT (handler->runAsync (modelid, &input), 0);
- EXPECT_GT (handler->runAsync (modelid, &input), 0);
+ /** runModel (async) from host handler */
+ npu_infer_mode mode = NPU_INFER_NON_BLOCKING;
+ EXPECT_GE (handler->runModel (modelid, mode, &input), 0);
+ EXPECT_GE (handler->runModel (modelid, mode, &input), 0);
+ EXPECT_GE (handler->runModel (modelid, mode, &input), 0);
usleep (TEST_SLEEP_MS);
}
/**
- * @brief test HostHandler's runAsync () with error handling
+ * @brief test HostHandler's runModel (async) with error handling
*/
TEST (ne_core_handler_test, handler_triv2_run_async_n) {
std::unique_ptr<Device> device (
EXPECT_EQ (handler->allocGenericBuffer (&input), 0);
/** TRIV2 always requires model and input buffers */
- EXPECT_LT (handler->runAsync (modelid, nullptr), 0);
- EXPECT_LT (handler->runAsync (modelid + 1, &input), 0);
+ npu_infer_mode mode = NPU_INFER_NON_BLOCKING;
+ EXPECT_LT (handler->runModel (modelid, mode, nullptr), 0);
+ EXPECT_LT (handler->runModel (modelid + 1, mode, &input), 0);
EXPECT_EQ (handler->unregisterModels (), 0);
EXPECT_EQ (handler->deallocGenericBuffer (&input), 0);
/** @brief callback with verification */
void
-UtilTrinity::callbackVerify (output_buffers *output, uint64_t sequence,
- void *data) {
+UtilTrinity::callbackVerify (output_buffers *output, int req_id, void *data) {
bool success = true;
UtilModel *model = static_cast<UtilModel *> (data);
/** @brief callback without verification */
void
-UtilTrinity::callback (output_buffers *output, uint64_t sequence, void *data) {
+UtilTrinity::callback (output_buffers *output, int req_id, void *data) {
for (uint32_t idx = 0; idx < output->num_buffers; idx++)
free (output->bufs[idx].addr);
UtilModel *findModel (uint32_t model_id);
- static void callbackVerify (output_buffers *output, uint64_t sequence,
- void *data);
- static void callback (output_buffers *output, uint64_t sequence, void *data);
+ static void callbackVerify (output_buffers *output, int req_id, void *data);
+ static void callback (output_buffers *output, int req_id, void *data);
uint32_t wait ();