This commit revises unit/apptests to sync with updated interfaces.
Signed-off-by: Dongju Chae <dongju.chae@samsung.com>
/** @todo add more */
} npuConstraint;
-#define DEFAULT_TIMEOUT (1000) /**< default timeout, 1000ms */
+#define DEFAULT_TIMEOUT (3000) /**< default timeout, 3000ms */
#define DEFAULT_PRIORITY (NPU_PRIORITY_MID)
/**
#define MAX_THREADS (8)
#endif
-/** Default issue timeout in ms */
-#define DEFAULT_TIMEOUT (1000)
-
/** Get the host inservice element */
#define HOST_PRIV(INPUT_SERVICE) ((host_priv_t *) INPUT_SERVICE->pdata)
#define TEST_WAIT() pthread_cond_wait(&priv.cond, &priv.mutex)
typedef struct {
- uint64_t cur_sequence;
+ uint32_t num_callbacks;
pthread_mutex_t mutex;
pthread_cond_t cond;
- int result;
} test_priv;
#if defined(ENABLE_EMUL)
{
TEST_LOCK();
- /** as async mode is NPUASYNC_WAIT, sequnce number should be matched */
- if (sequence != ++priv.cur_sequence)
- priv.result = -1;
+ ++priv.num_callbacks;
free (output);
if ((err = registerNPUmodel(dev, model, &model_id)) != 0)
goto out_free_all;
- priv.cur_sequence = 0;
- priv.result = 0;
+ priv.num_callbacks = 0;
/** run NPU inference */
for (i = 0; i < num_tests; i++) {
/** wait until all callbacks are called */
TEST_LOCK();
- while (priv.cur_sequence != num_tests && priv.result != -1)
+ while (priv.num_callbacks != num_tests)
TEST_WAIT();
- err = priv.result;
+ err = 0;
TEST_UNLOCK();
out_unregister:
ASSERT_EQ (ret, RET_SUCCESS);
set_default_val_to_meta (&meta, "DummyModel");
+#ifdef ENABLE_BUFFERING
/* Try a huge size of memory (set SIZE_MAX to buffer_size) */
meta.buffer_size = SIZE_MAX;
ret = -1;
model = NULL;
}
ASSERT_NE (ret, RET_SUCCESS);
+#endif
/* Try a huge size of memory (almost UINT32_MAX) */
set_default_val_to_meta (&meta, "DummyModel");
EXPECT_EQ (host_handle->setOpMode(NPUINPUT_HOST, true, id[1], version[1],
test_cb, ptr[1]), 0);
+#ifdef ENABLE_BUFFERING
GET_MEM()->resize_buffers(NPUBIN_META_SIZE * 2);
buffer_ptr = host_handle->getCurrentInputBuffer(NPUASYNC_WAIT, &err);
EXPECT_EQ (err, 0);
- EXPECT_EQ (host_handle->validateBuffer(buffer_ptr, DEFAULT_PRIORITY,
- get_timestamp(DEFAULT_TIMEOUT)), 0);
+#else
+ buffer_ptr = GET_MEM()->create_buffer(NPUBIN_META_SIZE * 2, &err);
+ EXPECT_EQ (err, 0);
+#endif
+ EXPECT_EQ (host_handle->validateBuffer(buffer_ptr, NPU_PRIORITY_LOW, DEFAULT_TIMEOUT), 0);
sleep (2);
+
/** stop the model1 */
EXPECT_EQ (host_handle->setOpMode(NPUINPUT_STOP, true, id[1], version[1],
test_cb, ptr[1]), 0);
}
/* case 1: no data manipulation (depth == 64) */
- GET_MEM()->resize_buffers(2 * num_tensors * size_tensor);
-
for (uint32_t i = 0; i < num_tensors; i++) {
/* let's assume those tensors are 'size_tensor' apart from each other */
meta.input_offsets[i] = 2 * i * size_tensor;
meta.input_dims[i][3] = 64;
}
+#ifdef ENABLE_BUFFERING
+ GET_MEM()->resize_buffers(2 * num_tensors * size_tensor);
buffer_ptr = host_handle->getCurrentInputBuffer(NPUASYNC_WAIT, &err);
+#else
+ buffer_ptr = GET_MEM()->create_buffer (2 * num_tensors * size_tensor, &err);
+#endif
EXPECT_EQ (err, 0);
ASSERT_EQ (host_handle->feedInputBuffer (&meta, &info, &input, buffer_ptr), 0);
GET_MEM()->reset_buffer(buffer_ptr);
/* case 2: data manipulation (depth == 16) */
- GET_MEM()->resize_buffers(2 * 4 * num_tensors * size_tensor);
-
for (uint32_t i = 0; i < num_tensors; i++) {
/* let's assume those tensors are 'size_tensor' apart from each other */
meta.input_offsets[i] = 2 * 4 * i * size_tensor;
meta.input_dims[i][3] = 16;
}
+#ifdef ENABLE_BUFFERING
+ GET_MEM()->resize_buffers(2 * 4 * num_tensors * size_tensor);
buffer_ptr = host_handle->getCurrentInputBuffer(NPUASYNC_WAIT, &err);
+#else
+ buffer_ptr = GET_MEM()->create_buffer (2 * 4 * num_tensors * size_tensor, &err);
+#endif
EXPECT_EQ (err, 0);
ASSERT_EQ (host_handle->feedInputBuffer (&meta, &info, &input, buffer_ptr), 0);
GET_MEM()->reset_buffer(buffer_ptr);
/* case 3: data manipulation (depth == 32) */
- GET_MEM()->resize_buffers(2 * 2 * num_tensors * size_tensor);
-
for (uint32_t i = 0; i < num_tensors; i++) {
/* let's assume those tensors are 'size_tensor' apart from each other */
meta.input_offsets[i] = 2 * 2 * i * size_tensor;
meta.input_dims[i][3] = 32;
}
+#ifdef ENABLE_BUFFERING
+ GET_MEM()->resize_buffers(2 * 2 * num_tensors * size_tensor);
buffer_ptr = host_handle->getCurrentInputBuffer(NPUASYNC_WAIT, &err);
+#else
+ buffer_ptr = GET_MEM()->create_buffer (2 * 2 * num_tensors * size_tensor, &err);
+#endif
EXPECT_EQ (err, 0);
ASSERT_EQ (host_handle->feedInputBuffer (&meta, &info, &input, buffer_ptr), 0);
GET_MEM()->reset_buffer(buffer_ptr);
/* case 4: data manipulation (depth == 128) */
- GET_MEM()->resize_buffers(2 * num_tensors * size_tensor);
-
for (uint32_t i = 0; i < num_tensors; i++) {
/* let's assume those tensors are 'size_tensor' apart from each other */
meta.input_offsets[i] = 2 * i * size_tensor;
meta.input_dims[i][3] = 128;
}
+#ifdef ENABLE_BUFFERING
+ GET_MEM()->resize_buffers(2 * num_tensors * size_tensor);
buffer_ptr = host_handle->getCurrentInputBuffer(NPUASYNC_WAIT, &err);
+#else
+ buffer_ptr = GET_MEM()->create_buffer (2 * num_tensors * size_tensor, &err);
+#endif
EXPECT_EQ (err, 0);
ASSERT_EQ (host_handle->feedInputBuffer (&meta, &info, &input, buffer_ptr), 0);
}
/* case 1: no data manipulation (depth == 64) */
- GET_MEM()->resize_buffers(2 * num_tensors * size_tensor);
-
for (uint32_t i = 0; i < num_tensors; i++) {
/* let's assume those tensors are 'size_tensor' apart from each other */
meta.input_offsets[i] = 2 * i * size_tensor;
meta.input_quant_z[i] = 127;
}
+#ifdef ENABLE_BUFFERING
+ GET_MEM()->resize_buffers(2 * num_tensors * size_tensor);
buffer_ptr = host_handle->getCurrentInputBuffer(NPUASYNC_WAIT, &err);
+#else
+ buffer_ptr = GET_MEM()->create_buffer (2 * num_tensors * size_tensor, &err);
+#endif
EXPECT_EQ (err, 0);
ASSERT_EQ (host_handle->feedInputBuffer (&meta, &info, &input, buffer_ptr), 0);
GET_MEM()->reset_buffer(buffer_ptr);
/* case 2: data manipulation (depth == 32) */
- GET_MEM()->resize_buffers(2 * 2 * num_tensors * size_tensor);
-
for (uint32_t i = 0; i < num_tensors; i++) {
/* let's assume those tensors are 'size_tensor' apart from each other */
meta.input_offsets[i] = 2 * 2 * i * size_tensor;
meta.input_quant_z[i] = 127;
}
+#ifdef ENABLE_BUFFERING
+ GET_MEM()->resize_buffers(2 * 2 * num_tensors * size_tensor);
buffer_ptr = host_handle->getCurrentInputBuffer(NPUASYNC_WAIT, &err);
+#else
+ buffer_ptr = GET_MEM()->create_buffer (2 * 2 * num_tensors * size_tensor, &err);
+#endif
EXPECT_EQ (err, 0);
ASSERT_EQ (host_handle->feedInputBuffer (&meta, &info, &input, buffer_ptr), 0);
void test_cb (buffer *buf, void *data)
{
model *model_ptr;
- buffer *buffer_ptr;
- int err;
- buffer_state state;
EXPECT_NE (data, nullptr);
pthread_cond_broadcast (&cond);
pthread_mutex_unlock (&mutex);
+#ifdef ENABLE_BUFFERING
+ int err;
+ buffer_state state;
+ buffer *buffer_ptr;
+
buffer_ptr = GET_MEM()->get_next_buffer(NPUASYNC_WAIT, BUFFER_ROLE_OUTPUT, &err);
EXPECT_EQ (err, 0);
+ EXPECT_EQ (buf, buffer_ptr);
+
/** change to EMPTY */
EXPECT_EQ (GET_MEM()->return_buffer(buffer_ptr), 0);
state = buffer_get_state (buffer_ptr);
EXPECT_EQ (state, BUFFER_STATE_EMPTY);
+#else
+ GET_MEM()->reset_buffer(buf);
+#endif
}
/**
/** calling other functions without configure */
EXPECT_LT (n4_start (SMODEL_OPS_NPU), 0);
EXPECT_LT (n4_stop (SMODEL_OPS_NPU, STOP_PREEMPT), 0);
- EXPECT_LT (n4_dataReady(DEFAULT_PRIORITY, get_timestamp(DEFAULT_TIMEOUT)), 0);
+ EXPECT_LT (n4_dataReady(NULL, NPU_PRIORITY_LOW, DEFAULT_TIMEOUT), 0);
/** correct configuration */
EXPECT_EQ (n4_configure (SMODEL_OPS_NPU, model_ptr, test_cb, model_ptr), 0);
/** calling data ready without start */
EXPECT_LT (n4_stop (SMODEL_OPS_NPU, STOP_PREEMPT), 0);
- EXPECT_LT (n4_dataReady(DEFAULT_PRIORITY, get_timestamp(DEFAULT_TIMEOUT)), 0);
+ EXPECT_LT (n4_dataReady(NULL, NPU_PRIORITY_LOW, DEFAULT_TIMEOUT), 0);
/** starting wrong device */
EXPECT_LT (n4_start (SMODEL_OPS_END), 0);
/** correct start device */
EXPECT_EQ (n4_start (SMODEL_OPS_NPU), 0);
+#ifdef ENABLE_BUFFERING
/** setup data */
GET_MEM()->resize_buffers(NPUBIN_META_SIZE * 5);
buffer_ptr = GET_MEM()->get_next_buffer(NPUASYNC_WAIT, BUFFER_ROLE_INPUT, &err);
EXPECT_EQ (err, 0);
EXPECT_EQ (GET_MEM()->return_buffer(buffer_ptr), 0);
/** call n4_dataReady() here */
- EXPECT_EQ (n4_dataReady(DEFAULT_PRIORITY, get_timestamp(DEFAULT_TIMEOUT)), 0);
+ EXPECT_EQ (n4_dataReady(NULL, NPU_PRIORITY_LOW, DEFAULT_TIMEOUT), 0);
+#else
+ buffer_ptr = GET_MEM()->create_buffer(NPUBIN_META_SIZE * 5, &err);
+ EXPECT_EQ (err, 0);
+ EXPECT_EQ (n4_dataReady(buffer_ptr, NPU_PRIORITY_LOW, DEFAULT_TIMEOUT), 0);
+#endif
/** let the completion callback be called */
sleep(SLEEP_DURATION);
/** correct stopping device */
/** correct start device */
EXPECT_EQ (n4_start (SMODEL_OPS_NPU), 0);
+#ifdef ENABLE_BUFFERING
/** setup data */
GET_MEM()->resize_buffers(NPUBIN_META_SIZE * 4);
buffer_ptr = GET_MEM()->get_next_buffer(NPUASYNC_WAIT, BUFFER_ROLE_INPUT, &err);
EXPECT_EQ (err, 0);
EXPECT_EQ (GET_MEM()->return_buffer(buffer_ptr), 0);
/** call n4_dataReady() here */
- EXPECT_EQ (n4_dataReady(DEFAULT_PRIORITY, get_timestamp(DEFAULT_TIMEOUT)), 0);
+ EXPECT_EQ (n4_dataReady(NULL, NPU_PRIORITY_LOW, DEFAULT_TIMEOUT), 0);
+#else
+ buffer_ptr = GET_MEM()->create_buffer(NPUBIN_META_SIZE * 4, &err);
+ EXPECT_EQ (err, 0);
+ EXPECT_EQ (n4_dataReady(buffer_ptr, NPU_PRIORITY_LOW, DEFAULT_TIMEOUT), 0);
+#endif
+
/** completion callback to be called */
sleep(SLEEP_DURATION);
/** correct stopping device */
/** correct start device */
EXPECT_EQ (n4_start (SMODEL_OPS_NPU), 0);
+#ifdef ENABLE_BUFFERING
/** setup data */
GET_MEM()->resize_buffers(NPUBIN_META_SIZE * 3);
buffer_ptr = GET_MEM()->get_next_buffer(NPUASYNC_WAIT, BUFFER_ROLE_INPUT, &err);
EXPECT_EQ (GET_MEM()->return_buffer(buffer_ptr), 0);
/** call n4_dataReady() here */
- EXPECT_EQ (n4_dataReady(DEFAULT_PRIORITY, get_timestamp(DEFAULT_TIMEOUT)), 0);
+ EXPECT_EQ (n4_dataReady(NULL, NPU_PRIORITY_LOW, DEFAULT_TIMEOUT), 0);
+#else
+ buffer_ptr = GET_MEM()->create_buffer(NPUBIN_META_SIZE * 3, &err);
+ EXPECT_EQ (err, 0);
+ EXPECT_EQ (n4_dataReady(buffer_ptr, NPU_PRIORITY_LOW, DEFAULT_TIMEOUT), 0);
+#endif
+
/** wait for the device to stop and completion callback to be called */
sleep(SLEEP_DURATION);
/** correct stopping device */
/** correct start device */
EXPECT_EQ (n4_start (SMODEL_OPS_NPU), 0);
+#ifdef ENABLE_BUFFERING
GET_MEM()->resize_buffers(NPUBIN_META_SIZE * 2);
+#endif
pthread_mutex_lock (&mutex);
callback_count = 0;
pthread_mutex_unlock (&mutex);
for (idx = 0; idx < num_callbacks; idx ++) {
+#ifdef ENABLE_BUFFERING
/** setup data */
buffer_ptr = GET_MEM()->get_next_buffer(NPUASYNC_WAIT, BUFFER_ROLE_INPUT, &err);
EXPECT_EQ (err, 0);
EXPECT_EQ (GET_MEM()->return_buffer(buffer_ptr), 0);
/** call n4_dataReady() here */
- EXPECT_EQ (n4_dataReady(DEFAULT_PRIORITY, get_timestamp(DEFAULT_TIMEOUT)), 0);
+ EXPECT_EQ (n4_dataReady(NULL, NPU_PRIORITY_LOW, DEFAULT_TIMEOUT), 0);
+#else
+ buffer_ptr = GET_MEM()->create_buffer(NPUBIN_META_SIZE * 2, &err);
+ EXPECT_EQ (err, 0);
+ EXPECT_EQ (n4_dataReady(buffer_ptr, NPU_PRIORITY_LOW, DEFAULT_TIMEOUT), 0);
+#endif
}
pthread_mutex_lock (&mutex);
ASSERT_EQ (hwmem->size, slice_size * 7);
ASSERT_EQ (mem->get_used_size(), slice_size * 7);
+#ifdef ENABLE_BUFFERING
/* config I/O buffer size (>= 3x size for triple buffering) */
ASSERT_EQ (mem->resize_buffers (slice_size), 0);
ASSERT_EQ (mem->get_used_size(), slice_size * 7 + slice_size * 3);
ASSERT_NE (mem->resize_buffers (slice_size * 2), 0);
ASSERT_EQ (mem->resize_buffers (0), 0);
ASSERT_EQ (mem->get_used_size(), slice_size * 7);
+#endif
mem->dealloc (hwmem);
ASSERT_EQ (mem->get_used_size(), 0);
ASSERT_EQ (hwmem->size, slice_size * 7);
ASSERT_EQ (mem->get_used_size(), slice_size * 7);
+#ifdef ENABLE_BUFFERING
/* config I/O buffer size (>= 3x size for triple buffering) */
ASSERT_EQ (mem->resize_buffers (slice_size), 0);
ASSERT_EQ (mem->get_used_size(), slice_size * 7 + slice_size * 3);
ASSERT_EQ (mem->get_used_size(), slice_size * 7 + (slice_size / 2) * 3);
ASSERT_EQ (mem->resize_buffers (0), 0);
ASSERT_EQ (mem->get_used_size(), slice_size * 7);
+#endif
mem->dealloc (hwmem);
ASSERT_EQ (mem->get_used_size(), 0);
mem->init (conf->reserved_mem_size, &pool_size);
}
+#ifdef ENABLE_BUFFERING
/**
* @brief prepare input buffer
*/
pthread_join (thread[1], (void **)&status);
pthread_join (thread[2], (void **)&status);
}
+#endif
/**
* @brief test for memory compaction
mem->dealloc (hwmem[3]);
}
+#ifdef ENABLE_BUFFERING
/**
* @brief data struct to pass mode and sync primitive to helper thread */
typedef struct {
pthread_mutex_destroy (&mutex1);
}
+#endif
/**
* @brief main function for unit test