/**
* @brief memory chunk state
+ * @note here, the 'modified' state covers any operation (e.g., compaction, resize)
+ * that can change the offset and size of used chunks
*/
typedef enum {
- CHUNK_STATE_FREE, /**< it can be compacted or activated later */
- CHUNK_STATE_ACTIVATED, /**< it's activated by a user; it cannot be compacted */
- CHUNK_STATE_COMPACT, /**< it's being compacted */
+ CHUNK_STATE_FREE, /**< free state: this chunk can be compacted or activated later */
+ CHUNK_STATE_ACTIVATED, /**< activated state: this chunk is being used by a client */
+ CHUNK_STATE_MODIFIED, /**< modified state: this chunk is being modified */
} chunk_state;
/**
uint64_t offset; /**< chunk offset */
uint64_t size; /**< chunk size */
chunk_state state; /**< chunk state */
+
list_node list; /**< list element */
pthread_mutex_t mutex; /**< mutex for locking */
+ pthread_cond_t cond; /**< cond for broadcast */
} chunk;
/**
} hwmem_type;
/**
- * @brief private data structure for buffer
+ * @brief private data structure for hwmem
*/
typedef struct {
- chunk *chunk; /**< associated chunk */
- hwmem_type type; /**< hwmem type */
+ chunk *chunk; /**< associated chunk */
+ hwmem_type type; /**< hwmem type */
} hwmem_priv;
/**
* @brief private data structure for buffer
*/
typedef struct {
- hwmem *hwmem; /**< inherit hwmem */
- buffer_state state; /**< buffer state */
+ hwmem *hwmem; /**< inherit hwmem */
+ buffer_state state; /**< buffer state */
pthread_mutex_t mutex; /**< mutex for locking */
pthread_cond_t cond; /**< cond for broadcasting */
} buffer_priv;
* @brief set chunk state
* @param[in] target the target chunk
* @param[in] state the state to change
- * @return ture if successful
+ * @param[in] sync whether to wait for the chunk to become free instead of failing
+ * @return 0 if no error, otherwise a negative errno
*
- * @note it should be handled exclusively to prevent
- * race conditions for hwmem activation and memory compaction
+ * @note it should be handled exclusively to prevent race conditions
*/
-static bool
-chunk_set_state (chunk *target, chunk_state state)
+static int
+chunk_set_state (chunk *target, chunk_state state, bool sync)
{
+ int err = EINVAL;
+
if (!target)
- return false;
+ goto out;
pthread_mutex_lock (&target->mutex);
- if (target->state == CHUNK_STATE_FREE)
- target->state = state;
- pthread_mutex_unlock (&target->mutex);
- return target->state == state;
+ if (target->state == CHUNK_STATE_ACTIVATED ||
+ target->state == CHUNK_STATE_MODIFIED) {
+ if (state == CHUNK_STATE_FREE)
+ pthread_cond_broadcast (&target->cond);
+ else if (sync)
+ while (target->state != CHUNK_STATE_FREE)
+ pthread_cond_wait (&target->cond, &target->mutex);
+ else
+ goto out_unlock;
+ }
+
+ target->state = state;
+ err = 0;
+
+out_unlock:
+ pthread_mutex_unlock (&target->mutex);
+out:
+ return -err;
}
/**
/* control its state */
pthread_mutex_init (&new_chunk->mutex, NULL);
+ pthread_cond_init (&new_chunk->cond, NULL);
return new_chunk;
}
}
/**
- * @brief create a chunk with new size
+ * @brief create a chunk with new size
* @param[in] target the chunk instance
* @param[in] size size to be changed
* @return new chunk instance
+ * @note we should be very careful when resizing a used chunk
+ * because the chunk's properties (e.g., offset) can change.
+ * So, we need to ensure that nobody is using this hwmem.
*/
static chunk*
chunk_resize (chunk *target, uint64_t size)
{
- chunk_state state;
+ chunk_state state;
assert (target != NULL);
assert (size > 0);
- /**
- * just return old chunk and alloc new chunk (to minimize fragmentations)
- * but, we need to restore its chunk state after allocation
- */
+ /* save its state for new chunk */
state = target->state;
+ /* wait until it has a free state */
+ chunk_set_state (target, CHUNK_STATE_MODIFIED, true);
+
+ /* just return old chunk and alloc new chunk (to minimize fragmentations) */
chunk_return (target);
target = chunk_alloc (size);
+ /* make it free */
+ chunk_set_state (target, CHUNK_STATE_FREE, true);
+
+ /* restore the state */
target->state = state;
return target;
/* find a target chunk starting at top */
list_for_each_entry_reverse (target, mpriv.used_chunks, list) {
/* should be not busy (i.e., no processing here) and other conditions (TBD)? */
- if (chunk_set_state (target, CHUNK_STATE_COMPACT)) {
+ if (chunk_set_state (target, CHUNK_STATE_MODIFIED, false) == 0) {
chunk *next_chunk, *free_chunk = NULL;
/* find a free chunk starting at bottom */
}
/* failure */
- assert (target->state == CHUNK_STATE_COMPACT);
+ assert (target->state == CHUNK_STATE_MODIFIED);
target->state = CHUNK_STATE_FREE;
}
}
target->offset = offset;
chunk_add (&mpriv.used_chunks, target);
- assert (target->state == CHUNK_STATE_COMPACT);
+ assert (target->state == CHUNK_STATE_MODIFIED);
target->state = CHUNK_STATE_FREE;
}
}
/**
- * @brief resize the buffer's hwmem
+ * @brief resize the buffer's hwmem (or create buffer instance)
* @param[in] buffer_p the pointer of buffer instance
* @param[in] size size to be changed
* @return true if successful
{
assert (buffer_p);
+ /* if size is zero, destroy buffer */
if (size == 0) {
- if (*buffer_p)
+ if (*buffer_p)
buffer_destroy (*buffer_p);
*buffer_p = NULL;
return true;
assert (priv);
return hwmem_resize (priv->hwmem, size);
- } else
+ } else /* empty buffer */
return ((*buffer_p = buffer_create (size)) != NULL);
}
}
}
/**
- * @brief resize the allocation of hwm
+ * @brief resize the allocation of hwmem
* @param[in] hwmem the created hwmem instance
* @param[in] size the new memory size
* @return 0 if no error, otherwise a negative error value
* @param[in] size new buffer size
* @return 0 if no error, otherwise a negative error value
*
- * @note this would be used only when a model is unregistered,
- * because the buffer size is the maximum size for all models.
+ * @note its latency can be increased due to the memory compaction.
*/
-static int
+static int
mem_resize_buffers (uint64_t size)
{
int err = 0;
MEM_LOCK ();
- /* resize buffers if larger ones are required */
- if (size > mpriv.buffer_size) {
- bool first = false;
- int idx;
+ if (size != mpriv.buffer_size) {
+ int idx = 0;
- /* it's the first resize; we need to initialize buffers */
- if (mpriv.buffer_size == 0)
- first = true;
-
- for (idx = 0; idx < MEM_NUM_BUFFERS; idx++) {
+ for (; idx < MEM_NUM_BUFFERS; idx++) {
if (!buffer_resize (&mpriv.buffer[idx], size)) {
/* failed; rollback existing buffers */
for (idx = idx -1; idx >= 0; idx--)
goto out;
}
mpriv.buffer[idx]->size = size;
- if (first) {
- buffer_priv *priv = mpriv.buffer[idx]->priv;
-
- mpriv.buffer_head = 0;
- buffer_change_state (priv, BUFFER_STATE_EMPTY);
-
- /**
- * Theses buffers are always activated.
- * after buffer configuration, it can not be affected by compaction
- */
- hwmem_activate (priv->hwmem);
+ if (mpriv.buffer_size == 0) {
+ /* it's the first resize; we need to initialize buffers */
+ mpriv.buffer_idx[idx] = BUFFER_STATE_INVAL;
+ buffer_change_state (mpriv.buffer[idx]->priv, BUFFER_STATE_EMPTY);
}
}
/**
* @brief get the next I/O buffer dedicated to the requested role.
* @param[in] role the role of buffer
+ * @param[out] err set to a negative errno if an error occurs
* @return the next buffer if no error, otherwise NULL
*
* @note it needs to wait to get a valid buffer if another component is still using it.
*/
static buffer *
-mem_get_next_buffer (buffer_role role)
+mem_get_next_buffer (buffer_role role, int *err)
{
- buffer *buf = NULL;
+ buffer *buffer = NULL;
+ buffer_priv *priv;
- if (!(role >= BUFFER_ROLE_INPUT && role <= BUFFER_ROLE_OUTPUT))
+ if (!(role >= BUFFER_ROLE_INPUT && role <= BUFFER_ROLE_OUTPUT)) {
+ *err = -EINVAL;
goto out;
+ }
- buf = wait_until_available (role);
- if (buf) {
- buffer_priv *priv = buf->priv;
+ buffer = wait_until_available (role);
- hwmem_activate (priv->hwmem);
- }
+ assert (buffer);
+
+ priv = buffer->priv;
+ *err = hwmem_activate (priv->hwmem);
out:
- return buf;
+ return buffer;
}
/**
hwmem_activate (hwmem *hwmem)
{
hwmem_priv *priv;
-
- if (!hwmem || !hwmem->priv)
- RETURN_ERROR_MSG (EINVAL, "invalid hwmem; internal structure does not exist");
+
+ if (!hwmem || !hwmem->priv) {
+ logerr (MEM_TAG, "invalid hwmem; internal structure does not exist\n");
+ return -EINVAL;
+ }
priv = hwmem->priv;
assert (priv->chunk);
- if (priv->chunk->state != CHUNK_STATE_FREE ||
- !chunk_set_state (priv->chunk, CHUNK_STATE_ACTIVATED))
- RETURN_ERROR (EBUSY);
-
- return 0;
+ return chunk_set_state (priv->chunk, CHUNK_STATE_ACTIVATED, true);
}
/**
hwmem_deactivate (hwmem *hwmem)
{
hwmem_priv *priv;
-
- if (!hwmem || !hwmem->priv)
- RETURN_ERROR_MSG (EINVAL, "invalid hwmem; internal structure does not exist");
+
+ if (!hwmem || !hwmem->priv) {
+ logerr (MEM_TAG, "invalid hwmem; internal structure does not exist\n");
+ return -EINVAL;
+ }
priv = hwmem->priv;
assert (priv->chunk);
- if (priv->chunk->state != CHUNK_STATE_ACTIVATED)
- RETURN_ERROR_MSG (EINVAL, "invalid deactivation request");
-
- priv->chunk->state = CHUNK_STATE_FREE;
-
- return 0;
+ return chunk_set_state (priv->chunk, CHUNK_STATE_FREE, true);
}
/**