#define GEM_NAME "cgem" /* cma gem */
/**
- * @brief memory allocator's lock for private data
+ * @brief memory allocator's lock for private data
*/
#define MEM_LOCK() pthread_mutex_lock(&mpriv.mutex)
#define MEM_UNLOCK() pthread_mutex_unlock(&mpriv.mutex)
*/
typedef struct {
int fd; /**< file descriptor of a GEM driver */
- int dmabuf; /**< dmabuf fd handle */
+ int dmabuf; /**< dmabuf fd handle */
uint32_t handle; /**< GEM buffer object handle for memory pool */
uint64_t size; /**< memory pool size */
/**
* @brief create new chunk instance
* @param[in] offset offset
- * @param[in] size size
+ * @param[in] size size
* @return chunk instance, if no memory, NULL
*/
static chunk *
new_chunk->state = CHUNK_STATE_FREE;
/* control its state */
- pthread_mutex_init (&new_chunk->mutex, NULL);
+ pthread_mutex_init (&new_chunk->mutex, NULL);
return new_chunk;
}
list_for_each_entry (cur, mpriv.free_chunks, list) {
if (cur->size >= size) {
offset = cur->offset;
-
+
cur->offset += size;
cur->size -= size;
-
+
if (cur->size == 0) {
list_del (&mpriv.free_chunks, &cur->list);
free (cur);
}
-
+
break;
}
}
}
/**
- * @brief create a chunk with new size
+ * @brief create a chunk with new size
* @param[in] target the chunk instance
* @param[in] size size to be changed
* @return new chunk instance
static chunk*
chunk_resize (chunk *target, uint64_t size)
{
- chunk_state state;
+ chunk_state state;
assert (target != NULL);
assert (size > 0);
/**
- * just return old chunk and alloc new chunk (to minimize fragmentations)
+ * just return old chunk and alloc new chunk (to minimize fragmentation)
* but, we need to restore its chunk state after allocation
*/
- state = target->state;
-
+ state = target->state;
+
chunk_return (target);
target = chunk_alloc (size);
-
+
target->state = state;
return target;
list_for_each_entry (cur, mpriv.free_chunks, list) {
if (largest) {
- if (largest->size < cur->size)
+ if (largest->size < cur->size)
largest = cur;
- } else
+ } else
largest = cur;
}
* @brief memory compaction algorithm
* @note This memory compaction algorithm is similar to one implemented by Mel Gorman
* (refer to https://lwn.net/Articles/368869).
- *
+ *
* It first starts at TOP to find a used chunk which is not busy.
* then, starting at BOTTOM, it searchs the free chunk where the used chunk is migrated.
* After compaction, a newly created free chunk can be merged with prev or next chunks.
list_del (&mpriv.used_chunks, &target->list);
return target;
- } else
+ } else
/* case 2) this chunk is directly attached to the free chunk */
if (free_chunk->offset + free_chunk->size == target->offset) {
*offset_dst = target->offset - free_chunk->size;
/* try to perform compaction */
target = chunk_compact_try (&offset);
- /* no available free memory; stop compaction */
+ /* no available free memory; stop compaction */
if (target == NULL)
break;
/* apply compaction as we detected */
- memcpy ((void*) ((uint64_t) mpriv.virtaddr + offset),
+ memcpy ((void*) ((uint64_t) mpriv.virtaddr + offset),
(void*) ((uint64_t) mpriv.virtaddr + target->offset), target->size);
target->offset = offset;
return (chunk_get_largest() >= size);
}
-/* hwmem */
+/* hwmem */
/**
* @brief create new instance of hwmem (and use mmap() with a target offset)
assert (priv->chunk != NULL);
priv->chunk = chunk_resize (priv->chunk, size);
- if (priv->chunk)
+ if (priv->chunk)
hwmem->size = size;
}
return (hwmem->size == size);
}
-/* buffer */
+/* buffer */
/**
* @brief create new instance of I/O buffer
{
buffer * new_buffer = NULL;
hwmem * new_hwmem = hwmem_alloc (size, HWMEM_TYPE_BUFFER);
-
- if (!new_hwmem && chunk_compact (size))
+
+ if (!new_hwmem && chunk_compact (size))
/* try again after compaction */
new_hwmem = hwmem_alloc (size, HWMEM_TYPE_BUFFER);
if (new_hwmem) {
buffer_priv *priv = malloc (sizeof (buffer_priv));
-
+
priv->state = BUFFER_STATE_EMPTY;
priv->hwmem = new_hwmem;
assert (buffer->priv);
hwmem_free (((buffer_priv *) buffer->priv)->hwmem);
-
+
free (buffer->priv);
free (buffer);
}
assert (buffer_p);
if (size == 0) {
- if (*buffer_p)
+ if (*buffer_p)
buffer_destroy (*buffer_p);
*buffer_p = NULL;
return true;
assert (priv);
return hwmem_resize (priv->hwmem, size);
- } else
+ } else
return ((*buffer_p = buffer_create (size)) != NULL);
}
}
* @param[in] state the buffer state to be changed
* @return 0 if no error, otherwise a negative error value
*/
-static int
+static int
buffer_change_state (buffer_priv *priv, buffer_state state)
{
bool state_changed = false;
default:
break;
}
-
+
if (!state_changed)
RETURN_ERROR (EINVAL);
/**
* @brief initialize memory allocator and reserve its memory pool (physically contiguous).
- * @param[in] size_in the maximum memory size of memory pool
+ * @param[in] size_in the maximum memory size of memory pool
* @param[out] size_out the allocated size (i.e., zero means a failure)
* @return 0 if no error, otherwise a negative error value
*
do {
/**
- * the minimum size of allocation is MEM_BASE_SIZE
+ * the minimum size of allocation is MEM_BASE_SIZE
* note that gem_create() supports only 32bit allocation (currently)
*/
uint32_t size_in_32 = size_in;
if (mpriv.buffer[idx]) {
buffer_destroy (mpriv.buffer[idx]);
mpriv.buffer[idx] = NULL;
- mpriv.buffer_idx[idx] = 0;
}
}
mpriv.buffer_size = 0;
list_del (&mpriv.free_chunks, &chunk->list);
chunk_free (chunk);
}
-
+
munmap (mpriv.virtaddr, mpriv.size);
/* close dmabuf handle */
- close (mpriv.dmabuf);
+ close (mpriv.dmabuf);
/* release the gem buffer object handle */
gem_destroy (mpriv.fd, mpriv.handle);
* @param[in] size the requested memory size
* @param[out] hwmem_p if no error, otherwise NULL pointer
* @return 0 if no error, otherwise a negative error value
- *
+ *
* @note its latency can be increased due to the memory compaction.
*/
static int
new_hwmem = hwmem_alloc (size, HWMEM_TYPE_MODEL);
- if (!new_hwmem && chunk_compact (size))
+ if (!new_hwmem && chunk_compact (size))
/* try again after compaction */
new_hwmem = hwmem_alloc (size, HWMEM_TYPE_MODEL);
MEM_UNLOCK();
- if (new_hwmem) {
- *hwmem_p = new_hwmem;
+ if (new_hwmem) {
+ *hwmem_p = new_hwmem;
return 0;
- } else
+ } else
RETURN_ERROR (ENOMEM);
}
* @param[in] hwmem the created hwmem instance
* @param[in] size the new memory size
* @return 0 if no error, otherwise a negative error value
- *
+ *
* @note its latency can be increased due to the memory compaction.
*/
static int
mpriv.buffer_head = 0;
buffer_change_state (priv, BUFFER_STATE_EMPTY);
- /**
- * Theses buffers are always activated.
- * after buffer configuration, it can not be affected by compaction
+ /**
+ * These buffers are always activated.
+ * after buffer configuration, it cannot be affected by compaction
*/
hwmem_activate (priv->hwmem);
}
/* wait until the buffer head becomes input ready */
while (buffer_get_state (buf) != BUFFER_STATE_INPUT_READY)
MEM_WAIT();
-
+
mpriv.buffer_head = (mpriv.buffer_head + 1) % MEM_NUM_BUFFERS;
-
+
/* notify buffer head is changed */
MEM_WAKEUP();
MEM_UNLOCK();
* @param[in] role the role of buffer
* @return the target buffer
*/
-static buffer*
+static buffer*
wait_until_available (buffer_role role)
{
buffer *buf = NULL;
out:
MEM_UNLOCK ();
- return buffer;
+ return buf;
}
/**
buf = wait_until_available (role);
if (buf) {
- buffer_priv *priv = buf->priv;
+ buffer_priv *priv = buf->priv;
hwmem_activate (priv->hwmem);
}
/**
* @brief return the buffer for next requesters
* @param[in] buffer the buffer instance
- * @return 0 if no error, otherwise negative values
+ * @return 0 if no error, otherwise negative values
*
* @note this should be called after finishing some processing with this buffer.
- * then, the caller does not use this buffer anymore, but its contents
+ * then, the caller does not use this buffer anymore, but its contents
* would be valid for other users.
*/
static int
MEM_UNLOCK();
/* deactivate buffer */
- if (err == 0)
+ if (err == 0)
hwmem_deactivate (priv->hwmem);
RETURN_ERROR (err);
/**
* @brief get the memory pool size
- * @return the memory pool size
+ * @return the memory pool size
*/
static uint64_t
mem_get_size (void)
* @param[out] offset its offset
* @return 0 if no error, otherwise a negative error value
*/
-int
+int
hwmem_get_offset (hwmem *hwmem, uint64_t *offset)
{
hwmem_priv *priv;
-
+
if (!hwmem || !hwmem->priv)
RETURN_ERROR_MSG (EINVAL, "invalid hwmem; internal structure does not exist");
int hwmem_get_data (hwmem *hwmem, void **ptr)
{
hwmem_priv *priv;
-
+
if (!hwmem || !hwmem->priv)
RETURN_ERROR_MSG (EINVAL, "invalid hwmem; internal structure does not exist");
hwmem_activate (hwmem *hwmem)
{
hwmem_priv *priv;
-
+
if (!hwmem || !hwmem->priv)
RETURN_ERROR_MSG (EINVAL, "invalid hwmem; internal structure does not exist");
hwmem_deactivate (hwmem *hwmem)
{
hwmem_priv *priv;
-
+
if (!hwmem || !hwmem->priv)
RETURN_ERROR_MSG (EINVAL, "invalid hwmem; internal structure does not exist");
buffer_get_hwmem (buffer *buffer, hwmem **hwmem)
{
buffer_priv *priv;
-
+
if (!buffer || !buffer->priv)
RETURN_ERROR_MSG (EINVAL, "invalid hwmem; internal structure does not exist");
RETURN_ERROR_MSG (EINVAL, "invalid hwmem pointer");
priv = buffer->priv;
- *hwmem = priv->hwmem;
+ *hwmem = priv->hwmem;
return 0;
}
* @param[in] buffer the buffer instance
* @return buffer state
*/
-buffer_state
+buffer_state
buffer_get_state (buffer *buffer)
{
buffer_priv *priv;
-
+
if (!buffer || !buffer->priv)
return BUFFER_STATE_INVAL;
* @return mem instance
*/
mem *
-mem_get_instance (void)
+mem_get_instance (void)
{
return &mem_instance;
}