* @prio_node: Entry in the priority queue tree
* @prio_entry: Queue tree node entry in the list of one priority
* @priority: Queueing priority of the message
- * @off: Offset into the shmem file in the receiver's pool
- * @size: The number of bytes used in the pool
+ * @slice: Allocated slice in the receiver's pool
* @memfds: Arrays of offsets where to update the installed
* fd number
* @memfds_fp: Array memfd files queued up for this message
struct rb_node prio_node;
struct list_head prio_entry;
s64 priority;
- size_t off;
- size_t size;
+ struct kdbus_pool_slice *slice;
size_t *memfds;
struct file **memfds_fp;
unsigned int memfds_count;
static int kdbus_conn_payload_add(struct kdbus_conn *conn,
struct kdbus_conn_queue *queue,
const struct kdbus_kmsg *kmsg,
- size_t off, size_t items, size_t vec_data)
+ size_t items, size_t vec_data)
{
const struct kdbus_item *item;
int ret;
else
it->vec.offset = ~0ULL;
it->vec.size = item->vec.size;
- ret = kdbus_pool_write(conn->pool, off + items,
- it, it->size);
+ ret = kdbus_pool_slice_copy(queue->slice, items,
+ it, it->size);
if (ret < 0)
return ret;
items += KDBUS_ALIGN8(it->size);
* null-bytes to the buffer which the \0-bytes
* record would have shifted the alignment.
*/
- kdbus_pool_write_user(conn->pool,
- off + vec_data,
- (char __user *)
- "\0\0\0\0\0\0\0", pad);
+ kdbus_pool_slice_copy_user(queue->slice, vec_data,
+ (char __user *) "\0\0\0\0\0\0\0", pad);
vec_data += pad;
break;
}
/* copy kdbus_vec data from sender to receiver */
- ret = kdbus_pool_write_user(conn->pool, off + vec_data,
+ ret = kdbus_pool_slice_copy_user(queue->slice, vec_data,
KDBUS_PTR(item->vec.address), item->vec.size);
if (ret < 0)
return ret;
it->size = sizeof(tmp);
it->memfd.size = item->memfd.size;
it->memfd.fd = -1;
- ret = kdbus_pool_write(conn->pool, off + items,
- it, it->size);
+ ret = kdbus_pool_slice_copy(queue->slice, items,
+ it, it->size);
if (ret < 0)
return ret;
size_t meta = 0;
size_t vec_data;
size_t want, have;
- size_t off;
int ret = 0;
BUG_ON(!mutex_is_locked(&conn->lock));
}
/* allocate the needed space in the pool of the receiver */
- ret = kdbus_pool_alloc_range(conn->pool, want, &off);
+ ret = kdbus_pool_slice_alloc(conn->pool, &queue->slice, want);
if (ret < 0)
goto exit;
/* copy the message header */
- ret = kdbus_pool_write(conn->pool, off, &kmsg->msg, size);
+ ret = kdbus_pool_slice_copy(queue->slice, 0, &kmsg->msg, size);
if (ret < 0)
goto exit_pool_free;
/* update the size */
- ret = kdbus_pool_write(conn->pool, off, &msg_size,
- sizeof(kmsg->msg.size));
+ ret = kdbus_pool_slice_copy(queue->slice, 0, &msg_size,
+ sizeof(kmsg->msg.size));
if (ret < 0)
goto exit_pool_free;
it->type = KDBUS_ITEM_DST_NAME;
memcpy(it->str, kmsg->dst_name, dst_name_len);
- ret = kdbus_pool_write(conn->pool, off + size, it, it->size);
+ ret = kdbus_pool_slice_copy(queue->slice, size, it, it->size);
if (ret < 0)
goto exit_pool_free;
}
/* add PAYLOAD items */
if (payloads > 0) {
ret = kdbus_conn_payload_add(conn, queue, kmsg,
- off, payloads, vec_data);
+ payloads, vec_data);
if (ret < 0)
goto exit_pool_free;
}
it->type = KDBUS_ITEM_FDS;
it->size = KDBUS_ITEM_HEADER_SIZE +
(kmsg->fds_count * sizeof(int));
- ret = kdbus_pool_write(conn->pool, off + fds,
+ ret = kdbus_pool_slice_copy(queue->slice, fds,
it, KDBUS_ITEM_HEADER_SIZE);
if (ret < 0)
goto exit_pool_free;
/* append message metadata/credential items */
if (meta > 0) {
- ret = kdbus_pool_write(conn->pool, off + meta,
- kmsg->meta->data, kmsg->meta->size);
+ ret = kdbus_pool_slice_copy(queue->slice, meta,
+ kmsg->meta->data, kmsg->meta->size);
if (ret < 0)
goto exit_pool_free;
}
- /* copy some properties of the message to the queue entry */
- queue->off = off;
- queue->size = want;
queue->priority = kmsg->msg.priority;
-
*q = queue;
return 0;
exit_pool_free:
- kdbus_pool_free_range(conn->pool, off);
+ kdbus_pool_slice_free(queue->slice);
exit:
kdbus_conn_queue_cleanup(queue);
return ret;
}
/* copy the array into the message item */
- ret = kdbus_pool_write(conn->pool, queue->off + queue->fds, fds, size);
+ ret = kdbus_pool_slice_copy(queue->slice, queue->fds, fds, size);
if (ret < 0)
goto remove_unused;
* the locations of the values in the buffer.
*/
for (i = 0; i < queue->memfds_count; i++) {
- ret = kdbus_pool_write(conn->pool,
- queue->off + queue->memfds[i],
- &fds[i], sizeof(int));
+ ret = kdbus_pool_slice_copy(queue->slice, queue->memfds[i],
+ &fds[i], sizeof(int));
if (ret < 0)
goto remove_unused;
}
}
kfree(memfds);
- kdbus_pool_flush_dcache(conn->pool, queue->off, queue->size);
+ kdbus_pool_slice_flush(queue->slice);
return 0;
}
kdbus_conn_queue_remove(conn, queue);
- kdbus_pool_free_range(conn->pool, queue->off);
+ kdbus_pool_slice_free(queue->slice);
mutex_unlock(&conn->lock);
if (reply)
}
/* Give the offset back to the caller. */
- recv->offset = queue->off;
+ recv->offset = kdbus_pool_slice_offset(queue->slice);
/*
* Just return the location of the next message. Do not install
* not with peek.
*/
if (recv->flags & KDBUS_RECV_PEEK) {
- kdbus_pool_flush_dcache(conn->pool, queue->off, queue->size);
+ kdbus_pool_slice_flush(queue->slice);
goto exit_unlock;
}
if (ret == 0)
ret = kdbus_conn_msg_install(conn_src, queue);
- kmsg->msg.offset_reply = queue->off;
+ kmsg->msg.offset_reply =
+ kdbus_pool_slice_offset(queue->slice);
kdbus_conn_queue_cleanup(queue);
}
mutex_unlock(&conn_src->lock);
queue->cookie, ¬ify_list);
kdbus_conn_queue_remove(conn, queue);
- kdbus_pool_free_range(conn->pool, queue->off);
+ kdbus_pool_slice_free(queue->slice);
kdbus_conn_queue_cleanup(queue);
}
list_splice_init(&conn->reply_list, &reply_list);
if (name_id > 0 && q->dst_name_id != name_id)
continue;
- ret = kdbus_pool_move(conn_dst->pool, conn_src->pool,
- &q->off, q->size);
+ ret = kdbus_pool_move_slice(conn_dst->pool, conn_src->pool,
+ &q->slice);
if (ret < 0)
kdbus_conn_queue_cleanup(q);
else
struct kdbus_conn_info info = {};
struct kdbus_meta *meta = NULL;
char *name = NULL;
- size_t off, pos;
+ struct kdbus_pool_slice *slice;
+ size_t pos;
int ret = 0;
u64 flags;
u32 hash;
info.size += meta->size;
}
- ret = kdbus_pool_alloc_range(conn->pool, info.size, &off);
+ ret = kdbus_pool_slice_alloc(conn->pool, &slice, info.size);
if (ret < 0)
goto exit;
- ret = kdbus_pool_write(conn->pool, off, &info, sizeof(info));
+ ret = kdbus_pool_slice_copy(slice, 0, &info, sizeof(info));
if (ret < 0)
goto exit_free;
-
- pos = off + sizeof(info);
+ pos = sizeof(info);
if (conn->meta->domain == owner_conn->meta->domain) {
- ret = kdbus_pool_write(conn->pool, pos, owner_conn->meta->data,
- owner_conn->meta->size);
+ ret = kdbus_pool_slice_copy(slice, pos, owner_conn->meta->data,
+ owner_conn->meta->size);
if (ret < 0)
goto exit_free;
}
if (meta) {
- ret = kdbus_pool_write(conn->pool, pos, meta->data, meta->size);
+ ret = kdbus_pool_slice_copy(slice, pos, meta->data, meta->size);
if (ret < 0)
goto exit_free;
}
/* write back the offset */
- cmd_info->offset = off;
- kdbus_pool_flush_dcache(conn->pool, off, info.size);
+ cmd_info->offset = kdbus_pool_slice_offset(slice);
+ kdbus_pool_slice_flush(slice);
exit_free:
if (ret < 0)
- kdbus_pool_free_range(conn->pool, off);
+ kdbus_pool_slice_free(slice);
exit:
kdbus_meta_free(meta);
/* init entry, so we can unconditionally remove it */
INIT_LIST_HEAD(&conn->monitor_entry);
- ret = kdbus_pool_new(conn->name, hello->pool_size, &conn->pool);
+ ret = kdbus_pool_new(conn->name, &conn->pool, hello->pool_size);
if (ret < 0)
goto exit_free_conn;
static int kdbus_name_list_write(struct kdbus_conn *conn,
struct kdbus_conn *c,
+ struct kdbus_pool_slice *slice,
size_t *pos,
struct kdbus_name_entry *e,
bool write)
};
/* write record */
- ret = kdbus_pool_write(conn->pool, p, &n, len);
+ ret = kdbus_pool_slice_copy(slice, p, &n, len);
if (ret < 0)
return ret;
p += len;
/* append name */
if (e) {
- ret = kdbus_pool_write(conn->pool, p, e->name, nlen);
+ ret = kdbus_pool_slice_copy(slice, p, e->name, nlen);
if (ret < 0)
return ret;
p += KDBUS_ALIGN8(nlen);
}
static int kdbus_name_list_all(struct kdbus_conn *conn, u64 flags,
+ struct kdbus_pool_slice *slice,
size_t *pos, bool write)
{
struct kdbus_conn *c;
if ((flags & KDBUS_NAME_LIST_ACTIVATORS) &&
a && a != c) {
- ret = kdbus_name_list_write(conn, a, &p,
- e, write);
+ ret = kdbus_name_list_write(conn, a,
+ slice, &p, e, write);
if (ret < 0)
return ret;
if (flags & KDBUS_NAME_LIST_NAMES ||
c->flags & KDBUS_HELLO_ACTIVATOR) {
- ret = kdbus_name_list_write(conn, c, &p,
- e, write);
+ ret = kdbus_name_list_write(conn, c,
+ slice, &p, e, write);
if (ret < 0)
return ret;
list_for_each_entry(q, &c->names_queue_list,
conn_entry) {
- ret = kdbus_name_list_write(conn, c, &p,
- q->entry, write);
+ ret = kdbus_name_list_write(conn, c,
+ slice, &p, q->entry, write);
if (ret < 0)
return ret;
/* nothing added so far, just add the unique ID */
if (!added && flags & KDBUS_NAME_LIST_UNIQUE) {
- ret = kdbus_name_list_write(conn, c, &p, NULL, write);
+ ret = kdbus_name_list_write(conn, c,
+ slice, &p, NULL, write);
if (ret < 0)
return ret;
}
{
struct kdbus_policy_db *policy_db;
struct kdbus_name_list list = {};
- size_t size, off, pos;
+ struct kdbus_pool_slice *slice;
+ size_t pos;
int ret;
policy_db = conn->ep->policy_db;
if (policy_db)
mutex_lock(&policy_db->entries_lock);
- /* size of header */
- size = sizeof(struct kdbus_name_list);
-
- /* size of records */
- ret = kdbus_name_list_all(conn, cmd->flags, &size, false);
+ /* size of header + records */
+ pos = sizeof(struct kdbus_name_list);
+ ret = kdbus_name_list_all(conn, cmd->flags, NULL, &pos, false);
if (ret < 0)
goto exit_unlock;
- ret = kdbus_pool_alloc_range(conn->pool, size, &off);
+ ret = kdbus_pool_slice_alloc(conn->pool, &slice, pos);
if (ret < 0)
goto exit_unlock;
- /* copy header */
- pos = off;
- list.size = size;
-
- ret = kdbus_pool_write(conn->pool, pos,
- &list, sizeof(struct kdbus_name_list));
+ /* copy the header, specifying the overall size */
+ list.size = pos;
+ ret = kdbus_pool_slice_copy(slice, 0,
+ &list, sizeof(struct kdbus_name_list));
if (ret < 0)
goto exit_pool_free;
- pos += sizeof(struct kdbus_name_list);
- /* copy data */
- ret = kdbus_name_list_all(conn, cmd->flags, &pos, true);
+ /* copy the records */
+ pos = sizeof(struct kdbus_name_list);
+ ret = kdbus_name_list_all(conn, cmd->flags, slice, &pos, true);
if (ret < 0)
goto exit_pool_free;
- cmd->offset = off;
- kdbus_pool_flush_dcache(conn->pool, off, size);
+ cmd->offset = kdbus_pool_slice_offset(slice);
+ kdbus_pool_slice_flush(slice);
exit_pool_free:
if (ret < 0)
- kdbus_pool_free_range(conn->pool, off);
+ kdbus_pool_slice_free(slice);
exit_unlock:
if (policy_db)
mutex_unlock(&policy_db->entries_lock);
mutex_unlock(®->lock);
mutex_unlock(&conn->bus->lock);
-
return ret;
}
};
/**
- * struct kdbus_slice - allocated element in kdbus_pool
+ * struct kdbus_pool_slice - allocated element in kdbus_pool
+ * @pool: Pool this slice belongs to
* @off: Offset of slice in the shmem file
* @size: Size of slice
* @entry: Entry in "all slices" list
* tree is organized by slice size, the busy tree organized by buffer
* offset.
*/
-struct kdbus_slice {
+struct kdbus_pool_slice {
+ struct kdbus_pool *pool;
size_t off;
size_t size;
bool free;
};
-static struct kdbus_slice *kdbus_pool_slice_new(size_t off, size_t size)
+static struct kdbus_pool_slice *kdbus_pool_slice_new(struct kdbus_pool *pool,
+ size_t off, size_t size)
{
- struct kdbus_slice *slice;
+ struct kdbus_pool_slice *slice;
slice = kzalloc(sizeof(*slice), GFP_KERNEL);
if (!slice)
return NULL;
+ slice->pool = pool;
slice->off = off;
slice->size = size;
slice->free = true;
/* insert a slice into the free tree */
static void kdbus_pool_add_free_slice(struct kdbus_pool *pool,
- struct kdbus_slice *slice)
+ struct kdbus_pool_slice *slice)
{
struct rb_node **n;
struct rb_node *pn = NULL;
n = &pool->slices_free.rb_node;
while (*n) {
- struct kdbus_slice *pslice;
+ struct kdbus_pool_slice *pslice;
pn = *n;
- pslice = rb_entry(pn, struct kdbus_slice, rb_node);
+ pslice = rb_entry(pn, struct kdbus_pool_slice, rb_node);
if (slice->size < pslice->size)
n = &pn->rb_left;
else
/* insert a slice into the busy tree */
static void kdbus_pool_add_busy_slice(struct kdbus_pool *pool,
- struct kdbus_slice *slice)
+ struct kdbus_pool_slice *slice)
{
struct rb_node **n;
struct rb_node *pn = NULL;
n = &pool->slices_busy.rb_node;
while (*n) {
- struct kdbus_slice *pslice;
+ struct kdbus_pool_slice *pslice;
pn = *n;
- pslice = rb_entry(pn, struct kdbus_slice, rb_node);
+ pslice = rb_entry(pn, struct kdbus_pool_slice, rb_node);
if (slice->off < pslice->off)
n = &pn->rb_left;
else if (slice->off > pslice->off)
rb_insert_color(&slice->rb_node, &pool->slices_busy);
}
-/* find a slice by its pool offset */
-static struct kdbus_slice *kdbus_pool_find_slice(struct kdbus_pool *pool,
- size_t off)
+/**
+ * kdbus_pool_slice_find() - find a slice by its offset
+ * @pool: The receiver's pool
+ * @off: The offset of the slice in the pool
+ *
+ * Return: allocated slice, NULL on failure.
+ */
+struct kdbus_pool_slice *kdbus_pool_slice_find(struct kdbus_pool *pool,
+ size_t off)
{
struct rb_node *n;
n = pool->slices_busy.rb_node;
while (n) {
- struct kdbus_slice *s;
+ struct kdbus_pool_slice *s;
- s = rb_entry(n, struct kdbus_slice, rb_node);
+ s = rb_entry(n, struct kdbus_pool_slice, rb_node);
if (off < s->off)
n = n->rb_left;
else if (off > s->off)
return NULL;
}
-/* allocate a slice from the pool with the given size */
-static int kdbus_pool_alloc_slice(struct kdbus_pool *pool,
- size_t size, struct kdbus_slice **slice)
+/**
+ * kdbus_pool_slice_alloc() - allocate memory from a pool
+ * @pool: The receiver's pool
+ * @slice:	Slice allocated from the pool
+ * @size: The number of bytes to allocate
+ *
+ * Pass the returned slice to kdbus_pool_slice_free() to
+ * free the allocated memory.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int kdbus_pool_slice_alloc(struct kdbus_pool *pool,
+ struct kdbus_pool_slice **slice, size_t size)
{
size_t slice_size = KDBUS_ALIGN8(size);
struct rb_node *n, *found = NULL;
- struct kdbus_slice *s;
+ struct kdbus_pool_slice *s;
+ int ret = 0;
/* search a free slice with the closest matching size */
+ mutex_lock(&pool->lock);
n = pool->slices_free.rb_node;
while (n) {
- s = rb_entry(n, struct kdbus_slice, rb_node);
+ s = rb_entry(n, struct kdbus_pool_slice, rb_node);
if (slice_size < s->size) {
found = n;
n = n->rb_left;
/* no exact match, use the closest one */
if (!n)
- s = rb_entry(found, struct kdbus_slice, rb_node);
+ s = rb_entry(found, struct kdbus_pool_slice, rb_node);
/* move slice from free to the busy tree */
rb_erase(found, &pool->slices_free);
/* we got a slice larger than what we asked for? */
if (s->size > slice_size) {
- struct kdbus_slice *s_new;
+ struct kdbus_pool_slice *s_new;
/* split-off the remainder of the size to its own slice */
- s_new = kdbus_pool_slice_new(s->off + slice_size,
+ s_new = kdbus_pool_slice_new(pool, s->off + slice_size,
s->size - slice_size);
- if (!s_new)
- return -ENOMEM;
+		if (!s_new) {
+			ret = -ENOMEM;
+			goto exit_unlock;
+		}
 		list_add(&s_new->entry, &s->entry);
 		kdbus_pool_add_free_slice(pool, s_new);
 	s->free = false;
 	pool->busy += s->size;
 	*slice = s;
-	return 0;
+
+/* also unlocks on the -ENOMEM path above; lock was taken at function entry */
+exit_unlock:
+	mutex_unlock(&pool->lock);
+	return ret;
}
-/* return an allocated slice back to the pool */
-static void kdbus_pool_free_slice(struct kdbus_pool *pool,
- struct kdbus_slice *slice)
+/**
+ * kdbus_pool_slice_free() - give allocated memory back to the pool
+ * @slice:	Slice allocated from the pool
+ *
+ * The slice was returned by the call to kdbus_pool_slice_alloc(), the
+ * memory is returned to the pool.
+ */
+void kdbus_pool_slice_free(struct kdbus_pool_slice *slice)
{
+	struct kdbus_pool *pool = slice->pool;
+
+	/* use the local alias consistently for lock/unlock */
+	mutex_lock(&pool->lock);
rb_erase(&slice->rb_node, &pool->slices_busy);
pool->busy -= slice->size;
/* merge with the next free slice */
if (!list_is_last(&slice->entry, &pool->slices)) {
- struct kdbus_slice *s;
+ struct kdbus_pool_slice *s;
- s = list_entry(slice->entry.next, struct kdbus_slice, entry);
+ s = list_entry(slice->entry.next,
+ struct kdbus_pool_slice, entry);
if (s->free) {
rb_erase(&s->rb_node, &pool->slices_free);
list_del(&s->entry);
/* merge with previous free slice */
if (pool->slices.next != &slice->entry) {
- struct kdbus_slice *s;
+ struct kdbus_pool_slice *s;
-	s = list_entry(slice->entry.prev, struct kdbus_slice, entry);
+	s = list_entry(slice->entry.prev,
+		       struct kdbus_pool_slice, entry);
if (s->free) {
rb_erase(&s->rb_node, &pool->slices_free);
list_del(&slice->entry);
slice->free = true;
kdbus_pool_add_free_slice(pool, slice);
+ mutex_unlock(&slice->pool->lock);
+}
+
+/**
+ * kdbus_pool_slice_offset() - return the slice offset in the pool
+ * @slice:	The slice
+ *
+ * Return: the offset in bytes.
+ */
+size_t kdbus_pool_slice_offset(const struct kdbus_pool_slice *slice)
+{
+ return slice->off;
}
/**
* kdbus_pool_new() - create a new pool
* @name: Name of the (deleted) file which shows up in
* /proc, used for debugging
- * @size: Maximum size of the pool
* @pool: Newly allocated pool
+ * @size: Maximum size of the pool
*
* Return: 0 on success, negative errno on failure.
*/
-int kdbus_pool_new(const char *name, size_t size, struct kdbus_pool **pool)
+int kdbus_pool_new(const char *name, struct kdbus_pool **pool, size_t size)
{
- struct kdbus_slice *s;
+ struct kdbus_pool_slice *s;
struct kdbus_pool *p;
struct file *f;
int ret;
}
/* allocate first slice spanning the entire pool */
- s = kdbus_pool_slice_new(0, size);
+ s = kdbus_pool_slice_new(p, 0, size);
if (!s) {
ret = -ENOMEM;
goto exit_put_shmem;
*/
void kdbus_pool_free(struct kdbus_pool *pool)
{
- struct kdbus_slice *s, *tmp;
+ struct kdbus_pool_slice *s, *tmp;
if (!pool)
return;
return size;
}
-/**
- * kdbus_pool_alloc_range() - allocate memory from a pool
- * @pool: The receiver's pool
- * @size: The number of bytes to allocate
- * @off: The offset in bytes in the pool's file
- *
- *
- * The returned offset is used for kdbus_pool_free() to
- * free the allocated memory.
- *
- * Return: 0 on success, negative errno on failure.
- */
-int kdbus_pool_alloc_range(struct kdbus_pool *pool, size_t size, size_t *off)
-{
- struct kdbus_slice *s;
- int ret;
-
- mutex_lock(&pool->lock);
- ret = kdbus_pool_alloc_slice(pool, size, &s);
- mutex_unlock(&pool->lock);
-
- if (ret < 0)
- return ret;
-
- *off = s->off;
- return 0;
-}
-
-/**
- * kdbus_pool_free_range() - give allocated memory back to the pool
- * @pool: The receiver's pool
- * @off: Offset of allocated memory
- *
- * The offset was returned by the call to kdbus_pool_alloc_range(), the
- * memory is returned to the pool.
- *
- * Return: 0 on success, negative errno on failure.
- */
-int kdbus_pool_free_range(struct kdbus_pool *pool, size_t off)
-{
- struct kdbus_slice *slice;
- int ret = 0;
-
- if (!pool)
- return 0;
-
- mutex_lock(&pool->lock);
- if (off >= pool->size) {
- ret = -EINVAL;
- goto exit_unlock;
- }
-
- slice = kdbus_pool_find_slice(pool, off);
- if (!slice) {
- ret = -ENXIO;
- goto exit_unlock;
- }
-
- kdbus_pool_free_slice(pool, slice);
-
-exit_unlock:
- mutex_unlock(&pool->lock);
- return ret;
-}
-
/* copy data from a file to a page in the receiver's pool */
static int kdbus_pool_copy_file(struct page *p, size_t start,
struct file *f, size_t off, size_t count)
}
/* copy data to the receiver's pool */
-static size_t kdbus_pool_copy(struct file *f_dst, size_t off_dst,
+static size_t kdbus_pool_copy(const struct kdbus_pool_slice *slice, size_t off,
const void __user *data, struct file *f_src,
size_t off_src, size_t len)
{
+ struct file *f_dst = slice->pool->f;
struct address_space *mapping = f_dst->f_mapping;
const struct address_space_operations *aops = mapping->a_ops;
- unsigned long fpos = off_dst;
+ unsigned long fpos = slice->off + off;
unsigned long rem = len;
size_t pos = 0;
int ret = 0;
+ BUG_ON(off + len > slice->size);
+
while (rem > 0) {
struct page *p;
unsigned long o;
}
/**
- * kdbus_pool_write_user() - copy user memory to the pool
- * @pool: The receiver's pool
- * @off: Offset of allocated memory
- * @data: User memory
+ * kdbus_pool_slice_copy_user() - copy user memory to a slice
+ * @slice: The slice to write to
+ * @off: Offset in the slice to write to
+ * @data: User memory to copy from
* @len: Number of bytes to copy
*
- * The offset was returned by the call to kdbus_pool_alloc_range().
+ * The slice was returned by the call to kdbus_pool_slice_alloc().
* The user memory at @data will be copied to the @off in the allocated
- * memory in the pool.
+ * slice in the pool.
*
* Return: the numbers of bytes copied, negative errno on failure.
*/
-ssize_t kdbus_pool_write_user(const struct kdbus_pool *pool, size_t off,
-			      const void __user *data, size_t len)
+ssize_t kdbus_pool_slice_copy_user(const struct kdbus_pool_slice *slice,
+				   size_t off, const void __user *data,
+				   size_t len)
{
- return kdbus_pool_copy(pool->f, off, data, NULL, 0, len);
+ return kdbus_pool_copy(slice, off, data, NULL, 0, len);
}
/**
- * kdbus_pool_write() - copy kernel memory to the pool
- * @pool: The receiver's pool
- * @off: Offset of allocated memory
- * @data: User memory
+ * kdbus_pool_slice_copy() - copy kernel memory to a slice
+ * @slice: The slice to write to
+ * @off: Offset in the slice to write to
+ * @data: Kernel memory to copy from
* @len: Number of bytes to copy
*
- * The offset was returned by the call to kdbus_pool_alloc_range().
- * The user memory at @data will be copied to the @off in the allocated
- * memory in the pool.
+ * The slice was returned by the call to kdbus_pool_slice_alloc().
+ * The kernel memory at @data will be copied to the @off in the allocated
+ * slice in the pool.
*
* Return: the numbers of bytes copied, negative errno on failure.
*/
-ssize_t kdbus_pool_write(const struct kdbus_pool *pool, size_t off,
- const void *data, size_t len)
+ssize_t kdbus_pool_slice_copy(const struct kdbus_pool_slice *slice, size_t off,
+ const void *data, size_t len)
{
mm_segment_t old_fs;
ssize_t ret;
old_fs = get_fs();
set_fs(get_ds());
- ret = kdbus_pool_copy(pool->f, off, (const void __user *)data, NULL, 0, len);
+ ret = kdbus_pool_copy(slice, off,
+ (const void __user *)data, NULL, 0, len);
set_fs(old_fs);
return ret;
}
/**
- * kdbus_pool_write() - move memory from one pool into another one
+ * kdbus_pool_move_slice() - move memory from one pool into another one
* @dst_pool: The receiver's pool to copy to
* @src_pool: The receiver's pool to copy from
- * @off: Offset of allocated memory in the source pool,
- * Updated with the offset in the destination pool
- * @len: Number of bytes to copy
+ * @slice: Reference to the slice to copy from the source;
+ * updated with the newly allocated slice in the
+ * destination
*
* Move memory from one pool to another. Memory will be allocated in the
* destination pool, the memory copied over, and the free()d in source
*
* Return: 0 on success, negative errno on failure.
*/
-int kdbus_pool_move(struct kdbus_pool *dst_pool,
- struct kdbus_pool *src_pool,
- size_t *off, size_t len)
+int kdbus_pool_move_slice(struct kdbus_pool *dst_pool,
+ struct kdbus_pool *src_pool,
+ struct kdbus_pool_slice **slice)
{
mm_segment_t old_fs;
- size_t new_off;
+ struct kdbus_pool_slice *slice_new;
int ret;
- ret = kdbus_pool_alloc_range(dst_pool, len, &new_off);
+ ret = kdbus_pool_slice_alloc(dst_pool, &slice_new, (*slice)->size);
if (ret < 0)
return ret;
old_fs = get_fs();
set_fs(get_ds());
- ret = kdbus_pool_copy(dst_pool->f, new_off,
- NULL, src_pool->f, *off, len);
+ ret = kdbus_pool_copy(slice_new, 0, NULL,
+ src_pool->f, (*slice)->off, (*slice)->size);
set_fs(old_fs);
if (ret < 0)
goto exit_free;
- ret = kdbus_pool_free_range(src_pool, *off);
- if (ret < 0)
- goto exit_free;
+ kdbus_pool_slice_free(*slice);
- *off = new_off;
+ *slice = slice_new;
return 0;
exit_free:
- kdbus_pool_free_range(dst_pool, new_off);
+ kdbus_pool_slice_free(slice_new);
return ret;
}
/**
- * kdbus_pool_flush_dcache() - flush memory area in the pool
- * @pool: The receiver's pool
- * @off: Offset to the memory
- * @len: Number of bytes to flush
+ * kdbus_pool_slice_flush() - flush dcache memory area of a slice
+ * @slice: The allocated slice to flush
*
* Dcache flushes are delayed to happen only right before the receiver
* gets the new buffer area announced. The mapped buffer is always
* read-only for the receiver, and only the area of the announced message
* needs to be flushed.
*/
-void kdbus_pool_flush_dcache(const struct kdbus_pool *pool,
- size_t off, size_t len)
+void kdbus_pool_slice_flush(const struct kdbus_pool_slice *slice)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
- struct address_space *mapping = pool->f->f_mapping;
- pgoff_t first = off >> PAGE_CACHE_SHIFT;
- pgoff_t last = (off + len + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
+	struct address_space *mapping = slice->pool->f->f_mapping;
+	pgoff_t first = slice->off >> PAGE_CACHE_SHIFT;
+	/* struct kdbus_pool_slice has no 'len' member; the slice length is 'size' */
+	pgoff_t last = (slice->off + slice->size +
+			PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
pgoff_t i;
for (i = first; i < last; i++) {