This file is part of PulseAudio.

PulseAudio is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.

PulseAudio is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with PulseAudio; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
#include <pulse/xmalloc.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>

#define PA_MEMPOOL_SLOTS_MAX 128
#define PA_MEMPOOL_SLOT_SIZE (16*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 128
#define PA_MEMIMPORT_SEGMENTS_MAX 16
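/* Rough pool geometry implied by the constants above (illustrative
 * arithmetic, added for orientation): a pool carves one pa_shm segment
 * into PA_MEMPOOL_SLOTS_MAX slots of (page-size-rounded)
 * PA_MEMPOOL_SLOT_SIZE bytes each, i.e. about 128 * 16 KiB = 2 MiB.
 * The usable payload per slot is a little smaller, since a
 * struct mempool_slot header sits at the start of every slot. */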
struct pa_memimport_segment {

    /* Called whenever an imported memory block is no longer
     * needed */
    pa_memimport_release_cb_t release_cb;

    PA_LLIST_FIELDS(pa_memimport);

struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];
    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);

    /* Called whenever a client dies from which we imported a memory
     * block that we in turn exported to another client, so that we
     * can revoke the block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;

    PA_LLIST_FIELDS(pa_memexport);

    PA_LLIST_FIELDS(struct mempool_slot);
    /* the actual data follows immediately hereafter */

    unsigned n_blocks, n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    PA_LLIST_HEAD(struct mempool_slot, free_slots);

    pa_mempool_stat stat;
static void segment_detach(pa_memimport_segment *seg);

static void stat_add(pa_memblock *b) {
    AO_fetch_and_add1_release_write(&b->pool->stat.n_allocated);
    AO_fetch_and_add_release_write(&b->pool->stat.allocated_size, (AO_t) b->length);

    AO_fetch_and_add1_release_write(&b->pool->stat.n_accumulated);
    AO_fetch_and_add_release_write(&b->pool->stat.accumulated_size, (AO_t) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        AO_fetch_and_add1_release_write(&b->pool->stat.n_imported);
        AO_fetch_and_add_release_write(&b->pool->stat.imported_size, (AO_t) b->length);

    AO_fetch_and_add1_release_write(&b->pool->stat.n_allocated_by_type[b->type]);
    AO_fetch_and_add1_release_write(&b->pool->stat.n_accumulated_by_type[b->type]);
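/* Note on the counters above: the "allocated" counters describe blocks
 * that are currently alive and are decremented again in stat_remove()
 * below, while the "accumulated" counters only ever grow and therefore
 * count every block ever created from this pool. The "imported"
 * counters track only PA_MEMBLOCK_IMPORTED blocks, i.e. memory
 * received from another process via the memimport machinery below. */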
static void stat_remove(pa_memblock *b) {
    assert(AO_load_acquire_read(&b->pool->stat.n_allocated) > 0);
    assert(AO_load_acquire_read(&b->pool->stat.allocated_size) >= (AO_t) b->length);

    AO_fetch_and_sub1_release_write(&b->pool->stat.n_allocated);
    AO_fetch_and_add_release_write(&b->pool->stat.allocated_size, (AO_t) (-b->length));

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        assert(AO_load_acquire_read(&b->pool->stat.n_imported) > 0);
        assert(AO_load_acquire_read(&b->pool->stat.imported_size) >= (AO_t) b->length);

        AO_fetch_and_sub1_release_write(&b->pool->stat.n_imported);
        AO_fetch_and_add_release_write(&b->pool->stat.imported_size, (AO_t) (-b->length));

    AO_fetch_and_sub1_release_write(&b->pool->stat.n_allocated_by_type[b->type]);

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);

pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);
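/* Typical usage of the allocator above (illustrative sketch, not part
 * of the original file): callers ask the pool for a block and drop
 * their reference when done; whether the block lands in a shared pool
 * slot or in an appended malloc() allocation is transparent to them.
 *
 *     pa_mempool *pool = pa_mempool_new(1);      // 1 = shared memory
 *     pa_memblock *b = pa_memblock_new(pool, 4096);
 *     memset(b->data, 0, b->length);             // fill with audio data
 *     pa_memblock_unref(b);                      // frees or recycles the slot
 *     pa_mempool_free(pool);
 */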
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    b = pa_xmalloc(sizeof(pa_memblock) + length);
    b->type = PA_MEMBLOCK_APPENDED;

    b->data = (uint8_t*) b + sizeof(pa_memblock);

static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;

        slot = p->free_slots;
        PA_LLIST_REMOVE(struct mempool_slot, p->free_slots, slot);
    } else if (p->n_init < p->n_blocks)
        slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * p->n_init++));

        pa_log_debug("Pool full");
        AO_fetch_and_add1_release_write(&p->stat.n_pool_full);
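/* Slot layout, in brief: every slot starts with a struct mempool_slot
 * header that is only used to link free slots into p->free_slots;
 * mempool_slot_data() below returns the address right after that
 * header, which is where the payload (and, for PA_MEMBLOCK_POOL
 * blocks, the pa_memblock header itself) lives. Allocation first
 * reuses a free slot, then carves a fresh one out of the pool until
 * all p->n_blocks slots have been initialized. */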
static void* mempool_slot_data(struct mempool_slot *slot) {
    return (uint8_t*) slot + sizeof(struct mempool_slot);

static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size;

static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    if (p->block_size - sizeof(struct mempool_slot) >= sizeof(pa_memblock) + length) {

        if (!(slot = mempool_allocate_slot(p)))

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        b->data = (uint8_t*) b + sizeof(pa_memblock);

    } else if (p->block_size - sizeof(struct mempool_slot) >= length) {

        if (!(slot = mempool_allocate_slot(p)))

        b = pa_xnew(pa_memblock, 1);
        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        b->data = mempool_slot_data(slot);

        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) (p->block_size - sizeof(struct mempool_slot)));
        AO_fetch_and_add1_release_write(&p->stat.n_too_large_for_pool);
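/* The two branches above produce two different layouts: PA_MEMBLOCK_POOL
 * places the pa_memblock header and its payload together inside one pool
 * slot, while PA_MEMBLOCK_POOL_EXTERNAL keeps the header in a separate
 * pa_xnew() allocation and uses the slot for payload only, which is what
 * lets slightly larger blocks still fit. Anything larger than a slot's
 * payload is rejected here and ends up as a PA_MEMBLOCK_APPENDED block
 * via pa_memblock_new()'s fallback. */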
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, int read_only) {
    b = pa_xnew(pa_memblock, 1);
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;

pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, void (*free_cb)(void *p), int read_only) {
    b = pa_xnew(pa_memblock, 1);
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;

    b->per_type.user.free_cb = free_cb;
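/* Example of wrapping caller-owned memory (illustrative sketch, not
 * part of the original file, reusing the pool from the earlier sketch):
 * a user block hands ownership of an existing buffer to the pool
 * together with a destructor, whereas a fixed block merely borrows the
 * buffer and must be released with pa_memblock_unref_fixed() before
 * the buffer goes away.
 *
 *     void *buf = pa_xmalloc(8192);
 *     pa_memblock *b = pa_memblock_new_user(pool, buf, 8192, pa_xfree, 0);
 *     ...
 *     pa_memblock_unref(b);   // calls pa_xfree(buf) once the last ref is gone
 */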
pa_memblock* pa_memblock_ref(pa_memblock *b) {
    assert(PA_REFCNT_VALUE(b) > 0);

void pa_memblock_unref(pa_memblock *b) {
    assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)

        case PA_MEMBLOCK_USER:
            assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(b->data);

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED:

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;

            segment = b->per_type.imported.segment;

            assert(segment->import);

            pa_hashmap_remove(segment->import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            segment->import->release_cb(segment->import, b->per_type.imported.id, segment->import->userdata);

            if (--segment->n_blocks <= 0)
                segment_detach(segment);

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;

            slot = mempool_slot_by_ptr(b->pool, b->data);

            PA_LLIST_PREPEND(struct mempool_slot, b->pool->free_slots, slot);

            if (b->type == PA_MEMBLOCK_POOL_EXTERNAL)

        case PA_MEMBLOCK_TYPE_MAX:
static void memblock_make_local(pa_memblock *b) {
    AO_fetch_and_sub1_release_write(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size - sizeof(struct mempool_slot)) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {

            /* We can move it into a local pool, perfect! */

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;

            new_data = mempool_slot_data(slot);
            memcpy(new_data, b->data, b->length);

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->type = PA_MEMBLOCK_USER;
    b->per_type.user.free_cb = pa_xfree;

    b->data = pa_xmemdup(b->data, b->length);

    AO_fetch_and_add1_release_write(&b->pool->stat.n_allocated_by_type[b->type]);
    AO_fetch_and_add1_release_write(&b->pool->stat.n_accumulated_by_type[b->type]);
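/* In short: memblock_make_local() turns a block whose backing memory we
 * do not own (a FIXED block about to lose its buffer, or an IMPORTED
 * block whose exporting peer is revoking it or going away) into an
 * ordinary local block, by copying the payload either into a pool slot
 * or, failing that, into a malloc()ed buffer freed with pa_xfree(). */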
void pa_memblock_unref_fixed(pa_memblock *b) {
    assert(PA_REFCNT_VALUE(b) > 0);
    assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
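/* Typical pattern for fixed blocks (illustrative sketch, not part of
 * the original file): the caller keeps ownership of the buffer and
 * calls pa_memblock_unref_fixed() before reusing or freeing it; if
 * somebody else still holds a reference at that point, the data is
 * copied out by memblock_make_local() above.
 *
 *     static uint8_t scratch[1024];
 *     pa_memblock *b = pa_memblock_new_fixed(pool, scratch, sizeof(scratch), 1);
 *     ...                             // hand b to code that may keep a ref
 *     pa_memblock_unref_fixed(b);     // scratch may now be reused safely
 */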
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *seg;

    assert(b->type == PA_MEMBLOCK_IMPORTED);

    assert(AO_load_acquire_read(&b->pool->stat.n_imported) > 0);
    assert(AO_load_acquire_read(&b->pool->stat.imported_size) >= (AO_t) b->length);
    AO_fetch_and_sub1_release_write(&b->pool->stat.n_imported);
    AO_fetch_and_add_release_write(&b->pool->stat.imported_size, (AO_t) (-b->length));

    seg = b->per_type.imported.segment;

    pa_hashmap_remove(seg->import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));

    memblock_make_local(b);

    if (--seg->n_blocks <= 0)
pa_mempool* pa_mempool_new(int shared) {
    p = pa_xnew(pa_mempool, 1);

    ps = (size_t) sysconf(_SC_PAGESIZE);
#elif defined(PAGE_SIZE)
    ps = (size_t) PAGE_SIZE;
    ps = 4096; /* Let's hope it's like x86. */

    p->block_size = (PA_MEMPOOL_SLOT_SIZE/ps)*ps;

    if (p->block_size < ps)

    p->n_blocks = PA_MEMPOOL_SLOTS_MAX;

    assert(p->block_size > sizeof(struct mempool_slot));

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);
    PA_LLIST_HEAD_INIT(struct mempool_slot, p->free_slots);

    memset(&p->stat, 0, sizeof(p->stat));
void pa_mempool_free(pa_mempool *p) {
        pa_memimport_free(p->imports);

        pa_memexport_free(p->exports);

    if (AO_load_acquire_read(&p->stat.n_allocated) > 0)
        pa_log_warn("WARNING! Memory pool destroyed but not all memory blocks freed!");

    pa_shm_free(&p->memory);

const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {

void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;

    for (slot = p->free_slots; slot; slot = slot->next)
        pa_shm_punch(&p->memory, (uint8_t*) slot + sizeof(struct mempool_slot) - (uint8_t*) p->memory.ptr, p->block_size - sizeof(struct mempool_slot));

int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    if (!p->memory.shared)

int pa_mempool_is_shared(pa_mempool *p) {
    return !!p->memory.shared;
/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    i = pa_xnew(pa_memimport, 1);

    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);

    i->userdata = userdata;

    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
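/* Sketch of how an import side is typically wired up (illustrative,
 * not part of the original file; the callback signature is inferred
 * from the release_cb invocation in pa_memblock_unref() above):
 *
 *     static void my_release(pa_memimport *imp, uint32_t block_id, void *userdata) {
 *         // tell the exporting peer that block_id is no longer in use here
 *     }
 *
 *     pa_memimport *imp = pa_memimport_new(pool, my_release, NULL);
 *     pa_memblock *b = pa_memimport_get(imp, block_id, shm_id, offset, size); // values received from the peer
 *     ...
 *     pa_memblock_unref(b);   // eventually triggers my_release()
 */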
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);

static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);

static void segment_detach(pa_memimport_segment *seg) {
    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);
void pa_memimport_free(pa_memimport *i) {

    /* If we exported any of these blocks further, we need to revoke
     * those exports as well */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    while ((b = pa_hashmap_get_first(i->blocks)))
        memblock_replace_import(b);

    assert(pa_hashmap_size(i->segments) == 0);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memimport_segment *seg;

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))

    if (offset+size > seg->memory.size)

    b = pa_xnew(pa_memblock, 1);
    b->type = PA_MEMBLOCK_IMPORTED;

    b->data = (uint8_t*) seg->memory.ptr + offset;

    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);
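/* Import flow in brief: the peer sends a (shm_id, block_id, offset,
 * size) tuple describing a region inside one of its shared memory
 * segments. The segment is attached read-only on first use (up to
 * PA_MEMIMPORT_SEGMENTS_MAX segments), the offset and size are
 * validated against the segment size, and the resulting
 * PA_MEMBLOCK_IMPORTED block simply points into the mapped segment
 * without copying any data. */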
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id))))

    memblock_replace_import(b);
/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    if (!p->memory.shared)

    e = pa_xnew(pa_memexport, 1);

    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);

    e->userdata = userdata;

    PA_LLIST_PREPEND(pa_memexport, p->exports, e);

void pa_memexport_free(pa_memexport *e) {
    while (e->used_slots)
        pa_memexport_process_release(e, e->used_slots - e->slots);

    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    if (!e->slots[id].block)

    /* pa_log("Processing release for %u", id); */

    assert(AO_load_acquire_read(&e->pool->stat.n_exported) > 0);
    assert(AO_load_acquire_read(&e->pool->stat.exported_size) >= (AO_t) e->slots[id].block->length);

    AO_fetch_and_sub1_release_write(&e->pool->stat.n_exported);
    AO_fetch_and_add_release_write(&e->pool->stat.exported_size, (AO_t) -e->slots[id].block->length);

    pa_memblock_unref(e->slots[id].block);
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;

    for (slot = e->used_slots; slot; slot = next) {

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)

        idx = slot - e->slots;
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {

        assert(b->pool == p);
        return pa_memblock_ref(b);

    if (!(n = pa_memblock_new_pool(p, b->length)))

    memcpy(n->data, b->data, b->length);
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    struct memexport_slot *slot;

    assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))

        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX) {
        slot = &e->slots[e->n_init++];

        pa_memblock_unref(b);

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);

    *block_id = slot - e->slots;

    /* pa_log("Got block id %u", *block_id); */

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;

        assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);

        memory = &b->pool->memory;

    assert(b->data >= memory->ptr);
    assert((uint8_t*) b->data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (uint8_t*) b->data - (uint8_t*) memory->ptr;

    AO_fetch_and_add1_release_write(&e->pool->stat.n_exported);
    AO_fetch_and_add_release_write(&e->pool->stat.exported_size, (AO_t) b->length);
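/* Export handshake in brief (with an illustrative sketch that is not
 * part of the original file): the caller hands a block to
 * pa_memexport_put(), which pins a reference in one of
 * PA_MEMEXPORT_SLOTS_MAX slots and reports where the data lives inside
 * a shared memory segment. Those values are sent to the peer, which
 * maps them with pa_memimport_get(). When the peer is done, it reports
 * the block id back and the exporter drops the pinned reference via
 * pa_memexport_process_release(); revoke_cb covers the reverse case,
 * where the exporter must ask the peer to stop using a block early.
 *
 *     uint32_t block_id, shm_id;
 *     size_t offset, size;
 *     if (pa_memexport_put(exp, b, &block_id, &shm_id, &offset, &size) >= 0)
 *         send_to_peer(block_id, shm_id, offset, size);   // hypothetical transport
 */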