4 This file is part of PulseAudio.
6 Copyright 2004-2006 Lennart Poettering
7 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
9 PulseAudio is free software; you can redistribute it and/or modify
10 it under the terms of the GNU Lesser General Public License as
11 published by the Free Software Foundation; either version 2.1 of the
12 License, or (at your option) any later version.
14 PulseAudio is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 Lesser General Public License for more details.
19 You should have received a copy of the GNU Lesser General Public
20 License along with PulseAudio; if not, write to the Free Software
21 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
35 #include <pulse/xmalloc.h>
37 #include <pulsecore/shm.h>
38 #include <pulsecore/log.h>
39 #include <pulsecore/hashmap.h>
43 #define PA_MEMPOOL_SLOTS_MAX 128
44 #define PA_MEMPOOL_SLOT_SIZE (16*1024)
46 #define PA_MEMEXPORT_SLOTS_MAX 128
48 #define PA_MEMIMPORT_SLOTS_MAX 128
49 #define PA_MEMIMPORT_SEGMENTS_MAX 16
/* Fragment: struct declarations for the SHM-import side.
 * NOTE(review): interior lines are missing from this view (see the gaps
 * in the embedded line numbers); the fields below apparently belong to
 * two different structs (pa_memimport_segment and pa_memimport). */
51 struct pa_memimport_segment {
62 /* Called whenever an imported memory block is no longer
64 pa_memimport_release_cb_t release_cb;
/* Linked-list linkage so a pool can chain all of its imports. */
67 PA_LLIST_FIELDS(pa_memimport);
/* Fragment: structs for the SHM-export side. A memexport_slot is one
 * entry of the fixed slots[] array below; free/used slots are kept on
 * two intrusive lists. NOTE(review): interior lines are missing from
 * this view. */
70 struct memexport_slot {
71 PA_LLIST_FIELDS(struct memexport_slot);
/* Fixed-capacity slot table (PA_MEMEXPORT_SLOTS_MAX entries); slot
 * indices double as the export block ids handed to the peer. */
78 struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];
79 PA_LLIST_HEAD(struct memexport_slot, free_slots);
80 PA_LLIST_HEAD(struct memexport_slot, used_slots);
83 /* Called whenever a client from which we imported a memory block
84 which we in turn exported to another client dies and we need to
85 revoke the memory block accordingly */
86 pa_memexport_revoke_cb_t revoke_cb;
/* Linkage in the owning pool's exports list. */
89 PA_LLIST_FIELDS(pa_memexport);
/* Fragment: fields of struct mempool_slot and struct pa_mempool.
 * NOTE(review): the struct headers and several fields are missing from
 * this view. A mempool_slot is a header placed at the start of each
 * fixed-size block inside the pool's SHM segment; the payload follows
 * the header in memory. */
93 PA_LLIST_FIELDS(struct mempool_slot);
94 /* the actual data follows immediately hereafter */
/* n_blocks: total slot capacity; n_init: how many slots have ever been
 * handed out (slots are carved from the segment lazily, in order). */
100 unsigned n_blocks, n_init;
102 PA_LLIST_HEAD(pa_memimport, imports);
103 PA_LLIST_HEAD(pa_memexport, exports);
105 /* A list of free slots that may be reused */
106 PA_LLIST_HEAD(struct mempool_slot, free_slots);
/* Atomic counters updated by stat_add()/stat_remove() below. */
108 pa_mempool_stat stat;
/* Forward declaration: segment_detach() is needed by
 * pa_memblock_unref() before its definition appears. */
111 static void segment_detach(pa_memimport_segment *seg);
/* Account a newly created memblock in its pool's statistics: bump the
 * current-allocation and lifetime-accumulation counters, the imported
 * counters for PA_MEMBLOCK_IMPORTED blocks, and the per-type counters.
 * All updates use pa_atomic_* ops. NOTE(review): fragment — some
 * original lines are missing between the numbered lines below. */
113 static void stat_add(pa_memblock*b) {
117 pa_atomic_inc(&b->pool->stat.n_allocated);
118 pa_atomic_add(&b->pool->stat.allocated_size, b->length);
120 pa_atomic_inc(&b->pool->stat.n_accumulated);
121 pa_atomic_add(&b->pool->stat.accumulated_size, b->length);
123 if (b->type == PA_MEMBLOCK_IMPORTED) {
124 pa_atomic_inc(&b->pool->stat.n_imported);
125 pa_atomic_add(&b->pool->stat.imported_size, b->length);
128 pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
129 pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
/* Reverse of stat_add(): subtract a dying memblock from the pool's
 * current counters (the *_accumulated counters are lifetime totals and
 * are deliberately not decremented — only n_allocated_by_type is).
 * NOTE(review): fragment — some original lines are missing. */
132 static void stat_remove(pa_memblock *b) {
/* Sanity: counters must not underflow. */
136 assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
137 assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);
139 pa_atomic_dec(&b->pool->stat.n_allocated);
140 pa_atomic_sub(&b->pool->stat.allocated_size, b->length);
142 if (b->type == PA_MEMBLOCK_IMPORTED) {
143 assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
144 assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
146 pa_atomic_dec(&b->pool->stat.n_imported);
147 pa_atomic_sub(&b->pool->stat.imported_size, b->length);
150 pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
/* Forward declaration for the fallback allocator used below. */
153 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
/* Public allocator: try the (shareable) pool first and fall back to a
 * plain heap allocation with the data appended to the header when the
 * pool cannot serve the request. NOTE(review): fragment — asserts and
 * the return are missing from this view. */
155 pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
161 if (!(b = pa_memblock_new_pool(p, length)))
162 b = memblock_new_appended(p, length);
/* Heap fallback: one xmalloc holds the pa_memblock header immediately
 * followed by the payload; b->data points just past the header.
 * NOTE(review): fragment — remaining field initialization and the
 * return are missing from this view. */
167 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
173 b = pa_xmalloc(sizeof(pa_memblock) + length);
174 b->type = PA_MEMBLOCK_APPENDED;
178 b->data = (uint8_t*) b + sizeof(pa_memblock);
/* Grab a slot from the pool: reuse one from the free list if any,
 * otherwise carve the next never-used slot out of the SHM segment
 * (n_init is the high-water mark). When the pool is exhausted, log and
 * count a pool-full event — presumably returning NULL, but the failure
 * return is outside this view. NOTE(review): fragment — the `if`
 * around the free-list branch and the closing code are missing. */
185 static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
186 struct mempool_slot *slot;
190 slot = p->free_slots;
191 PA_LLIST_REMOVE(struct mempool_slot, p->free_slots, slot);
192 } else if (p->n_init < p->n_blocks)
193 slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * p->n_init++));
195 pa_log_debug("Pool full");
196 pa_atomic_inc(&p->stat.n_pool_full);
/* Payload address of a slot: the usable data area starts right after
 * the mempool_slot header. */
203 static void* mempool_slot_data(struct mempool_slot *slot) {
206 return (uint8_t*) slot + sizeof(struct mempool_slot);
/* Map a pointer inside the pool's SHM segment back to its slot index
 * by simple offset division; asserts that ptr lies within the segment. */
209 static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
211 assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
212 assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);
214 return ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size;
/* Map a pointer inside the pool back to its slot header: round the
 * index down to the slot boundary. The (unsigned)-1 check guards the
 * error value, though mempool_slot_idx() as visible here cannot return
 * it — NOTE(review): possibly defensive or a leftover; confirm against
 * the full file. The failure branch is outside this view. */
217 static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
220 if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
223 return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
/* Allocate a block backed by the pool's SHM segment. Two layouts:
 *  - PA_MEMBLOCK_POOL: header + payload both fit in one slot, so the
 *    pa_memblock lives at the start of the slot;
 *  - PA_MEMBLOCK_POOL_EXTERNAL: only the payload fits in the slot, so
 *    the header is a separate heap allocation.
 * Oversized requests are logged/counted. NOTE(review): fragment — the
 * failure returns and final field initialization are missing from this
 * view. */
226 pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
227 pa_memblock *b = NULL;
228 struct mempool_slot *slot;
233 if (p->block_size - sizeof(struct mempool_slot) >= sizeof(pa_memblock) + length) {
235 if (!(slot = mempool_allocate_slot(p)))
238 b = mempool_slot_data(slot);
239 b->type = PA_MEMBLOCK_POOL;
240 b->data = (uint8_t*) b + sizeof(pa_memblock);
242 } else if (p->block_size - sizeof(struct mempool_slot) >= length) {
244 if (!(slot = mempool_allocate_slot(p)))
247 b = pa_xnew(pa_memblock, 1);
248 b->type = PA_MEMBLOCK_POOL_EXTERNAL;
249 b->data = mempool_slot_data(slot);
/* NOTE(review): %u with size_t arguments is a printf-format mismatch
 * (UB on LP64); upstream later casts to unsigned long with %lu. Left
 * untouched here because this span is a fragment. */
251 pa_log_debug("Memory block too large for pool: %u > %u", length, p->block_size - sizeof(struct mempool_slot));
252 pa_atomic_inc(&p->stat.n_too_large_for_pool);
/* Wrap caller-owned memory `d` in a PA_MEMBLOCK_FIXED block; the
 * caller keeps ownership of the data (see pa_memblock_unref_fixed()).
 * NOTE(review): fragment — asserts, remaining field initialization and
 * the return are missing from this view. */
265 pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, int read_only) {
272 b = pa_xnew(pa_memblock, 1);
273 b->type = PA_MEMBLOCK_FIXED;
274 b->read_only = read_only;
/* Wrap caller-provided memory with a destructor: free_cb(d) is invoked
 * when the block's refcount drops to zero (see pa_memblock_unref()).
 * NOTE(review): fragment — asserts, remaining field initialization and
 * the return are missing from this view. */
284 pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, void (*free_cb)(void *p), int read_only) {
292 b = pa_xnew(pa_memblock, 1);
293 b->type = PA_MEMBLOCK_USER;
294 b->read_only = read_only;
298 b->per_type.user.free_cb = free_cb;
/* Take a reference on a live block (refcount must already be > 0).
 * NOTE(review): fragment — the increment and return are missing from
 * this view. */
305 pa_memblock* pa_memblock_ref(pa_memblock*b) {
307 assert(PA_REFCNT_VALUE(b) > 0);
/* Drop a reference; when it reaches zero, dispose of the block
 * according to its type:
 *  - USER: invoke the caller's free_cb on the data;
 *  - IMPORTED: unregister from the import's block table, notify the
 *    peer via release_cb, and detach the SHM segment once its last
 *    block is gone;
 *  - POOL / POOL_EXTERNAL: return the slot to the pool's free list
 *    (and free the external header for POOL_EXTERNAL).
 * NOTE(review): fragment — the switch statement itself, stat_remove
 * call, break/free lines and closing braces are missing from this
 * view; comments are hedged accordingly. */
313 void pa_memblock_unref(pa_memblock*b) {
315 assert(PA_REFCNT_VALUE(b) > 0);
/* Still referenced elsewhere: nothing to do (early return presumed,
 * outside this view). */
317 if (PA_REFCNT_DEC(b) > 0)
323 case PA_MEMBLOCK_USER :
324 assert(b->per_type.user.free_cb);
325 b->per_type.user.free_cb(b->data);
325 /* fallthrough-or-break outside this view */
329 case PA_MEMBLOCK_FIXED:
330 case PA_MEMBLOCK_APPENDED :
334 case PA_MEMBLOCK_IMPORTED : {
335 pa_memimport_segment *segment;
337 segment = b->per_type.imported.segment;
339 assert(segment->import);
341 pa_hashmap_remove(segment->import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
/* Tell the exporting peer we no longer need this block id. */
342 segment->import->release_cb(segment->import, b->per_type.imported.id, segment->import->userdata);
/* Last imported block of this segment: unmap the SHM segment. */
344 if (-- segment->n_blocks <= 0)
345 segment_detach(segment);
351 case PA_MEMBLOCK_POOL_EXTERNAL:
352 case PA_MEMBLOCK_POOL: {
353 struct mempool_slot *slot;
355 slot = mempool_slot_by_ptr(b->pool, b->data);
358 PA_LLIST_PREPEND(struct mempool_slot, b->pool->free_slots, slot);
/* POOL_EXTERNAL keeps its header on the heap — freed separately
 * (free call outside this view). */
360 if (b->type == PA_MEMBLOCK_POOL_EXTERNAL)
366 case PA_MEMBLOCK_TYPE_MAX:
/* Turn a block whose storage we do not own (imported or fixed) into a
 * locally-owned copy: preferably by copying into a fresh pool slot,
 * otherwise by pa_xmemdup onto the heap as a USER block freed with
 * pa_xfree. Per-type stat counters are moved to the new type.
 * NOTE(review): fragment — early returns and closing braces are
 * missing from this view. */
372 static void memblock_make_local(pa_memblock *b) {
/* The block is about to change type; remove it from its old bucket. */
375 pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
377 if (b->length <= b->pool->block_size - sizeof(struct mempool_slot)) {
378 struct mempool_slot *slot;
380 if ((slot = mempool_allocate_slot(b->pool))) {
382 /* We can move it into a local pool, perfect! */
384 b->type = PA_MEMBLOCK_POOL_EXTERNAL;
387 new_data = mempool_slot_data(slot);
388 memcpy(new_data, b->data, b->length);
394 /* Humm, not enough space in the pool, so lets allocate the memory with malloc() */
395 b->type = PA_MEMBLOCK_USER;
396 b->per_type.user.free_cb = pa_xfree;
398 b->data = pa_xmemdup(b->data, b->length);
/* Re-account under the new type. */
401 pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
402 pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
/* Unref a FIXED block whose underlying memory the caller is about to
 * reclaim: if anyone else still holds a reference, first copy the data
 * into pool/heap storage we own (memblock_make_local), then drop our
 * reference normally. */
405 void pa_memblock_unref_fixed(pa_memblock *b) {
407 assert(PA_REFCNT_VALUE(b) > 0);
408 assert(b->type == PA_MEMBLOCK_FIXED);
410 if (PA_REFCNT_VALUE(b) > 1)
411 memblock_make_local(b);
413 pa_memblock_unref(b);
/* Detach an IMPORTED block from its SHM segment in place — used when
 * the exporting peer revokes the block or the import dies: fix up the
 * imported-stats counters, remove the block from the import's table,
 * copy the data into local storage, and drop the segment once its last
 * block is gone. NOTE(review): fragment — the hashmap_remove call is
 * split across missing lines and the segment_detach call at the end is
 * outside this view. */
416 static void memblock_replace_import(pa_memblock *b) {
417 pa_memimport_segment *seg;
420 assert(b->type == PA_MEMBLOCK_IMPORTED);
422 assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
423 assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
424 pa_atomic_dec(&b->pool->stat.n_imported);
425 pa_atomic_sub(&b->pool->stat.imported_size, b->length);
427 seg = b->per_type.imported.segment;
433 PA_UINT32_TO_PTR(b->per_type.imported.id));
435 memblock_make_local(b);
437 if (-- seg->n_blocks <= 0)
/* Create a memory pool: slot size is PA_MEMPOOL_SLOT_SIZE rounded down
 * to a whole number of pages (but at least one page), backing store is
 * a pa_shm segment (shared or private per the `shared` flag), and the
 * import/export/free-slot lists plus stats start empty. NOTE(review):
 * fragment — the #ifdef around sysconf, the rounding-up branch, the
 * error path and the return are missing from this view. */
441 pa_mempool* pa_mempool_new(int shared) {
445 p = pa_xnew(pa_mempool, 1);
/* Page size: sysconf where available, PAGE_SIZE macro, else assume 4k. */
448 ps = (size_t) sysconf(_SC_PAGESIZE);
449 #elif defined(PAGE_SIZE)
450 ps = (size_t) PAGE_SIZE;
452 ps = 4096; /* Let's hope it's like x86. */
455 p->block_size = (PA_MEMPOOL_SLOT_SIZE/ps)*ps;
457 if (p->block_size < ps)
460 p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
462 assert(p->block_size > sizeof(struct mempool_slot));
464 if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
471 PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
472 PA_LLIST_HEAD_INIT(pa_memexport, p->exports);
473 PA_LLIST_HEAD_INIT(struct mempool_slot, p->free_slots);
475 memset(&p->stat, 0, sizeof(p->stat));
/* Destroy a pool: tear down any remaining imports and exports first
 * (their free routines unlink themselves from the pool's lists), warn
 * if blocks are still outstanding, then release the SHM segment.
 * NOTE(review): fragment — the while-loops presumably wrapping the two
 * free calls, and the final pa_xfree(p), are outside this view. */
480 void pa_mempool_free(pa_mempool *p) {
484 pa_memimport_free(p->imports);
487 pa_memexport_free(p->exports);
489 if (pa_atomic_load(&p->stat.n_allocated) > 0)
490 pa_log_warn("WARNING! Memory pool destroyed but not all memory blocks freed!");
492 pa_shm_free(&p->memory);
/* Read-only accessor for the pool's statistics counters.
 * NOTE(review): fragment — body missing from this view. */
496 const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
/* Return the physical pages of all free slots to the OS: for each slot
 * on the free list, punch a hole in the SHM segment covering the
 * slot's payload area (the slot header itself is kept). */
502 void pa_mempool_vacuum(pa_mempool *p) {
503 struct mempool_slot *slot;
507 for (slot = p->free_slots; slot; slot = slot->next)
508 pa_shm_punch(&p->memory, (uint8_t*) slot + sizeof(struct mempool_slot) - (uint8_t*) p->memory.ptr, p->block_size - sizeof(struct mempool_slot));
/* Report the pool's SHM segment id via *id; fails (presumably -1,
 * outside this view) when the pool is private rather than shared. */
511 int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
514 if (!p->memory.shared)
/* Boolean query: is this pool backed by a shared (exportable) SHM
 * segment? Returns 0 or 1. */
522 int pa_mempool_is_shared(pa_mempool *p) {
525 return !!p->memory.shared;
528 /* For receiving blocks from other nodes */
/* Create an import object for receiving blocks from a peer: empty
 * segment and block hashmaps (keyed by raw uint32 ids, hence the
 * NULL hash/compare functions) and linkage into the pool's import
 * list. NOTE(review): fragment — assignment of cb/pool and the return
 * are missing from this view. */
529 pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
535 i = pa_xnew(pa_memimport, 1);
537 i->segments = pa_hashmap_new(NULL, NULL);
538 i->blocks = pa_hashmap_new(NULL, NULL);
540 i->userdata = userdata;
542 PA_LLIST_PREPEND(pa_memimport, p->imports, i);
/* Forward declaration: needed by pa_memimport_free() below. */
546 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
/* Attach (map read-only) the peer's SHM segment identified by shm_id
 * and register it in the import's segment table; capped at
 * PA_MEMIMPORT_SEGMENTS_MAX segments. NOTE(review): fragment — the
 * NULL returns, field init and final return are missing from this
 * view. */
548 static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
549 pa_memimport_segment* seg;
551 if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
554 seg = pa_xnew(pa_memimport_segment, 1);
556 if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
564 pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
/* Unmap a segment: remove it from its import's table (keyed by the
 * segment's SHM id) and free the mapping. NOTE(review): fragment — the
 * final pa_xfree(seg) is outside this view. */
568 static void segment_detach(pa_memimport_segment *seg) {
571 pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
572 pa_shm_free(&seg->memory);
/* Destroy an import object: revoke any of its blocks that were
 * re-exported through this pool's exports, convert all remaining
 * imported blocks to local copies (which detaches their segments as a
 * side effect — hence the assert that no segments remain), then free
 * the tables and unlink from the pool. NOTE(review): fragment — the
 * final pa_xfree(i) is outside this view. */
576 void pa_memimport_free(pa_memimport *i) {
582 /* If we've exported this block further we need to revoke that export */
583 for (e = i->pool->exports; e; e = e->next)
584 memexport_revoke_blocks(e, i);
586 while ((b = pa_hashmap_get_first(i->blocks)))
587 memblock_replace_import(b);
589 assert(pa_hashmap_size(i->segments) == 0);
591 pa_hashmap_free(i->blocks, NULL, NULL);
592 pa_hashmap_free(i->segments, NULL, NULL);
594 PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);
/* Materialize a peer's exported block: look up (or attach) the SHM
 * segment, bounds-check offset+size against the segment, and build an
 * IMPORTED pa_memblock pointing directly into the mapped memory,
 * registered in the blocks table under block_id. Capped at
 * PA_MEMIMPORT_SLOTS_MAX outstanding blocks. NOTE(review): fragment —
 * the goto/NULL failure paths, refcount init, n_blocks increment and
 * return are missing from this view. offset+size could overflow
 * size_t before the bounds check — TODO confirm against the full
 * file. */
598 pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
600 pa_memimport_segment *seg;
604 if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
607 if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
608 if (!(seg = segment_attach(i, shm_id)))
611 if (offset+size > seg->memory.size)
614 b = pa_xnew(pa_memblock, 1);
615 b->type = PA_MEMBLOCK_IMPORTED;
619 b->data = (uint8_t*) seg->memory.ptr + offset;
621 b->per_type.imported.id = block_id;
622 b->per_type.imported.segment = seg;
624 pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);
/* Handle a revoke message from the exporting peer: find the imported
 * block by id and replace its storage with a local copy so the peer
 * can reclaim its memory. Fails (presumably -1, outside this view)
 * when the id is unknown. */
633 int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
637 if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id))))
640 memblock_replace_import(b);
644 /* For sending blocks to other nodes */
/* Create an export object: requires a shared pool (private memory
 * cannot be handed to another process). Slot lists start empty; the
 * object is linked into the pool's export list. NOTE(review):
 * fragment — the NULL failure return, cb/pool/n_init assignment and
 * final return are missing from this view. */
645 pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
651 if (!p->memory.shared)
654 e = pa_xnew(pa_memexport, 1);
656 PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
657 PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
660 e->userdata = userdata;
662 PA_LLIST_PREPEND(pa_memexport, p->exports, e);
/* Destroy an export object: release every still-exported slot (the
 * pointer subtraction recovers the slot's index/id), then unlink from
 * the pool. NOTE(review): fragment — the final pa_xfree(e) is outside
 * this view. */
666 void pa_memexport_free(pa_memexport *e) {
669 while (e->used_slots)
670 pa_memexport_process_release(e, e->used_slots - e->slots);
672 PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
/* Handle a release message from the importing peer: the peer is done
 * with exported slot `id`, so fix up export stats, drop our reference
 * on the block, and move the slot from the used to the free list.
 * NOTE(review): fragment — the id range check and failure/success
 * returns are missing from this view. */
676 int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
682 if (!e->slots[id].block)
685 /* pa_log("Processing release for %u", id); */
687 assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
688 assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) e->slots[id].block->length);
690 pa_atomic_dec(&e->pool->stat.n_exported);
691 pa_atomic_sub(&e->pool->stat.exported_size, e->slots[id].block->length);
693 pa_memblock_unref(e->slots[id].block);
694 e->slots[id].block = NULL;
696 PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
697 PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);
/* Revoke every exported slot whose block originally came in through
 * import `i` (used when that import dies): notify the downstream peer
 * via revoke_cb, then release the slot. `next` is captured before the
 * body because process_release unlinks the slot from used_slots.
 * NOTE(review): fragment — the `next = slot->next` line and the
 * `continue` of the filter are missing from this view. */
702 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
703 struct memexport_slot *slot, *next;
707 for (slot = e->used_slots; slot; slot = next) {
/* Skip blocks that were not imported, or imported from a different
 * peer. */
711 if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
712 slot->block->per_type.imported.segment->import != i)
715 idx = slot - e->slots;
716 e->revoke_cb(e, idx, e->userdata);
717 pa_memexport_process_release(e, idx);
/* Get a version of `b` that lives in shareable memory: blocks already
 * backed by SHM (imported or pool-based) are simply ref'd; anything
 * else is copied into a fresh pool block. Returns NULL (presumed,
 * outside this view) when the pool cannot allocate. NOTE(review):
 * fragment — the failure return and the return of the copy are missing
 * from this view. */
721 static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
727 if (b->type == PA_MEMBLOCK_IMPORTED ||
728 b->type == PA_MEMBLOCK_POOL ||
729 b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
730 assert(b->pool == p);
731 return pa_memblock_ref(b);
734 if (!(n = pa_memblock_new_pool(p, b->length)))
737 memcpy(n->data, b->data, b->length);
741 int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t * size) {
743 struct memexport_slot *slot;
751 assert(b->pool == e->pool);
753 if (!(b = memblock_shared_copy(e->pool, b)))
757 slot = e->free_slots;
758 PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
759 } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX) {
760 slot = &e->slots[e->n_init++];
762 pa_memblock_unref(b);
766 PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
768 *block_id = slot - e->slots;
770 /* pa_log("Got block id %u", *block_id); */
772 if (b->type == PA_MEMBLOCK_IMPORTED) {
773 assert(b->per_type.imported.segment);
774 memory = &b->per_type.imported.segment->memory;
776 assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
778 memory = &b->pool->memory;
781 assert(b->data >= memory->ptr);
782 assert((uint8_t*) b->data + b->length <= (uint8_t*) memory->ptr + memory->size);
784 *shm_id = memory->id;
785 *offset = (uint8_t*) b->data - (uint8_t*) memory->ptr;
788 pa_atomic_inc(&e->pool->stat.n_exported);
789 pa_atomic_add(&e->pool->stat.exported_size, b->length);