/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>
#include <pulsecore/memtrap.h>

#include "memblock.h"
/* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
 * note that the footprint is usually much smaller, since the data is
 * stored in SHM and our OS does not commit the memory before we use
 * it for the first time. */
#define PA_MEMPOOL_SLOTS_MAX 1024
#define PA_MEMPOOL_SLOT_SIZE (64*1024)
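
/* In other words: PA_MEMPOOL_SLOTS_MAX * PA_MEMPOOL_SLOT_SIZE
 * = 1024 * 64*1024 = 64*1024*1024 bytes, which is where the 64MB
 * ceiling quoted above comes from. */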
#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 160
#define PA_MEMIMPORT_SEGMENTS_MAX 16
struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;

    pa_bool_t read_only:1;
    pa_bool_t is_silence:1;

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            pa_free_cb_t free_cb;
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};
struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    pa_memtrap *trap;
    unsigned n_blocks;
};

/* A collection of multiple segments */
struct pa_memimport {
    pa_mutex *mutex;

    pa_mempool *pool;
    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};
struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client from which we imported a memory block
       which we in turn exported to another client dies and we need to
       revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};
struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};
static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
/* No lock necessary */
static void stat_add(pa_memblock *b) {
    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}
/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);

/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
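
/* Usage sketch (illustrative; not called anywhere in this file): passing
 * (size_t) -1 as the length lets the allocator pick the largest size that
 * fits a pool slot, falling back to an appended block as implemented above:
 *
 *     pa_memblock *b = pa_memblock_new(pool, (size_t) -1);
 *     ...
 *     pa_memblock_unref(b);
 */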
/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    /* If -1 is passed as length we choose the size for the caller. */
    if (length == (size_t) -1)
        length = p->block_size - PA_ALIGN(sizeof(pa_memblock));

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
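
/* Resulting memory layout of an APPENDED block (sketch):
 *
 *     +--------------------+---------+-----------------------+
 *     | pa_memblock header | padding | payload, `length` B   |
 *     +--------------------+---------+-----------------------+
 *     ^ b                            ^ b + PA_ALIGN(sizeof(pa_memblock))
 *
 * which is exactly the data pointer stored with pa_atomic_ptr_store()
 * above. */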
/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));

        if (!slot) {
            if (pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }
/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*     if (PA_UNLIKELY(pa_in_valgrind())) { */
/*         VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
/*     } */
/* #endif */

    return slot;
}

/* No lock necessary, totally redundant anyway */
static inline void* mempool_slot_data(struct mempool_slot *slot) {
    return slot;
}
/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
}
/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}
/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;
    static int mempool_disable = 0;

    if (mempool_disable == 0)
        mempool_disable = getenv("PULSE_MEMPOOL_DISABLE") ? 1 : -1;

    if (mempool_disable > 0)
        return NULL;

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = FALSE;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
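
/* To summarize the three paths above: small blocks carry their header
 * inside the slot (PA_MEMBLOCK_POOL), blocks that fill a whole slot get a
 * separately allocated header (PA_MEMBLOCK_POOL_EXTERNAL), and anything
 * larger than a slot is rejected so that pa_memblock_new() falls back to
 * an appended block. Setting the PULSE_MEMPOOL_DISABLE environment
 * variable (checked above) forces that fallback for every allocation. */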
/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(length != (size_t) -1);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, pa_bool_t read_only) {
    pa_memblock *b;

    pa_assert(length != (size_t) -1);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_bool_t pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    /* A block shared by more than one reference must also be treated
     * as read-only */
    return b->read_only || PA_REFCNT_VALUE(b) > 1;
}
/* No lock necessary */
pa_bool_t pa_memblock_is_silence(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->is_silence;
}
/* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, pa_bool_t v) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    b->is_silence = v;
}
/* No lock necessary */
pa_bool_t pa_memblock_ref_is_one(pa_memblock *b) {
    int r;

    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);

    return r == 1;
}
/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}
/* No lock necessary, in corner cases it locks on its own */
void pa_memblock_release(pa_memblock *b) {
    int r;

    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}
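
/* Illustration of the acquire/release protocol (sketch; `buf` and `n`
 * stand for caller-provided storage and a byte count no larger than the
 * block length):
 *
 *     void *d = pa_memblock_acquire(b);
 *     memcpy(buf, d, n);
 *     pa_memblock_release(b);
 *
 * Every acquire must be paired with a release, so that n_acquired can
 * drop back to zero and a thread sleeping in memblock_wait() below gets
 * its semaphore post. */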
size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}

pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}
/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}
static void memblock_free(pa_memblock *b) {
    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);
            break;

        case PA_MEMBLOCK_APPENDED:
            /* We could attach it to unused_memblocks, but that would
             * probably waste some considerable memory */
            pa_xfree(b);
            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            pa_assert_se(segment = b->per_type.imported.segment);
            pa_assert_se(import = segment->import);

            pa_mutex_lock(import->mutex);

            pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

            pa_assert(segment->n_blocks >= 1);
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            pa_bool_t call_free;

            pa_assert_se(slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data)));

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*             if (PA_UNLIKELY(pa_in_valgrind())) { */
/*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
/*             } */
/* #endif */

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}
/* No lock necessary */
void pa_memblock_unref(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}
static void memblock_wait(pa_memblock *b) {
    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}
/* No lock necessary. This function is not multiple caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;

            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = FALSE;

            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = FALSE;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}
/* No lock necessary. This function is not multiple caller safe */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}
/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}
/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *segment;
    pa_memimport *import;

    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);

    pa_assert_se(segment = b->per_type.imported.segment);
    pa_assert_se(import = segment->import);

    pa_mutex_lock(import->mutex);

    pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

    memblock_make_local(b);

    pa_assert(segment->n_blocks >= 1);
    if (-- segment->n_blocks <= 0)
        segment_detach(segment);

    pa_mutex_unlock(import->mutex);
}
pa_mempool* pa_mempool_new(pa_bool_t shared, size_t size) {
    pa_mempool *p;
    char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(TRUE, TRUE);
    p->semaphore = pa_semaphore_new(0);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    if (!size)
        p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
    else {
        p->n_blocks = (unsigned) (size / p->block_size);
        if (p->n_blocks < 2)
            p->n_blocks = 2;
    }

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
                 p->memory.shared ? "shared" : "private",
                 p->n_blocks,
                 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
                 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
                 (unsigned long) pa_mempool_block_size_max(p));

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks);

    return p;
}
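
/* Typical construction (sketch; assuming, as the size handling above
 * suggests, that a zero size selects the PA_MEMPOOL_SLOTS_MAX default):
 *
 *     pa_mempool *pool = pa_mempool_new(TRUE, 0);
 *     ...
 *     pa_mempool_free(pool);
 */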
void pa_mempool_free(pa_mempool *p) {
    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {
        unsigned i;
        pa_flist *list;

        /* Ouch, somebody is retaining a memory block reference! */

        /* Let's try to find at least one of those leaked memory blocks */

        list = pa_flist_new(p->n_blocks);

        for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
            struct mempool_slot *slot;
            pa_memblock *b, *k;

            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
            b = mempool_slot_data(slot);

            while ((k = pa_flist_pop(p->free_slots))) {
                while (pa_flist_push(list, k) < 0)
                    ;
                if (b == k)
                    break;
            }

            if (!k)
                pa_log("REF: Leaked memory block %p", b);

            while ((k = pa_flist_pop(list)))
                while (pa_flist_push(p->free_slots, k) < 0)
                    ;
        }

        pa_flist_free(list, NULL);

        pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}
/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    return &p->stat;
}

/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    return p->block_size - PA_ALIGN(sizeof(pa_memblock));
}
/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    list = pa_flist_new(p->n_blocks);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);

        while (pa_flist_push(p->free_slots, slot) < 0)
            ;
    }

    pa_flist_free(list, NULL);
}
/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}

/* No lock necessary */
pa_bool_t pa_mempool_is_shared(pa_mempool *p) {
    return !!p->memory.shared;
}
/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(TRUE, TRUE);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);

/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew0(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(seg->memory.id), seg);
    return seg;
}
/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);

    if (seg->trap)
        pa_memtrap_remove(seg->trap);

    pa_xfree(seg);
}
/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_mutex_lock(i->mutex);

    if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
        pa_memblock_ref(b);
        goto finish;
    }

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = TRUE;
    b->is_silence = FALSE;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);
    seg->n_blocks++;

    stat_add(b);

finish:
    pa_mutex_unlock(i->mutex);

    return b;
}
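
/* Import-side flow (sketch): block_id, shm_id, offset and size stand for
 * values received from the peer's pa_memexport_put(), and release_cb for
 * a caller-supplied callback:
 *
 *     pa_memimport *imp = pa_memimport_new(pool, release_cb, NULL);
 *     pa_memblock *b = pa_memimport_get(imp, block_id, shm_id, offset, size);
 */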
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}
/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(TRUE, TRUE);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);

    return e;
}
void pa_memexport_free(pa_memexport *e) {
    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

    /* pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = (uint32_t) (slot - e->slots);
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}
/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    pa_shm *memory;
    struct memexport_slot *slot;
    void *data;

    pa_assert(block_id);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = (uint32_t) (slot - e->slots);

    pa_mutex_unlock(e->mutex);
    /* pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);

    return 0;
}
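
/* Export-side flow (sketch; send_to_peer() is a hypothetical transport
 * function, standing in for whatever protocol carries the four values to
 * the importing side):
 *
 *     uint32_t block_id, shm_id;
 *     size_t offset, size;
 *
 *     if (pa_memexport_put(exp, b, &block_id, &shm_id, &offset, &size) >= 0)
 *         send_to_peer(block_id, shm_id, offset, size);
 */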