/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/
#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif
#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/mutex.h>
#include <pulsecore/macro.h>
#include <pulsecore/refcnt.h>
#include <pulsecore/llist.h>
#include <pulsecore/flist.h>
#include <pulsecore/core-util.h>
#include <pulsecore/memtrap.h>
/* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
 * note that the footprint is usually much smaller, since the data is
 * stored in SHM and our OS does not commit the memory before we use
 * it for the first time. */
#define PA_MEMPOOL_SLOTS_MAX 1024
#define PA_MEMPOOL_SLOT_SIZE (64*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 160
#define PA_MEMIMPORT_SEGMENTS_MAX 16
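
/* Sizing sanity check (illustrative): PA_MEMPOOL_SLOTS_MAX *
 * PA_MEMPOOL_SLOT_SIZE = 1024 * 64 KiB = 64 MiB, which is the 64MB
 * ceiling mentioned above. The usable payload per slot is a bit less
 * than 64 KiB, because pool blocks keep their pa_memblock header in
 * the slot as well; see pa_mempool_block_size_max() below. */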
struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */
    pa_mempool *pool;

    pa_memblock_type_t type;

    bool read_only:1;
    bool is_silence:1;

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            pa_free_cb_t free_cb;
        } user;
        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};
struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    pa_memtrap *trap;
    unsigned n_blocks;
};
/* A collection of multiple segments */
struct pa_memimport {
    pa_mutex *mutex;
    pa_mempool *pool;

    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};
struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client from which we imported a memory block
       which we in turn exported to another client dies and we need to
       revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};

struct pa_mempool {
    pa_semaphore *semaphore;
    pa_mutex *mutex;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};
static void segment_detach(pa_memimport_segment *seg);

PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
/* No lock necessary */
static void stat_add(pa_memblock*b) {
    pa_assert(b);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}

/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    pa_assert(b);

    pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}
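
/* Example (illustrative; uses only APIs defined in this file, and
 * "pool" is assumed to come from pa_mempool_new()): the typical
 * lifecycle of a block allocated with pa_memblock_new():
 *
 *     pa_memblock *b = pa_memblock_new(pool, 4096);
 *     void *ptr = pa_memblock_acquire(b);
 *     memset(ptr, 0, pa_memblock_get_length(b));
 *     pa_memblock_release(b);
 *     pa_memblock_unref(b);
 *
 * acquire/release bracket every access to the payload, so that
 * memblock_wait() below can detect when no thread is using a block. */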
/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    pa_assert(p);

    /* If -1 is passed as length we choose the size for the caller. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = b->is_silence = false;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;

    pa_assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));

        if (!slot) {
            if (pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*     if (PA_UNLIKELY(pa_in_valgrind())) { */
/*         VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
/*     } */
/* #endif */

    return slot;
}
/* No lock necessary, totally redundant anyway */
static inline void* mempool_slot_data(struct mempool_slot *slot) {
    return slot;
}
/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    pa_assert(p);

    pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
}
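
/* Worked example (illustrative): with the default 64 KiB block_size,
 * a pointer 131072 bytes past memory.ptr maps to slot index
 * 131072 / 65536 = 2. mempool_slot_by_ptr() below inverts this by
 * adding idx * block_size back onto memory.ptr. */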
/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}
/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;
    static int mempool_disable = 0;

    pa_assert(p);

    if (mempool_disable == 0)
        mempool_disable = getenv("PULSE_MEMPOOL_DISABLE") ? 1 : -1;

    if (mempool_disable > 0)
        return NULL;

    /* If -1 is passed as length we choose the size for the caller: we
     * take the largest size that fits in one of our slots. */

    if (length == (size_t) -1)
        length = pa_mempool_block_size_max(p);

    if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));

    } else if (p->block_size >= length) {

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
            b = pa_xnew(pa_memblock, 1);

        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = b->is_silence = false;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, bool read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length != (size_t) -1);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    b->is_silence = false;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}
/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, pa_free_cb_t free_cb, bool read_only) {
    pa_memblock *b;

    pa_assert(p);
    pa_assert(d);
    pa_assert(length != (size_t) -1);

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    b->is_silence = false;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}
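
/* Example (illustrative): wrapping caller-owned memory in a
 * PA_MEMBLOCK_USER block; free_cb runs from memblock_free() once the
 * last reference is gone. "pool" is assumed to come from
 * pa_mempool_new():
 *
 *     void *buf = pa_xmalloc(1024);
 *     pa_memblock *b = pa_memblock_new_user(pool, buf, 1024, pa_xfree, false);
 *     ...
 *     pa_memblock_unref(b);    (eventually calls pa_xfree(buf))
 */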
/* No lock necessary */
bool pa_memblock_is_read_only(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->read_only || PA_REFCNT_VALUE(b) > 1;
}
/* No lock necessary */
bool pa_memblock_is_silence(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->is_silence;
}
/* No lock necessary */
void pa_memblock_set_is_silence(pa_memblock *b, bool v) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    b->is_silence = v;
}
/* No lock necessary */
bool pa_memblock_ref_is_one(pa_memblock *b) {
    int r;
    pa_assert(b);

    pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);

    return r == 1;
}
/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}
/* No lock necessary */
void *pa_memblock_acquire_chunk(const pa_memchunk *c) {
    pa_assert(c);

    return (uint8_t *) pa_memblock_acquire(c->memblock) + c->index;
}
/* No lock necessary, in corner cases locks on its own */
void pa_memblock_release(pa_memblock *b) {
    int r;
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    pa_assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}
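
/* Sketch of the signaling protocol (illustrative): memblock_wait()
 * below increments please_signal and sleeps on pool->semaphore; the
 * thread that drops n_acquired from 1 to 0 sees please_signal set and
 * posts the semaphore:
 *
 *     thread A                           thread B
 *     p = pa_memblock_acquire(b);
 *                                        memblock_wait(b);   (blocks)
 *     pa_memblock_release(b);   ------>  semaphore posted, B wakes up
 */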
size_t pa_memblock_get_length(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}

pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}
/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock*b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}
static void memblock_free(pa_memblock *b) {
    pa_assert(b);
    pa_assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER :
            pa_assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));
            /* Fall through */

        case PA_MEMBLOCK_FIXED:
            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);
            break;

        case PA_MEMBLOCK_APPENDED:
            /* We could attach it to unused_memblocks, but that would
             * probably waste some considerable amount of memory */
            pa_xfree(b);
            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            pa_assert_se(segment = b->per_type.imported.segment);
            pa_assert_se(import = segment->import);

            pa_mutex_lock(import->mutex);

            pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

            pa_assert(segment->n_blocks >= 1);
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            bool call_free;

            pa_assert_se(slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data)));

            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

/* #ifdef HAVE_VALGRIND_MEMCHECK_H */
/*             if (PA_UNLIKELY(pa_in_valgrind())) { */
/*                 VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
/*             } */
/* #endif */

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
                    pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            pa_assert_not_reached();
    }
}
/* No lock necessary */
void pa_memblock_unref(pa_memblock*b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}
static void memblock_wait(pa_memblock *b) {
    pa_assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}
/* No lock necessary. This function is not multiple caller safe! */
static void memblock_make_local(pa_memblock *b) {
    pa_assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;
            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = false;

            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = false;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
    memblock_wait(b);
}
/* No lock necessary. This function is not multiple caller safe! */
void pa_memblock_unref_fixed(pa_memblock *b) {
    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);
    pa_assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_VALUE(b) > 1)
        memblock_make_local(b);

    pa_memblock_unref(b);
}
/* No lock necessary. */
pa_memblock *pa_memblock_will_need(pa_memblock *b) {
    void *p;

    pa_assert(b);
    pa_assert(PA_REFCNT_VALUE(b) > 0);

    p = pa_memblock_acquire(b);
    pa_will_need(p, b->length);
    pa_memblock_release(b);

    return b;
}
/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *segment;
    pa_memimport *import;

    pa_assert(b);
    pa_assert(b->type == PA_MEMBLOCK_IMPORTED);

    pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);

    pa_assert_se(segment = b->per_type.imported.segment);
    pa_assert_se(import = segment->import);

    pa_mutex_lock(import->mutex);

    pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));

    memblock_make_local(b);

    pa_assert(segment->n_blocks >= 1);
    if (-- segment->n_blocks <= 0)
        segment_detach(segment);

    pa_mutex_unlock(import->mutex);
}
pa_mempool* pa_mempool_new(bool shared, size_t size) {
    pa_mempool *p;
    char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];

    p = pa_xnew(pa_mempool, 1);

    p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
    if (p->block_size < PA_PAGE_SIZE)
        p->block_size = PA_PAGE_SIZE;

    if (size <= 0)
        p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
    else {
        p->n_blocks = (unsigned) (size / p->block_size);

        if (p->n_blocks < 2)
            p->n_blocks = 2;
    }

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
                 p->memory.shared ? "shared" : "private",
                 p->n_blocks,
                 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
                 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
                 (unsigned long) pa_mempool_block_size_max(p));

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->mutex = pa_mutex_new(true, true);
    p->semaphore = pa_semaphore_new(0);

    p->free_slots = pa_flist_new(p->n_blocks);

    return p;
}
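
/* Example (illustrative; parameter values are arbitrary):
 *
 *     pa_mempool *pool = pa_mempool_new(true, 0);   (shared, default size)
 *     if (pool) {
 *         pa_memblock *b = pa_memblock_new(pool, (size_t) -1);
 *         ...
 *         pa_memblock_unref(b);
 *         pa_mempool_free(pool);
 *     }
 *
 * A size of 0 selects PA_MEMPOOL_SLOTS_MAX slots; a length of
 * (size_t) -1 selects pa_mempool_block_size_max() bytes. */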
void pa_mempool_free(pa_mempool *p) {
    pa_assert(p);

    pa_mutex_lock(p->mutex);
    while (p->imports)
        pa_memimport_free(p->imports);
    while (p->exports)
        pa_memexport_free(p->exports);
    pa_mutex_unlock(p->mutex);

    pa_flist_free(p->free_slots, NULL);

    if (pa_atomic_load(&p->stat.n_allocated) > 0) {
        unsigned i;
        pa_flist *list;

        /* Ouch, somebody is retaining a memory block reference! */
        /* Let's try to find at least one of those leaked memory blocks */

        list = pa_flist_new(p->n_blocks);

        for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
            struct mempool_slot *slot;
            pa_memblock *b, *k;

            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
            b = mempool_slot_data(slot);

            while ((k = pa_flist_pop(p->free_slots))) {
                while (pa_flist_push(list, k) < 0)
                    ;
                if (b == k)
                    b = NULL;
            }

            if (b)
                pa_log("REF: Leaked memory block %p", b);

            while ((k = pa_flist_pop(list)))
                while (pa_flist_push(p->free_slots, k) < 0)
                    ;
        }

        pa_flist_free(list, NULL);

        pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
    }

    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}
/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    pa_assert(p);

    return &p->stat;
}
/* No lock necessary */
size_t pa_mempool_block_size_max(pa_mempool *p) {
    pa_assert(p);

    return p->block_size - PA_ALIGN(sizeof(pa_memblock));
}
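
/* Worked example (illustrative; the header size is platform- and
 * build-dependent): with a 64 KiB block_size and, say, a pa_memblock
 * header that aligns to 64 bytes, the largest usable length is
 * 65536 - 64 = 65472 bytes. The header is subtracted because a
 * PA_MEMBLOCK_POOL block shares its slot with its own header. */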
/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    pa_assert(p);

    list = pa_flist_new(p->n_blocks);

    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);

        while (pa_flist_push(p->free_slots, slot) < 0)
            ;
    }

    pa_flist_free(list, NULL);
}
/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    pa_assert(p);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}

/* No lock necessary */
bool pa_mempool_is_shared(pa_mempool *p) {
    pa_assert(p);

    return !!p->memory.shared;
}
/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    pa_assert(p);
    pa_assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(true, true);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}
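
/* Example (illustrative; my_release and the transport are made up,
 * but the callback arguments match release_cb's invocation in
 * memblock_free() above):
 *
 *     static void my_release(pa_memimport *imp, uint32_t block_id, void *userdata) {
 *         ... notify the sending client that block_id is unreferenced ...
 *     }
 *
 *     pa_memimport *imp = pa_memimport_new(pool, my_release, NULL);
 *     pa_memblock *b = pa_memimport_get(imp, block_id, shm_id, offset, size);
 *
 * block_id, shm_id, offset and size come from the peer's export
 * message; see pa_memexport_put() below. */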
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew0(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(seg->memory.id), seg);
    return seg;
}
/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    pa_assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);

    if (seg->trap)
        pa_memtrap_remove(seg->trap);

    pa_xfree(seg);
}
/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_first(i->blocks)))
        memblock_replace_import(b);

    pa_assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks);
    pa_hashmap_free(i->segments);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
        pa_memblock_ref(b);
        goto finish;
    }

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
        b = pa_xnew(pa_memblock, 1);

    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = true;
    b->is_silence = false;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

    stat_add(b);

finish:
    pa_mutex_unlock(i->mutex);

    return b;
}
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;
    pa_assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}
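
/* Flow sketch (illustrative; the wire protocol lives outside this
 * file): when a client we imported a block from dies, the exporting
 * side calls revoke_cb (see memexport_revoke_blocks() below) and the
 * importer is expected to call pa_memimport_process_revoke(), which
 * copies the payload to local memory via memblock_replace_import()
 * before the underlying segment goes away. */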
/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    pa_assert(p);
    pa_assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(true, true);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);

    return e;
}
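
/* Example (illustrative; my_revoke and the transport are made up, but
 * the callback arguments match revoke_cb's invocation in
 * memexport_revoke_blocks() below):
 *
 *     static void my_revoke(pa_memexport *ex, uint32_t block_id, void *userdata) {
 *         ... tell the receiving client to drop block_id ...
 *     }
 *
 *     pa_memexport *ex = pa_memexport_new(pool, my_revoke, NULL);
 *     uint32_t block_id, shm_id;
 *     size_t offset, size;
 *     if (pa_memexport_put(ex, b, &block_id, &shm_id, &offset, &size) >= 0) {
 *         ... transmit (block_id, shm_id, offset, size) to the peer ...
 *     }
 */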
void pa_memexport_free(pa_memexport *e) {
    pa_assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots));
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);

    pa_xfree(e);
}
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    pa_assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

    /* pa_log("Processing release for %u", id); */

    pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;
    pa_assert(e);
    pa_assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = (uint32_t) (slot - e->slots);
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}
/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    pa_assert(p);
    pa_assert(b);

    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        pa_assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t * size) {
    pa_shm *memory;
    struct memexport_slot *slot;
    void *data;

    pa_assert(e);
    pa_assert(b);
    pa_assert(block_id);
    pa_assert(shm_id);
    pa_assert(offset);
    pa_assert(size);
    pa_assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = (uint32_t) (slot - e->slots);

    pa_mutex_unlock(e->mutex);
    /* pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        pa_assert(b->pool);
        memory = &b->pool->memory;
    }

    pa_assert(data >= memory->ptr);
    pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
    *size = b->length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);

    return 0;
}
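
/* End-to-end sketch (illustrative; the messages between the two
 * processes are hypothetical, only the four exported values are
 * defined by this file):
 *
 *     exporter                               importer
 *     pa_memexport_put(e, b, &id,
 *                      &shm, &off, &sz);
 *       -- send (id, shm, off, sz) ---->     pa_memimport_get(i, id, shm, off, sz)
 *                                            ... use block, pa_memblock_unref() ...
 *       <---- release message -----------    release_cb fires in memblock_free()
 *     pa_memexport_process_release(e, id);
 */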