2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
32 #ifdef HAVE_VALGRIND_MEMCHECK_H
33 #include <valgrind/memcheck.h>
36 #include <pulse/xmalloc.h>
37 #include <pulse/def.h>
39 #include <pulsecore/shm.h>
40 #include <pulsecore/log.h>
41 #include <pulsecore/hashmap.h>
42 #include <pulsecore/semaphore.h>
43 #include <pulsecore/mutex.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/refcnt.h>
46 #include <pulsecore/llist.h>
47 #include <pulsecore/flist.h>
48 #include <pulsecore/core-util.h>
49 #include <pulsecore/memtrap.h>
53 /* We can allocate 64*1024*1024 bytes at maximum. That's 64MB. Please
54 * note that the footprint is usually much smaller, since the data is
55 * stored in SHM and our OS does not commit the memory before we use
56 * it for the first time. */
57 #define PA_MEMPOOL_SLOTS_MAX 1024
58 #define PA_MEMPOOL_SLOT_SIZE (64*1024)
60 #define PA_MEMEXPORT_SLOTS_MAX 128
62 #define PA_MEMIMPORT_SLOTS_MAX 160
63 #define PA_MEMIMPORT_SEGMENTS_MAX 16
66 PA_REFCNT_DECLARE; /* the reference counter */
69 pa_memblock_type_t type;
77 pa_atomic_t n_acquired;
78 pa_atomic_t please_signal;
82 /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
84 /* If type == PA_MEMBLOCK_USER this is passed as free_cb argument */
90 pa_memimport_segment *segment;
95 struct pa_memimport_segment {
104 * If true, this segment's lifetime will not be limited by the
105 * number of active blocks (seg->n_blocks) using its shared memory.
106 Rather, it will exist for the full lifetime of the memimport it is attached to.
109 * This is done to support memfd blocks transport.
111 * To transfer memfd-backed blocks without passing their fd every
112 * time, thus minimizing overhead and avoiding fd leaks, a command
113 is sent with the memfd fd as ancillary data very early on.
115 * This command has an ID that identifies the memfd region. Further
116 * block references are then exclusively done using this ID. On the
117 * receiving end, such logic is enabled by the memimport's segment
118 * hash and 'permanent' segments below.
120 static bool segment_is_permanent(pa_memimport_segment *seg) {
122 return seg->memory.type == PA_MEM_TYPE_SHARED_MEMFD;
125 /* A collection of multiple segments */
126 struct pa_memimport {
130 pa_hashmap *segments;
133 /* Called whenever an imported memory block is no longer needed */
135 pa_memimport_release_cb_t release_cb;
138 PA_LLIST_FIELDS(pa_memimport);
141 struct memexport_slot {
142 PA_LLIST_FIELDS(struct memexport_slot);
146 struct pa_memexport {
150 struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];
152 PA_LLIST_HEAD(struct memexport_slot, free_slots);
153 PA_LLIST_HEAD(struct memexport_slot, used_slots);
157 /* Called whenever a client from which we imported a memory block
158 (a block that we in turn exported to another client) dies and we
159 need to revoke the memory block accordingly */
160 pa_memexport_revoke_cb_t revoke_cb;
163 PA_LLIST_FIELDS(pa_memexport);
167 /* Reference count the mempool
169 * Any block allocation from the pool itself, or even just imported from
170 * another process through SHM and attached to it (PA_MEMBLOCK_IMPORTED),
171 * shall increase the refcount.
173 * This is done for per-client mempools: global references to blocks in
174 * the pool, or just to attached ones, can still be lingering around when
175 * the client connection dies and all per-client objects are to be freed.
176 * That is, current PulseAudio design does not guarantee that the client
177 * mempool blocks are referenced only by client-specific objects.
179 * For further details, please check:
180 * https://lists.freedesktop.org/archives/pulseaudio-discuss/2016-February/025587.html
184 pa_semaphore *semaphore;
193 bool is_remote_writable;
197 PA_LLIST_HEAD(pa_memimport, imports);
198 PA_LLIST_HEAD(pa_memexport, exports);
200 /* A list of free slots that may be reused */
201 pa_flist *free_slots;
203 pa_mempool_stat stat;
206 static void segment_detach(pa_memimport_segment *seg);
208 PA_STATIC_FLIST_DECLARE(unused_memblocks, 0, pa_xfree);
210 /* No lock necessary */
211 static void stat_add(pa_memblock*b) {
215 pa_atomic_inc(&b->pool->stat.n_allocated);
216 pa_atomic_add(&b->pool->stat.allocated_size, (int) b->length);
218 pa_atomic_inc(&b->pool->stat.n_accumulated);
219 pa_atomic_add(&b->pool->stat.accumulated_size, (int) b->length);
221 if (b->type == PA_MEMBLOCK_IMPORTED) {
222 pa_atomic_inc(&b->pool->stat.n_imported);
223 pa_atomic_add(&b->pool->stat.imported_size, (int) b->length);
226 pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
227 pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
230 /* No lock necessary */
231 static void stat_remove(pa_memblock *b) {
235 pa_assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
236 pa_assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);
238 pa_atomic_dec(&b->pool->stat.n_allocated);
239 pa_atomic_sub(&b->pool->stat.allocated_size, (int) b->length);
241 if (b->type == PA_MEMBLOCK_IMPORTED) {
242 pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
243 pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
245 pa_atomic_dec(&b->pool->stat.n_imported);
246 pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
249 pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
252 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);
254 /* No lock necessary */
255 pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
261 if (!(b = pa_memblock_new_pool(p, length)))
262 b = memblock_new_appended(p, length);
267 /* No lock necessary */
268 static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
274 /* If -1 is passed as length we choose the size for the caller. */
276 if (length == (size_t) -1)
277 length = pa_mempool_block_size_max(p);
279 b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
282 pa_mempool_ref(b->pool);
283 b->type = PA_MEMBLOCK_APPENDED;
284 b->read_only = b->is_silence = false;
285 pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
287 pa_atomic_store(&b->n_acquired, 0);
288 pa_atomic_store(&b->please_signal, 0);
294 /* No lock necessary */
295 static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
296 struct mempool_slot *slot;
299 if (!(slot = pa_flist_pop(p->free_slots))) {
302 /* The free list was empty, so we have to allocate a new entry */
304 if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
305 pa_atomic_dec(&p->n_init);
307 slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) idx));
310 if (pa_log_ratelimit(PA_LOG_DEBUG))
311 pa_log_debug("Pool full");
312 pa_atomic_inc(&p->stat.n_pool_full);
317 /* #ifdef HAVE_VALGRIND_MEMCHECK_H */
318 /* if (PA_UNLIKELY(pa_in_valgrind())) { */
319 /* VALGRIND_MALLOCLIKE_BLOCK(slot, p->block_size, 0, 0); */
326 /* No lock necessary, totally redundant anyway */
327 static inline void* mempool_slot_data(struct mempool_slot *slot) {
331 /* No lock necessary */
332 static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
335 pa_assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
336 pa_assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);
338 return (unsigned) ((size_t) ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size);
341 /* No lock necessary */
342 static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
345 if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
348 return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
351 /* No lock necessary */
352 bool pa_mempool_is_remote_writable(pa_mempool *p) {
354 return p->is_remote_writable;
357 /* No lock necessary */
358 void pa_mempool_set_is_remote_writable(pa_mempool *p, bool writable) {
360 pa_assert(!writable || pa_mempool_is_shared(p));
361 p->is_remote_writable = writable;
364 /* No lock necessary */
365 pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
366 pa_memblock *b = NULL;
367 struct mempool_slot *slot;
368 static int mempool_disable = 0;
373 if (mempool_disable == 0)
374 mempool_disable = getenv("PULSE_MEMPOOL_DISABLE") ? 1 : -1;
376 if (mempool_disable > 0)
379 /* If -1 is passed as length we choose the size for the caller: we
380 * take the largest size that fits in one of our slots. */
382 if (length == (size_t) -1)
383 length = pa_mempool_block_size_max(p);
385 if (p->block_size >= PA_ALIGN(sizeof(pa_memblock)) + length) {
387 if (!(slot = mempool_allocate_slot(p)))
390 b = mempool_slot_data(slot);
391 b->type = PA_MEMBLOCK_POOL;
392 pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
394 } else if (p->block_size >= length) {
396 if (!(slot = mempool_allocate_slot(p)))
399 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
400 b = pa_xnew(pa_memblock, 1);
402 b->type = PA_MEMBLOCK_POOL_EXTERNAL;
403 pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));
406 pa_log_debug("Memory block too large for pool: %lu > %lu", (unsigned long) length, (unsigned long) p->block_size);
407 pa_atomic_inc(&p->stat.n_too_large_for_pool);
413 pa_mempool_ref(b->pool);
414 b->read_only = b->is_silence = false;
416 pa_atomic_store(&b->n_acquired, 0);
417 pa_atomic_store(&b->please_signal, 0);
423 /* No lock necessary */
424 pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, bool read_only) {
429 pa_assert(length != (size_t) -1);
432 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
433 b = pa_xnew(pa_memblock, 1);
437 pa_mempool_ref(b->pool);
438 b->type = PA_MEMBLOCK_FIXED;
439 b->read_only = read_only;
440 b->is_silence = false;
441 pa_atomic_ptr_store(&b->data, d);
443 pa_atomic_store(&b->n_acquired, 0);
444 pa_atomic_store(&b->please_signal, 0);
450 /* No lock necessary */
451 pa_memblock *pa_memblock_new_user(
455 pa_free_cb_t free_cb,
463 pa_assert(length != (size_t) -1);
466 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
467 b = pa_xnew(pa_memblock, 1);
471 pa_mempool_ref(b->pool);
472 b->type = PA_MEMBLOCK_USER;
473 b->read_only = read_only;
474 b->is_silence = false;
475 pa_atomic_ptr_store(&b->data, d);
477 pa_atomic_store(&b->n_acquired, 0);
478 pa_atomic_store(&b->please_signal, 0);
480 b->per_type.user.free_cb = free_cb;
481 b->per_type.user.free_cb_data = free_cb_data;
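/* Illustrative sketch of how pa_memblock_new_user() is meant to be used with
 * a heap buffer: the block takes no copy, it merely records the pointer and
 * calls free_cb(free_cb_data) from memblock_free() once the last reference
 * is gone. The argument order shown here is inferred from the assignments
 * above; see pulsecore/memblock.h for the authoritative prototype. Kept
 * under #if 0 so it is not compiled into this file. */
#if 0
static pa_memblock* wrap_heap_buffer(pa_mempool *pool, size_t n) {
    void *buf = pa_xmalloc(n);

    /* ... fill buf ... */

    /* Ownership of buf passes to the memblock; pa_xfree(buf) will run when
     * the block is freed. */
    return pa_memblock_new_user(pool, buf, n, pa_xfree, buf, false);
}
#endif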
487 /* No lock necessary */
488 bool pa_memblock_is_ours(pa_memblock *b) {
490 pa_assert(PA_REFCNT_VALUE(b) > 0);
492 return b->type != PA_MEMBLOCK_IMPORTED;
495 /* No lock necessary */
496 bool pa_memblock_is_read_only(pa_memblock *b) {
498 pa_assert(PA_REFCNT_VALUE(b) > 0);
500 return b->read_only || PA_REFCNT_VALUE(b) > 1;
503 /* No lock necessary */
504 bool pa_memblock_is_silence(pa_memblock *b) {
506 pa_assert(PA_REFCNT_VALUE(b) > 0);
508 return b->is_silence;
511 /* No lock necessary */
512 void pa_memblock_set_is_silence(pa_memblock *b, bool v) {
514 pa_assert(PA_REFCNT_VALUE(b) > 0);
519 /* No lock necessary */
520 bool pa_memblock_ref_is_one(pa_memblock *b) {
524 pa_assert_se((r = PA_REFCNT_VALUE(b)) > 0);
529 /* No lock necessary */
530 void* pa_memblock_acquire(pa_memblock *b) {
532 pa_assert(PA_REFCNT_VALUE(b) > 0);
534 pa_atomic_inc(&b->n_acquired);
536 return pa_atomic_ptr_load(&b->data);
539 /* No lock necessary */
540 void *pa_memblock_acquire_chunk(const pa_memchunk *c) {
543 return (uint8_t *) pa_memblock_acquire(c->memblock) + c->index;
546 /* No lock necessary, in corner cases it locks on its own */
547 void pa_memblock_release(pa_memblock *b) {
550 pa_assert(PA_REFCNT_VALUE(b) > 0);
552 r = pa_atomic_dec(&b->n_acquired);
555 /* Signal a waiting thread that this memblock is no longer used */
556 if (r == 1 && pa_atomic_load(&b->please_signal))
557 pa_semaphore_post(b->pool->semaphore);
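/* Illustrative sketch of the acquire/release discipline implemented above:
 * every access to a block's data is bracketed by pa_memblock_acquire() and
 * pa_memblock_release(), and the pointer must not be used after the release,
 * since the release may wake a thread blocked in memblock_wait() that is
 * about to move or free the data. Kept under #if 0 so it is not compiled
 * into this file. */
#if 0
static void copy_out(pa_memblock *b, void *dst, size_t n) {
    void *src;

    pa_assert(n <= pa_memblock_get_length(b));

    src = pa_memblock_acquire(b);   /* increments n_acquired, pins the data */
    memcpy(dst, src, n);
    pa_memblock_release(b);         /* may post the pool semaphore for a waiter */
}
#endif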
560 size_t pa_memblock_get_length(pa_memblock *b) {
562 pa_assert(PA_REFCNT_VALUE(b) > 0);
567 /* Note! Always unref the returned pool after use */
568 pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
570 pa_assert(PA_REFCNT_VALUE(b) > 0);
573 pa_mempool_ref(b->pool);
577 /* No lock necessary */
578 pa_memblock* pa_memblock_ref(pa_memblock*b) {
580 pa_assert(PA_REFCNT_VALUE(b) > 0);
586 static void memblock_free(pa_memblock *b) {
591 pa_assert(pa_atomic_load(&b->n_acquired) == 0);
597 case PA_MEMBLOCK_USER :
598 pa_assert(b->per_type.user.free_cb);
599 b->per_type.user.free_cb(b->per_type.user.free_cb_data);
603 case PA_MEMBLOCK_FIXED:
604 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
609 case PA_MEMBLOCK_APPENDED:
611 /* We could attach it to unused_memblocks, but that would
612 probably waste a considerable amount of memory */
616 case PA_MEMBLOCK_IMPORTED: {
617 pa_memimport_segment *segment;
618 pa_memimport *import;
620 /* FIXME! This should be implemented lock-free */
622 pa_assert_se(segment = b->per_type.imported.segment);
623 pa_assert_se(import = segment->import);
625 pa_mutex_lock(import->mutex);
627 pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));
629 pa_assert(segment->n_blocks >= 1);
630 if (-- segment->n_blocks <= 0)
631 segment_detach(segment);
633 pa_mutex_unlock(import->mutex);
635 import->release_cb(import, b->per_type.imported.id, import->userdata);
637 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
643 case PA_MEMBLOCK_POOL_EXTERNAL:
644 case PA_MEMBLOCK_POOL: {
645 struct mempool_slot *slot;
648 pa_assert_se(slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data)));
650 call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;
652 /* #ifdef HAVE_VALGRIND_MEMCHECK_H */
653 /* if (PA_UNLIKELY(pa_in_valgrind())) { */
654 /* VALGRIND_FREELIKE_BLOCK(slot, b->pool->block_size); */
658 /* The free list dimensions should easily allow all slots
659 * to fit in, hence try harder if pushing this slot into
660 * the free list fails */
661 while (pa_flist_push(b->pool->free_slots, slot) < 0)
665 if (pa_flist_push(PA_STATIC_FLIST_GET(unused_memblocks), b) < 0)
671 case PA_MEMBLOCK_TYPE_MAX:
673 pa_assert_not_reached();
676 pa_mempool_unref(pool);
679 /* No lock necessary */
680 void pa_memblock_unref(pa_memblock*b) {
682 pa_assert(PA_REFCNT_VALUE(b) > 0);
684 if (PA_REFCNT_DEC(b) > 0)
691 static void memblock_wait(pa_memblock *b) {
694 if (pa_atomic_load(&b->n_acquired) > 0) {
695 /* We need to wait until all threads have given up access to the
696 * memory block before we can go on. Unfortunately this means
697 * that we have to lock and wait here. Sniff! */
699 pa_atomic_inc(&b->please_signal);
701 while (pa_atomic_load(&b->n_acquired) > 0)
702 pa_semaphore_wait(b->pool->semaphore);
704 pa_atomic_dec(&b->please_signal);
708 /* No lock necessary. This function is not multiple caller safe! */
709 static void memblock_make_local(pa_memblock *b) {
712 pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
714 if (b->length <= b->pool->block_size) {
715 struct mempool_slot *slot;
717 if ((slot = mempool_allocate_slot(b->pool))) {
719 /* We can move it into a local pool, perfect! */
721 new_data = mempool_slot_data(slot);
722 memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
723 pa_atomic_ptr_store(&b->data, new_data);
725 b->type = PA_MEMBLOCK_POOL_EXTERNAL;
726 b->read_only = false;
732 /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
733 b->per_type.user.free_cb = pa_xfree;
734 pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));
735 b->per_type.user.free_cb_data = pa_atomic_ptr_load(&b->data);
737 b->type = PA_MEMBLOCK_USER;
738 b->read_only = false;
741 pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
742 pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
746 /* No lock necessary. This function is not multiple caller safe */
747 void pa_memblock_unref_fixed(pa_memblock *b) {
749 pa_assert(PA_REFCNT_VALUE(b) > 0);
750 pa_assert(b->type == PA_MEMBLOCK_FIXED);
752 if (PA_REFCNT_VALUE(b) > 1)
753 memblock_make_local(b);
755 pa_memblock_unref(b);
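/* Illustrative sketch of the PA_MEMBLOCK_FIXED contract: the caller keeps
 * owning the underlying buffer, so before that buffer disappears it must
 * call pa_memblock_unref_fixed(), which (via memblock_make_local() above)
 * copies the data elsewhere if somebody still holds a reference. Kept under
 * #if 0 so it is not compiled into this file. */
#if 0
static void fixed_block_example(pa_mempool *pool, void (*consume)(pa_memblock*)) {
    uint8_t buffer[512];   /* caller-owned storage, e.g. on the stack */
    pa_memblock *b;

    b = pa_memblock_new_fixed(pool, buffer, sizeof(buffer), true);
    consume(b);                      /* may keep additional references */
    pa_memblock_unref_fixed(b);      /* copies the data if still referenced */

    /* buffer may now safely go out of scope */
}
#endif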
758 /* No lock necessary. */
759 pa_memblock *pa_memblock_will_need(pa_memblock *b) {
763 pa_assert(PA_REFCNT_VALUE(b) > 0);
765 p = pa_memblock_acquire(b);
766 pa_will_need(p, b->length);
767 pa_memblock_release(b);
772 /* Self-locked. This function is not multiple-caller safe */
773 static void memblock_replace_import(pa_memblock *b) {
774 pa_memimport_segment *segment;
775 pa_memimport *import;
778 pa_assert(b->type == PA_MEMBLOCK_IMPORTED);
780 pa_assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
781 pa_assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
782 pa_atomic_dec(&b->pool->stat.n_imported);
783 pa_atomic_sub(&b->pool->stat.imported_size, (int) b->length);
785 pa_assert_se(segment = b->per_type.imported.segment);
786 pa_assert_se(import = segment->import);
788 pa_mutex_lock(import->mutex);
790 pa_assert_se(pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id)));
792 memblock_make_local(b);
794 pa_assert(segment->n_blocks >= 1);
795 if (-- segment->n_blocks <= 0)
796 segment_detach(segment);
798 pa_mutex_unlock(import->mutex);
801 /*@per_client: This is a security measure. By default this should
802 * be set to true where the created mempool is never shared with more
803 * than one client in the system. Set this to false if a global
804 * mempool, shared with all existing and future clients, is required.
806 * NOTE-1: Do not create any further global mempools! They allow data
807 * leaks between clients and thus conflict with the xdg-app containers
808 * model. They also complicate the handling of memfd-based pools.
810 * NOTE-2: Almost all mempools are now created on a per client basis.
811 * The only exception is the pa_core's mempool which is still shared
812 * between all clients of the system.
814 Besides security issues, special marking for global mempools is
815 * required for memfd communication. To avoid fd leaks, memfd pools
816 * are registered with the connection pstream to create an ID<->memfd
817 * mapping on both PA endpoints. Such memory regions are then always
818 * referenced by their IDs and never by their fds and thus their fds
819 * can be quickly closed later.
821 Unfortunately this scheme cannot work with global pools, since the
822 ID registration mechanism needs to happen for each newly connected
823 client; thus such pools need special handling, i.e. their fd must be
824 kept open at all times :-(
826 * TODO-1: Transform the global core mempool to a per-client one
827 * TODO-2: Remove global mempools support */
828 pa_mempool *pa_mempool_new(pa_mem_type_t type, size_t size, bool per_client) {
830 char t1[PA_BYTES_SNPRINT_MAX], t2[PA_BYTES_SNPRINT_MAX];
831 const size_t page_size = pa_page_size();
833 p = pa_xnew0(pa_mempool, 1);
836 p->block_size = PA_PAGE_ALIGN(PA_MEMPOOL_SLOT_SIZE);
837 if (p->block_size < page_size)
838 p->block_size = page_size;
841 p->n_blocks = PA_MEMPOOL_SLOTS_MAX;
843 p->n_blocks = (unsigned) (size / p->block_size);
849 if (pa_shm_create_rw(&p->memory, type, p->n_blocks * p->block_size, 0700) < 0) {
854 pa_log_debug("Using %s memory pool with %u slots of size %s each, total size is %s, maximum usable slot size is %lu",
855 pa_mem_type_to_string(type),
857 pa_bytes_snprint(t1, sizeof(t1), (unsigned) p->block_size),
858 pa_bytes_snprint(t2, sizeof(t2), (unsigned) (p->n_blocks * p->block_size)),
859 (unsigned long) pa_mempool_block_size_max(p));
861 p->global = !per_client;
863 pa_atomic_store(&p->n_init, 0);
865 PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
866 PA_LLIST_HEAD_INIT(pa_memexport, p->exports);
868 p->mutex = pa_mutex_new(true, true);
869 p->semaphore = pa_semaphore_new(0);
871 p->free_slots = pa_flist_new(p->n_blocks);
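/* Illustrative sketch of creating and using a per-client, memfd-backed pool.
 * Passing 0 as the size is assumed to select the default geometry
 * (PA_MEMPOOL_SLOTS_MAX slots); passing (size_t) -1 as the block length asks
 * for the largest length that still fits a single slot, i.e.
 * pa_mempool_block_size_max(). Kept under #if 0 so it is not compiled into
 * this file. */
#if 0
static void mempool_usage_example(void) {
    pa_mempool *pool;
    pa_memblock *b;

    if (!(pool = pa_mempool_new(PA_MEM_TYPE_SHARED_MEMFD, 0, true)))
        return;   /* e.g. memfd not supported on this system */

    b = pa_memblock_new(pool, (size_t) -1);

    /* ... use the block ... */

    pa_memblock_unref(b);
    pa_mempool_unref(pool);   /* actually freed once the last block is gone */
}
#endif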
876 static void mempool_free(pa_mempool *p) {
879 pa_mutex_lock(p->mutex);
882 pa_memimport_free(p->imports);
885 pa_memexport_free(p->exports);
887 pa_mutex_unlock(p->mutex);
889 pa_flist_free(p->free_slots, NULL);
891 if (pa_atomic_load(&p->stat.n_allocated) > 0) {
893 /* Ouch, somebody is retaining a memory block reference! */
899 /* Let's try to find at least one of those leaked memory blocks */
901 list = pa_flist_new(p->n_blocks);
903 for (i = 0; i < (unsigned) pa_atomic_load(&p->n_init); i++) {
904 struct mempool_slot *slot;
907 slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * (size_t) i));
908 b = mempool_slot_data(slot);
910 while ((k = pa_flist_pop(p->free_slots))) {
911 while (pa_flist_push(list, k) < 0)
919 pa_log("REF: Leaked memory block %p", b);
921 while ((k = pa_flist_pop(list)))
922 while (pa_flist_push(p->free_slots, k) < 0)
926 pa_flist_free(list, NULL);
930 pa_log_error("Memory pool destroyed but not all memory blocks freed! %u remain.", pa_atomic_load(&p->stat.n_allocated));
935 pa_shm_free(&p->memory);
937 pa_mutex_free(p->mutex);
938 pa_semaphore_free(p->semaphore);
943 /* No lock necessary */
944 const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
950 /* No lock necessary */
951 size_t pa_mempool_block_size_max(pa_mempool *p) {
954 return p->block_size - PA_ALIGN(sizeof(pa_memblock));
957 /* No lock necessary */
958 void pa_mempool_vacuum(pa_mempool *p) {
959 struct mempool_slot *slot;
964 list = pa_flist_new(p->n_blocks);
966 while ((slot = pa_flist_pop(p->free_slots)))
967 while (pa_flist_push(list, slot) < 0)
970 while ((slot = pa_flist_pop(list))) {
971 pa_shm_punch(&p->memory, (size_t) ((uint8_t*) slot - (uint8_t*) p->memory.ptr), p->block_size);
973 while (pa_flist_push(p->free_slots, slot))
977 pa_flist_free(list, NULL);
980 /* No lock necessary */
981 bool pa_mempool_is_shared(pa_mempool *p) {
984 return pa_mem_type_is_shared(p->memory.type);
987 /* No lock necessary */
988 bool pa_mempool_is_memfd_backed(const pa_mempool *p) {
991 return (p->memory.type == PA_MEM_TYPE_SHARED_MEMFD);
994 /* No lock necessary */
995 int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
998 if (!pa_mempool_is_shared(p))
1006 pa_mempool* pa_mempool_ref(pa_mempool *p) {
1008 pa_assert(PA_REFCNT_VALUE(p) > 0);
1014 void pa_mempool_unref(pa_mempool *p) {
1016 pa_assert(PA_REFCNT_VALUE(p) > 0);
1018 if (PA_REFCNT_DEC(p) <= 0)
1022 /* No lock necessary
1023 * Check pa_mempool_new() for per-client vs. global mempools */
1024 bool pa_mempool_is_global(pa_mempool *p) {
1030 /* No lock necessary
1031 * Check pa_mempool_new() for per-client vs. global mempools */
1032 bool pa_mempool_is_per_client(pa_mempool *p) {
1033 return !pa_mempool_is_global(p);
1038 * This is only for per-client mempools!
1040 * After this method's return, the caller owns the file descriptor
1041 and is responsible for closing it at the appropriate time. This
1042 should only be called once during a mempool's lifetime.
1044 * Check pa_shm->fd and pa_mempool_new() for further context. */
1045 int pa_mempool_take_memfd_fd(pa_mempool *p) {
1049 pa_assert(pa_mempool_is_shared(p));
1050 pa_assert(pa_mempool_is_memfd_backed(p));
1051 pa_assert(pa_mempool_is_per_client(p));
1053 pa_mutex_lock(p->mutex);
1055 memfd_fd = p->memory.fd;
1058 pa_mutex_unlock(p->mutex);
1060 pa_assert(memfd_fd != -1);
1064 /* No lock necessary
1066 * This is only for global mempools!
1068 * Global mempools have their memfd descriptor always open. DO NOT
1069 close the returned descriptor on your own.
1071 * Check pa_mempool_new() for further context. */
1072 int pa_mempool_get_memfd_fd(pa_mempool *p) {
1076 pa_assert(pa_mempool_is_shared(p));
1077 pa_assert(pa_mempool_is_memfd_backed(p));
1078 pa_assert(pa_mempool_is_global(p));
1080 memfd_fd = p->memory.fd;
1081 pa_assert(memfd_fd != -1);
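/* Illustrative sketch of the fd ownership rules spelled out above:
 * pa_mempool_take_memfd_fd() hands the descriptor over to the caller (who
 * must close it), while pa_mempool_get_memfd_fd() only lends the globally
 * kept descriptor. transport_send_fd() stands in for whatever pstream call
 * actually ships the fd as ancillary data. Kept under #if 0 so it is not
 * compiled into this file. */
#if 0
static void publish_pool_fd(pa_mempool *pool, void (*transport_send_fd)(int)) {
    if (pa_mempool_is_per_client(pool)) {
        /* Per-client pool: we now own the fd and must close it ourselves. */
        int fd = pa_mempool_take_memfd_fd(pool);
        transport_send_fd(fd);
        pa_close(fd);
    } else {
        /* Global pool: the pool keeps its fd open, do NOT close it here. */
        transport_send_fd(pa_mempool_get_memfd_fd(pool));
    }
}
#endif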
1086 /* For receiving blocks from other nodes */
1087 pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
1093 i = pa_xnew(pa_memimport, 1);
1094 i->mutex = pa_mutex_new(true, true);
1096 pa_mempool_ref(i->pool);
1097 i->segments = pa_hashmap_new(NULL, NULL);
1098 i->blocks = pa_hashmap_new(NULL, NULL);
1100 i->userdata = userdata;
1102 pa_mutex_lock(p->mutex);
1103 PA_LLIST_PREPEND(pa_memimport, p->imports, i);
1104 pa_mutex_unlock(p->mutex);
1109 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);
1111 /* Should be called locked
1112 * Caller owns passed @memfd_fd and must close it down when appropriate. */
1113 static pa_memimport_segment* segment_attach(pa_memimport *i, pa_mem_type_t type, uint32_t shm_id,
1114 int memfd_fd, bool writable) {
1115 pa_memimport_segment* seg;
1116 pa_assert(pa_mem_type_is_shared(type));
1118 if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
1121 seg = pa_xnew0(pa_memimport_segment, 1);
1123 if (pa_shm_attach(&seg->memory, type, shm_id, memfd_fd, writable) < 0) {
1128 seg->writable = writable;
1130 seg->trap = pa_memtrap_add(seg->memory.ptr, seg->memory.size);
1132 pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(seg->memory.id), seg);
1136 /* Should be called locked */
1137 static void segment_detach(pa_memimport_segment *seg) {
1139 pa_assert(seg->n_blocks == (segment_is_permanent(seg) ? 1u : 0u));
1141 pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
1142 pa_shm_free(&seg->memory);
1145 pa_memtrap_remove(seg->trap);
1150 /* Self-locked. Not multiple-caller safe */
1151 void pa_memimport_free(pa_memimport *i) {
1154 pa_memimport_segment *seg;
1159 pa_mutex_lock(i->mutex);
1161 while ((b = pa_hashmap_first(i->blocks)))
1162 memblock_replace_import(b);
1164 /* Permanent segments exist for the lifetime of the memimport. Now
1165 * that we're freeing the memimport itself, clear them all up.
1167 * Careful! segment_detach() internally removes itself from the
1168 * memimport's hash; the same hash we're now using for iteration. */
1169 PA_HASHMAP_FOREACH(seg, i->segments, state) {
1170 if (segment_is_permanent(seg))
1171 segment_detach(seg);
1173 pa_assert(pa_hashmap_size(i->segments) == 0);
1175 pa_mutex_unlock(i->mutex);
1177 pa_mutex_lock(i->pool->mutex);
1179 /* If we've exported this block further we need to revoke that export */
1180 for (e = i->pool->exports; e; e = e->next)
1181 memexport_revoke_blocks(e, i);
1183 PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);
1185 pa_mutex_unlock(i->pool->mutex);
1187 pa_mempool_unref(i->pool);
1188 pa_hashmap_free(i->blocks);
1189 pa_hashmap_free(i->segments);
1191 pa_mutex_free(i->mutex);
1196 /* Create a new memimport's memfd segment entry, with passed SHM ID
1197 * as key and the newly-created segment (with its mmap()-ed memfd
1198 * memory region) as its value.
1200 * Note! check comments at 'pa_shm->fd', 'segment_is_permanent()',
1201 * and 'pa_pstream_register_memfd_mempool()' for further details.
1203 * Caller owns passed @memfd_fd and must close it down when appropriate. */
1204 int pa_memimport_attach_memfd(pa_memimport *i, uint32_t shm_id, int memfd_fd, bool writable) {
1205 pa_memimport_segment *seg;
1209 pa_assert(memfd_fd != -1);
1211 pa_mutex_lock(i->mutex);
1213 if (!(seg = segment_attach(i, PA_MEM_TYPE_SHARED_MEMFD, shm_id, memfd_fd, writable)))
1216 /* n_blocks acts as a segment reference count. To avoid the segment
1217 * being deleted when receiving silent memchunks, etc., mark our
1218 * permanent presence by incrementing that refcount. */
1221 pa_assert(segment_is_permanent(seg));
1225 pa_mutex_unlock(i->mutex);
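/* Illustrative sketch of the receiving side of the memfd registration
 * described above: when the peer's SHM ID arrives together with the memfd fd
 * (as ancillary data), the fd is attached exactly once to the memimport and
 * all further block references use the ID only. Since the caller keeps
 * ownership of the fd, and the mmap()ed region stays valid after close(),
 * the fd can be closed right away. Kept under #if 0 so it is not compiled
 * into this file. */
#if 0
static void on_register_memfd(pa_memimport *i, uint32_t shm_id, int memfd_fd, bool writable) {
    if (pa_memimport_attach_memfd(i, shm_id, memfd_fd, writable) < 0)
        pa_log("Failed to attach memfd segment with ID %u", shm_id);

    pa_close(memfd_fd);
}
#endif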
1230 pa_memblock* pa_memimport_get(pa_memimport *i, pa_mem_type_t type, uint32_t block_id, uint32_t shm_id,
1231 size_t offset, size_t size, bool writable) {
1232 pa_memblock *b = NULL;
1233 pa_memimport_segment *seg;
1236 pa_assert(pa_mem_type_is_shared(type));
1238 pa_mutex_lock(i->mutex);
1240 if ((b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(block_id)))) {
1245 if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
1248 if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id)))) {
1249 if (type == PA_MEM_TYPE_SHARED_MEMFD) {
1250 pa_log("Bailing out! No cached memimport segment for memfd ID %u", shm_id);
1251 pa_log("Did the other PA endpoint forget registering its memfd pool?");
1255 pa_assert(type == PA_MEM_TYPE_SHARED_POSIX);
1256 if (!(seg = segment_attach(i, type, shm_id, -1, writable)))
1260 if (writable && !seg->writable) {
1261 pa_log("Cannot import cached segment in write mode - previously mapped as read-only");
1265 if (offset+size > seg->memory.size)
1268 if (!(b = pa_flist_pop(PA_STATIC_FLIST_GET(unused_memblocks))))
1269 b = pa_xnew(pa_memblock, 1);
1273 pa_mempool_ref(b->pool);
1274 b->type = PA_MEMBLOCK_IMPORTED;
1275 b->read_only = !writable;
1276 b->is_silence = false;
1277 pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
1279 pa_atomic_store(&b->n_acquired, 0);
1280 pa_atomic_store(&b->please_signal, 0);
1281 b->per_type.imported.id = block_id;
1282 b->per_type.imported.segment = seg;
1284 pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);
1291 pa_mutex_unlock(i->mutex);
1296 int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
1301 pa_mutex_lock(i->mutex);
1303 if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
1308 memblock_replace_import(b);
1311 pa_mutex_unlock(i->mutex);
1316 /* For sending blocks to other nodes */
1317 pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
1320 static pa_atomic_t export_baseidx = PA_ATOMIC_INIT(0);
1325 if (!pa_mempool_is_shared(p))
1328 e = pa_xnew(pa_memexport, 1);
1329 e->mutex = pa_mutex_new(true, true);
1331 pa_mempool_ref(e->pool);
1332 PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
1333 PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
1336 e->userdata = userdata;
1338 pa_mutex_lock(p->mutex);
1340 PA_LLIST_PREPEND(pa_memexport, p->exports, e);
1341 e->baseidx = (uint32_t) pa_atomic_add(&export_baseidx, PA_MEMEXPORT_SLOTS_MAX);
1343 pa_mutex_unlock(p->mutex);
1347 void pa_memexport_free(pa_memexport *e) {
1350 pa_mutex_lock(e->mutex);
1351 while (e->used_slots)
1352 pa_memexport_process_release(e, (uint32_t) (e->used_slots - e->slots + e->baseidx));
1353 pa_mutex_unlock(e->mutex);
1355 pa_mutex_lock(e->pool->mutex);
1356 PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
1357 pa_mutex_unlock(e->pool->mutex);
1359 pa_mempool_unref(e->pool);
1360 pa_mutex_free(e->mutex);
1365 int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
1370 pa_mutex_lock(e->mutex);
1372 if (id < e->baseidx)
1376 if (id >= e->n_init)
1379 if (!e->slots[id].block)
1382 b = e->slots[id].block;
1383 e->slots[id].block = NULL;
1385 PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
1386 PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);
1388 pa_mutex_unlock(e->mutex);
1390 /* pa_log("Processing release for %u", id); */
1392 pa_assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
1393 pa_assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);
1395 pa_atomic_dec(&e->pool->stat.n_exported);
1396 pa_atomic_sub(&e->pool->stat.exported_size, (int) b->length);
1398 pa_memblock_unref(b);
1403 pa_mutex_unlock(e->mutex);
1409 static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
1410 struct memexport_slot *slot, *next;
1414 pa_mutex_lock(e->mutex);
1416 for (slot = e->used_slots; slot; slot = next) {
1420 if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
1421 slot->block->per_type.imported.segment->import != i)
1424 idx = (uint32_t) (slot - e->slots + e->baseidx);
1425 e->revoke_cb(e, idx, e->userdata);
1426 pa_memexport_process_release(e, idx);
1429 pa_mutex_unlock(e->mutex);
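/* Illustrative sketch of how pa_memexport_put() (below) and
 * pa_memimport_get() (above) cooperate across a connection: the sender turns
 * a block into a (type, block_id, shm_id, offset, size) tuple it can ship
 * over the wire, and the receiver resolves that tuple back into a
 * pa_memblock that references the very same shared memory.
 * send_descriptor() stands in for the actual pstream message. Kept under
 * #if 0 so it is not compiled into this file. */
#if 0
static int send_block(pa_memexport *e, pa_memblock *b,
                      void (*send_descriptor)(pa_mem_type_t, uint32_t, uint32_t, size_t, size_t)) {
    pa_mem_type_t type;
    uint32_t block_id, shm_id;
    size_t offset, size;

    if (pa_memexport_put(e, b, &type, &block_id, &shm_id, &offset, &size) < 0)
        return -1;   /* e.g. no free export slot, or block not in shared memory */

    send_descriptor(type, block_id, shm_id, offset, size);
    return 0;
}

static pa_memblock* receive_block(pa_memimport *i, pa_mem_type_t type, uint32_t block_id,
                                  uint32_t shm_id, size_t offset, size_t size) {
    /* Returns NULL if the segment is unknown, the import is full, or the
     * requested region lies outside the segment. */
    return pa_memimport_get(i, type, block_id, shm_id, offset, size, false);
}
#endif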
1432 /* No lock necessary */
1433 static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
1439 if (b->type == PA_MEMBLOCK_IMPORTED ||
1440 b->type == PA_MEMBLOCK_POOL ||
1441 b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
1442 pa_assert(b->pool == p);
1443 return pa_memblock_ref(b);
1446 if (!(n = pa_memblock_new_pool(p, b->length)))
1449 memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
1454 int pa_memexport_put(pa_memexport *e, pa_memblock *b, pa_mem_type_t *type, uint32_t *block_id,
1455 uint32_t *shm_id, size_t *offset, size_t * size) {
1457 struct memexport_slot *slot;
1463 pa_assert(block_id);
1467 pa_assert(b->pool == e->pool);
1469 if (!(b = memblock_shared_copy(e->pool, b)))
1472 pa_mutex_lock(e->mutex);
1474 if (e->free_slots) {
1475 slot = e->free_slots;
1476 PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
1477 } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
1478 slot = &e->slots[e->n_init++];
1480 pa_mutex_unlock(e->mutex);
1481 pa_memblock_unref(b);
1485 PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
1487 *block_id = (uint32_t) (slot - e->slots + e->baseidx);
1489 pa_mutex_unlock(e->mutex);
1490 /* pa_log("Got block id %u", *block_id); */
1492 data = pa_memblock_acquire(b);
1494 if (b->type == PA_MEMBLOCK_IMPORTED) {
1495 pa_assert(b->per_type.imported.segment);
1496 memory = &b->per_type.imported.segment->memory;
1498 pa_assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
1500 pa_assert(pa_mempool_is_shared(b->pool));
1501 memory = &b->pool->memory;
1504 pa_assert(data >= memory->ptr);
1505 pa_assert((uint8_t*) data + b->length <= (uint8_t*) memory->ptr + memory->size);
1507 *type = memory->type;
1508 *shm_id = memory->id;
1509 *offset = (size_t) ((uint8_t*) data - (uint8_t*) memory->ptr);
1512 pa_memblock_release(b);
1514 pa_atomic_inc(&e->pool->stat.n_exported);
1515 pa_atomic_add(&e->pool->stat.exported_size, (int) b->length);