/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering
  Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation; either version 2.1 of the
  License, or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <pulse/xmalloc.h>
#include <pulse/def.h>

#include <pulsecore/shm.h>
#include <pulsecore/log.h>
#include <pulsecore/hashmap.h>
#include <pulsecore/semaphore.h>
#include <pulsecore/macro.h>
#include <pulsecore/flist.h>
#include <pulsecore/mutex.h>
#include <pulsecore/llist.h>
#include <pulsecore/atomic.h>

#include "memblock.h"

#define PA_MEMPOOL_SLOTS_MAX 128
#define PA_MEMPOOL_SLOT_SIZE (16*1024)

#define PA_MEMEXPORT_SLOTS_MAX 128

#define PA_MEMIMPORT_SLOTS_MAX 128
#define PA_MEMIMPORT_SEGMENTS_MAX 16
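
/* With the defaults above, a pool consists of 128 slots of 16 KiB
 * each, i.e. 128*16384 = 2 MiB of (optionally SHM-backed) memory.
 * Each slot holds a struct mempool_slot header followed directly by
 * the payload, so the largest payload a slot can carry is slightly
 * less than 16 KiB. */
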
struct pa_memblock {
    PA_REFCNT_DECLARE; /* the reference counter */

    pa_mempool *pool;

    pa_memblock_type_t type;
    int read_only; /* boolean */

    pa_atomic_ptr_t data;
    size_t length;

    pa_atomic_t n_acquired;
    pa_atomic_t please_signal;

    union {
        struct {
            /* If type == PA_MEMBLOCK_USER this points to a function for freeing this memory block */
            void (*free_cb)(void *p);
        } user;

        struct {
            uint32_t id;
            pa_memimport_segment *segment;
        } imported;
    } per_type;
};

struct pa_memimport_segment {
    pa_memimport *import;
    pa_shm memory;
    unsigned n_blocks;
};

struct pa_memimport {
    pa_mutex *mutex;
    pa_mempool *pool;

    pa_hashmap *segments;
    pa_hashmap *blocks;

    /* Called whenever an imported memory block is no longer
     * needed */
    pa_memimport_release_cb_t release_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memimport);
};

struct memexport_slot {
    PA_LLIST_FIELDS(struct memexport_slot);
    pa_memblock *block;
};

struct pa_memexport {
    pa_mutex *mutex;
    pa_mempool *pool;

    struct memexport_slot slots[PA_MEMEXPORT_SLOTS_MAX];

    PA_LLIST_HEAD(struct memexport_slot, free_slots);
    PA_LLIST_HEAD(struct memexport_slot, used_slots);
    unsigned n_init;

    /* Called whenever a client from which we imported a memory block
       which we in turn exported to another client dies and we need to
       revoke the memory block accordingly */
    pa_memexport_revoke_cb_t revoke_cb;
    void *userdata;

    PA_LLIST_FIELDS(pa_memexport);
};

struct mempool_slot {
    PA_LLIST_FIELDS(struct mempool_slot);
    /* the actual data follows immediately hereafter */
};

struct pa_mempool {
    pa_mutex *mutex;
    pa_semaphore *semaphore;

    pa_shm memory;
    size_t block_size;
    unsigned n_blocks;

    pa_atomic_t n_init;

    PA_LLIST_HEAD(pa_memimport, imports);
    PA_LLIST_HEAD(pa_memexport, exports);

    /* A list of free slots that may be reused */
    pa_flist *free_slots;

    pa_mempool_stat stat;
};
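
/* Locking discipline, as far as this file shows it: the pool mutex
 * guards only the imports/exports lists; all statistics counters are
 * atomics; free_slots is a lock-free pa_flist; and each import and
 * export object carries its own mutex. The functions below are
 * annotated accordingly ("No lock necessary", "Self-locked", ...). */
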
static void segment_detach(pa_memimport_segment *seg);

/* No lock necessary */
static void stat_add(pa_memblock *b) {
    assert(b);
    assert(b->pool);

    pa_atomic_inc(&b->pool->stat.n_allocated);
    pa_atomic_add(&b->pool->stat.allocated_size, b->length);

    pa_atomic_inc(&b->pool->stat.n_accumulated);
    pa_atomic_add(&b->pool->stat.accumulated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        pa_atomic_inc(&b->pool->stat.n_imported);
        pa_atomic_add(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}

/* No lock necessary */
static void stat_remove(pa_memblock *b) {
    assert(b);
    assert(b->pool);

    assert(pa_atomic_load(&b->pool->stat.n_allocated) > 0);
    assert(pa_atomic_load(&b->pool->stat.allocated_size) >= (int) b->length);

    pa_atomic_dec(&b->pool->stat.n_allocated);
    pa_atomic_sub(&b->pool->stat.allocated_size, b->length);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
        assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);

        pa_atomic_dec(&b->pool->stat.n_imported);
        pa_atomic_sub(&b->pool->stat.imported_size, b->length);
    }

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);
}

static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length);

/* No lock necessary */
pa_memblock *pa_memblock_new(pa_mempool *p, size_t length) {
    pa_memblock *b;

    assert(p);
    assert(length > 0);

    if (!(b = pa_memblock_new_pool(p, length)))
        b = memblock_new_appended(p, length);

    return b;
}

/* No lock necessary */
static pa_memblock *memblock_new_appended(pa_mempool *p, size_t length) {
    pa_memblock *b;

    assert(p);
    assert(length > 0);

    /* Header and data are allocated in one contiguous chunk; the data
     * starts at the first aligned address after the header */
    b = pa_xmalloc(PA_ALIGN(sizeof(pa_memblock)) + length);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_APPENDED;
    b->read_only = 0;
    pa_atomic_ptr_store(&b->data, (uint8_t*) b + PA_ALIGN(sizeof(pa_memblock)));
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
static struct mempool_slot* mempool_allocate_slot(pa_mempool *p) {
    struct mempool_slot *slot;
    assert(p);

    if (!(slot = pa_flist_pop(p->free_slots))) {
        int idx;

        /* The free list was empty, we have to allocate a new entry */

        if ((unsigned) (idx = pa_atomic_inc(&p->n_init)) >= p->n_blocks)
            pa_atomic_dec(&p->n_init);
        else
            slot = (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (p->block_size * idx));

        if (!slot) {
            pa_log_debug("Pool full");
            pa_atomic_inc(&p->stat.n_pool_full);
            return NULL;
        }
    }

    return slot;
}

/* No lock necessary */
static void* mempool_slot_data(struct mempool_slot *slot) {
    assert(slot);

    return (uint8_t*) slot + sizeof(struct mempool_slot);
}

/* No lock necessary */
static unsigned mempool_slot_idx(pa_mempool *p, void *ptr) {
    assert(p);
    assert((uint8_t*) ptr >= (uint8_t*) p->memory.ptr);
    assert((uint8_t*) ptr < (uint8_t*) p->memory.ptr + p->memory.size);

    return ((uint8_t*) ptr - (uint8_t*) p->memory.ptr) / p->block_size;
}
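
/* Example of the slot arithmetic, assuming the default 16 KiB block
 * size: a pointer at p->memory.ptr + 40000 lies in slot
 * 40000/16384 = 2, whose header starts at p->memory.ptr + 2*16384 and
 * whose payload begins sizeof(struct mempool_slot) bytes after that
 * (see mempool_slot_data() above). */
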
/* No lock necessary */
static struct mempool_slot* mempool_slot_by_ptr(pa_mempool *p, void *ptr) {
    unsigned idx;

    if ((idx = mempool_slot_idx(p, ptr)) == (unsigned) -1)
        return NULL;

    return (struct mempool_slot*) ((uint8_t*) p->memory.ptr + (idx * p->block_size));
}

/* No lock necessary */
pa_memblock *pa_memblock_new_pool(pa_mempool *p, size_t length) {
    pa_memblock *b = NULL;
    struct mempool_slot *slot;

    assert(p);
    assert(length > 0);

    if (p->block_size - sizeof(struct mempool_slot) >= sizeof(pa_memblock) + length) {

        /* Both the block header and the data fit into one slot */

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = mempool_slot_data(slot);
        b->type = PA_MEMBLOCK_POOL;
        pa_atomic_ptr_store(&b->data, (uint8_t*) b + sizeof(pa_memblock));

    } else if (p->block_size - sizeof(struct mempool_slot) >= length) {

        /* Only the data fits into a slot; allocate the header separately */

        if (!(slot = mempool_allocate_slot(p)))
            return NULL;

        b = pa_xnew(pa_memblock, 1);
        b->type = PA_MEMBLOCK_POOL_EXTERNAL;
        pa_atomic_ptr_store(&b->data, mempool_slot_data(slot));

    } else {
        pa_log_debug("Memory block too large for pool: %lu > %lu",
                     (unsigned long) length,
                     (unsigned long) (p->block_size - sizeof(struct mempool_slot)));
        pa_atomic_inc(&p->stat.n_too_large_for_pool);
        return NULL;
    }

    PA_REFCNT_INIT(b);
    b->pool = p;
    b->read_only = 0;
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_fixed(pa_mempool *p, void *d, size_t length, int read_only) {
    pa_memblock *b;

    assert(p);
    assert(d);
    assert(length > 0);

    b = pa_xnew(pa_memblock, 1);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_FIXED;
    b->read_only = read_only;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    stat_add(b);
    return b;
}

/* No lock necessary */
pa_memblock *pa_memblock_new_user(pa_mempool *p, void *d, size_t length, void (*free_cb)(void *p), int read_only) {
    pa_memblock *b;

    assert(p);
    assert(d);
    assert(length > 0);
    assert(free_cb);

    b = pa_xnew(pa_memblock, 1);
    PA_REFCNT_INIT(b);
    b->pool = p;
    b->type = PA_MEMBLOCK_USER;
    b->read_only = read_only;
    pa_atomic_ptr_store(&b->data, d);
    b->length = length;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);

    b->per_type.user.free_cb = free_cb;

    stat_add(b);
    return b;
}

/* No lock necessary */
int pa_memblock_is_read_only(pa_memblock *b) {
    assert(b);
    assert(PA_REFCNT_VALUE(b) > 0);

    /* A block must be treated as read-only if it was created
     * read-only or if it is shared with other references */
    return b->read_only || PA_REFCNT_VALUE(b) > 1;
}

/* No lock necessary */
void* pa_memblock_acquire(pa_memblock *b) {
    assert(b);
    assert(PA_REFCNT_VALUE(b) > 0);

    pa_atomic_inc(&b->n_acquired);

    return pa_atomic_ptr_load(&b->data);
}

/* No lock necessary; in corner cases it locks on its own */
void pa_memblock_release(pa_memblock *b) {
    int r;

    assert(b);
    assert(PA_REFCNT_VALUE(b) > 0);

    r = pa_atomic_dec(&b->n_acquired);
    assert(r >= 1);

    /* Signal a waiting thread that this memblock is no longer used */
    if (r == 1 && pa_atomic_load(&b->please_signal))
        pa_semaphore_post(b->pool->semaphore);
}
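
/* Typical acquire/release pairing (illustrative sketch only, not part
 * of the original file):
 *
 *   void *d = pa_memblock_acquire(b);   // pins b->data, bumps n_acquired
 *   memcpy(out, d, pa_memblock_get_length(b));
 *   pa_memblock_release(b);             // unpins; posts the pool semaphore
 *                                       // if someone sits in memblock_wait()
 */
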
/* No lock necessary */
size_t pa_memblock_get_length(pa_memblock *b) {
    assert(b);
    assert(PA_REFCNT_VALUE(b) > 0);

    return b->length;
}

/* No lock necessary */
pa_mempool* pa_memblock_get_pool(pa_memblock *b) {
    assert(b);
    assert(PA_REFCNT_VALUE(b) > 0);

    return b->pool;
}

/* No lock necessary */
pa_memblock* pa_memblock_ref(pa_memblock *b) {
    assert(b);
    assert(PA_REFCNT_VALUE(b) > 0);

    PA_REFCNT_INC(b);
    return b;
}

static void memblock_free(pa_memblock *b) {
    assert(b);
    assert(pa_atomic_load(&b->n_acquired) == 0);

    stat_remove(b);

    switch (b->type) {
        case PA_MEMBLOCK_USER:
            assert(b->per_type.user.free_cb);
            b->per_type.user.free_cb(pa_atomic_ptr_load(&b->data));

            /* Fall through */

        case PA_MEMBLOCK_FIXED:
        case PA_MEMBLOCK_APPENDED:
            pa_xfree(b);
            break;

        case PA_MEMBLOCK_IMPORTED: {
            pa_memimport_segment *segment;
            pa_memimport *import;

            /* FIXME! This should be implemented lock-free */

            segment = b->per_type.imported.segment;
            assert(segment);
            import = segment->import;
            assert(import);

            pa_mutex_lock(import->mutex);
            pa_hashmap_remove(import->blocks, PA_UINT32_TO_PTR(b->per_type.imported.id));
            if (-- segment->n_blocks <= 0)
                segment_detach(segment);

            pa_mutex_unlock(import->mutex);

            import->release_cb(import, b->per_type.imported.id, import->userdata);

            pa_xfree(b);
            break;
        }

        case PA_MEMBLOCK_POOL_EXTERNAL:
        case PA_MEMBLOCK_POOL: {
            struct mempool_slot *slot;
            int call_free;

            slot = mempool_slot_by_ptr(b->pool, pa_atomic_ptr_load(&b->data));
            assert(slot);

            /* Only POOL_EXTERNAL headers live outside the slot and
             * need to be freed separately */
            call_free = b->type == PA_MEMBLOCK_POOL_EXTERNAL;

            /* The free list dimensions should easily allow all slots
             * to fit in, hence try harder if pushing this slot into
             * the free list fails */
            while (pa_flist_push(b->pool->free_slots, slot) < 0)
                ;

            if (call_free)
                pa_xfree(b);

            break;
        }

        case PA_MEMBLOCK_TYPE_MAX:
        default:
            abort();
    }
}

/* No lock necessary */
void pa_memblock_unref(pa_memblock *b) {
    assert(b);
    assert(PA_REFCNT_VALUE(b) > 0);

    if (PA_REFCNT_DEC(b) > 0)
        return;

    memblock_free(b);
}

static void memblock_wait(pa_memblock *b) {
    assert(b);

    if (pa_atomic_load(&b->n_acquired) > 0) {
        /* We need to wait until all threads gave up access to the
         * memory block before we can go on. Unfortunately this means
         * that we have to lock and wait here. Sniff! */

        pa_atomic_inc(&b->please_signal);

        while (pa_atomic_load(&b->n_acquired) > 0)
            pa_semaphore_wait(b->pool->semaphore);

        pa_atomic_dec(&b->please_signal);
    }
}
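
/* The handshake between memblock_wait() and pa_memblock_release():
 * the waiter raises please_signal and sleeps on the pool-wide
 * semaphore; the last releasing thread sees n_acquired drop to zero
 * while please_signal is set and posts the semaphore. Because one
 * semaphore serves the whole pool, a waiter may be woken for an
 * unrelated block, hence the while loop above. */
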
/* No lock necessary. This function is not multiple caller safe! */
static void memblock_make_local(pa_memblock *b) {
    assert(b);

    pa_atomic_dec(&b->pool->stat.n_allocated_by_type[b->type]);

    if (b->length <= b->pool->block_size - sizeof(struct mempool_slot)) {
        struct mempool_slot *slot;

        if ((slot = mempool_allocate_slot(b->pool))) {
            void *new_data;

            /* We can move it into a local pool, perfect! */

            new_data = mempool_slot_data(slot);
            memcpy(new_data, pa_atomic_ptr_load(&b->data), b->length);
            pa_atomic_ptr_store(&b->data, new_data);

            b->type = PA_MEMBLOCK_POOL_EXTERNAL;
            b->read_only = 0;

            goto finish;
        }
    }

    /* Hmm, not enough space in the pool, so let's allocate the memory with malloc() */
    b->per_type.user.free_cb = pa_xfree;
    pa_atomic_ptr_store(&b->data, pa_xmemdup(pa_atomic_ptr_load(&b->data), b->length));

    b->type = PA_MEMBLOCK_USER;
    b->read_only = 0;

finish:
    pa_atomic_inc(&b->pool->stat.n_allocated_by_type[b->type]);
    pa_atomic_inc(&b->pool->stat.n_accumulated_by_type[b->type]);
}

/* No lock necessary. This function is not multiple caller safe! */
void pa_memblock_unref_fixed(pa_memblock *b) {
    assert(b);
    assert(PA_REFCNT_VALUE(b) > 0);
    assert(b->type == PA_MEMBLOCK_FIXED);

    if (PA_REFCNT_DEC(b) > 0)
        memblock_make_local(b);
    else
        memblock_free(b);
}
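
/* Rationale: a FIXED block wraps caller-owned memory. When the caller
 * drops its reference while other references remain, the data is
 * copied out (into a pool slot, or onto the heap as a fallback) so
 * that the caller may safely reuse or free its buffer afterwards. */
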
/* Self-locked. This function is not multiple-caller safe */
static void memblock_replace_import(pa_memblock *b) {
    pa_memimport_segment *seg;

    assert(b);
    assert(b->type == PA_MEMBLOCK_IMPORTED);

    assert(pa_atomic_load(&b->pool->stat.n_imported) > 0);
    assert(pa_atomic_load(&b->pool->stat.imported_size) >= (int) b->length);
    pa_atomic_dec(&b->pool->stat.n_imported);
    pa_atomic_sub(&b->pool->stat.imported_size, b->length);

    seg = b->per_type.imported.segment;
    assert(seg);
    assert(seg->import);

    pa_mutex_lock(seg->import->mutex);

    pa_hashmap_remove(seg->import->blocks,
                      PA_UINT32_TO_PTR(b->per_type.imported.id));

    /* Replace the imported data with a local copy so the block
     * survives the disappearance of its source */
    memblock_make_local(b);

    if (-- seg->n_blocks <= 0)
        segment_detach(seg);

    pa_mutex_unlock(seg->import->mutex);
}

pa_mempool* pa_mempool_new(int shared) {
    pa_mempool *p;
    size_t ps;

    p = pa_xnew(pa_mempool, 1);

    p->mutex = pa_mutex_new(1);
    p->semaphore = pa_semaphore_new(0);

#ifdef HAVE_SYSCONF
    ps = (size_t) sysconf(_SC_PAGESIZE);
#elif defined(PAGE_SIZE)
    ps = (size_t) PAGE_SIZE;
#else
    ps = 4096; /* Let's hope it's like x86. */
#endif

    /* Round the slot size down to a multiple of the page size */
    p->block_size = (PA_MEMPOOL_SLOT_SIZE/ps)*ps;

    if (p->block_size < ps)
        p->block_size = ps;

    p->n_blocks = PA_MEMPOOL_SLOTS_MAX;

    assert(p->block_size > sizeof(struct mempool_slot));

    if (pa_shm_create_rw(&p->memory, p->n_blocks * p->block_size, shared, 0700) < 0) {
        pa_xfree(p);
        return NULL;
    }

    memset(&p->stat, 0, sizeof(p->stat));
    pa_atomic_store(&p->n_init, 0);

    PA_LLIST_HEAD_INIT(pa_memimport, p->imports);
    PA_LLIST_HEAD_INIT(pa_memexport, p->exports);

    p->free_slots = pa_flist_new(p->n_blocks*2);

    return p;
}
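
/* Minimal usage sketch (illustrative only, not part of the original
 * file):
 *
 *   pa_mempool *pool = pa_mempool_new(1);           // 1 = SHM-shared
 *   pa_memblock *b = pa_memblock_new(pool, 4096);   // slot-backed
 *   ...
 *   pa_memblock_unref(b);
 *   pa_mempool_free(pool);
 */
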
void pa_mempool_free(pa_mempool *p) {
    assert(p);

    pa_mutex_lock(p->mutex);

    while (p->imports)
        pa_memimport_free(p->imports);

    while (p->exports)
        pa_memexport_free(p->exports);

    pa_mutex_unlock(p->mutex);

    if (pa_atomic_load(&p->stat.n_allocated) > 0)
        pa_log_warn("WARNING! Memory pool destroyed but not all memory blocks freed!");

    pa_flist_free(p->free_slots, NULL);
    pa_shm_free(&p->memory);

    pa_mutex_free(p->mutex);
    pa_semaphore_free(p->semaphore);

    pa_xfree(p);
}

/* No lock necessary */
const pa_mempool_stat* pa_mempool_get_stat(pa_mempool *p) {
    assert(p);

    return &p->stat;
}

/* No lock necessary */
void pa_mempool_vacuum(pa_mempool *p) {
    struct mempool_slot *slot;
    pa_flist *list;

    assert(p);

    list = pa_flist_new(p->n_blocks*2);

    /* A pa_flist cannot be iterated in place, so pop every free slot,
     * punch its payload pages, and push it back afterwards */
    while ((slot = pa_flist_pop(p->free_slots)))
        while (pa_flist_push(list, slot) < 0)
            ;

    while ((slot = pa_flist_pop(list))) {
        pa_shm_punch(&p->memory,
                     (uint8_t*) slot - (uint8_t*) p->memory.ptr + sizeof(struct mempool_slot),
                     p->block_size - sizeof(struct mempool_slot));

        while (pa_flist_push(p->free_slots, slot) < 0)
            ;
    }

    pa_flist_free(list, NULL);
}
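
/* pa_shm_punch() is expected to return the backing pages of the given
 * range to the kernel while keeping the mapping itself valid, so a
 * vacuumed slot costs no physical memory until it is written to
 * again. Note that only the payload area is punched; the slot header
 * at the start of each slot is preserved. */
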
/* No lock necessary */
int pa_mempool_get_shm_id(pa_mempool *p, uint32_t *id) {
    assert(p);
    assert(id);

    if (!p->memory.shared)
        return -1;

    *id = p->memory.id;

    return 0;
}

/* No lock necessary */
int pa_mempool_is_shared(pa_mempool *p) {
    assert(p);

    return !!p->memory.shared;
}

/* For receiving blocks from other nodes */
pa_memimport* pa_memimport_new(pa_mempool *p, pa_memimport_release_cb_t cb, void *userdata) {
    pa_memimport *i;

    assert(p);
    assert(cb);

    i = pa_xnew(pa_memimport, 1);
    i->mutex = pa_mutex_new(0);
    i->pool = p;
    i->segments = pa_hashmap_new(NULL, NULL);
    i->blocks = pa_hashmap_new(NULL, NULL);
    i->release_cb = cb;
    i->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memimport, p->imports, i);
    pa_mutex_unlock(p->mutex);

    return i;
}

static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i);

/* Should be called locked */
static pa_memimport_segment* segment_attach(pa_memimport *i, uint32_t shm_id) {
    pa_memimport_segment* seg;

    if (pa_hashmap_size(i->segments) >= PA_MEMIMPORT_SEGMENTS_MAX)
        return NULL;

    seg = pa_xnew(pa_memimport_segment, 1);

    if (pa_shm_attach_ro(&seg->memory, shm_id) < 0) {
        pa_xfree(seg);
        return NULL;
    }

    seg->import = i;
    seg->n_blocks = 0;

    pa_hashmap_put(i->segments, PA_UINT32_TO_PTR(shm_id), seg);
    return seg;
}

/* Should be called locked */
static void segment_detach(pa_memimport_segment *seg) {
    assert(seg);

    pa_hashmap_remove(seg->import->segments, PA_UINT32_TO_PTR(seg->memory.id));
    pa_shm_free(&seg->memory);
    pa_xfree(seg);
}

/* Self-locked. Not multiple-caller safe */
void pa_memimport_free(pa_memimport *i) {
    pa_memexport *e;
    pa_memblock *b;

    assert(i);

    pa_mutex_lock(i->mutex);

    while ((b = pa_hashmap_get_first(i->blocks)))
        memblock_replace_import(b);

    assert(pa_hashmap_size(i->segments) == 0);

    pa_mutex_unlock(i->mutex);

    pa_mutex_lock(i->pool->mutex);

    /* If we've exported this block further we need to revoke that export */
    for (e = i->pool->exports; e; e = e->next)
        memexport_revoke_blocks(e, i);

    PA_LLIST_REMOVE(pa_memimport, i->pool->imports, i);

    pa_mutex_unlock(i->pool->mutex);

    pa_hashmap_free(i->blocks, NULL, NULL);
    pa_hashmap_free(i->segments, NULL, NULL);

    pa_mutex_free(i->mutex);

    pa_xfree(i);
}

/* Self-locked */
pa_memblock* pa_memimport_get(pa_memimport *i, uint32_t block_id, uint32_t shm_id, size_t offset, size_t size) {
    pa_memblock *b = NULL;
    pa_memimport_segment *seg;

    assert(i);

    pa_mutex_lock(i->mutex);

    if (pa_hashmap_size(i->blocks) >= PA_MEMIMPORT_SLOTS_MAX)
        goto finish;

    if (!(seg = pa_hashmap_get(i->segments, PA_UINT32_TO_PTR(shm_id))))
        if (!(seg = segment_attach(i, shm_id)))
            goto finish;

    if (offset+size > seg->memory.size)
        goto finish;

    b = pa_xnew(pa_memblock, 1);
    PA_REFCNT_INIT(b);
    b->pool = i->pool;
    b->type = PA_MEMBLOCK_IMPORTED;
    b->read_only = 1;
    pa_atomic_ptr_store(&b->data, (uint8_t*) seg->memory.ptr + offset);
    b->length = size;
    pa_atomic_store(&b->n_acquired, 0);
    pa_atomic_store(&b->please_signal, 0);
    b->per_type.imported.id = block_id;
    b->per_type.imported.segment = seg;

    pa_hashmap_put(i->blocks, PA_UINT32_TO_PTR(block_id), b);

    seg->n_blocks++;

finish:
    pa_mutex_unlock(i->mutex);

    if (b)
        stat_add(b);

    return b;
}
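
/* Segments are attached lazily: the first block arriving from an
 * unknown SHM segment triggers segment_attach(), and a segment is
 * detached again once its last block is freed (see memblock_free()
 * and memblock_replace_import()). At most PA_MEMIMPORT_SEGMENTS_MAX
 * segments and PA_MEMIMPORT_SLOTS_MAX blocks are kept per import. */
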
/* Self-locked */
int pa_memimport_process_revoke(pa_memimport *i, uint32_t id) {
    pa_memblock *b;
    int ret = 0;

    assert(i);

    pa_mutex_lock(i->mutex);

    if (!(b = pa_hashmap_get(i->blocks, PA_UINT32_TO_PTR(id)))) {
        ret = -1;
        goto finish;
    }

    memblock_replace_import(b);

finish:
    pa_mutex_unlock(i->mutex);

    return ret;
}

/* For sending blocks to other nodes */
pa_memexport* pa_memexport_new(pa_mempool *p, pa_memexport_revoke_cb_t cb, void *userdata) {
    pa_memexport *e;

    assert(p);
    assert(cb);

    if (!p->memory.shared)
        return NULL;

    e = pa_xnew(pa_memexport, 1);
    e->mutex = pa_mutex_new(1);
    e->pool = p;
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->free_slots);
    PA_LLIST_HEAD_INIT(struct memexport_slot, e->used_slots);
    e->n_init = 0;
    e->revoke_cb = cb;
    e->userdata = userdata;

    pa_mutex_lock(p->mutex);
    PA_LLIST_PREPEND(pa_memexport, p->exports, e);
    pa_mutex_unlock(p->mutex);

    return e;
}

void pa_memexport_free(pa_memexport *e) {
    assert(e);

    pa_mutex_lock(e->mutex);
    while (e->used_slots)
        pa_memexport_process_release(e, e->used_slots - e->slots);
    pa_mutex_unlock(e->mutex);

    pa_mutex_lock(e->pool->mutex);
    PA_LLIST_REMOVE(pa_memexport, e->pool->exports, e);
    pa_mutex_unlock(e->pool->mutex);

    pa_mutex_free(e->mutex);
    pa_xfree(e);
}

/* Self-locked */
int pa_memexport_process_release(pa_memexport *e, uint32_t id) {
    pa_memblock *b;

    assert(e);

    pa_mutex_lock(e->mutex);

    if (id >= e->n_init)
        goto fail;

    if (!e->slots[id].block)
        goto fail;

    b = e->slots[id].block;
    e->slots[id].block = NULL;

    PA_LLIST_REMOVE(struct memexport_slot, e->used_slots, &e->slots[id]);
    PA_LLIST_PREPEND(struct memexport_slot, e->free_slots, &e->slots[id]);

    pa_mutex_unlock(e->mutex);

    /* pa_log("Processing release for %u", id); */

    assert(pa_atomic_load(&e->pool->stat.n_exported) > 0);
    assert(pa_atomic_load(&e->pool->stat.exported_size) >= (int) b->length);

    pa_atomic_dec(&e->pool->stat.n_exported);
    pa_atomic_sub(&e->pool->stat.exported_size, b->length);

    pa_memblock_unref(b);

    return 0;

fail:
    pa_mutex_unlock(e->mutex);

    return -1;
}

/* Self-locked */
static void memexport_revoke_blocks(pa_memexport *e, pa_memimport *i) {
    struct memexport_slot *slot, *next;

    assert(e);
    assert(i);

    pa_mutex_lock(e->mutex);

    for (slot = e->used_slots; slot; slot = next) {
        uint32_t idx;
        next = slot->next;

        if (slot->block->type != PA_MEMBLOCK_IMPORTED ||
            slot->block->per_type.imported.segment->import != i)
            continue;

        idx = slot - e->slots;
        e->revoke_cb(e, idx, e->userdata);
        pa_memexport_process_release(e, idx);
    }

    pa_mutex_unlock(e->mutex);
}

/* No lock necessary */
static pa_memblock *memblock_shared_copy(pa_mempool *p, pa_memblock *b) {
    pa_memblock *n;

    assert(p);
    assert(b);

    /* Blocks that already live in shareable memory need no copy */
    if (b->type == PA_MEMBLOCK_IMPORTED ||
        b->type == PA_MEMBLOCK_POOL ||
        b->type == PA_MEMBLOCK_POOL_EXTERNAL) {
        assert(b->pool == p);
        return pa_memblock_ref(b);
    }

    if (!(n = pa_memblock_new_pool(p, b->length)))
        return NULL;

    memcpy(pa_atomic_ptr_load(&n->data), pa_atomic_ptr_load(&b->data), b->length);
    return n;
}

/* Self-locked */
int pa_memexport_put(pa_memexport *e, pa_memblock *b, uint32_t *block_id, uint32_t *shm_id, size_t *offset, size_t *size) {
    pa_shm *memory;
    struct memexport_slot *slot;
    void *data;
    size_t length;

    assert(e);
    assert(b);
    assert(block_id);
    assert(shm_id);
    assert(offset);
    assert(size);
    assert(b->pool == e->pool);

    if (!(b = memblock_shared_copy(e->pool, b)))
        return -1;

    pa_mutex_lock(e->mutex);

    if (e->free_slots) {
        slot = e->free_slots;
        PA_LLIST_REMOVE(struct memexport_slot, e->free_slots, slot);
    } else if (e->n_init < PA_MEMEXPORT_SLOTS_MAX)
        slot = &e->slots[e->n_init++];
    else {
        pa_mutex_unlock(e->mutex);
        pa_memblock_unref(b);
        return -1;
    }

    PA_LLIST_PREPEND(struct memexport_slot, e->used_slots, slot);
    slot->block = b;
    *block_id = slot - e->slots;

    pa_mutex_unlock(e->mutex);
    /* pa_log("Got block id %u", *block_id); */

    data = pa_memblock_acquire(b);
    length = pa_memblock_get_length(b);

    if (b->type == PA_MEMBLOCK_IMPORTED) {
        assert(b->per_type.imported.segment);
        memory = &b->per_type.imported.segment->memory;
    } else {
        assert(b->type == PA_MEMBLOCK_POOL || b->type == PA_MEMBLOCK_POOL_EXTERNAL);
        memory = &b->pool->memory;
    }

    assert(data >= memory->ptr);
    assert((uint8_t*) data + length <= (uint8_t*) memory->ptr + memory->size);

    *shm_id = memory->id;
    *offset = (uint8_t*) data - (uint8_t*) memory->ptr;
    *size = length;

    pa_memblock_release(b);

    pa_atomic_inc(&e->pool->stat.n_exported);
    pa_atomic_add(&e->pool->stat.exported_size, length);

    return 0;
}
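
/* End-to-end sketch of the zero-copy transport implemented here
 * (illustrative; the actual wire format lives elsewhere):
 *
 *   sender:   pa_memexport_put(e, b, &block_id, &shm_id, &offset, &size);
 *             ...transmit the (block_id, shm_id, offset, size) tuple...
 *   receiver: b = pa_memimport_get(i, block_id, shm_id, offset, size);
 *             ...use b...; freeing it fires the import's release_cb,
 *             which typically tells the sender to call
 *             pa_memexport_process_release(e, block_id).
 */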