1 /* Cairo - a vector graphics library with display and print output
3 * Copyright © 2007 Chris Wilson
4 * Copyright © 2009 Intel Corporation
6 * This library is free software; you can redistribute it and/or
7 * modify it either under the terms of the GNU Lesser General Public
8 * License version 2.1 as published by the Free Software Foundation
9 * (the "LGPL") or, at your option, under the terms of the Mozilla
10 * Public License Version 1.1 (the "MPL"). If you do not alter this
11 * notice, a recipient may use your version of this file under either
12 * the MPL or the LGPL.
14 * You should have received a copy of the LGPL along with this library
15 * in the file COPYING-LGPL-2.1; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
17 * You should have received a copy of the MPL along with this library
18 * in the file COPYING-MPL-1.1
20 * The contents of this file are subject to the Mozilla Public License
21 * Version 1.1 (the "License"); you may not use this file except in
22 * compliance with the License. You may obtain a copy of the License at
23 * http://www.mozilla.org/MPL/
25 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
26 * OF ANY KIND, either express or implied. See the LGPL or the MPL for
27 * the specific language governing rights and limitations.
29 * The Original Code is the cairo graphics library.
31 * The Initial Developer of the Original Code is Red Hat, Inc.
34 * Chris Wilson <chris@chris-wilson.co.uk>
39 #if CAIRO_HAS_XCB_SHM_FUNCTIONS
41 #include "cairo-xcb-private.h"
42 #include "cairo-list-inline.h"
49 #define CAIRO_MAX_SHM_MEMORY (16*1024*1024)
51 /* a simple buddy allocator for memory pools
52 * XXX fragmentation? use Doug Lea's malloc?
55 typedef struct _cairo_xcb_shm_mem_block cairo_xcb_shm_mem_block_t;

/* Per-unit bookkeeping record for the buddy allocator: one entry per
 * minimum-size (1 << min_bits bytes) unit of the pool.
 * NOTE(review): the field list is elided in this view; usage elsewhere in
 * this file shows at least a 'bits' size exponent and a 'link' free-list
 * node — confirm against the full source. */
62 struct _cairo_xcb_shm_mem_block {
/* A single SHM segment managed as a binary-buddy memory pool. */
67 struct _cairo_xcb_shm_mem_pool {
/* One descriptor per minimum-size unit; a block starting at unit i is
 * described by blocks[i]. */
73 cairo_xcb_shm_mem_block_t *blocks;
/* free[b] is the list of free blocks of size 1 << (b + min_bits) bytes. */
74 cairo_list_t free[32];
77 unsigned int min_bits; /* Minimum block size is 1 << min_bits */
/* Number of distinct block sizes in use; must be < ARRAY_LENGTH (free). */
78 unsigned int num_sizes;
/* Largest b for which free[b] may be non-empty (cached upper bound). */
82 unsigned int max_free_bits;
/* Bitmap helpers over p->map: one bit per minimum-size unit, MSB-first
 * within each byte.  The allocator sets the bit of the *last* unit of an
 * allocated block to mark where the allocation ends (see BITSET(pi,
 * past - 1) in buddy_malloc and the BITTEST in get_buddy). */
87 #define BITTEST(p, n) ((p)->map[(n) >> 3] & (128 >> ((n) & 7)))
88 #define BITSET(p, n) ((p)->map[(n) >> 3] |= (128 >> ((n) & 7)))
89 #define BITCLEAR(p, n) ((p)->map[(n) >> 3] &= ~(128 >> ((n) & 7)))
/* Clear bitmap bits in the half-open unit range [first, last): the ragged
 * partial bytes at each end are cleared bit-by-bit, the run of whole bytes
 * in between with a single memset. */
92 clear_bits (cairo_xcb_shm_mem_pool_t *pi, size_t first, size_t last)
/* Round 'first' up and 'last' down to bit indices on byte boundaries. */
95 size_t first_full = (first + 7) & ~7;
96 size_t past_full = last & ~7;
/* Leading partial byte.  NOTE(review): the bound 'n' is computed on an
 * elided line — presumably min(first_full, last); confirm. */
101 for (i = first; i < n; i++)
104 if (past_full > first_full) {
105 bytes = past_full - first_full;
/* Whole bytes: first_full >> 3 converts the bit index to a byte index. */
107 memset (pi->map + (first_full >> 3), 0, bytes);
/* Trailing partial byte. */
112 for (i = past_full; i < last; i++)
/* Put the block of 1 << bits units starting at unit 'start' onto its free
 * list and credit its size to free_bytes.  NOTE(review): the clear_bits
 * call below is presumably guarded by an elided 'clear' parameter (see the
 * clear argument threaded through free_blocks) — confirm. */
117 free_bits (cairo_xcb_shm_mem_pool_t *pi,
122 cairo_xcb_shm_mem_block_t *block;
125 clear_bits (pi, start, start + (1 << bits));
127 block = pi->blocks + start;
130 cairo_list_add (&block->link, &pi->free[bits]);
/* A block of 1 << bits units is 1 << (bits + min_bits) bytes. */
132 pi->free_bytes += 1 << (bits + pi->min_bits);
/* Keep the cached largest-free-size bound up to date. */
133 if (bits > pi->max_free_bits)
134 pi->max_free_bits = bits;
137 /* Add a chunk to the free list */
/* Splits the unit range [first, last) into maximal naturally-aligned
 * power-of-two blocks and hands each to free_bits(). */
139 free_blocks (cairo_xcb_shm_mem_pool_t *pi,
150 /* To avoid cost quadratic in the number of different
151 * blocks produced from this chunk of store, we have to
152 * use the size of the previous block produced from this
153 * chunk as the starting point to work out the size of the
154 * next block we can produce. If you look at the binary
155 * representation of the starting points of the blocks
156 * produced, you can see that you first of all increase the
157 * size of the blocks produced up to some maximum as the
158 * address dealt with gets offsets added on which zap out
159 * low order bits, then decrease as the low order bits of the
160 * final block produced get added in. E.g. as you go from
161 * 001 to 0111 you generate blocks
162 * of size 001 at 001 taking you to 010
163 * of size 010 at 010 taking you to 100
164 * of size 010 at 100 taking you to 110
165 * of size 001 at 110 taking you to 111
166 * So the maximum total cost of the loops below this comment
167 * is one trip from the lowest blocksize to the highest and
/* Grow the candidate block while it stays within the chunk and on a
 * natural boundary. */
170 while (bits < pi->num_sizes - 1) {
171 size_t next_bits = bits + 1;
172 size_t next_len = len << 1;
/* NOTE(review): this mixes units — 'next_bits' is a size exponent while
 * 'i'/'last' are unit indices; upstream cairo compares 'i + next_len'
 * here.  Verify against the full source before relying on this path. */
174 if (i + next_bits > last) {
175 /* off end of chunk to be freed */
179 if (i & (next_len - 1)) /* block would not be on boundary */
/* Shrink the candidate block until it fits and is aligned. */
187 if (i + len > last) /* off end of chunk to be freed */
190 if (i & (len - 1)) /* block would not be on boundary */
203 free_bits (pi, i, bits, clear);
/* Initialise a buddy pool over 'bytes' of storage at pi->base: allocate the
 * per-unit descriptors and the end-of-allocation bitmap, then seed the free
 * lists with the whole range.  Returns CAIRO_STATUS_NO_MEMORY if either
 * allocation fails, CAIRO_STATUS_SUCCESS otherwise.
 * NOTE(review): the pi->map failure path should free pi->blocks — that
 * cleanup is on an elided line; confirm against the full source. */
208 static cairo_status_t
209 _cairo_xcb_shm_mem_pool_init (cairo_xcb_shm_mem_pool_t *pi,
211 unsigned int min_bits,
212 unsigned int num_sizes)
/* The base must be aligned to the minimum block size for the unit
 * arithmetic below to hold. */
217 assert ((((unsigned long) pi->base) & ((1 << min_bits) - 1)) == 0);
218 assert (num_sizes < ARRAY_LENGTH (pi->free));
221 pi->max_bytes = bytes;
222 pi->max_free_bits = 0;
/* Number of minimum-size units covering the pool. */
224 setBits = bytes >> min_bits;
225 pi->blocks = calloc (setBits, sizeof (cairo_xcb_shm_mem_block_t));
226 if (pi->blocks == NULL)
227 return _cairo_error (CAIRO_STATUS_NO_MEMORY);
229 pi->nBlocks = setBits;
230 pi->min_bits = min_bits;
231 pi->num_sizes = num_sizes;
233 for (i = 0; i < ARRAY_LENGTH (pi->free); i++)
234 cairo_list_init (&pi->free[i]);
/* One bit per unit, rounded up to whole bytes. */
236 pi->map = malloc ((setBits + 7) >> 3);
237 if (pi->map == NULL) {
239 return _cairo_error (CAIRO_STATUS_NO_MEMORY);
/* Start with every bit set, then clear exactly the in-range bits so any
 * padding bits in the final byte stay set. */
242 memset (pi->map, -1, (setBits + 7) >> 3);
243 clear_bits (pi, 0, setBits);
245 /* Now add all blocks to the free list */
246 free_blocks (pi, 0, setBits, 1);
248 return CAIRO_STATUS_SUCCESS;
/* Return the free block of 1 << bits units at unit 'offset', or NULL when
 * it cannot be merged with: either its last unit's map bit is set (it is
 * allocated) or its recorded size differs (it has been split). */
251 static cairo_xcb_shm_mem_block_t *
252 get_buddy (cairo_xcb_shm_mem_pool_t *pi,
256 cairo_xcb_shm_mem_block_t *block;
258 assert (offset + (1 << bits) <= pi->nBlocks);
/* The end-of-allocation marker lives on the block's last unit. */
260 if (BITTEST (pi, offset + (1 << bits) - 1))
261 return NULL; /* buddy is allocated */
263 block = pi->blocks + offset;
264 if (block->bits != bits)
265 return NULL; /* buddy is partially allocated */
/* Coalesce a just-freed block with its buddy repeatedly while the buddy is
 * wholly free, doubling the block size each round, then place the result on
 * the matching free list and refresh max_free_bits.  'block' must not be on
 * any free list on entry. */
271 merge_buddies (cairo_xcb_shm_mem_pool_t *pi,
272 cairo_xcb_shm_mem_block_t *block,
273 unsigned int max_bits)
275 size_t block_offset = block - pi->blocks;
276 unsigned int bits = block->bits;
278 while (bits < max_bits - 1) {
279 /* while you can, merge two blocks and get a legal block size */
/* The buddy of a block of 1 << bits units differs only in bit 'bits'
 * of its unit offset. */
280 size_t buddy_offset = block_offset ^ (1 << bits);
282 block = get_buddy (pi, buddy_offset, bits);
/* Take the buddy off its free list before fusing the pair. */
286 cairo_list_del (&block->link);
288 /* Merged block starts at buddy */
289 if (buddy_offset < block_offset)
290 block_offset = buddy_offset;
295 block = pi->blocks + block_offset;
297 cairo_list_add (&block->link, &pi->free[bits]);
299 if (bits > pi->max_free_bits)
300 pi->max_free_bits = bits;
303 /* attempt to merge all available buddies up to a particular size */
/* Walks every free list below max_bits, merging mergeable buddy pairs via
 * merge_buddies().  Returns the resulting pi->max_free_bits so callers can
 * tell whether a big-enough block now exists (see buddy_malloc). */
305 merge_bits (cairo_xcb_shm_mem_pool_t *pi,
306 unsigned int max_bits)
308 cairo_xcb_shm_mem_block_t *block, *buddy, *next;
311 for (bits = 0; bits < max_bits - 1; bits++) {
312 cairo_list_foreach_entry_safe (block, next,
313 cairo_xcb_shm_mem_block_t,
317 size_t buddy_offset = (block - pi->blocks) ^ (1 << bits);
319 buddy = get_buddy (pi, buddy_offset, bits);
/* If the buddy is the saved 'next' iterator, re-point it before
 * merge_buddies unlinks the buddy from this list. */
324 next = cairo_container_of (buddy->link.next,
325 cairo_xcb_shm_mem_block_t,
329 cairo_list_del (&block->link);
330 merge_buddies (pi, block, max_bits);
334 return pi->max_free_bits;
337 /* find store for 1 << bits blocks */
/* Allocate a block of 1 << (bits + min_bits) bytes: try the cached free
 * lists, fall back to a merge pass, take the smallest sufficient free
 * block, mark its end in the bitmap, and return any surplus tail to the
 * free lists.  Returns a pointer into pi->base; the failure path (no block
 * found) is on elided lines. */
339 buddy_malloc (cairo_xcb_shm_mem_pool_t *pi,
345 cairo_xcb_shm_mem_block_t *block;
/* Only attempt the (linear) merge pass when the cached bound says no
 * big-enough block exists, and give up if merging does not help. */
347 if (bits > pi->max_free_bits && bits > merge_bits (pi, bits))
350 /* Find a list with blocks big enough on it */
352 for (b = bits; b <= pi->max_free_bits; b++) {
353 if (! cairo_list_is_empty (&pi->free[b])) {
354 block = cairo_list_first_entry (&pi->free[b],
355 cairo_xcb_shm_mem_block_t,
360 assert (block != NULL);
362 cairo_list_del (&block->link);
/* Lower the cached bound past any now-empty lists. */
364 while (cairo_list_is_empty (&pi->free[pi->max_free_bits])) {
365 if (--pi->max_free_bits == 0)
369 /* Mark end of allocated area */
370 offset = block - pi->blocks;
371 past = offset + (1 << bits);
372 BITSET (pi, past - 1);
375 /* If we used a larger free block than we needed, free the rest */
/* Debit the whole block taken; free_blocks credits the tail back. */
376 pi->free_bytes -= 1 << (b + pi->min_bits);
377 free_blocks (pi, past, offset + (1 << b), 0);
379 return pi->base + ((block - pi->blocks) << pi->min_bits);
/* Public pool allocator: round 'bytes' up to the smallest power-of-two
 * block size the pool supports and delegate to buddy_malloc().  The
 * too-large failure path (bits >= num_sizes) returns on an elided line —
 * presumably NULL. */
383 _cairo_xcb_shm_mem_pool_malloc (cairo_xcb_shm_mem_pool_t *pi,
/* Find the least 'bits' with (1 << (bits + min_bits)) >= bytes. */
389 size = 1 << pi->min_bits;
390 for (bits = 0; size < bytes; bits++)
392 if (bits >= pi->num_sizes)
395 return buddy_malloc (pi, bits);
/* Return 'storage' (previously obtained from this pool) to the allocator:
 * recover the unit index from the pointer, clear the end-of-allocation
 * bit, credit free_bytes, and coalesce with free buddies. */
399 _cairo_xcb_shm_mem_pool_free (cairo_xcb_shm_mem_pool_t *pi,
403 cairo_xcb_shm_mem_block_t *block;
/* Pointer -> unit index; valid because pi->base is min_bits-aligned. */
405 block_offset = (storage - pi->base) >> pi->min_bits;
406 block = pi->blocks + block_offset;
/* Clear the marker buddy_malloc set on the block's last unit. */
408 BITCLEAR (pi, block_offset + ((1 << block->bits) - 1));
409 pi->free_bytes += 1 << (block->bits + pi->min_bits);
411 merge_buddies (pi, block, pi->num_sizes);
/* Tear down a pool: unlink it from the connection's pool list.
 * NOTE(review): the remaining teardown (presumably shmdt of pool->base and
 * freeing blocks/map/pool) is on elided lines — confirm. */
415 _cairo_xcb_shm_mem_pool_destroy (cairo_xcb_shm_mem_pool_t *pool)
418 cairo_list_del (&pool->link);
/* Actually release an shm_info once the server is known to be done with
 * it: return its memory to the pool, recycle the info struct, and reap
 * fully-idle pools beyond the first.  Caller must hold shm_mutex. */
426 _cairo_xcb_shm_info_finalize (cairo_xcb_shm_info_t *shm_info)
428 cairo_xcb_connection_t *connection = shm_info->connection;
430 assert (CAIRO_MUTEX_IS_LOCKED (connection->shm_mutex));
432 _cairo_xcb_shm_mem_pool_free (shm_info->pool, shm_info->mem);
433 _cairo_freepool_free (&connection->shm_info_freelist, shm_info);
435 /* scan for old, unused pools - hold at least one in reserve */
436 if (! cairo_list_is_singular (&connection->shm_pools))
438 cairo_xcb_shm_mem_pool_t *pool, *next;
/* Park the first pool on a temporary list so it survives the scan. */
441 cairo_list_init (&head);
442 cairo_list_move (connection->shm_pools.next, &head);
444 cairo_list_foreach_entry_safe (pool, next, cairo_xcb_shm_mem_pool_t,
445 &connection->shm_pools, link)
/* free_bytes == max_bytes means no outstanding allocations. */
447 if (pool->free_bytes == pool->max_bytes) {
448 _cairo_xcb_connection_shm_detach (connection, pool->shmseg);
449 _cairo_xcb_shm_mem_pool_destroy (pool);
/* Restore the reserved pool to the head of the list. */
453 cairo_list_move (head.next, &connection->shm_pools);
/* Walk the pending list and finalize every shm_info whose GetInputFocus
 * sync reply has arrived (meaning the server has processed all requests
 * touching that memory).  'wait' selects blocking (xcb_wait_for_reply)
 * versus non-blocking (xcb_poll_for_reply) checking.  Caller must hold
 * shm_mutex. */
458 _cairo_xcb_shm_process_pending (cairo_xcb_connection_t *connection, shm_wait_type_t wait)
460 cairo_xcb_shm_info_t *info, *next;
461 xcb_get_input_focus_reply_t *reply;
463 assert (CAIRO_MUTEX_IS_LOCKED (connection->shm_mutex));
464 cairo_list_foreach_entry_safe (info, next, cairo_xcb_shm_info_t,
465 &connection->shm_pending, pending)
/* PENDING_WAIT path: block until the sync reply arrives. */
469 reply = xcb_wait_for_reply (connection->xcb_connection,
470 info->sync.sequence, NULL);
/* PENDING_POLL path: give up on the whole list at the first
 * not-yet-answered sequence (they are ordered). */
473 if (! xcb_poll_for_reply (connection->xcb_connection,
475 (void **) &reply, NULL))
476 /* We cannot be sure the server finished with this image yet, so
477 * try again later. All other shm info are guaranteed to have a
478 * larger sequence number and thus don't have to be checked. */
482 /* silence Clang static analyzer warning */
488 cairo_list_del (&info->pending);
489 _cairo_xcb_shm_info_finalize (info);
/* Allocate 'size' bytes of SHM-backed storage for image transfer.
 * Strategy, in order: (1) reuse a big-enough pending (not-yet-reaped)
 * shm_info; (2) carve from an existing pool; (3) create a new SHM segment
 * (shmget/shmat), attach it to the X server, and carve from it.  Fills
 * *shm_info_out on success.  Returns CAIRO_STATUS_NO_MEMORY when over the
 * CAIRO_MAX_SHM_MEMORY budget or allocation fails, and
 * CAIRO_INT_STATUS_UNSUPPORTED when the kernel refuses SHM (in which case
 * SHM is disabled on the connection for hard failures).  Takes and
 * releases shm_mutex internally. */
494 _cairo_xcb_connection_allocate_shm_info (cairo_xcb_connection_t *connection,
496 cairo_bool_t might_reuse,
497 cairo_xcb_shm_info_t **shm_info_out)
499 cairo_xcb_shm_info_t *shm_info;
500 cairo_xcb_shm_mem_pool_t *pool, *next;
501 size_t bytes, maxbits = 16, minbits = 8;
502 size_t shm_allocated = 0;
504 cairo_status_t status;
506 assert (connection->flags & CAIRO_XCB_HAS_SHM);
508 CAIRO_MUTEX_LOCK (connection->shm_mutex);
/* Reap anything the server has already finished with. */
509 _cairo_xcb_shm_process_pending (connection, PENDING_POLL);
/* (1) Reuse a pending buffer that is large enough; its sync reply is
 * discarded since the caller takes ownership immediately.
 * NOTE(review): presumably gated on 'might_reuse' by an elided line. */
512 cairo_list_foreach_entry (shm_info, cairo_xcb_shm_info_t,
513 &connection->shm_pending, pending) {
514 if (shm_info->size >= size) {
515 cairo_list_del (&shm_info->pending);
516 CAIRO_MUTEX_UNLOCK (connection->shm_mutex);
518 xcb_discard_reply (connection->xcb_connection, shm_info->sync.sequence);
519 shm_info->sync.sequence = XCB_NONE;
521 *shm_info_out = shm_info;
522 return CAIRO_STATUS_SUCCESS;
/* (2) Try to satisfy the request from an existing pool; reap idle pools
 * and total up SHM usage along the way. */
527 cairo_list_foreach_entry_safe (pool, next, cairo_xcb_shm_mem_pool_t,
528 &connection->shm_pools, link)
530 if (pool->free_bytes > size) {
531 mem = _cairo_xcb_shm_mem_pool_malloc (pool, size);
533 /* keep the active pools towards the front */
534 cairo_list_move (&pool->link, &connection->shm_pools);
535 goto allocate_shm_info;
538 /* scan for old, unused pools */
539 if (pool->free_bytes == pool->max_bytes) {
540 _cairo_xcb_connection_shm_detach (connection,
542 _cairo_xcb_shm_mem_pool_destroy (pool);
544 shm_allocated += pool->max_bytes;
/* Enforce the global SHM budget before creating another segment. */
548 if (unlikely (shm_allocated >= CAIRO_MAX_SHM_MEMORY)) {
549 CAIRO_MUTEX_UNLOCK (connection->shm_mutex);
550 return _cairo_error (CAIRO_STATUS_NO_MEMORY);
/* (3) Create a fresh pool. */
553 pool = malloc (sizeof (cairo_xcb_shm_mem_pool_t));
554 if (unlikely (pool == NULL)) {
555 CAIRO_MUTEX_UNLOCK (connection->shm_mutex);
556 return _cairo_error (CAIRO_STATUS_NO_MEMORY);
/* Size the segment: at least 1 << maxbits, doubled until it exceeds
 * 'size'. */
559 bytes = 1 << maxbits;
560 while (bytes <= size)
561 bytes <<= 1, maxbits++;
565 pool->shmid = shmget (IPC_PRIVATE, bytes, IPC_CREAT | 0600);
566 if (pool->shmid != -1)
569 /* If the allocation failed because we asked for too much memory, we try
570 * again with a smaller request, as long as our allocation still fits. */
572 if (errno != EINVAL || bytes < size)
/* SHM genuinely unavailable: disable it on hard errors and report
 * unsupported so callers fall back to plain image transfer. */
575 if (pool->shmid == -1) {
577 if (! (err == EINVAL || err == ENOMEM))
578 connection->flags &= ~CAIRO_XCB_HAS_SHM;
580 CAIRO_MUTEX_UNLOCK (connection->shm_mutex);
581 return CAIRO_INT_STATUS_UNSUPPORTED;
584 pool->base = shmat (pool->shmid, NULL, 0);
585 if (unlikely (pool->base == (char *) -1)) {
/* Mark the segment for removal so it disappears when detached. */
586 shmctl (pool->shmid, IPC_RMID, NULL);
588 CAIRO_MUTEX_UNLOCK (connection->shm_mutex);
589 return _cairo_error (CAIRO_STATUS_NO_MEMORY);
592 status = _cairo_xcb_shm_mem_pool_init (pool,
595 maxbits - minbits + 1);
596 if (unlikely (status)) {
599 CAIRO_MUTEX_UNLOCK (connection->shm_mutex);
/* Attach the segment server-side; IPC_RMID now so the kernel reclaims
 * it once both sides detach. */
603 pool->shmseg = _cairo_xcb_connection_shm_attach (connection, pool->shmid, FALSE);
604 shmctl (pool->shmid, IPC_RMID, NULL);
606 cairo_list_add (&pool->link, &connection->shm_pools);
607 mem = _cairo_xcb_shm_mem_pool_malloc (pool, size);
/* Common exit: wrap the carved memory in an shm_info. */
610 shm_info = _cairo_freepool_alloc (&connection->shm_info_freelist);
611 if (unlikely (shm_info == NULL)) {
612 _cairo_xcb_shm_mem_pool_free (pool, mem);
613 CAIRO_MUTEX_UNLOCK (connection->shm_mutex);
614 return _cairo_error (CAIRO_STATUS_NO_MEMORY);
617 shm_info->connection = connection;
618 shm_info->pool = pool;
619 shm_info->shm = pool->shmseg;
620 shm_info->size = size;
621 shm_info->offset = (char *) mem - (char *) pool->base;
623 shm_info->sync.sequence = XCB_NONE;
625 /* scan for old, unused pools */
626 cairo_list_foreach_entry_safe (pool, next, cairo_xcb_shm_mem_pool_t,
627 &connection->shm_pools, link)
629 if (pool->free_bytes == pool->max_bytes) {
630 _cairo_xcb_connection_shm_detach (connection,
632 _cairo_xcb_shm_mem_pool_destroy (pool);
635 CAIRO_MUTEX_UNLOCK (connection->shm_mutex);
637 *shm_info_out = shm_info;
638 return CAIRO_STATUS_SUCCESS;
/* Queue an shm_info for deferred release: its memory cannot be reused
 * until the X server has processed every outstanding request that reads
 * it, so park it on the pending list tagged with a sync sequence number. */
642 _cairo_xcb_shm_info_destroy (cairo_xcb_shm_info_t *shm_info)
644 cairo_xcb_connection_t *connection = shm_info->connection;
646 /* We can only return shm_info->mem to the allocator when we can be sure
647 * that the X server no longer reads from it. Since the X server processes
648 * requests in order, we send a GetInputFocus here.
649 * _cairo_xcb_shm_process_pending () will later check if the reply for that
650 * request was received and then actually mark this memory area as free. */
652 CAIRO_MUTEX_LOCK (connection->shm_mutex);
/* Must not already carry a sync request. */
653 assert (shm_info->sync.sequence == XCB_NONE);
654 shm_info->sync = xcb_get_input_focus (connection->xcb_connection);
656 cairo_list_init (&shm_info->pending);
/* Tail insertion keeps the pending list ordered by sequence number,
 * which _cairo_xcb_shm_process_pending relies on when polling. */
657 cairo_list_add_tail (&shm_info->pending, &connection->shm_pending);
658 CAIRO_MUTEX_UNLOCK (connection->shm_mutex);
/* Block until every pending shm_info has been acknowledged by the server
 * and returned to its pool (PENDING_WAIT forces blocking reply reads). */
662 _cairo_xcb_connection_shm_mem_pools_flush (cairo_xcb_connection_t *connection)
664 CAIRO_MUTEX_LOCK (connection->shm_mutex);
665 _cairo_xcb_shm_process_pending (connection, PENDING_WAIT);
666 CAIRO_MUTEX_UNLOCK (connection->shm_mutex);
/* Destroy all SHM pools at connection teardown.  The pending list must
 * already be empty (i.e. flushed) before this is called. */
670 _cairo_xcb_connection_shm_mem_pools_fini (cairo_xcb_connection_t *connection)
672 assert (cairo_list_is_empty (&connection->shm_pending));
673 while (! cairo_list_is_empty (&connection->shm_pools)) {
674 _cairo_xcb_shm_mem_pool_destroy (cairo_list_first_entry (&connection->shm_pools,
675 cairo_xcb_shm_mem_pool_t,
680 #endif /* CAIRO_HAS_XCB_SHM_FUNCTIONS */