2 * Copyright © 2000 SuSE, Inc.
3 * Copyright © 1999 Keith Packard
5 * Permission to use, copy, modify, distribute, and sell this software and its
6 * documentation for any purpose is hereby granted without fee, provided that
7 * the above copyright notice appear in all copies and that both that
8 * copyright notice and this permission notice appear in supporting
9 * documentation, and that the name of SuSE not be used in advertising or
10 * publicity pertaining to distribution of the software without specific,
11 * written prior permission. SuSE makes no representations about the
12 * suitability of this software for any purpose. It is provided "as is"
13 * without express or implied warranty.
15 * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
17 * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
18 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
19 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
20 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 * Author: Keith Packard, SuSE, Inc.
31 #include "pixman-private.h"
34 * Computing composite region
/* Clamp v into the representable int16_t range before narrowing, so that
 * composite extents computed in wider arithmetic cannot wrap around. */
36 #define BOUND(v) (int16_t) ((v) < INT16_MIN ? INT16_MIN : (v) > INT16_MAX ? INT16_MAX : (v))
/* Intersect `region` with `clip` translated by (dx, dy), storing the result
 * back into `region`.  Returns TRUE iff the resulting region is non-empty.
 * NOTE(review): several source lines are elided in this view (including the
 * dx/dy parameters and some statement bodies); comments below describe only
 * the visible structure.
 */
38 static inline pixman_bool_t
39 clip_general_image (pixman_region32_t * region,
40                     pixman_region32_t * clip,
    /* Fast path: when both regions are a single box, clip by clamping the
     * region box edges directly against the translated clip box edges,
     * avoiding a full region intersection. */
44     if (pixman_region32_n_rects(region) == 1 &&
45         pixman_region32_n_rects(clip) == 1)
47         pixman_box32_t * rbox = pixman_region32_rectangles(region, NULL);
48         pixman_box32_t * cbox = pixman_region32_rectangles(clip, NULL);
    /* Pull each edge of the region box inward to the clip box edge when it
     * sticks out (assignment bodies are on elided lines). */
51         if (rbox->x1 < (v = cbox->x1 + dx))
53         if (rbox->x2 > (v = cbox->x2 + dx))
55         if (rbox->y1 < (v = cbox->y1 + dy))
57         if (rbox->y2 > (v = cbox->y2 + dy))
    /* If the clamped box is degenerate, reset `region` to empty. */
59         if (rbox->x1 >= rbox->x2 ||
62             pixman_region32_init (region);
    /* An empty clip region makes the result empty (handling elided). */
65     else if (!pixman_region32_not_empty (clip))
    /* General case: translate into clip space, intersect, translate back.
     * The intersect failure path is on elided lines. */
72         pixman_region32_translate (region, -dx, -dy);
73         if (!pixman_region32_intersect (region, region, clip))
76         pixman_region32_translate(region, dx, dy);
78     return pixman_region32_not_empty(region);
/* Clip `region` against the clip region of a source image `picture`,
 * offset by (dx, dy).  Returns TRUE iff the region remains non-empty
 * (early-return bodies are on elided lines).
 */
82 static inline pixman_bool_t
83 clip_source_image (pixman_region32_t * region,
84                    pixman_image_t *    picture,
88     /* The workaround lets certain fast paths run even when they
89      * would normally be rejected because of out-of-bounds access.
90      * We need to clip against the source geometry in that case
92     if (!picture->common.need_workaround)
94     /* Source clips are ignored, unless they are explicitly turned on
95      * and the clip in question was set by an X client. (Because if
96      * the clip was not set by a client, then it is a hierarchy
97      * clip and those should always be ignored for sources).
    /* Without both flags set, the source clip does not apply; the
     * early return on the next branch's body is elided. */
99     if (!picture->common.clip_sources || !picture->common.client_clip)
    /* Delegate the actual clipping to clip_general_image; the dx/dy
     * arguments are on elided lines. */
103     return clip_general_image (region,
104                                &picture->common.clip_region,
109  * returns FALSE if the final region is empty.  Indistinguishable from
110  * an allocation failure, but rendering ignores those anyways.
/* Compute, into `region`, the destination-space area actually affected by a
 * composite of src/mask onto dst with the given offsets and size, clipping
 * against destination geometry and every applicable clip region and alpha-map
 * clip.  Returns FALSE (and finalizes `region`) when the result is empty.
 * NOTE(review): parameters after dst_image (src_x..height) and several
 * statements are on elided lines.
 */
113 pixman_compute_composite_region32 (pixman_region32_t * region,
114                                    pixman_image_t *    src_image,
115                                    pixman_image_t *    mask_image,
116                                    pixman_image_t *    dst_image,
    /* Seed the region extents with the requested destination rectangle,
     * clamped via BOUND (the computations of `v` are elided). */
128     region->extents.x1 = dest_x;
130     region->extents.x2 = BOUND(v);
131     region->extents.y1 = dest_y;
133     region->extents.y2 = BOUND(v);
    /* Never extend above or left of the destination origin. */
135     region->extents.x1 = MAX (region->extents.x1, 0);
136     region->extents.y1 = MAX (region->extents.y1, 0);
138     /* Some X servers rely on an old bug, where pixman would just believe the
139      * set clip_region and not clip against the destination geometry. So,
140      * since only X servers set "source clip", we don't clip against
141      * destination geometry when that is set and when the workaround has
142      * not been explicitly disabled by
144      *    pixman_disable_out_of_bounds_workaround();
147     if (!(dst_image->common.need_workaround))
149         region->extents.x2 = MIN (region->extents.x2, dst_image->bits.width);
150         region->extents.y2 = MIN (region->extents.y2, dst_image->bits.height);
155     /* Check for empty operation */
156     if (region->extents.x1 >= region->extents.x2 ||
157         region->extents.y1 >= region->extents.y2)
    /* Empty: reset region; the accompanying return is elided. */
159         pixman_region32_init (region);
    /* Clip against the destination clip region. */
163     if (dst_image->common.have_clip_region)
165         if (!clip_general_image (region, &dst_image->common.clip_region, 0, 0))
167             pixman_region32_fini (region);
    /* Clip against the destination's alpha-map clip region, shifted by
     * the alpha origin. */
172     if (dst_image->common.alpha_map && dst_image->common.alpha_map->common.have_clip_region)
174         if (!clip_general_image (region, &dst_image->common.alpha_map->common.clip_region,
175                                  -dst_image->common.alpha_origin_x,
176                                  -dst_image->common.alpha_origin_y))
178             pixman_region32_fini (region);
183     /* clip against src */
184     if (src_image->common.have_clip_region)
186         if (!clip_source_image (region, src_image, dest_x - src_x, dest_y - src_y))
188             pixman_region32_fini (region);
192     if (src_image->common.alpha_map && src_image->common.alpha_map->common.have_clip_region)
194         if (!clip_source_image (region, (pixman_image_t *)src_image->common.alpha_map,
195                                 dest_x - (src_x - src_image->common.alpha_origin_x),
196                                 dest_y - (src_y - src_image->common.alpha_origin_y)))
198             pixman_region32_fini (region);
202     /* clip against mask */
203     if (mask_image && mask_image->common.have_clip_region)
205         if (!clip_source_image (region, mask_image, dest_x - mask_x, dest_y - mask_y))
207             pixman_region32_fini (region);
210         if (mask_image->common.alpha_map && mask_image->common.alpha_map->common.have_clip_region)
212             if (!clip_source_image (region, (pixman_image_t *)mask_image->common.alpha_map,
213                                     dest_x - (mask_x - mask_image->common.alpha_origin_x),
214                                     dest_y - (mask_y - mask_image->common.alpha_origin_y)))
216                 pixman_region32_fini (region);
/* Public 16-bit wrapper around pixman_compute_composite_region32: computes
 * the composite region in a temporary 32-bit region, then copies it into the
 * caller's pixman_region16_t.  The failure-handling and return statements
 * are on elided lines.
 */
225 PIXMAN_EXPORT pixman_bool_t
226 pixman_compute_composite_region (pixman_region16_t * region,
227                                  pixman_image_t *    src_image,
228                                  pixman_image_t *    mask_image,
229                                  pixman_image_t *    dst_image,
239     pixman_region32_t r32;
240     pixman_bool_t retval;
242     pixman_region32_init (&r32);
244     retval = pixman_compute_composite_region32 (&r32, src_image, mask_image, dst_image,
245                                                 src_x, src_y, mask_x, mask_y, dest_x, dest_y,
    /* Downconvert the 32-bit region to 16-bit boxes; coordinates are
     * truncated by the copy helper. */
250         if (!pixman_region16_copy_from_region32 (region, &r32))
254     pixman_region32_fini (&r32);
/* Returns TRUE when a * b would exceed the conservative INT32_MAX budget
 * (second operand `b` is declared on an elided line).  Uses division to
 * test without performing the possibly-overflowing multiply. */
259 pixman_multiply_overflows_int (unsigned int a,
262     return a >= INT32_MAX / b;
/* Returns TRUE when a + b would exceed INT32_MAX (second operand `b` is
 * declared on an elided line).  Subtraction form avoids overflow in the
 * test itself. */
266 pixman_addition_overflows_int (unsigned int a,
269     return a > INT32_MAX - b;
/* Overflow-checked malloc of a * b bytes: returns NULL (on an elided line)
 * when the product would exceed the INT32_MAX budget, otherwise the
 * malloc'd buffer.  Caller owns and frees the result. */
273 pixman_malloc_ab(unsigned int a,
276     if (a >= INT32_MAX / b)
279     return malloc (a * b);
/* Overflow-checked malloc of a * b * c bytes: both partial products are
 * checked against the INT32_MAX budget (the NULL-returning bodies are on
 * elided lines).  Caller owns and frees the result. */
283 pixman_malloc_abc (unsigned int a,
287     if (a >= INT32_MAX / b)
289     else if (a * b >= INT32_MAX / c)
292     return malloc (a * b * c);
296  * Helper routine to expand a color component from 0 < n <= 8 bits to 16 bits by
/* Expands by replicating the value's bit pattern: place the source bits at
 * the top of a 16-bit word, then OR in right-shifted copies (the loop
 * structure around line 311 is elided) until all 16 bits are filled. */
299 static inline uint64_t
300 expand16(const uint8_t val, int nbits)
302     // Start out with the high bit of val in the high bit of result.
303     uint16_t result = (uint16_t)val << (16 - nbits);
308     // Copy the bits in result, doubling the number of bits each time, until we
311         result |= result >> nbits;
319  * This function expands images from ARGB8 format to ARGB16.  To preserve
320  * precision, it needs to know the original source format.  For example, if the
321  * source was PIXMAN_x1r5g5b5 and the red component contained bits 12345, then
322  * the expanded value is 12345123.  To correctly expand this to 16 bits, it
323  * should be 1234512345123451 and not 1234512312345123.
/* dst: width 64-bit ARGB16 pixels out; src: width 32-bit pixels in `format`.
 * Safe for in-place use (src == dst) because it walks backwards. */
326 pixman_expand(uint64_t *dst, const uint32_t *src,
327               pixman_format_code_t format, int width)
330      * Determine the sizes of each component and the masks and shifts required
331      * to extract them from the source pixel.
333     const int a_size = PIXMAN_FORMAT_A(format),
334               r_size = PIXMAN_FORMAT_R(format),
335               g_size = PIXMAN_FORMAT_G(format),
336               b_size = PIXMAN_FORMAT_B(format);
337     const int a_shift = 32 - a_size,
338               r_shift = 24 - r_size,
339               g_shift = 16 - g_size,
340               b_shift = 8 - b_size;
341     const uint8_t a_mask = ~(~0 << a_size),
342                   r_mask = ~(~0 << r_size),
343                   g_mask = ~(~0 << g_size),
344                   b_mask = ~(~0 << b_size);
347     /* Start at the end so that we can do the expansion in place when src == dst */
348     for (i = width - 1; i >= 0; i--)
350         const uint32_t pixel = src[i];
351         // Extract the components.
352         const uint8_t a = (pixel >> a_shift) & a_mask,
353                       r = (pixel >> r_shift) & r_mask,
354                       g = (pixel >> g_shift) & g_mask,
355                       b = (pixel >> b_shift) & b_mask;
        /* A format without alpha bits is treated as fully opaque. */
356         const uint64_t a16 = a_size ? expand16(a, a_size) : 0xffff,
357                        r16 = expand16(r, r_size),
358                        g16 = expand16(g, g_size),
359                        b16 = expand16(b, b_size);
        /* Pack the four 16-bit components into one 64-bit ARGB16 pixel. */
361         dst[i] = a16 << 48 | r16 << 32 | g16 << 16 | b16;
366  * Contracting is easier than expanding.  We just need to truncate the
/* Convert width ARGB16 (64-bit) pixels in `src` down to ARGB8 (32-bit)
 * pixels in `dst` by keeping the high byte of each component.  The r/g/b
 * extractions (shifts of 40/24/8 bits, presumably) are on elided lines.
 * Forward iteration makes in-place contraction (src == dst) safe. */
370 pixman_contract(uint32_t *dst, const uint64_t *src, int width)
374     /* Start at the beginning so that we can do the contraction in place when
376     for (i = 0; i < width; i++)
378         const uint8_t a = src[i] >> 56,
382         dst[i] = a << 24 | r << 16 | g << 8 | b;
/* Iterate over every box of `region`, breaking each box into sub-rectangles
 * that do not cross a tile boundary of a NORMAL-repeating src/mask bits
 * image, and invoke `composite_rect` once per sub-rectangle with per-image
 * coordinates.  NOTE(review): the outer box loop and the inner w/h stepping
 * statements are on elided lines; comments describe only what is visible.
 */
387 walk_region_internal (pixman_implementation_t *imp,
389                       pixman_image_t *          src_image,
390                       pixman_image_t *          mask_image,
391                       pixman_image_t *          dst_image,
400                       pixman_bool_t             src_repeat,
401                       pixman_bool_t             mask_repeat,
402                       pixman_region32_t *       region,
403                       pixman_composite_func_t   composite_rect)
406     const pixman_box32_t *pbox;
407     int w, h, w_this, h_this;
408     int x_msk, y_msk, x_src, y_src, x_dst, y_dst;
    /* Fetch the region's box list; n is declared on an elided line. */
410     pbox = pixman_region32_rectangles (region, &n);
    /* Per-box setup: box size and the corresponding src/mask coordinates
     * derived from the dest offset. */
413     h = pbox->y2 - pbox->y1;
414     y_src = pbox->y1 - dest_y + src_y;
415     y_msk = pbox->y1 - dest_y + mask_y;
420     w = pbox->x2 - pbox->x1;
421     x_src = pbox->x1 - dest_x + src_x;
422     x_msk = pbox->x1 - dest_x + mask_x;
    /* When the mask repeats, wrap y_msk into the tile and limit the strip
     * height so it does not cross the tile edge. */
427     y_msk = MOD (y_msk, mask_image->bits.height);
428     if (h_this > mask_image->bits.height - y_msk)
429         h_this = mask_image->bits.height - y_msk;
    /* Same wrapping for a repeating source in y. */
433     y_src = MOD (y_src, src_image->bits.height);
434     if (h_this > src_image->bits.height - y_src)
435         h_this = src_image->bits.height - y_src;
    /* Horizontal equivalents: wrap x_msk/x_src and clamp the strip width. */
442     x_msk = MOD (x_msk, mask_image->bits.width);
443     if (w_this > mask_image->bits.width - x_msk)
444         w_this = mask_image->bits.width - x_msk;
448     x_src = MOD (x_src, src_image->bits.width);
449     if (w_this > src_image->bits.width - x_src)
450         w_this = src_image->bits.width - x_src;
    /* Composite this sub-rectangle (trailing w_this/h_this arguments are on
     * elided lines). */
452     (*composite_rect) (imp,
453                        op, src_image, mask_image, dst_image,
454                        x_src, y_src, x_msk, y_msk, x_dst, y_dst,
/* Compute the clipped composite region for the operation and, when it is
 * non-empty, walk it with walk_region_internal (with repeat handling
 * disabled: FALSE, FALSE), calling `composite_rect` for each rectangle.
 * The temporary region is always finalized before returning. */
471 _pixman_walk_composite_region (pixman_implementation_t *imp,
473                                pixman_image_t *          src_image,
474                                pixman_image_t *          mask_image,
475                                pixman_image_t *          dst_image,
484                                pixman_composite_func_t   composite_rect)
486     pixman_region32_t region;
488     pixman_region32_init (&region);
490     if (pixman_compute_composite_region32 (
491             &region, src_image, mask_image, dst_image,
492             src_x, src_y, mask_x, mask_y, dest_x, dest_y,
495         walk_region_internal (imp, op,
496                               src_image, mask_image, dst_image,
497                               src_x, src_y, mask_x, mask_y, dest_x, dest_y,
498                               width, height, FALSE, FALSE,
503     pixman_region32_fini (&region);
/* Returns TRUE when `mask` behaves as a single solid color: either a SOLID
 * image, or a 1x1 NORMAL-repeating bits image (the return statements are on
 * elided lines). */
508 mask_is_solid (pixman_image_t *mask)
510     if (mask->type == SOLID)
513     if (mask->type == BITS &&
514         mask->common.repeat == PIXMAN_REPEAT_NORMAL &&
515         mask->bits.width == 1 &&
516         mask->bits.height == 1)
/* Scan the PIXMAN_OP_NONE-terminated `fast_paths` table for an entry that
 * matches the operator, the src/mask/dest formats, and the entry's flag
 * requirements (solid mask, component alpha, pixbuf).  NOTE(review): the
 * `continue`/return statements and the matching-entry return are on elided
 * lines; the function presumably returns the first matching entry or NULL.
 */
524 static const pixman_fast_path_t *
525 get_fast_path (const pixman_fast_path_t *fast_paths,
527                pixman_image_t *          src_image,
528                pixman_image_t *          mask_image,
529                pixman_image_t *          dst_image,
530                pixman_bool_t             is_pixbuf)
532     const pixman_fast_path_t *info;
534     for (info = fast_paths; info->op != PIXMAN_OP_NONE; info++)
536         pixman_bool_t valid_src = FALSE;
537         pixman_bool_t valid_mask = FALSE;
        /* Source matches when the entry wants a solid source and the image
         * is solid, or the bits format matches exactly. */
542         if ((info->src_format == PIXMAN_solid && _pixman_image_is_solid (src_image)) ||
543             (src_image->type == BITS && info->src_format == src_image->bits.format))
        /* Mask matches when the entry expects no mask and none is given,
         * or the mask's bits format matches. */
551         if ((info->mask_format == PIXMAN_null && !mask_image) ||
552             (mask_image && mask_image->type == BITS && info->mask_format == mask_image->bits.format))
        /* Entries flagged NEED_SOLID_MASK additionally require a solid mask. */
556         if (info->flags & NEED_SOLID_MASK)
558             if (!mask_image || !mask_is_solid (mask_image))
        /* Entries flagged NEED_COMPONENT_ALPHA require a component-alpha mask. */
562         if (info->flags & NEED_COMPONENT_ALPHA)
564             if (!mask_image || !mask_image->common.component_alpha)
        /* Destination format must match exactly. */
572         if (info->dest_format != dst_image->bits.format)
        /* NEED_PIXBUF entries only apply when src/mask share a pixbuf. */
575         if ((info->flags & NEED_PIXBUF) && !is_pixbuf)
/* Returns whether `image`, placed at (x, y), fully covers `extents`.  Only a
 * non-repeating (REPEAT_NONE) bits image can fail to cover; the FALSE/TRUE
 * return statements are on elided lines. */
584 static inline pixman_bool_t
585 image_covers (pixman_image_t *image, pixman_box32_t *extents, int x, int y)
587     if (image->common.type == BITS && image->common.repeat == PIXMAN_REPEAT_NONE)
589         if (x > extents->x1 || y > extents->y1 ||
590             x + image->bits.width < extents->x2 ||
591             y + image->bits.height < extents->y2)
/* Try to perform the composite via an entry from the `paths` fast-path
 * table.  Returns TRUE when a fast path was found and executed, FALSE when
 * the caller must fall back to the general path.  NOTE(review): several
 * parameters (op, src, coordinates) and a number of statements are on
 * elided lines; comments below cover only the visible logic.
 */
601 _pixman_run_fast_path (const pixman_fast_path_t *paths,
602                        pixman_implementation_t * imp,
605                        pixman_image_t *          mask,
606                        pixman_image_t *          dest,
616     pixman_composite_func_t func = NULL;
    /* Whether src/mask are tiling images that may need per-tile walking. */
617     pixman_bool_t src_repeat = src->common.repeat == PIXMAN_REPEAT_NORMAL;
618     pixman_bool_t mask_repeat = mask && mask->common.repeat == PIXMAN_REPEAT_NORMAL;
619     pixman_bool_t result;
    /* Fast paths only handle untransformed BITS/solid images with no alpha
     * maps, no convolution filters, no PAD/REFLECT repeat, and no custom
     * accessors on any of the three images. */
621     if ((src->type == BITS || _pixman_image_is_solid (src)) &&
622         (!mask || mask->type == BITS)
623         && !src->common.transform && !(mask && mask->common.transform)
624         && !(mask && mask->common.alpha_map) && !src->common.alpha_map && !dest->common.alpha_map
625         && (src->common.filter != PIXMAN_FILTER_CONVOLUTION)
626         && (src->common.repeat != PIXMAN_REPEAT_PAD)
627         && (src->common.repeat != PIXMAN_REPEAT_REFLECT)
628         && (!mask || (mask->common.filter != PIXMAN_FILTER_CONVOLUTION &&
629                       mask->common.repeat != PIXMAN_REPEAT_PAD &&
630                       mask->common.repeat != PIXMAN_REPEAT_REFLECT))
631         && !src->common.read_func && !src->common.write_func
632         && !(mask && mask->common.read_func)
633         && !(mask && mask->common.write_func)
634         && !dest->common.read_func
635         && !dest->common.write_func)
637         const pixman_fast_path_t *info;
638         pixman_bool_t pixbuf;
        /* Detect the "pixbuf" case: src and mask are bits images sharing
         * the same pixel buffer (further conditions are elided). */
641             src && src->type == BITS &&
642             mask && mask->type == BITS &&
643             src->bits.bits == mask->bits.bits &&
646             !mask->common.component_alpha &&
649         info = get_fast_path (paths, op, src, mask, dest, pixbuf);
        /* A solid source/mask entry does not need repeat-walking;
         * the flag-clearing statements are on elided lines. */
655             if (info->src_format == PIXMAN_solid)
658             if (info->mask_format == PIXMAN_solid || info->flags & NEED_SOLID_MASK)
        /* Surrounding conditions for this 1x1 check are elided. */
662                   src->bits.width == 1 &&
663                   src->bits.height == 1) ||
665                   mask->bits.width == 1 &&
666                   mask->bits.height == 1))
668                 /* If src or mask are repeating 1x1 images and src_repeat or
669                  * mask_repeat are still TRUE, it means the fast path we
670                  * selected does not actually handle repeating images.
672                  * So rather than call the "fast path" with a zillion
673                  * 1x1 requests, we just fall back to the general code (which
674                  * does do something sensible with 1x1 repeating images).
685             pixman_region32_t region;
686             pixman_region32_init (&region);
688             if (pixman_compute_composite_region32 (
689                     &region, src, mask, dest, src_x, src_y, mask_x, mask_y, dest_x, dest_y, width, height))
691                 pixman_box32_t *extents = pixman_region32_extents (&region);
                /* Only run the fast path when both src and mask fully cover
                 * the composite extents (or the X-server workaround is on),
                 * so the path never reads out of bounds. */
693                 if ((image_covers (src, extents, dest_x - src_x, dest_y - src_y) &&
694                      (!mask || image_covers (mask, extents, dest_x - mask_x, dest_y - mask_y))) ||
695                     src->common.need_workaround)
697                     walk_region_internal (imp, op,
699                                           src_x, src_y, mask_x, mask_y,
702                                           src_repeat, mask_repeat,
710             pixman_region32_fini (&region);
/* Stack-buffer size used by the region 16<->32 copy helpers below to avoid
 * heap allocation for small regions. */
716 #define N_TMP_BOXES (16)
/* Rebuild `dst` (16-bit region) from the boxes of `src` (32-bit region).
 * Returns the result of pixman_region_init_rects.  Coordinates are narrowed
 * by plain assignment, so out-of-int16-range boxes are implicitly truncated.
 * The NULL-check for the allocation and the free/return are on elided lines.
 */
719 pixman_region16_copy_from_region32 (pixman_region16_t *dst,
720                                     pixman_region32_t *src)
723     pixman_box32_t *boxes32;
724     pixman_box16_t *boxes16;
725     pixman_bool_t retval;
727     boxes32 = pixman_region32_rectangles (src, &n_boxes);
729     boxes16 = pixman_malloc_ab (n_boxes, sizeof (pixman_box16_t));
734     for (i = 0; i < n_boxes; ++i)
736         boxes16[i].x1 = boxes32[i].x1;
737         boxes16[i].y1 = boxes32[i].y1;
738         boxes16[i].x2 = boxes32[i].x2;
739         boxes16[i].y2 = boxes32[i].y2;
    /* Replace dst wholesale with the converted rectangle list. */
742     pixman_region_fini (dst);
743     retval = pixman_region_init_rects (dst, boxes16, n_boxes);
749 pixman_region32_copy_from_region16 (pixman_region32_t *dst,
750 pixman_region16_t *src)
753 pixman_box16_t *boxes16;
754 pixman_box32_t *boxes32;
755 pixman_box32_t tmp_boxes[N_TMP_BOXES];
756 pixman_bool_t retval;
758 boxes16 = pixman_region_rectangles (src, &n_boxes);
760 if (n_boxes > N_TMP_BOXES)
761 boxes32 = pixman_malloc_ab (n_boxes, sizeof (pixman_box32_t));
768 for (i = 0; i < n_boxes; ++i)
770 boxes32[i].x1 = boxes16[i].x1;
771 boxes32[i].y1 = boxes16[i].y1;
772 boxes32[i].x2 = boxes16[i].x2;
773 boxes32[i].y2 = boxes16[i].y2;
776 pixman_region32_fini (dst);
777 retval = pixman_region32_init_rects (dst, boxes32, n_boxes);
779 if (boxes32 != tmp_boxes)