2 * Copyright © 2000 SuSE, Inc.
3 * Copyright © 1999 Keith Packard
5 * Permission to use, copy, modify, distribute, and sell this software and its
6 * documentation for any purpose is hereby granted without fee, provided that
7 * the above copyright notice appear in all copies and that both that
8 * copyright notice and this permission notice appear in supporting
9 * documentation, and that the name of SuSE not be used in advertising or
10 * publicity pertaining to distribution of the software without specific,
11 * written prior permission. SuSE makes no representations about the
12 * suitability of this software for any purpose. It is provided "as is"
13 * without express or implied warranty.
15 * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
17 * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
18 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
19 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
20 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 * Author: Keith Packard, SuSE, Inc.
31 #include "pixman-private.h"
34 * Computing composite region
36 static inline pixman_bool_t
37 clip_general_image (pixman_region32_t * region,
38 pixman_region32_t * clip,
42 if (pixman_region32_n_rects (region) == 1 &&
43 pixman_region32_n_rects (clip) == 1)
45 pixman_box32_t * rbox = pixman_region32_rectangles (region, NULL);
46 pixman_box32_t * cbox = pixman_region32_rectangles (clip, NULL);
49 if (rbox->x1 < (v = cbox->x1 + dx))
51 if (rbox->x2 > (v = cbox->x2 + dx))
53 if (rbox->y1 < (v = cbox->y1 + dy))
55 if (rbox->y2 > (v = cbox->y2 + dy))
57 if (rbox->x1 >= rbox->x2 || rbox->y1 >= rbox->y2)
59 pixman_region32_init (region);
63 else if (!pixman_region32_not_empty (clip))
70 pixman_region32_translate (region, -dx, -dy);
72 if (!pixman_region32_intersect (region, region, clip))
76 pixman_region32_translate (region, dx, dy);
79 return pixman_region32_not_empty (region);
82 static inline pixman_bool_t
83 clip_source_image (pixman_region32_t * region,
84 pixman_image_t * image,
88 /* Source clips are ignored, unless they are explicitly turned on
89 * and the clip in question was set by an X client. (Because if
90 * the clip was not set by a client, then it is a hierarchy
91 * clip and those should always be ignored for sources).
93 if (!image->common.clip_sources || !image->common.client_clip)
96 return clip_general_image (region,
97 &image->common.clip_region,
102 * returns FALSE if the final region is empty. Indistinguishable from
103 * an allocation failure, but rendering ignores those anyways.
106 pixman_compute_composite_region32 (pixman_region32_t * region,
107 pixman_image_t * src_image,
108 pixman_image_t * mask_image,
109 pixman_image_t * dst_image,
119 region->extents.x1 = dest_x;
120 region->extents.x2 = dest_x + width;
121 region->extents.y1 = dest_y;
122 region->extents.y2 = dest_y + height;
124 region->extents.x1 = MAX (region->extents.x1, 0);
125 region->extents.y1 = MAX (region->extents.y1, 0);
126 region->extents.x2 = MIN (region->extents.x2, dst_image->bits.width);
127 region->extents.y2 = MIN (region->extents.y2, dst_image->bits.height);
131 /* Check for empty operation */
132 if (region->extents.x1 >= region->extents.x2 ||
133 region->extents.y1 >= region->extents.y2)
135 pixman_region32_init (region);
139 if (dst_image->common.have_clip_region)
141 if (!clip_general_image (region, &dst_image->common.clip_region, 0, 0))
143 pixman_region32_fini (region);
148 if (dst_image->common.alpha_map && dst_image->common.alpha_map->common.have_clip_region)
150 if (!clip_general_image (region, &dst_image->common.alpha_map->common.clip_region,
151 -dst_image->common.alpha_origin_x,
152 -dst_image->common.alpha_origin_y))
154 pixman_region32_fini (region);
159 /* clip against src */
160 if (src_image->common.have_clip_region)
162 if (!clip_source_image (region, src_image, dest_x - src_x, dest_y - src_y))
164 pixman_region32_fini (region);
168 if (src_image->common.alpha_map && src_image->common.alpha_map->common.have_clip_region)
170 if (!clip_source_image (region, (pixman_image_t *)src_image->common.alpha_map,
171 dest_x - (src_x - src_image->common.alpha_origin_x),
172 dest_y - (src_y - src_image->common.alpha_origin_y)))
174 pixman_region32_fini (region);
178 /* clip against mask */
179 if (mask_image && mask_image->common.have_clip_region)
181 if (!clip_source_image (region, mask_image, dest_x - mask_x, dest_y - mask_y))
183 pixman_region32_fini (region);
186 if (mask_image->common.alpha_map && mask_image->common.alpha_map->common.have_clip_region)
188 if (!clip_source_image (region, (pixman_image_t *)mask_image->common.alpha_map,
189 dest_x - (mask_x - mask_image->common.alpha_origin_x),
190 dest_y - (mask_y - mask_image->common.alpha_origin_y)))
192 pixman_region32_fini (region);
201 PIXMAN_EXPORT pixman_bool_t
202 pixman_compute_composite_region (pixman_region16_t * region,
203 pixman_image_t * src_image,
204 pixman_image_t * mask_image,
205 pixman_image_t * dst_image,
215 pixman_region32_t r32;
216 pixman_bool_t retval;
218 pixman_region32_init (&r32);
220 retval = pixman_compute_composite_region32 (
221 &r32, src_image, mask_image, dst_image,
222 src_x, src_y, mask_x, mask_y, dest_x, dest_y,
227 if (!pixman_region16_copy_from_region32 (region, &r32))
231 pixman_region32_fini (&r32);
236 pixman_multiply_overflows_int (unsigned int a,
239 return a >= INT32_MAX / b;
243 pixman_addition_overflows_int (unsigned int a,
246 return a > INT32_MAX - b;
/* Overflow-checked malloc of a * b bytes.  Returns NULL if the product
 * would overflow a signed 32-bit int (or if malloc fails).
 * NOTE(review): assumes b != 0 — division by zero otherwise.
 */
void *
pixman_malloc_ab (unsigned int a,
                  unsigned int b)
{
    if (a >= INT32_MAX / b)
        return NULL;

    return malloc (a * b);
}
/* Overflow-checked malloc of a * b * c bytes.  Returns NULL if either
 * partial product would overflow a signed 32-bit int (or if malloc fails).
 * NOTE(review): assumes b != 0 and c != 0 — division by zero otherwise.
 */
void *
pixman_malloc_abc (unsigned int a,
                   unsigned int b,
                   unsigned int c)
{
    if (a >= INT32_MAX / b)
        return NULL;
    else if (a * b >= INT32_MAX / c)
        return NULL;
    else
        return malloc (a * b * c);
}
/*
 * Helper routine to expand a color component from 0 < n <= 8 bits to 16
 * bits by replication.
 */
static inline uint64_t
expand16 (const uint8_t val, int nbits)
{
    /* Start out with the high bit of val in the high bit of result. */
    uint16_t result = (uint16_t)val << (16 - nbits);

    /* A zero-width component expands to zero; without this guard the
     * doubling loop below would never terminate (nbits *= 2 keeps 0 at 0).
     */
    if (nbits == 0)
        return 0;

    /* Copy the bits in result, doubling the number of bits each time, until
     * we fill all 16 bits.
     */
    while (nbits < 16)
    {
        result |= result >> nbits;
        nbits *= 2;
    }

    return result;
}
298 * This function expands images from ARGB8 format to ARGB16. To preserve
299 * precision, it needs to know the original source format. For example, if the
300 * source was PIXMAN_x1r5g5b5 and the red component contained bits 12345, then
301 * the expanded value is 12345123. To correctly expand this to 16 bits, it
302 * should be 1234512345123451 and not 1234512312345123.
305 pixman_expand (uint64_t * dst,
306 const uint32_t * src,
307 pixman_format_code_t format,
311 * Determine the sizes of each component and the masks and shifts
312 * required to extract them from the source pixel.
314 const int a_size = PIXMAN_FORMAT_A (format),
315 r_size = PIXMAN_FORMAT_R (format),
316 g_size = PIXMAN_FORMAT_G (format),
317 b_size = PIXMAN_FORMAT_B (format);
318 const int a_shift = 32 - a_size,
319 r_shift = 24 - r_size,
320 g_shift = 16 - g_size,
321 b_shift = 8 - b_size;
322 const uint8_t a_mask = ~(~0 << a_size),
323 r_mask = ~(~0 << r_size),
324 g_mask = ~(~0 << g_size),
325 b_mask = ~(~0 << b_size);
328 /* Start at the end so that we can do the expansion in place
331 for (i = width - 1; i >= 0; i--)
333 const uint32_t pixel = src[i];
334 const uint8_t a = (pixel >> a_shift) & a_mask,
335 r = (pixel >> r_shift) & r_mask,
336 g = (pixel >> g_shift) & g_mask,
337 b = (pixel >> b_shift) & b_mask;
338 const uint64_t a16 = a_size ? expand16 (a, a_size) : 0xffff,
339 r16 = expand16 (r, r_size),
340 g16 = expand16 (g, g_size),
341 b16 = expand16 (b, b_size);
343 dst[i] = a16 << 48 | r16 << 32 | g16 << 16 | b16;
/*
 * Contracting is easier than expanding.  We just need to truncate the
 * components down to 8 bits, keeping the most significant byte of each.
 */
void
pixman_contract (uint32_t *      dst,
                 const uint64_t *src,
                 int             width)
{
    int i;

    /* Start at the beginning so that we can do the contraction in
     * place when src == dst
     */
    for (i = 0; i < width; i++)
    {
        const uint8_t a = src[i] >> 56,
                      r = src[i] >> 40,
                      g = src[i] >> 24,
                      b = src[i] >> 8;

        /* Cast `a` before shifting: a uint8_t promotes to int, and shifting
         * a value >= 0x80 left by 24 would shift into the sign bit.
         */
        dst[i] = (uint32_t)a << 24 | r << 16 | g << 8 | b;
    }
}
373 walk_region_internal (pixman_implementation_t *imp,
375 pixman_image_t * src_image,
376 pixman_image_t * mask_image,
377 pixman_image_t * dst_image,
386 pixman_bool_t src_repeat,
387 pixman_bool_t mask_repeat,
388 pixman_region32_t * region,
389 pixman_composite_func_t composite_rect)
392 const pixman_box32_t *pbox;
393 int w, h, w_this, h_this;
394 int x_msk, y_msk, x_src, y_src, x_dst, y_dst;
396 pbox = pixman_region32_rectangles (region, &n);
399 h = pbox->y2 - pbox->y1;
400 y_src = pbox->y1 - dest_y + src_y;
401 y_msk = pbox->y1 - dest_y + mask_y;
407 w = pbox->x2 - pbox->x1;
408 x_src = pbox->x1 - dest_x + src_x;
409 x_msk = pbox->x1 - dest_x + mask_x;
414 y_msk = MOD (y_msk, mask_image->bits.height);
415 if (h_this > mask_image->bits.height - y_msk)
416 h_this = mask_image->bits.height - y_msk;
421 y_src = MOD (y_src, src_image->bits.height);
422 if (h_this > src_image->bits.height - y_src)
423 h_this = src_image->bits.height - y_src;
432 x_msk = MOD (x_msk, mask_image->bits.width);
433 if (w_this > mask_image->bits.width - x_msk)
434 w_this = mask_image->bits.width - x_msk;
439 x_src = MOD (x_src, src_image->bits.width);
440 if (w_this > src_image->bits.width - x_src)
441 w_this = src_image->bits.width - x_src;
444 (*composite_rect) (imp, op,
445 src_image, mask_image, dst_image,
446 x_src, y_src, x_msk, y_msk, x_dst, y_dst,
466 _pixman_walk_composite_region (pixman_implementation_t *imp,
468 pixman_image_t * src_image,
469 pixman_image_t * mask_image,
470 pixman_image_t * dst_image,
479 pixman_composite_func_t composite_rect)
481 pixman_region32_t region;
483 pixman_region32_init (®ion);
485 if (pixman_compute_composite_region32 (
486 ®ion, src_image, mask_image, dst_image,
487 src_x, src_y, mask_x, mask_y, dest_x, dest_y,
490 walk_region_internal (imp, op,
491 src_image, mask_image, dst_image,
492 src_x, src_y, mask_x, mask_y, dest_x, dest_y,
493 width, height, FALSE, FALSE,
497 pixman_region32_fini (®ion);
502 mask_is_solid (pixman_image_t *mask)
504 if (mask->type == SOLID)
507 if (mask->type == BITS &&
508 mask->common.repeat == PIXMAN_REPEAT_NORMAL &&
509 mask->bits.width == 1 &&
510 mask->bits.height == 1)
518 static const pixman_fast_path_t *
519 get_fast_path (const pixman_fast_path_t *fast_paths,
521 pixman_image_t * src_image,
522 pixman_image_t * mask_image,
523 pixman_image_t * dst_image,
524 pixman_bool_t is_pixbuf)
526 const pixman_fast_path_t *info;
528 for (info = fast_paths; info->op != PIXMAN_OP_NONE; info++)
530 pixman_bool_t valid_src = FALSE;
531 pixman_bool_t valid_mask = FALSE;
536 if ((info->src_format == PIXMAN_solid &&
537 _pixman_image_is_solid (src_image)) ||
538 (src_image->type == BITS &&
539 info->src_format == src_image->bits.format))
547 if ((info->mask_format == PIXMAN_null && !mask_image) ||
548 (mask_image && mask_image->type == BITS &&
549 info->mask_format == mask_image->bits.format))
553 if (info->flags & NEED_SOLID_MASK)
555 if (!mask_image || !mask_is_solid (mask_image))
559 if (info->flags & NEED_COMPONENT_ALPHA)
561 if (!mask_image || !mask_image->common.component_alpha)
569 if (info->dest_format != dst_image->bits.format)
572 if ((info->flags & NEED_PIXBUF) && !is_pixbuf)
581 static force_inline pixman_bool_t
582 image_covers (pixman_image_t *image,
583 pixman_box32_t *extents,
587 if (image->common.type == BITS &&
588 image->common.repeat == PIXMAN_REPEAT_NONE)
590 if (x > extents->x1 || y > extents->y1 ||
591 x + image->bits.width < extents->x2 ||
592 y + image->bits.height < extents->y2)
601 static force_inline pixman_bool_t
602 sources_cover (pixman_image_t *src,
603 pixman_image_t *mask,
604 pixman_box32_t *extents,
612 if (!image_covers (src, extents, dest_x - src_x, dest_y - src_y))
618 if (!image_covers (mask, extents, dest_x - mask_x, dest_y - mask_y))
625 _pixman_run_fast_path (const pixman_fast_path_t *paths,
626 pixman_implementation_t * imp,
628 pixman_image_t * src,
629 pixman_image_t * mask,
630 pixman_image_t * dest,
640 pixman_composite_func_t func = NULL;
641 pixman_bool_t src_repeat =
642 src->common.repeat == PIXMAN_REPEAT_NORMAL;
643 pixman_bool_t mask_repeat =
644 mask && mask->common.repeat == PIXMAN_REPEAT_NORMAL;
645 pixman_bool_t result;
646 pixman_bool_t has_fast_path;
648 has_fast_path = !dest->common.alpha_map &&
649 !dest->bits.read_func &&
650 !dest->bits.write_func;
654 has_fast_path = (src->type == BITS || _pixman_image_is_solid (src)) &&
655 !src->common.transform &&
656 !src->common.alpha_map &&
657 src->common.filter != PIXMAN_FILTER_CONVOLUTION &&
658 src->common.repeat != PIXMAN_REPEAT_PAD &&
659 src->common.repeat != PIXMAN_REPEAT_REFLECT;
660 if (has_fast_path && src->type == BITS)
662 has_fast_path = !src->bits.read_func &&
663 !src->bits.write_func &&
664 !PIXMAN_FORMAT_IS_WIDE (src->bits.format);
668 if (mask && has_fast_path)
671 mask->type == BITS &&
672 !mask->common.transform &&
673 !mask->common.alpha_map &&
674 !mask->bits.read_func &&
675 !mask->bits.write_func &&
676 mask->common.filter != PIXMAN_FILTER_CONVOLUTION &&
677 mask->common.repeat != PIXMAN_REPEAT_PAD &&
678 mask->common.repeat != PIXMAN_REPEAT_REFLECT &&
679 !PIXMAN_FORMAT_IS_WIDE (mask->bits.format);
684 const pixman_fast_path_t *info;
685 pixman_bool_t pixbuf;
688 src && src->type == BITS &&
689 mask && mask->type == BITS &&
690 src->bits.bits == mask->bits.bits &&
693 !mask->common.component_alpha &&
696 info = get_fast_path (paths, op, src, mask, dest, pixbuf);
702 if (info->src_format == PIXMAN_solid)
705 if (info->mask_format == PIXMAN_solid ||
706 info->flags & NEED_SOLID_MASK)
712 src->bits.width == 1 &&
713 src->bits.height == 1) ||
715 mask->bits.width == 1 &&
716 mask->bits.height == 1))
718 /* If src or mask are repeating 1x1 images and src_repeat or
719 * mask_repeat are still TRUE, it means the fast path we
720 * selected does not actually handle repeating images.
722 * So rather than call the "fast path" with a zillion
723 * 1x1 requests, we just fall back to the general code (which
724 * does do something sensible with 1x1 repeating images).
735 pixman_region32_t region;
736 pixman_region32_init (®ion);
738 if (pixman_compute_composite_region32 (
739 ®ion, src, mask, dest,
740 src_x, src_y, mask_x, mask_y, dest_x, dest_y, width, height))
742 pixman_box32_t *extents = pixman_region32_extents (®ion);
746 src_x, src_y, mask_x, mask_y, dest_x, dest_y))
748 walk_region_internal (imp, op,
750 src_x, src_y, mask_x, mask_y,
753 src_repeat, mask_repeat,
760 pixman_region32_fini (®ion);
767 #define N_TMP_BOXES (16)
770 pixman_region16_copy_from_region32 (pixman_region16_t *dst,
771 pixman_region32_t *src)
774 pixman_box32_t *boxes32;
775 pixman_box16_t *boxes16;
776 pixman_bool_t retval;
778 boxes32 = pixman_region32_rectangles (src, &n_boxes);
780 boxes16 = pixman_malloc_ab (n_boxes, sizeof (pixman_box16_t));
785 for (i = 0; i < n_boxes; ++i)
787 boxes16[i].x1 = boxes32[i].x1;
788 boxes16[i].y1 = boxes32[i].y1;
789 boxes16[i].x2 = boxes32[i].x2;
790 boxes16[i].y2 = boxes32[i].y2;
793 pixman_region_fini (dst);
794 retval = pixman_region_init_rects (dst, boxes16, n_boxes);
800 pixman_region32_copy_from_region16 (pixman_region32_t *dst,
801 pixman_region16_t *src)
804 pixman_box16_t *boxes16;
805 pixman_box32_t *boxes32;
806 pixman_box32_t tmp_boxes[N_TMP_BOXES];
807 pixman_bool_t retval;
809 boxes16 = pixman_region_rectangles (src, &n_boxes);
811 if (n_boxes > N_TMP_BOXES)
812 boxes32 = pixman_malloc_ab (n_boxes, sizeof (pixman_box32_t));
819 for (i = 0; i < n_boxes; ++i)
821 boxes32[i].x1 = boxes16[i].x1;
822 boxes32[i].y1 = boxes16[i].y1;
823 boxes32[i].x2 = boxes16[i].x2;
824 boxes32[i].y2 = boxes16[i].y2;
827 pixman_region32_fini (dst);
828 retval = pixman_region32_init_rects (dst, boxes32, n_boxes);
830 if (boxes32 != tmp_boxes)