1 /* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */
3 * Copyright © 2000 SuSE, Inc.
4 * Copyright © 2007 Red Hat, Inc.
6 * Permission to use, copy, modify, distribute, and sell this software and its
7 * documentation for any purpose is hereby granted without fee, provided that
8 * the above copyright notice appear in all copies and that both that
9 * copyright notice and this permission notice appear in supporting
10 * documentation, and that the name of SuSE not be used in advertising or
11 * publicity pertaining to distribution of the software without specific,
12 * written prior permission. SuSE makes no representations about the
13 * suitability of this software for any purpose. It is provided "as is"
14 * without express or implied warranty.
16 * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
18 * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
19 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
20 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
21 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 * Author: Keith Packard, SuSE, Inc.
30 #include "pixman-private.h"
31 #include "pixman-combine32.h"
33 static force_inline uint32_t
/* Alignment-safe read of a packed 24-bit pixel from a byte pointer.
 * NOTE(review): this excerpt is elided — the function name/parameter line,
 * braces, and the #else/#endif directives between the #ifdef branches are
 * missing. Presumably this is fetch_24 (uint8_t *a) — confirm against the
 * full source. */
/* Odd address: one byte access plus one aligned 16-bit access. */
36 if (((unsigned long)a) & 1)
38 #ifdef WORDS_BIGENDIAN
39 return (*a << 16) | (*(uint16_t *)(a + 1));
/* (little-endian branch — the separating #else line is elided here) */
41 return *a | (*(uint16_t *)(a + 1) << 8);
/* Even address: one aligned 16-bit access plus one byte access. */
46 #ifdef WORDS_BIGENDIAN
47 return (*(uint16_t *)a << 8) | *(a + 2);
49 return *(uint16_t *)a | (*(a + 2) << 16);
54 static force_inline void
/* Alignment-safe store of a 24-bit value v through a byte pointer a;
 * the mirror of the 24-bit fetch helper above.
 * NOTE(review): name/parameter line and #else/#endif directives are elided
 * in this excerpt — presumably store_24 (uint8_t *a, uint32_t v). */
/* Odd address: byte write + aligned 16-bit write. */
58 if (((unsigned long)a) & 1)
60 #ifdef WORDS_BIGENDIAN
61 *a = (uint8_t) (v >> 16);
62 *(uint16_t *)(a + 1) = (uint16_t) (v);
/* (little-endian branch — #else elided) */
65 *(uint16_t *)(a + 1) = (uint16_t) (v >> 8);
/* Even address: aligned 16-bit write + byte write. */
70 #ifdef WORDS_BIGENDIAN
71 *(uint16_t *)a = (uint16_t)(v >> 8);
72 *(a + 2) = (uint8_t)v;
74 *(uint16_t *)a = (uint16_t)v;
75 *(a + 2) = (uint8_t)(v >> 16);
80 static force_inline uint32_t
/* Porter-Duff OVER for premultiplied ARGB32:
 * dest = dest * (255 - src.alpha) / 255 + src, per channel, via the
 * UN8x4_MUL_UN8_ADD_UN8x4 macro from pixman-combine32.h.
 * NOTE(review): name/parameter and return lines elided — presumably
 * over (uint32_t src, uint32_t dest) returning dest. */
84 uint32_t a = ~src >> 24;
86 UN8x4_MUL_UN8_ADD_UN8x4 (dest, a, src);
/* OVER fast path: x888 source (forced opaque below), a8 mask, 8888 dest.
 * NOTE(review): excerpt is elided — loop headers, per-pixel mask handling
 * and several statements are missing; comments describe only what is
 * visible. */
108 fast_composite_over_x888_8_8888 (pixman_implementation_t *imp,
110 pixman_image_t * src_image,
111 pixman_image_t * mask_image,
112 pixman_image_t * dst_image,
122 uint32_t *src, *src_line;
123 uint32_t *dst, *dst_line;
124 uint8_t *mask, *mask_line;
125 int src_stride, mask_stride, dst_stride;
130 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
131 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
132 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
/* Per-row pointer advances (enclosing loop elided). */
137 src_line += src_stride;
139 dst_line += dst_stride;
141 mask_line += mask_stride;
/* Force the unused x888 alpha byte to opaque before compositing. */
149 s = *src | 0xff000000;
158 *dst = over (d, *dst);
/* IN fast path: solid source alpha, a8 mask, a8 dest.
 * Visible structure suggests two per-row paths: one multiplying dest by
 * the mask alone, and one pre-multiplying the mask by the solid alpha
 * (srca) first — presumably selected on srca == 0xff; the branch itself
 * is elided here, confirm against the full source. */
168 fast_composite_in_n_8_8 (pixman_implementation_t *imp,
170 pixman_image_t * src_image,
171 pixman_image_t * mask_image,
172 pixman_image_t * dest_image,
183 uint8_t *dst_line, *dst;
184 uint8_t *mask_line, *mask, m;
185 int dst_stride, mask_stride;
/* Resolve the solid source in the destination's format. */
189 src = _pixman_image_get_solid (src_image, dest_image->bits.format);
193 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
194 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
201 dst_line += dst_stride;
203 mask_line += mask_stride;
/* Path 1: dst *= m (mask only). */
213 *dst = MUL_UN8 (m, *dst, t);
224 dst_line += dst_stride;
226 mask_line += mask_stride;
/* Path 2: dst *= (m * srca). */
232 m = MUL_UN8 (m, srca, t);
237 *dst = MUL_UN8 (m, *dst, t);
/* IN fast path: a8 source over a8 dest — per pixel dst = src * dst / 255.
 * NOTE(review): loop headers and remaining parameters are elided in this
 * excerpt. */
246 fast_composite_in_8_8 (pixman_implementation_t *imp,
248 pixman_image_t * src_image,
249 pixman_image_t * mask_image,
250 pixman_image_t * dest_image,
260 uint8_t *dst_line, *dst;
261 uint8_t *src_line, *src;
262 int dst_stride, src_stride;
267 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
268 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
273 dst_line += dst_stride;
275 src_line += src_stride;
/* Per-pixel multiply with rounding via MUL_UN8's temp t. */
285 *dst = MUL_UN8 (s, *dst, t);
/* OVER fast path: solid source, a8 mask, 8888 dest.
 * Two visible per-pixel outcomes: the fully-opaque-mask shortcut
 * over (src, *dst), and the general in-then-over path over (d, *dst)
 * (where d is presumably in (src, m) — that line is elided here). */
293 fast_composite_over_n_8_8888 (pixman_implementation_t *imp,
295 pixman_image_t * src_image,
296 pixman_image_t * mask_image,
297 pixman_image_t * dst_image,
308 uint32_t *dst_line, *dst, d;
309 uint8_t *mask_line, *mask, m;
310 int dst_stride, mask_stride;
313 src = _pixman_image_get_solid (src_image, dst_image->bits.format);
319 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
320 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
325 dst_line += dst_stride;
327 mask_line += mask_stride;
/* Mask fully on: composite the solid directly. */
338 *dst = over (src, *dst);
/* Partial mask: composite the masked source d. */
343 *dst = over (d, *dst);
/* ADD fast path with component alpha: solid source, 8888 mask, 8888 dest.
 * Per pixel: s = src * ma + d, saturating per channel
 * (UN8x4_MUL_UN8x4_ADD_UN8x4), with a visible shortcut test for a fully-on
 * mask and opaque solid. Loop structure and stores are elided. */
351 fast_composite_add_n_8888_8888_ca (pixman_implementation_t *imp,
353 pixman_image_t * src_image,
354 pixman_image_t * mask_image,
355 pixman_image_t * dst_image,
365 uint32_t src, srca, s;
366 uint32_t *dst_line, *dst, d;
367 uint32_t *mask_line, *mask, ma;
368 int dst_stride, mask_stride;
371 src = _pixman_image_get_solid (src_image, dst_image->bits.format);
377 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
378 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
383 dst_line += dst_stride;
385 mask_line += mask_stride;
/* Shortcut when the mask is fully on and the solid is opaque. */
392 if (ma == 0xffffffff && srca == 0xff)
401 UN8x4_MUL_UN8x4_ADD_UN8x4 (s, ma, d);
/* OVER fast path with component alpha: solid source, 8888 mask, 8888 dest.
 * Per pixel (general case): s = src * ma; ma = ma * srca;
 * d = d * ~ma + s (the complement of ma is presumably applied on the
 * elided line before the visible macro — confirm against full source). */
412 fast_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
414 pixman_image_t * src_image,
415 pixman_image_t * mask_image,
416 pixman_image_t * dst_image,
426 uint32_t src, srca, s;
427 uint32_t *dst_line, *dst, d;
428 uint32_t *mask_line, *mask, ma;
429 int dst_stride, mask_stride;
432 src = _pixman_image_get_solid (src_image, dst_image->bits.format);
438 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
439 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
444 dst_line += dst_stride;
446 mask_line += mask_stride;
/* Fully-on mask: plain OVER with the solid. */
452 if (ma == 0xffffffff)
457 *dst = over (src, *dst);
/* General case: weight source and alpha by the per-channel mask. */
464 UN8x4_MUL_UN8x4 (s, ma);
465 UN8x4_MUL_UN8 (ma, srca);
467 UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, s);
/* OVER fast path: solid source, a8 mask, packed 24-bit (0888) dest.
 * Destination pixels are 3 bytes wide (note the GET_LINE multiplier of 3)
 * and are read/written via the fetch_24/store_24 helpers above. */
478 fast_composite_over_n_8_0888 (pixman_implementation_t *imp,
480 pixman_image_t * src_image,
481 pixman_image_t * mask_image,
482 pixman_image_t * dst_image,
493 uint8_t *dst_line, *dst;
495 uint8_t *mask_line, *mask, m;
496 int dst_stride, mask_stride;
499 src = _pixman_image_get_solid (src_image, dst_image->bits.format);
505 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 3);
506 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
511 dst_line += dst_stride;
513 mask_line += mask_stride;
/* General case: mask the solid, composite over the fetched 24-bit pixel
 * (the store_24 of d and the m==0xff shortcut are elided here). */
534 d = over (in (src, m), fetch_24 (dst));
/* OVER fast path: solid source, a8 mask, r5g6b5/b5g6r5 dest.
 * Each destination pixel is expanded to 8888, composited, then packed
 * back to 565 — two visible per-pixel variants (fully-on mask vs.
 * masked source); branch conditions are elided in this excerpt. */
543 fast_composite_over_n_8_0565 (pixman_implementation_t *imp,
545 pixman_image_t * src_image,
546 pixman_image_t * mask_image,
547 pixman_image_t * dst_image,
558 uint16_t *dst_line, *dst;
560 uint8_t *mask_line, *mask, m;
561 int dst_stride, mask_stride;
564 src = _pixman_image_get_solid (src_image, dst_image->bits.format);
570 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
571 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
576 dst_line += dst_stride;
578 mask_line += mask_stride;
/* Fully-on mask path. */
593 d = over (src, CONVERT_0565_TO_0888 (d));
595 *dst = CONVERT_8888_TO_0565 (d);
/* Partial-mask path: apply mask to the solid first. */
600 d = over (in (src, m), CONVERT_0565_TO_0888 (d));
601 *dst = CONVERT_8888_TO_0565 (d);
/* OVER fast path with component alpha: solid source, 8888 mask, 565 dest.
 * Precomputes the solid packed as 565 (src16, presumably used by the
 * elided fully-on/opaque shortcut); general case expands dest to 8888,
 * blends per-channel, and packs back to 565. */
609 fast_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
611 pixman_image_t * src_image,
612 pixman_image_t * mask_image,
613 pixman_image_t * dst_image,
623 uint32_t src, srca, s;
625 uint16_t *dst_line, *dst;
627 uint32_t *mask_line, *mask, ma;
628 int dst_stride, mask_stride;
631 src = _pixman_image_get_solid (src_image, dst_image->bits.format);
637 src16 = CONVERT_8888_TO_0565 (src);
639 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
640 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
645 dst_line += dst_stride;
647 mask_line += mask_stride;
/* Fully-on mask: plain OVER of the solid on the expanded dest pixel. */
653 if (ma == 0xffffffff)
662 d = over (src, CONVERT_0565_TO_0888 (d));
663 *dst = CONVERT_8888_TO_0565 (d);
/* General component-alpha case (complement of ma presumably applied on
 * an elided line before the final macro). */
669 d = CONVERT_0565_TO_0888 (d);
673 UN8x4_MUL_UN8x4 (s, ma);
674 UN8x4_MUL_UN8 (ma, srca);
676 UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, s);
678 *dst = CONVERT_8888_TO_0565 (d);
/* OVER fast path: a8r8g8b8 source, no mask, 8888 dest —
 * per pixel *dst = over (s, *dst). Loop headers are elided. */
686 fast_composite_over_8888_8888 (pixman_implementation_t *imp,
688 pixman_image_t * src_image,
689 pixman_image_t * mask_image,
690 pixman_image_t * dst_image,
700 uint32_t *dst_line, *dst;
701 uint32_t *src_line, *src, s;
702 int dst_stride, src_stride;
706 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
707 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
712 dst_line += dst_stride;
714 src_line += src_stride;
724 *dst = over (s, *dst);
/* OVER fast path: 8888 source, no mask, packed 24-bit (0888) dest.
 * Dest rows are byte-addressed 3 bytes/pixel; pixels go through
 * fetch_24/store_24 (the store of d is elided in this excerpt). */
731 fast_composite_over_8888_0888 (pixman_implementation_t *imp,
733 pixman_image_t * src_image,
734 pixman_image_t * mask_image,
735 pixman_image_t * dst_image,
745 uint8_t *dst_line, *dst;
747 uint32_t *src_line, *src, s;
749 int dst_stride, src_stride;
752 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 3);
753 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
758 dst_line += dst_stride;
760 src_line += src_stride;
772 d = over (s, fetch_24 (dst));
/* OVER fast path: 8888 source, no mask, 565 dest. Each dest pixel is
 * expanded to 8888, composited, and packed back. Loop headers and the
 * usual opaque/transparent source shortcuts are elided in this excerpt. */
782 fast_composite_over_8888_0565 (pixman_implementation_t *imp,
784 pixman_image_t * src_image,
785 pixman_image_t * mask_image,
786 pixman_image_t * dst_image,
796 uint16_t *dst_line, *dst;
798 uint32_t *src_line, *src, s;
800 int dst_stride, src_stride;
803 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
804 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
809 dst_line += dst_stride;
811 src_line += src_stride;
827 d = over (s, CONVERT_0565_TO_0888 (d));
829 *dst = CONVERT_8888_TO_0565 (d);
/* SRC fast path: x888/8888 source, no mask, 565 dest — straight format
 * conversion of each pixel, no blending. Loop headers elided. */
837 fast_composite_src_x888_0565 (pixman_implementation_t *imp,
839 pixman_image_t * src_image,
840 pixman_image_t * mask_image,
841 pixman_image_t * dst_image,
851 uint16_t *dst_line, *dst;
852 uint32_t *src_line, *src, s;
853 int dst_stride, src_stride;
856 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
857 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
862 dst_line += dst_stride;
864 src_line += src_stride;
870 *dst = CONVERT_8888_TO_0565 (s);
/* ADD fast path: a8 source onto a8 dest with saturation.
 * The visible trick: with t = s + d held in a 16-bit-capable temp,
 * (0 - (t >> 8)) is 0xFF.. when t overflowed 255, so OR-ing clamps the
 * result to 0xff. Loop headers and the store are elided. */
877 fast_composite_add_8000_8000 (pixman_implementation_t *imp,
879 pixman_image_t * src_image,
880 pixman_image_t * mask_image,
881 pixman_image_t * dst_image,
891 uint8_t *dst_line, *dst;
892 uint8_t *src_line, *src;
893 int dst_stride, src_stride;
898 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
899 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
904 dst_line += dst_stride;
906 src_line += src_stride;
/* Saturating 8-bit add: t presumably holds s + d (elided line). */
918 s = t | (0 - (t >> 8));
/* ADD fast path: 8888 source onto 8888 dest — per-channel saturating
 * add via UN8x4_ADD_UN8x4. Loop headers and the store are elided. */
928 fast_composite_add_8888_8888 (pixman_implementation_t *imp,
930 pixman_image_t * src_image,
931 pixman_image_t * mask_image,
932 pixman_image_t * dst_image,
942 uint32_t *dst_line, *dst;
943 uint32_t *src_line, *src;
944 int dst_stride, src_stride;
948 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
949 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
954 dst_line += dst_stride;
956 src_line += src_stride;
968 UN8x4_ADD_UN8x4 (s, d);
/* ADD fast path: solid source, a8 mask, a8 dest.
 * Per pixel: m = sa * a / 255 (mask scaled by solid alpha sa), then
 * r = saturating m + d via ADD_UN8. Loop headers and the store of r
 * are elided in this excerpt. */
978 fast_composite_add_n_8_8 (pixman_implementation_t *imp,
980 pixman_image_t * src_image,
981 pixman_image_t * mask_image,
982 pixman_image_t * dst_image,
992 uint8_t *dst_line, *dst;
993 uint8_t *mask_line, *mask;
994 int dst_stride, mask_stride;
999 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
1000 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
1001 src = _pixman_image_get_solid (src_image, dst_image->bits.format);
1007 dst_line += dst_stride;
1009 mask_line += mask_stride;
1022 m = MUL_UN8 (sa, a, tmp);
1023 r = ADD_UN8 (m, d, tmp);
/* SRC fast path for a solid source: convert the solid to the destination
 * format (a8 takes the alpha byte — that branch body is elided; 565
 * formats are packed), then delegate the rectangle to pixman_fill.
 * The tail of the pixman_fill argument list is elided in this excerpt. */
1035 fast_composite_solid_fill (pixman_implementation_t *imp,
1037 pixman_image_t * src_image,
1038 pixman_image_t * mask_image,
1039 pixman_image_t * dst_image,
1051 src = _pixman_image_get_solid (src_image, dst_image->bits.format);
1053 if (dst_image->bits.format == PIXMAN_a8)
1057 else if (dst_image->bits.format == PIXMAN_r5g6b5 ||
1058 dst_image->bits.format == PIXMAN_b5g6r5)
1060 src = CONVERT_8888_TO_0565 (src);
1063 pixman_fill (dst_image->bits.bits, dst_image->bits.rowstride,
1064 PIXMAN_FORMAT_BPP (dst_image->bits.format),
/* SRC fast path: 8888 -> x888 copy, one memcpy of width*4 bytes per row
 * (the dest alpha channel is ignored so a raw copy is valid).
 * Row-advance statements are elided in this excerpt. */
1071 fast_composite_src_8888_x888 (pixman_implementation_t *imp,
1073 pixman_image_t * src_image,
1074 pixman_image_t * mask_image,
1075 pixman_image_t * dst_image,
1087 int dst_stride, src_stride;
1088 uint32_t n_bytes = width * sizeof (uint32_t);
1090 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src, 1);
1091 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst, 1);
1095 memcpy (dst, src, n_bytes);
/* Fast-path dispatch table: { op, source format, mask format, dest format,
 * handler, flags }. Matched by _pixman_run_fast_path in fast_path_composite
 * below. PIXMAN_solid = solid source, PIXMAN_null = no mask;
 * NEED_COMPONENT_ALPHA marks per-channel-mask handlers.
 * NOTE(review): the table's sentinel terminator and closing brace are
 * elided in this excerpt. */
1102 static const pixman_fast_path_t c_fast_paths[] =
1104 { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_r5g6b5, fast_composite_over_n_8_0565, 0 },
1105 { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_b5g6r5, fast_composite_over_n_8_0565, 0 },
1106 { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_r8g8b8, fast_composite_over_n_8_0888, 0 },
1107 { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_b8g8r8, fast_composite_over_n_8_0888, 0 },
1108 { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8r8g8b8, fast_composite_over_n_8_8888, 0 },
1109 { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8r8g8b8, fast_composite_over_n_8_8888, 0 },
1110 { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8b8g8r8, fast_composite_over_n_8_8888, 0 },
1111 { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8, PIXMAN_x8b8g8r8, fast_composite_over_n_8_8888, 0 },
1112 { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, fast_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
1113 { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, fast_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
1114 { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8r8g8b8, PIXMAN_r5g6b5, fast_composite_over_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
1115 { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, fast_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
1116 { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, fast_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
1117 { PIXMAN_OP_OVER, PIXMAN_solid, PIXMAN_a8b8g8r8, PIXMAN_b5g6r5, fast_composite_over_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
1118 { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_x8r8g8b8, fast_composite_over_x888_8_8888, 0 },
1119 { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8, PIXMAN_a8r8g8b8, fast_composite_over_x888_8_8888, 0 },
1120 { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8, PIXMAN_x8b8g8r8, fast_composite_over_x888_8_8888, 0 },
1121 { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8, PIXMAN_a8b8g8r8, fast_composite_over_x888_8_8888, 0 },
1122 { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, fast_composite_over_8888_8888, 0 },
1123 { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, fast_composite_over_8888_8888, 0 },
1124 { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, fast_composite_over_8888_0565, 0 },
1125 { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, fast_composite_over_8888_8888, 0 },
1126 { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, fast_composite_over_8888_8888, 0 },
1127 { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, fast_composite_over_8888_0565, 0 },
1128 { PIXMAN_OP_ADD, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_a8r8g8b8, fast_composite_add_8888_8888, 0 },
1129 { PIXMAN_OP_ADD, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_a8b8g8r8, fast_composite_add_8888_8888, 0 },
1130 { PIXMAN_OP_ADD, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, fast_composite_add_8000_8000, 0 },
1131 { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, fast_composite_add_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
1132 { PIXMAN_OP_ADD, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, fast_composite_add_n_8_8, 0 },
1133 { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_a8r8g8b8, fast_composite_solid_fill, 0 },
1134 { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_x8r8g8b8, fast_composite_solid_fill, 0 },
1135 { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_a8b8g8r8, fast_composite_solid_fill, 0 },
1136 { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_x8b8g8r8, fast_composite_solid_fill, 0 },
1137 { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_a8, fast_composite_solid_fill, 0 },
1138 { PIXMAN_OP_SRC, PIXMAN_solid, PIXMAN_null, PIXMAN_r5g6b5, fast_composite_solid_fill, 0 },
1139 { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, fast_composite_src_8888_x888, 0 },
1140 { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_x8r8g8b8, fast_composite_src_8888_x888, 0 },
1141 { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, fast_composite_src_8888_x888, 0 },
1142 { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_x8b8g8r8, fast_composite_src_8888_x888, 0 },
1143 { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, fast_composite_src_x888_0565, 0 },
1144 { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8, PIXMAN_null, PIXMAN_r5g6b5, fast_composite_src_x888_0565, 0 },
1145 { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, fast_composite_src_x888_0565, 0 },
1146 { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8, PIXMAN_null, PIXMAN_b5g6r5, fast_composite_src_x888_0565, 0 },
1147 { PIXMAN_OP_IN, PIXMAN_a8, PIXMAN_null, PIXMAN_a8, fast_composite_in_8_8, 0 },
1148 { PIXMAN_OP_IN, PIXMAN_solid, PIXMAN_a8, PIXMAN_a8, fast_composite_in_n_8_8, 0 },
/* SRC with a pure scale transform and nearest-neighbor filtering.
 * Transforms the destination origin into source space once, then steps
 * across the rectangle using the transform's diagonal entries as unit
 * vectors; per pixel, the repeat mode maps (x, y) into source bounds.
 * NOTE(review): loop braces, the vx/vy -> x/y conversion, the
 * inside_bounds=FALSE path, break statements and the row store are all
 * elided in this excerpt. */
1153 fast_composite_src_scale_nearest (pixman_implementation_t *imp,
1155 pixman_image_t * src_image,
1156 pixman_image_t * mask_image,
1157 pixman_image_t * dst_image,
1169 int dst_stride, src_stride;
1173 PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst, 1);
1174 /* pass in 0 instead of src_x and src_y because src_x and src_y need to be
1175 * transformed from destination space to source space */
1176 PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, uint32_t, src_stride, src, 1);
1178 /* reference point is the center of the pixel */
1179 v.vector[0] = pixman_int_to_fixed (src_x) + pixman_fixed_1 / 2;
1180 v.vector[1] = pixman_int_to_fixed (src_y) + pixman_fixed_1 / 2;
1181 v.vector[2] = pixman_fixed_1;
1183 if (!pixman_transform_point_3d (src_image->common.transform, &v))
1186 /* Round down to closest integer, ensuring that 0.5 rounds to 0, not 1 */
1187 v.vector[0] -= pixman_fixed_e;
1188 v.vector[1] -= pixman_fixed_e;
1190 for (j = 0; j < height; j++)
1192 pixman_fixed_t vx = v.vector[0];
1193 pixman_fixed_t vy = v.vector[1];
1195 for (i = 0; i < width; ++i)
1197 pixman_bool_t inside_bounds;
1203 /* apply the repeat function */
1204 switch (src_image->common.repeat)
1206 case PIXMAN_REPEAT_NORMAL:
1207 x = MOD (x, src_image->bits.width);
1208 y = MOD (y, src_image->bits.height);
1209 inside_bounds = TRUE;
1212 case PIXMAN_REPEAT_PAD:
1213 x = CLIP (x, 0, src_image->bits.width - 1);
1214 y = CLIP (y, 0, src_image->bits.height - 1);
1215 inside_bounds = TRUE;
1218 case PIXMAN_REPEAT_REFLECT:
/* Reflect: fold coordinates over a 2*size period, mirroring the top half. */
1219 x = MOD (x, src_image->bits.width * 2);
1220 if (x >= src_image->bits.width)
1221 x = src_image->bits.width * 2 - x - 1;
1222 y = MOD (y, src_image->bits.height * 2);
1223 if (y >= src_image->bits.height)
1224 y = src_image->bits.height * 2 - y - 1;
1225 inside_bounds = TRUE;
1228 case PIXMAN_REPEAT_NONE:
1232 x < src_image->bits.width &&
1234 y < src_image->bits.height);
1240 /* XXX: we should move this multiplication out of the loop */
1241 result = *(src + y * src_stride + x);
1247 *(dst + i) = result;
1249 /* adjust the x location by a unit vector in the x direction:
1250 * this is equivalent to transforming x+1 of the destination
1251 * point to source space
1253 vx += src_image->common.transform->matrix[0][0];
1255 /* adjust the y location by a unit vector in the y direction
1256 * this is equivalent to transforming y+1 of the destination point
1259 v.vector[1] += src_image->common.transform->matrix[1][1];
/* Composite entry point for this implementation.
 * Order of dispatch: (1) nearest-neighbor pure-scale SRC special case,
 * (2) the c_fast_paths table, (3) delegate to the next implementation.
 * NOTE(review): several parameters, braces and return statements are
 * elided in this excerpt. */
1265 fast_path_composite (pixman_implementation_t *imp,
1267 pixman_image_t * src,
1268 pixman_image_t * mask,
1269 pixman_image_t * dest,
/* Case 1: transformed bits source, SRC op, nearest filter, no alpha
 * maps or accessors, 32 bpp, matching formats (a no-mask condition is
 * presumably among the elided lines). */
1279 if (src->type == BITS
1280 && src->common.transform
1282 && op == PIXMAN_OP_SRC
1283 && !src->common.alpha_map && !dest->common.alpha_map
1284 && (src->common.filter == PIXMAN_FILTER_NEAREST)
1285 && PIXMAN_FORMAT_BPP (dest->bits.format) == 32
1286 && src->bits.format == dest->bits.format
1287 && !src->bits.read_func && !src->bits.write_func
1288 && !dest->bits.read_func && !dest->bits.write_func)
1290 /* ensure that the transform matrix only has a scale */
1291 if (src->common.transform->matrix[0][1] == 0 &&
1292 src->common.transform->matrix[1][0] == 0 &&
1293 src->common.transform->matrix[2][0] == 0 &&
1294 src->common.transform->matrix[2][1] == 0 &&
1295 src->common.transform->matrix[2][2] == pixman_fixed_1)
1297 _pixman_walk_composite_region (imp, op,
1303 fast_composite_src_scale_nearest);
/* Case 2: table-driven fast paths. */
1308 if (_pixman_run_fast_path (c_fast_paths, imp,
1309 op, src, mask, dest,
/* Case 3: fall through to the delegate implementation. */
1318 _pixman_implementation_composite (imp->delegate, op,
/* Fill a width x height rectangle at (x, y) with the low byte of xor.
 * The uint32_t stride is converted to a byte stride so rows can be
 * addressed as uint8_t. Loop structure and the store are elided. */
1327 pixman_fill8 (uint32_t *bits,
1335 int byte_stride = stride * (int) sizeof (uint32_t);
1336 uint8_t *dst = (uint8_t *) bits;
1337 uint8_t v = xor & 0xff;
1340 dst = dst + y * byte_stride + x;
1344 for (i = 0; i < width; ++i)
/* Fill a width x height rectangle at (x, y) with the low 16 bits of xor.
 * Stride is converted from uint32_t units to uint16_t units. Loop
 * structure and the store are elided in this excerpt. */
1352 pixman_fill16 (uint32_t *bits,
1361 (stride * (int)sizeof (uint32_t)) / (int)sizeof (uint16_t);
1362 uint16_t *dst = (uint16_t *)bits;
1363 uint16_t v = xor & 0xffff;
1366 dst = dst + y * short_stride + x;
1370 for (i = 0; i < width; ++i)
1373 dst += short_stride;
/* Fill a width x height rectangle at (x, y) with the full 32-bit xor
 * value; stride is already in uint32_t units. Most of the body (row
 * loop, store, row advance) is elided in this excerpt. */
1378 pixman_fill32 (uint32_t *bits,
1388 bits = bits + y * stride + x;
1392 for (i = 0; i < width; ++i)
1399 static pixman_bool_t
/* Fill entry point: dispatch on bits-per-pixel to the width-specific
 * fill helpers above, delegating any other bpp to the next
 * implementation. The switch statement and case labels (presumably on
 * bpp 8/16/32) are elided in this excerpt. */
1400 fast_path_fill (pixman_implementation_t *imp,
1413 pixman_fill8 (bits, stride, x, y, width, height, xor);
1417 pixman_fill16 (bits, stride, x, y, width, height, xor);
1421 pixman_fill32 (bits, stride, x, y, width, height, xor);
/* Unsupported bpp: let the delegate handle it. */
1425 return _pixman_implementation_fill (
1426 imp->delegate, bits, stride, bpp, x, y, width, height, xor);
1433 pixman_implementation_t *
/* Construct the fast-path implementation: create the general (fallback)
 * implementation as delegate, wrap it, and install the composite and
 * fill entry points defined above. The return statement runs past this
 * excerpt's last visible line. */
1434 _pixman_implementation_create_fast_path (void)
1436 pixman_implementation_t *general = _pixman_implementation_create_general ();
1437 pixman_implementation_t *imp = _pixman_implementation_create (general);
1439 imp->composite = fast_path_composite;
1440 imp->fill = fast_path_fill;