/* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */
/*
 * Copyright © 2000 SuSE, Inc.
 * Copyright © 2007 Red Hat, Inc.
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that
 * copyright notice and this permission notice appear in supporting
 * documentation, and that the name of SuSE not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  SuSE makes no representations about the
 * suitability of this software for any purpose.  It is provided "as is"
 * without express or implied warranty.
 *
 * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
 * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * Author:  Keith Packard, SuSE, Inc.
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <string.h>
#include <stdlib.h>
#include "pixman-private.h"
#include "pixman-combine32.h"
#include "pixman-inlines.h"
static force_inline uint32_t
fetch_24 (uint8_t *a)
{
    if (((unsigned long)a) & 1)
    {
#ifdef WORDS_BIGENDIAN
	return (*a << 16) | (*(uint16_t *)(a + 1));
#else
	return *a | (*(uint16_t *)(a + 1) << 8);
#endif
    }
    else
    {
#ifdef WORDS_BIGENDIAN
	return (*(uint16_t *)a << 8) | *(a + 2);
#else
	return *(uint16_t *)a | (*(a + 2) << 16);
#endif
    }
}
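
/* store_24 below is the mirror image of fetch_24: a 24-bit pixel that is
 * not 16-bit aligned is accessed as one byte plus one 16-bit word (or the
 * word first, depending on alignment), so that no single access crosses an
 * alignment boundary.  Which byte holds which channel bits depends on the
 * host byte order, hence the WORDS_BIGENDIAN branches.
 */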
static force_inline void
store_24 (uint8_t *a,
          uint32_t v)
{
    if (((unsigned long)a) & 1)
    {
#ifdef WORDS_BIGENDIAN
	*a = (uint8_t) (v >> 16);
	*(uint16_t *)(a + 1) = (uint16_t) (v);
#else
	*a = (uint8_t) (v);
	*(uint16_t *)(a + 1) = (uint16_t) (v >> 8);
#endif
    }
    else
    {
#ifdef WORDS_BIGENDIAN
	*(uint16_t *)a = (uint16_t)(v >> 8);
	*(a + 2) = (uint8_t)v;
#else
	*(uint16_t *)a = (uint16_t)v;
	*(a + 2) = (uint8_t)(v >> 16);
#endif
    }
}
static force_inline uint32_t
over (uint32_t src,
      uint32_t dest)
{
    uint32_t a = ~src >> 24;

    UN8x4_MUL_UN8_ADD_UN8x4 (dest, a, src);

    return dest;
}

/* Multiply all four channels of x by the 8-bit value y */
static force_inline uint32_t
in (uint32_t x,
    uint8_t  y)
{
    uint16_t a = y;

    UN8x4_MUL_UN8 (x, a);

    return x;
}
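
/* A minimal worked example of the OVER operator as computed by over():
 * each 8-bit channel of dest is scaled by the inverse of the source alpha
 * and the source is added on top, i.e. (with premultiplied values)
 *
 *     dest = src + (255 - src.alpha) * dest / 255      per channel
 *
 * For src = 0x80404040 (50% grey at 50% alpha) over dest = 0xffffffff,
 * a = ~src >> 24 = 0x7f, so each color channel becomes
 * 0xff * 0x7f / 0xff + 0x40 = 0xbf and the result is 0xffbfbfbf.
 * UN8x4_MUL_UN8_ADD_UN8x4 performs this on all four channels at once.
 */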
static void
fast_composite_over_x888_8_8888 (pixman_implementation_t *imp,
                                 pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t    *src, *src_line;
    uint32_t    *dst, *dst_line;
    uint8_t     *mask, *mask_line;
    int src_stride, mask_stride, dst_stride;
    uint8_t m;
    uint32_t s, d;
    int32_t w;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);

    while (height--)
    {
	src = src_line;
	src_line += src_stride;
	dst = dst_line;
	dst_line += dst_stride;
	mask = mask_line;
	mask_line += mask_stride;

	w = width;
	while (w--)
	{
	    m = *mask++;
	    if (m)
	    {
		s = *src | 0xff000000;

		if (m == 0xff)
		{
		    *dst = s;
		}
		else
		{
		    d = in (s, m);
		    *dst = over (d, *dst);
		}
	    }
	    src++;
	    dst++;
	}
    }
}
static void
fast_composite_in_n_8_8 (pixman_implementation_t *imp,
                         pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t src, srca;
    uint8_t *dst_line, *dst;
    uint8_t *mask_line, *mask, m;
    int dst_stride, mask_stride;
    int32_t w;
    uint16_t t;

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);

    srca = src >> 24;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);

    if (srca == 0xff)
    {
	while (height--)
	{
	    dst = dst_line;
	    dst_line += dst_stride;
	    mask = mask_line;
	    mask_line += mask_stride;
	    w = width;

	    while (w--)
	    {
		m = *mask++;

		if (m == 0)
		    *dst = 0;
		else if (m != 0xff)
		    *dst = MUL_UN8 (m, *dst, t);

		dst++;
	    }
	}
    }
    else
    {
	while (height--)
	{
	    dst = dst_line;
	    dst_line += dst_stride;
	    mask = mask_line;
	    mask_line += mask_stride;
	    w = width;

	    while (w--)
	    {
		m = *mask++;
		m = MUL_UN8 (m, srca, t);

		if (m == 0)
		    *dst = 0;
		else if (m != 0xff)
		    *dst = MUL_UN8 (m, *dst, t);

		dst++;
	    }
	}
    }
}
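
/* MUL_UN8 (from pixman-combine32.h) computes a correctly rounded
 * x * y / 255 without a division; the third argument is a scratch
 * variable for the 16-bit intermediate.  A common formulation of the
 * trick is
 *
 *     t = x * y + 0x80;  result = (t + (t >> 8)) >> 8;
 *
 * which agrees with floor (x * y / 255.0 + 0.5) for all 8-bit inputs.
 */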
static void
fast_composite_in_8_8 (pixman_implementation_t *imp,
                       pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint8_t *dst_line, *dst;
    uint8_t *src_line, *src;
    int dst_stride, src_stride;
    int32_t w;
    uint8_t s;
    uint16_t t;

    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	src = src_line;
	src_line += src_stride;
	w = width;

	while (w--)
	{
	    s = *src++;

	    if (s == 0)
		*dst = 0;
	    else if (s != 0xff)
		*dst = MUL_UN8 (s, *dst, t);

	    dst++;
	}
    }
}
static void
fast_composite_over_n_8_8888 (pixman_implementation_t *imp,
                              pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t src, srca;
    uint32_t *dst_line, *dst, d;
    uint8_t *mask_line, *mask, m;
    int dst_stride, mask_stride;
    int32_t w;

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);

    srca = src >> 24;
    if (src == 0)
	return;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	mask = mask_line;
	mask_line += mask_stride;
	w = width;

	while (w--)
	{
	    m = *mask++;
	    if (m == 0xff)
	    {
		if (srca == 0xff)
		    *dst = src;
		else
		    *dst = over (src, *dst);
	    }
	    else if (m)
	    {
		d = in (src, m);
		*dst = over (d, *dst);
	    }
	    dst++;
	}
    }
}
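
/* The *_ca ("component alpha") variants below take an a8r8g8b8 mask and
 * apply a separate mask value to each color channel rather than a single
 * 8-bit coverage value.  For OVER this computes, per channel c (with all
 * values premultiplied),
 *
 *     dest.c = src.c * mask.c / 255
 *            + (255 - src.alpha * mask.c / 255) * dest.c / 255
 *
 * which is what the UN8x4_MUL_UN8x4 / UN8x4_MUL_UN8 /
 * UN8x4_MUL_UN8x4_ADD_UN8x4 sequences implement.
 */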
static void
fast_composite_add_n_8888_8888_ca (pixman_implementation_t *imp,
                                   pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t src, s;
    uint32_t *dst_line, *dst, d;
    uint32_t *mask_line, *mask, ma;
    int dst_stride, mask_stride;
    int32_t w;

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);

    if (src == 0)
	return;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	mask = mask_line;
	mask_line += mask_stride;
	w = width;

	while (w--)
	{
	    ma = *mask++;

	    if (ma)
	    {
		d = *dst;
		s = src;

		UN8x4_MUL_UN8x4_ADD_UN8x4 (s, ma, d);

		*dst = s;
	    }

	    dst++;
	}
    }
}
static void
fast_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
                                    pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t src, srca, s;
    uint32_t *dst_line, *dst, d;
    uint32_t *mask_line, *mask, ma;
    int dst_stride, mask_stride;
    int32_t w;

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);

    srca = src >> 24;
    if (src == 0)
	return;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	mask = mask_line;
	mask_line += mask_stride;
	w = width;

	while (w--)
	{
	    ma = *mask++;
	    if (ma == 0xffffffff)
	    {
		if (srca == 0xff)
		    *dst = src;
		else
		    *dst = over (src, *dst);
	    }
	    else if (ma)
	    {
		d = *dst;
		s = src;

		UN8x4_MUL_UN8x4 (s, ma);
		UN8x4_MUL_UN8 (ma, srca);
		ma = ~ma;
		UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, s);

		*dst = d;
	    }

	    dst++;
	}
    }
}
static void
fast_composite_over_n_8_0888 (pixman_implementation_t *imp,
                              pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t src, srca;
    uint8_t *dst_line, *dst;
    uint32_t d;
    uint8_t *mask_line, *mask, m;
    int dst_stride, mask_stride;
    int32_t w;

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);

    srca = src >> 24;
    if (src == 0)
	return;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 3);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	mask = mask_line;
	mask_line += mask_stride;
	w = width;

	while (w--)
	{
	    m = *mask++;
	    if (m == 0xff)
	    {
		if (srca == 0xff)
		{
		    d = src;
		}
		else
		{
		    d = fetch_24 (dst);
		    d = over (src, d);
		}
		store_24 (dst, d);
	    }
	    else if (m)
	    {
		d = over (in (src, m), fetch_24 (dst));
		store_24 (dst, d);
	    }
	    dst += 3;
	}
    }
}
static void
fast_composite_over_n_8_0565 (pixman_implementation_t *imp,
                              pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t src, srca;
    uint16_t *dst_line, *dst;
    uint32_t d;
    uint8_t *mask_line, *mask, m;
    int dst_stride, mask_stride;
    int32_t w;

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);

    srca = src >> 24;
    if (src == 0)
	return;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	mask = mask_line;
	mask_line += mask_stride;
	w = width;

	while (w--)
	{
	    m = *mask++;
	    if (m == 0xff)
	    {
		if (srca == 0xff)
		{
		    d = src;
		}
		else
		{
		    d = *dst;
		    d = over (src, CONVERT_0565_TO_0888 (d));
		}
		*dst = CONVERT_8888_TO_0565 (d);
	    }
	    else if (m)
	    {
		d = *dst;
		d = over (in (src, m), CONVERT_0565_TO_0888 (d));
		*dst = CONVERT_8888_TO_0565 (d);
	    }
	    dst++;
	}
    }
}
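
/* The CONVERT_0565_TO_0888 / CONVERT_8888_TO_0565 macros (from pixman's
 * private headers) widen r5g6b5 to x8r8g8b8 and back.  Widening
 * replicates the high bits of each field into the low bits so that 0x1f
 * maps to 0xff rather than 0xf8; for the 5-bit red field, for example,
 *
 *     r8 = (r5 << 3) | (r5 >> 2);
 *
 * Narrowing simply keeps the top 5 (or 6) bits of each channel.
 */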
static void
fast_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
                                    pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t  src, srca, s;
    uint16_t  src16;
    uint16_t *dst_line, *dst;
    uint32_t  d;
    uint32_t *mask_line, *mask, ma;
    int dst_stride, mask_stride;
    int32_t w;

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);

    srca = src >> 24;
    if (src == 0)
	return;

    src16 = CONVERT_8888_TO_0565 (src);

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	mask = mask_line;
	mask_line += mask_stride;
	w = width;

	while (w--)
	{
	    ma = *mask++;
	    if (ma == 0xffffffff)
	    {
		if (srca == 0xff)
		{
		    *dst = src16;
		}
		else
		{
		    d = *dst;
		    d = over (src, CONVERT_0565_TO_0888 (d));
		    *dst = CONVERT_8888_TO_0565 (d);
		}
	    }
	    else if (ma)
	    {
		d = *dst;
		d = CONVERT_0565_TO_0888 (d);

		s = src;

		UN8x4_MUL_UN8x4 (s, ma);
		UN8x4_MUL_UN8 (ma, srca);
		ma = ~ma;
		UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, s);

		*dst = CONVERT_8888_TO_0565 (d);
	    }
	    dst++;
	}
    }
}
static void
fast_composite_over_8888_8888 (pixman_implementation_t *imp,
                               pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t *dst_line, *dst;
    uint32_t *src_line, *src, s;
    int dst_stride, src_stride;
    uint8_t a;
    int32_t w;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	src = src_line;
	src_line += src_stride;
	w = width;

	while (w--)
	{
	    s = *src++;
	    a = s >> 24;
	    if (a == 0xff)
		*dst = s;
	    else if (s)
		*dst = over (s, *dst);
	    dst++;
	}
    }
}
static void
fast_composite_src_x888_8888 (pixman_implementation_t *imp,
                              pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t *dst_line, *dst;
    uint32_t *src_line, *src;
    int dst_stride, src_stride;
    int32_t w;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	src = src_line;
	src_line += src_stride;
	w = width;

	while (w--)
	    *dst++ = (*src++) | 0xff000000;
    }
}
static void
fast_composite_over_8888_0888 (pixman_implementation_t *imp,
                               pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint8_t     *dst_line, *dst;
    uint32_t    d;
    uint32_t    *src_line, *src, s;
    uint8_t     a;
    int dst_stride, src_stride;
    int32_t w;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 3);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	src = src_line;
	src_line += src_stride;
	w = width;

	while (w--)
	{
	    s = *src++;
	    a = s >> 24;
	    if (a)
	    {
		if (a == 0xff)
		    d = s;
		else
		    d = over (s, fetch_24 (dst));

		store_24 (dst, d);
	    }
	    dst += 3;
	}
    }
}
static void
fast_composite_over_8888_0565 (pixman_implementation_t *imp,
                               pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint16_t    *dst_line, *dst;
    uint32_t    d;
    uint32_t    *src_line, *src, s;
    uint8_t     a;
    int dst_stride, src_stride;
    int32_t w;

    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	src = src_line;
	src_line += src_stride;
	w = width;

	while (w--)
	{
	    s = *src++;
	    a = s >> 24;
	    if (s)
	    {
		if (a == 0xff)
		{
		    d = s;
		}
		else
		{
		    d = *dst;
		    d = over (s, CONVERT_0565_TO_0888 (d));
		}
		*dst = CONVERT_8888_TO_0565 (d);
	    }
	    dst++;
	}
    }
}
static void
fast_composite_src_x888_0565 (pixman_implementation_t *imp,
                              pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint16_t    *dst_line, *dst;
    uint32_t    *src_line, *src, s;
    int dst_stride, src_stride;
    int32_t w;

    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	src = src_line;
	src_line += src_stride;
	w = width;

	while (w--)
	{
	    s = *src++;
	    *dst = CONVERT_8888_TO_0565 (s);
	    dst++;
	}
    }
}
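
/* The ADD paths below saturate rather than wrap.  For the a8 case this
 * uses a small branchless trick: after t = d + s (computed in 16 bits),
 *
 *     s = t | (0 - (t >> 8));
 *
 * (t >> 8) is 1 exactly when the sum overflowed 8 bits, so 0 - (t >> 8)
 * is either 0 (no overflow) or all ones, clamping the result to 0xff.
 */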
static void
fast_composite_add_8_8 (pixman_implementation_t *imp,
                        pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint8_t     *dst_line, *dst;
    uint8_t     *src_line, *src;
    int dst_stride, src_stride;
    int32_t w;
    uint8_t s, d;
    uint16_t t;

    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	src = src_line;
	src_line += src_stride;
	w = width;

	while (w--)
	{
	    s = *src++;
	    if (s)
	    {
		if (s != 0xff)
		{
		    d = *dst;
		    t = d + s;
		    s = t | (0 - (t >> 8));
		}
		*dst = s;
	    }
	    dst++;
	}
    }
}
static void
fast_composite_add_8888_8888 (pixman_implementation_t *imp,
                              pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t    *dst_line, *dst;
    uint32_t    *src_line, *src;
    int dst_stride, src_stride;
    int32_t w;
    uint32_t s, d;

    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	src = src_line;
	src_line += src_stride;
	w = width;

	while (w--)
	{
	    s = *src++;
	    if (s)
	    {
		if (s != 0xffffffff)
		{
		    d = *dst;
		    if (d)
			UN8x4_ADD_UN8x4 (s, d);
		}
		*dst = s;
	    }
	    dst++;
	}
    }
}
static void
fast_composite_add_n_8_8 (pixman_implementation_t *imp,
                          pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint8_t     *dst_line, *dst;
    uint8_t     *mask_line, *mask;
    int dst_stride, mask_stride;
    int32_t w;
    uint32_t src;
    uint8_t sa;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
    sa = (src >> 24);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	mask = mask_line;
	mask_line += mask_stride;
	w = width;

	while (w--)
	{
	    uint16_t tmp;
	    uint16_t a;
	    uint32_t m, d;
	    uint32_t r;

	    a = *mask++;
	    d = *dst;

	    m = MUL_UN8 (sa, a, tmp);
	    r = ADD_UN8 (m, d, tmp);

	    *dst++ = r;
	}
    }
}
#ifdef WORDS_BIGENDIAN
#define CREATE_BITMASK(n) (0x80000000 >> (n))
#define UPDATE_BITMASK(n) ((n) >> 1)
#else
#define CREATE_BITMASK(n) (1 << (n))
#define UPDATE_BITMASK(n) ((n) << 1)
#endif

#define TEST_BIT(p, n)					\
    (*((p) + ((n) >> 5)) & CREATE_BITMASK ((n) & 31))
#define SET_BIT(p, n)							\
    do { *((p) + ((n) >> 5)) |= CREATE_BITMASK ((n) & 31); } while (0);
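
/* For a1 images, pixel n of a scanline lives in bit (n & 31) of the
 * 32-bit word at index (n >> 5).  Which end of the word holds pixel 0
 * depends on byte order, hence the two CREATE_BITMASK/UPDATE_BITMASK
 * definitions above.  On little endian, for example, TEST_BIT (p, 37)
 * reads word p[1] and masks it with 1 << 5.
 */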
static void
fast_composite_add_1000_1000 (pixman_implementation_t *imp,
                              pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t    *dst_line, *dst;
    uint32_t    *src_line, *src;
    int dst_stride, src_stride;
    int32_t w;

    PIXMAN_IMAGE_GET_LINE (src_image, 0, src_y, uint32_t,
                           src_stride, src_line, 1);
    PIXMAN_IMAGE_GET_LINE (dest_image, 0, dest_y, uint32_t,
                           dst_stride, dst_line, 1);

    while (height--)
    {
	dst = dst_line;
	dst_line += dst_stride;
	src = src_line;
	src_line += src_stride;
	w = width;

	while (w--)
	{
	    /*
	     * TODO: improve performance by processing uint32_t data instead
	     *       of individual bits
	     */
	    if (TEST_BIT (src, src_x + w))
		SET_BIT (dst, dest_x + w);
	}
    }
}
static void
fast_composite_over_n_1_8888 (pixman_implementation_t *imp,
                              pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t     src, srca;
    uint32_t    *dst, *dst_line;
    uint32_t    *mask, *mask_line;
    int          mask_stride, dst_stride;
    uint32_t     bitcache, bitmask;
    int32_t      w;

    if (width <= 0)
	return;

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
    srca = src >> 24;
    if (src == 0)
	return;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t,
                           dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, 0, mask_y, uint32_t,
                           mask_stride, mask_line, 1);
    mask_line += mask_x >> 5;

    if (srca == 0xff)
    {
	while (height--)
	{
	    dst = dst_line;
	    dst_line += dst_stride;
	    mask = mask_line;
	    mask_line += mask_stride;
	    w = width;

	    bitcache = *mask++;
	    bitmask = CREATE_BITMASK (mask_x & 31);

	    while (w--)
	    {
		if (bitmask == 0)
		{
		    bitcache = *mask++;
		    bitmask = CREATE_BITMASK (0);
		}
		if (bitcache & bitmask)
		    *dst = src;
		bitmask = UPDATE_BITMASK (bitmask);
		dst++;
	    }
	}
    }
    else
    {
	while (height--)
	{
	    dst = dst_line;
	    dst_line += dst_stride;
	    mask = mask_line;
	    mask_line += mask_stride;
	    w = width;

	    bitcache = *mask++;
	    bitmask = CREATE_BITMASK (mask_x & 31);

	    while (w--)
	    {
		if (bitmask == 0)
		{
		    bitcache = *mask++;
		    bitmask = CREATE_BITMASK (0);
		}
		if (bitcache & bitmask)
		    *dst = over (src, *dst);
		bitmask = UPDATE_BITMASK (bitmask);
		dst++;
	    }
	}
    }
}
static void
fast_composite_over_n_1_0565 (pixman_implementation_t *imp,
                              pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t     src, srca;
    uint16_t    *dst, *dst_line;
    uint32_t    *mask, *mask_line;
    int          mask_stride, dst_stride;
    uint32_t     bitcache, bitmask;
    int32_t      w;
    uint32_t     d;
    uint16_t     src565;

    if (width <= 0)
	return;

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
    srca = src >> 24;
    if (src == 0)
	return;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t,
                           dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, 0, mask_y, uint32_t,
                           mask_stride, mask_line, 1);
    mask_line += mask_x >> 5;

    if (srca == 0xff)
    {
	src565 = CONVERT_8888_TO_0565 (src);
	while (height--)
	{
	    dst = dst_line;
	    dst_line += dst_stride;
	    mask = mask_line;
	    mask_line += mask_stride;
	    w = width;

	    bitcache = *mask++;
	    bitmask = CREATE_BITMASK (mask_x & 31);

	    while (w--)
	    {
		if (bitmask == 0)
		{
		    bitcache = *mask++;
		    bitmask = CREATE_BITMASK (0);
		}
		if (bitcache & bitmask)
		    *dst = src565;
		bitmask = UPDATE_BITMASK (bitmask);
		dst++;
	    }
	}
    }
    else
    {
	while (height--)
	{
	    dst = dst_line;
	    dst_line += dst_stride;
	    mask = mask_line;
	    mask_line += mask_stride;
	    w = width;

	    bitcache = *mask++;
	    bitmask = CREATE_BITMASK (mask_x & 31);

	    while (w--)
	    {
		if (bitmask == 0)
		{
		    bitcache = *mask++;
		    bitmask = CREATE_BITMASK (0);
		}
		if (bitcache & bitmask)
		{
		    d = over (src, CONVERT_0565_TO_0888 (*dst));
		    *dst = CONVERT_8888_TO_0565 (d);
		}
		bitmask = UPDATE_BITMASK (bitmask);
		dst++;
	    }
	}
    }
}
static void
fast_composite_solid_fill (pixman_implementation_t *imp,
                           pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t src;

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);

    if (dest_image->bits.format == PIXMAN_a1)
    {
	src = src >> 31;
    }
    else if (dest_image->bits.format == PIXMAN_a8)
    {
	src = src >> 24;
    }
    else if (dest_image->bits.format == PIXMAN_r5g6b5 ||
             dest_image->bits.format == PIXMAN_b5g6r5)
    {
	src = CONVERT_8888_TO_0565 (src);
    }

    pixman_fill (dest_image->bits.bits, dest_image->bits.rowstride,
                 PIXMAN_FORMAT_BPP (dest_image->bits.format),
                 dest_x, dest_y,
                 width, height,
                 src);
}
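
/* pixman stores rowstrides in uint32_t units, not bytes; the memcpy path
 * below therefore multiplies bits.rowstride by 4 to get the byte stride
 * before doing per-scanline copies.
 */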
static void
fast_composite_src_memcpy (pixman_implementation_t *imp,
                           pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    int bpp = PIXMAN_FORMAT_BPP (dest_image->bits.format) / 8;
    uint32_t n_bytes = width * bpp;
    int dst_stride, src_stride;
    uint8_t    *dst;
    uint8_t    *src;

    src_stride = src_image->bits.rowstride * 4;
    dst_stride = dest_image->bits.rowstride * 4;

    src = (uint8_t *)src_image->bits.bits + src_y * src_stride + src_x * bpp;
    dst = (uint8_t *)dest_image->bits.bits + dest_y * dst_stride + dest_x * bpp;

    while (height--)
    {
	memcpy (dst, src, n_bytes);

	dst += dst_stride;
	src += src_stride;
    }
}
FAST_NEAREST (8888_8888_cover, 8888, 8888, uint32_t, uint32_t, SRC, COVER)
FAST_NEAREST (8888_8888_none, 8888, 8888, uint32_t, uint32_t, SRC, NONE)
FAST_NEAREST (8888_8888_pad, 8888, 8888, uint32_t, uint32_t, SRC, PAD)
FAST_NEAREST (8888_8888_normal, 8888, 8888, uint32_t, uint32_t, SRC, NORMAL)
FAST_NEAREST (x888_8888_cover, x888, 8888, uint32_t, uint32_t, SRC, COVER)
FAST_NEAREST (x888_8888_pad, x888, 8888, uint32_t, uint32_t, SRC, PAD)
FAST_NEAREST (x888_8888_normal, x888, 8888, uint32_t, uint32_t, SRC, NORMAL)
FAST_NEAREST (8888_8888_cover, 8888, 8888, uint32_t, uint32_t, OVER, COVER)
FAST_NEAREST (8888_8888_none, 8888, 8888, uint32_t, uint32_t, OVER, NONE)
FAST_NEAREST (8888_8888_pad, 8888, 8888, uint32_t, uint32_t, OVER, PAD)
FAST_NEAREST (8888_8888_normal, 8888, 8888, uint32_t, uint32_t, OVER, NORMAL)
FAST_NEAREST (8888_565_cover, 8888, 0565, uint32_t, uint16_t, SRC, COVER)
FAST_NEAREST (8888_565_none, 8888, 0565, uint32_t, uint16_t, SRC, NONE)
FAST_NEAREST (8888_565_pad, 8888, 0565, uint32_t, uint16_t, SRC, PAD)
FAST_NEAREST (8888_565_normal, 8888, 0565, uint32_t, uint16_t, SRC, NORMAL)
FAST_NEAREST (565_565_normal, 0565, 0565, uint16_t, uint16_t, SRC, NORMAL)
FAST_NEAREST (8888_565_cover, 8888, 0565, uint32_t, uint16_t, OVER, COVER)
FAST_NEAREST (8888_565_none, 8888, 0565, uint32_t, uint16_t, OVER, NONE)
FAST_NEAREST (8888_565_pad, 8888, 0565, uint32_t, uint16_t, OVER, PAD)
FAST_NEAREST (8888_565_normal, 8888, 0565, uint32_t, uint16_t, OVER, NORMAL)
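
/* Each FAST_NEAREST invocation above expands (via pixman-inlines.h) into
 * a nearest-neighbour scanline scaler plus its main loop for the given
 * source/destination formats, operator and repeat mode; the resulting
 * functions are referenced by the SIMPLE_NEAREST_FAST_PATH entries in the
 * c_fast_paths table below.
 */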
#define REPEAT_MIN_WIDTH 32
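
/* fast_composite_tiled_repeat handles NORMAL (tiled) repeat by looking up
 * a non-repeating fast path and invoking it once per tile-sized chunk of
 * each destination scanline.  Sources narrower than REPEAT_MIN_WIDTH
 * pixels are first replicated into a temporary one-scanline image so that
 * the per-call overhead is not paid every few pixels.
 */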
static void
fast_composite_tiled_repeat (pixman_implementation_t *imp,
                             pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    pixman_composite_func_t func;
    pixman_format_code_t mask_format;
    uint32_t src_flags, mask_flags;

    src_flags = (info->src_flags & ~FAST_PATH_NORMAL_REPEAT) |
                    FAST_PATH_SAMPLES_COVER_CLIP_NEAREST;

    if (mask_image)
    {
	mask_format = mask_image->common.extended_format_code;
	mask_flags = info->mask_flags;
    }
    else
    {
	mask_format = PIXMAN_null;
	mask_flags = FAST_PATH_IS_OPAQUE;
    }

    if (_pixman_lookup_composite_function (
	    imp->toplevel, info->op,
	    src_image->common.extended_format_code, src_flags,
	    mask_format, mask_flags,
	    dest_image->common.extended_format_code, info->dest_flags,
	    &imp, &func))
    {
	int32_t sx, sy;
	int32_t width_remain;
	int32_t num_pixels;
	int32_t src_width;
	int32_t i, j;
	pixman_image_t extended_src_image;
	uint32_t extended_src[REPEAT_MIN_WIDTH * 2];
	pixman_bool_t need_src_extension;
	uint32_t *src_line;
	int32_t src_stride;
	int32_t src_bpp;
	pixman_composite_info_t info2 = *info;

	src_bpp = PIXMAN_FORMAT_BPP (src_image->bits.format);

	if (src_image->bits.width < REPEAT_MIN_WIDTH &&
	    (src_bpp == 32 || src_bpp == 16 || src_bpp == 8))
	{
	    sx = src_x;
	    sx = MOD (sx, src_image->bits.width);
	    sx += width;
	    src_width = 0;

	    while (src_width < REPEAT_MIN_WIDTH && src_width <= sx)
		src_width += src_image->bits.width;

	    src_stride = (src_width * (src_bpp >> 3) + 3) / (int) sizeof (uint32_t);

	    /* Initialize/validate stack-allocated temporary image */
	    _pixman_bits_image_init (&extended_src_image, src_image->bits.format,
				     src_width, 1, &extended_src[0], src_stride);
	    _pixman_image_validate (&extended_src_image);

	    info2.src_image = &extended_src_image;
	    need_src_extension = TRUE;
	}
	else
	{
	    src_width = src_image->bits.width;
	    need_src_extension = FALSE;
	}

	sx = src_x;
	sy = src_y;

	while (--height >= 0)
	{
	    sx = MOD (sx, src_width);
	    sy = MOD (sy, src_image->bits.height);

	    if (need_src_extension)
	    {
		if (src_bpp == 32)
		{
		    PIXMAN_IMAGE_GET_LINE (src_image, 0, sy, uint32_t, src_stride, src_line, 1);

		    for (i = 0; i < src_width; )
		    {
			for (j = 0; j < src_image->bits.width; j++, i++)
			    extended_src[i] = src_line[j];
		    }
		}
		else if (src_bpp == 16)
		{
		    uint16_t *src_line_16;

		    PIXMAN_IMAGE_GET_LINE (src_image, 0, sy, uint16_t, src_stride,
					   src_line_16, 1);
		    src_line = (uint32_t*)src_line_16;

		    for (i = 0; i < src_width; )
		    {
			for (j = 0; j < src_image->bits.width; j++, i++)
			    ((uint16_t*)extended_src)[i] = ((uint16_t*)src_line)[j];
		    }
		}
		else if (src_bpp == 8)
		{
		    uint8_t *src_line_8;

		    PIXMAN_IMAGE_GET_LINE (src_image, 0, sy, uint8_t, src_stride,
					   src_line_8, 1);
		    src_line = (uint32_t*)src_line_8;

		    for (i = 0; i < src_width; )
		    {
			for (j = 0; j < src_image->bits.width; j++, i++)
			    ((uint8_t*)extended_src)[i] = ((uint8_t*)src_line)[j];
		    }
		}

		info2.src_y = 0;
	    }
	    else
	    {
		info2.src_y = sy;
	    }

	    width_remain = width;

	    while (width_remain > 0)
	    {
		num_pixels = src_width - sx;

		if (num_pixels > width_remain)
		    num_pixels = width_remain;

		info2.src_x = sx;
		info2.width = num_pixels;
		info2.height = 1;

		func (imp, &info2);

		width_remain -= num_pixels;
		info2.mask_x += num_pixels;
		info2.dest_x += num_pixels;
		sx = 0;
	    }

	    sx = src_x;
	    sy++;
	    info2.mask_x = info->mask_x;
	    info2.mask_y++;
	    info2.dest_x = info->dest_x;
	    info2.dest_y++;
	}

	if (need_src_extension)
	    _pixman_image_fini (&extended_src_image);
    }
    else
    {
	_pixman_log_error (FUNC, "Didn't find a suitable function ");
    }
}
/* Use more unrolling for src_0565_0565 because it is typically CPU bound */
static force_inline void
scaled_nearest_scanline_565_565_SRC (uint16_t *       dst,
                                     const uint16_t * src,
                                     int32_t          w,
                                     pixman_fixed_t   vx,
                                     pixman_fixed_t   unit_x,
                                     pixman_fixed_t   max_vx,
                                     pixman_bool_t    fully_transparent_src)
{
    uint16_t tmp1, tmp2, tmp3, tmp4;
    while ((w -= 4) >= 0)
    {
	tmp1 = src[pixman_fixed_to_int (vx)];
	vx += unit_x;
	tmp2 = src[pixman_fixed_to_int (vx)];
	vx += unit_x;
	tmp3 = src[pixman_fixed_to_int (vx)];
	vx += unit_x;
	tmp4 = src[pixman_fixed_to_int (vx)];
	vx += unit_x;
	*dst++ = tmp1;
	*dst++ = tmp2;
	*dst++ = tmp3;
	*dst++ = tmp4;
    }
    if (w & 2)
    {
	tmp1 = src[pixman_fixed_to_int (vx)];
	vx += unit_x;
	tmp2 = src[pixman_fixed_to_int (vx)];
	vx += unit_x;
	*dst++ = tmp1;
	*dst++ = tmp2;
    }
    if (w & 1)
	*dst++ = src[pixman_fixed_to_int (vx)];
}
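
/* The fourfold unrolling above batches four loads before four stores,
 * giving the compiler and CPU more independent work to overlap; the
 * (w & 2) and (w & 1) tails handle the remaining 0-3 pixels.
 */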
FAST_NEAREST_MAINLOOP (565_565_cover_SRC,
                       scaled_nearest_scanline_565_565_SRC,
                       uint16_t, uint16_t, COVER)
FAST_NEAREST_MAINLOOP (565_565_none_SRC,
                       scaled_nearest_scanline_565_565_SRC,
                       uint16_t, uint16_t, NONE)
FAST_NEAREST_MAINLOOP (565_565_pad_SRC,
                       scaled_nearest_scanline_565_565_SRC,
                       uint16_t, uint16_t, PAD)
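
/* The generic nearest-neighbour compositor below walks the source in
 * pixman_fixed_t (16.16 fixed point) steps: unit_x/unit_y are taken
 * straight from the transform matrix, and pixman_fixed_to_int (vx)
 * recovers the integer source coordinate for each destination pixel.
 * pixman_fixed_e is the smallest representable increment (1/65536).
 */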
static force_inline uint32_t
fetch_nearest (pixman_repeat_t src_repeat,
               pixman_format_code_t format,
               uint32_t *src, int x, int src_width)
{
    if (repeat (src_repeat, &x, src_width))
    {
	/* x888 formats carry no alpha, so force the alpha channel to opaque */
	if (format == PIXMAN_x8r8g8b8 || format == PIXMAN_x8b8g8r8)
	    return *(src + x) | 0xff000000;
	else
	    return *(src + x);
    }
    else
    {
	return 0;
    }
}
static force_inline void
combine_over (uint32_t s, uint32_t *dst)
{
    if (s)
    {
	uint8_t ia = 0xff - (s >> 24);

	if (ia)
	    UN8x4_MUL_UN8_ADD_UN8x4 (*dst, ia, s);
	else
	    *dst = s;
    }
}
static force_inline void
combine_src (uint32_t s, uint32_t *dst)
{
    *dst = s;
}
static void
fast_composite_scaled_nearest (pixman_implementation_t *imp,
                               pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t       *dst_line;
    uint32_t       *src_line;
    int             dst_stride, src_stride;
    int             src_width, src_height;
    pixman_repeat_t src_repeat;
    pixman_fixed_t unit_x, unit_y;
    pixman_format_code_t src_format;
    pixman_vector_t v;
    pixman_fixed_t vy;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    /* pass in 0 instead of src_x and src_y because src_x and src_y need to be
     * transformed from destination space to source space
     */
    PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, uint32_t, src_stride, src_line, 1);

    /* reference point is the center of the pixel */
    v.vector[0] = pixman_int_to_fixed (src_x) + pixman_fixed_1 / 2;
    v.vector[1] = pixman_int_to_fixed (src_y) + pixman_fixed_1 / 2;
    v.vector[2] = pixman_fixed_1;

    if (!pixman_transform_point_3d (src_image->common.transform, &v))
	return;

    unit_x = src_image->common.transform->matrix[0][0];
    unit_y = src_image->common.transform->matrix[1][1];

    /* Round down to closest integer, ensuring that 0.5 rounds to 0, not 1 */
    v.vector[0] -= pixman_fixed_e;
    v.vector[1] -= pixman_fixed_e;

    src_height = src_image->bits.height;
    src_width = src_image->bits.width;
    src_repeat = src_image->common.repeat;
    src_format = src_image->bits.format;

    vy = v.vector[1];
    while (height--)
    {
	pixman_fixed_t vx = v.vector[0];
	int y = pixman_fixed_to_int (vy);
	uint32_t *dst = dst_line;

	dst_line += dst_stride;

	/* adjust the y location by a unit vector in the y direction
	 * this is equivalent to transforming y+1 of the destination point to source space */
	vy += unit_y;

	if (!repeat (src_repeat, &y, src_height))
	{
	    if (op == PIXMAN_OP_SRC)
		memset (dst, 0, sizeof (*dst) * width);
	}
	else
	{
	    int w = width;

	    uint32_t *src = src_line + y * src_stride;

	    while (w >= 2)
	    {
		uint32_t s1, s2;
		int x1, x2;

		x1 = pixman_fixed_to_int (vx);
		vx += unit_x;

		x2 = pixman_fixed_to_int (vx);
		vx += unit_x;

		w -= 2;

		s1 = fetch_nearest (src_repeat, src_format, src, x1, src_width);
		s2 = fetch_nearest (src_repeat, src_format, src, x2, src_width);

		if (op == PIXMAN_OP_OVER)
		{
		    combine_over (s1, dst++);
		    combine_over (s2, dst++);
		}
		else
		{
		    combine_src (s1, dst++);
		    combine_src (s2, dst++);
		}
	    }

	    while (w--)
	    {
		uint32_t s;
		int x;

		x = pixman_fixed_to_int (vx);
		vx += unit_x;

		s = fetch_nearest (src_repeat, src_format, src, x, src_width);

		if (op == PIXMAN_OP_OVER)
		    combine_over (s, dst++);
		else
		    combine_src (s, dst++);
	    }
	}
    }
}
#define CACHE_LINE_SIZE 64
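
/* Simple 90/270 degree rotation blitters.  The trivial versions walk the
 * destination row by row, reading the source down a column (90 degrees:
 * destination row y comes from source column h - y - 1; 270 degrees: from
 * column y, bottom up).  Because column reads touch a new source cache
 * line for every pixel, the non-trivial versions below tile the work into
 * CACHE_LINE_SIZE-wide destination stripes to keep both images cache
 * resident.
 */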
#define FAST_SIMPLE_ROTATE(suffix, pix_type)                                  \
                                                                              \
static void                                                                   \
blt_rotated_90_trivial_##suffix (pix_type       *dst,                         \
                                 int             dst_stride,                  \
                                 const pix_type *src,                         \
                                 int             src_stride,                  \
                                 int             w,                           \
                                 int             h)                           \
{                                                                             \
    int x, y;                                                                 \
    for (y = 0; y < h; y++)                                                   \
    {                                                                         \
	const pix_type *s = src + (h - y - 1);                                \
	pix_type *d = dst + dst_stride * y;                                   \
	for (x = 0; x < w; x++)                                               \
	{                                                                     \
	    *d++ = *s;                                                        \
	    s += src_stride;                                                  \
	}                                                                     \
    }                                                                         \
}                                                                             \
                                                                              \
static void                                                                   \
blt_rotated_270_trivial_##suffix (pix_type       *dst,                        \
                                  int             dst_stride,                 \
                                  const pix_type *src,                        \
                                  int             src_stride,                 \
                                  int             w,                          \
                                  int             h)                          \
{                                                                             \
    int x, y;                                                                 \
    for (y = 0; y < h; y++)                                                   \
    {                                                                         \
	const pix_type *s = src + src_stride * (w - 1) + y;                   \
	pix_type *d = dst + dst_stride * y;                                   \
	for (x = 0; x < w; x++)                                               \
	{                                                                     \
	    *d++ = *s;                                                        \
	    s -= src_stride;                                                  \
	}                                                                     \
    }                                                                         \
}                                                                             \
                                                                              \
static void                                                                   \
blt_rotated_90_##suffix (pix_type       *dst,                                 \
                         int             dst_stride,                          \
                         const pix_type *src,                                 \
                         int             src_stride,                          \
                         int             W,                                   \
                         int             H)                                   \
{                                                                             \
    int x;                                                                    \
    int leading_pixels = 0, trailing_pixels = 0;                              \
    const int TILE_SIZE = CACHE_LINE_SIZE / sizeof(pix_type);                 \
                                                                              \
    /*                                                                        \
     * split processing into handling destination as TILE_SIZExH cache line  \
     * aligned vertical stripes (optimistically assuming that destination    \
     * stride is a multiple of cache line, if not - it will be just a bit    \
     * slower)                                                                \
     */                                                                       \
                                                                              \
    if ((uintptr_t)dst & (CACHE_LINE_SIZE - 1))                               \
    {                                                                         \
	leading_pixels = TILE_SIZE - (((uintptr_t)dst &                       \
			    (CACHE_LINE_SIZE - 1)) / sizeof(pix_type));       \
	if (leading_pixels > W)                                               \
	    leading_pixels = W;                                               \
                                                                              \
	/* unaligned leading part NxH (where N < TILE_SIZE) */                \
	blt_rotated_90_trivial_##suffix (                                     \
	    dst,                                                              \
	    dst_stride,                                                       \
	    src,                                                              \
	    src_stride,                                                       \
	    leading_pixels,                                                   \
	    H);                                                               \
                                                                              \
	dst += leading_pixels;                                                \
	src += leading_pixels * src_stride;                                   \
	W -= leading_pixels;                                                  \
    }                                                                         \
                                                                              \
    if ((uintptr_t)(dst + W) & (CACHE_LINE_SIZE - 1))                         \
    {                                                                         \
	trailing_pixels = (((uintptr_t)(dst + W) &                            \
			    (CACHE_LINE_SIZE - 1)) / sizeof(pix_type));       \
	if (trailing_pixels > W)                                              \
	    trailing_pixels = W;                                              \
	W -= trailing_pixels;                                                 \
    }                                                                         \
                                                                              \
    for (x = 0; x < W; x += TILE_SIZE)                                        \
    {                                                                         \
	/* aligned middle part TILE_SIZExH */                                 \
	blt_rotated_90_trivial_##suffix (                                     \
	    dst + x,                                                          \
	    dst_stride,                                                       \
	    src + src_stride * x,                                             \
	    src_stride,                                                       \
	    TILE_SIZE,                                                        \
	    H);                                                               \
    }                                                                         \
                                                                              \
    if (trailing_pixels)                                                      \
    {                                                                         \
	/* unaligned trailing part NxH (where N < TILE_SIZE) */               \
	blt_rotated_90_trivial_##suffix (                                     \
	    dst + W,                                                          \
	    dst_stride,                                                       \
	    src + W * src_stride,                                             \
	    src_stride,                                                       \
	    trailing_pixels,                                                  \
	    H);                                                               \
    }                                                                         \
}                                                                             \
                                                                              \
static void                                                                   \
blt_rotated_270_##suffix (pix_type       *dst,                                \
                          int             dst_stride,                         \
                          const pix_type *src,                                \
                          int             src_stride,                         \
                          int             W,                                  \
                          int             H)                                  \
{                                                                             \
    int x;                                                                    \
    int leading_pixels = 0, trailing_pixels = 0;                              \
    const int TILE_SIZE = CACHE_LINE_SIZE / sizeof(pix_type);                 \
                                                                              \
    /*                                                                        \
     * split processing into handling destination as TILE_SIZExH cache line  \
     * aligned vertical stripes (optimistically assuming that destination    \
     * stride is a multiple of cache line, if not - it will be just a bit    \
     * slower)                                                                \
     */                                                                       \
                                                                              \
    if ((uintptr_t)dst & (CACHE_LINE_SIZE - 1))                               \
    {                                                                         \
	leading_pixels = TILE_SIZE - (((uintptr_t)dst &                       \
			    (CACHE_LINE_SIZE - 1)) / sizeof(pix_type));       \
	if (leading_pixels > W)                                               \
	    leading_pixels = W;                                               \
                                                                              \
	/* unaligned leading part NxH (where N < TILE_SIZE) */                \
	blt_rotated_270_trivial_##suffix (                                    \
	    dst,                                                              \
	    dst_stride,                                                       \
	    src + src_stride * (W - leading_pixels),                          \
	    src_stride,                                                       \
	    leading_pixels,                                                   \
	    H);                                                               \
                                                                              \
	dst += leading_pixels;                                                \
	W -= leading_pixels;                                                  \
    }                                                                         \
                                                                              \
    if ((uintptr_t)(dst + W) & (CACHE_LINE_SIZE - 1))                         \
    {                                                                         \
	trailing_pixels = (((uintptr_t)(dst + W) &                            \
			    (CACHE_LINE_SIZE - 1)) / sizeof(pix_type));       \
	if (trailing_pixels > W)                                              \
	    trailing_pixels = W;                                              \
	W -= trailing_pixels;                                                 \
	src += trailing_pixels * src_stride;                                  \
    }                                                                         \
                                                                              \
    for (x = 0; x < W; x += TILE_SIZE)                                        \
    {                                                                         \
	/* aligned middle part TILE_SIZExH */                                 \
	blt_rotated_270_trivial_##suffix (                                    \
	    dst + x,                                                          \
	    dst_stride,                                                       \
	    src + src_stride * (W - x - TILE_SIZE),                           \
	    src_stride,                                                       \
	    TILE_SIZE,                                                        \
	    H);                                                               \
    }                                                                         \
                                                                              \
    if (trailing_pixels)                                                      \
    {                                                                         \
	/* unaligned trailing part NxH (where N < TILE_SIZE) */               \
	blt_rotated_270_trivial_##suffix (                                    \
	    dst + W,                                                          \
	    dst_stride,                                                       \
	    src - trailing_pixels * src_stride,                               \
	    src_stride,                                                       \
	    trailing_pixels,                                                  \
	    H);                                                               \
    }                                                                         \
}                                                                             \
                                                                              \
static void                                                                   \
fast_composite_rotate_90_##suffix (pixman_implementation_t *imp,              \
                                   pixman_composite_info_t *info)             \
{                                                                             \
    PIXMAN_COMPOSITE_ARGS (info);                                             \
    pix_type       *dst_line;                                                 \
    pix_type       *src_line;                                                 \
    int             dst_stride, src_stride;                                   \
    int             src_x_t, src_y_t;                                         \
                                                                              \
    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, pix_type,              \
                           dst_stride, dst_line, 1);                          \
    src_x_t = -src_y + pixman_fixed_to_int (                                  \
                           src_image->common.transform->matrix[0][2] +       \
                           pixman_fixed_1 / 2 - pixman_fixed_e) - height;     \
    src_y_t = src_x + pixman_fixed_to_int (                                   \
                           src_image->common.transform->matrix[1][2] +       \
                           pixman_fixed_1 / 2 - pixman_fixed_e);              \
    PIXMAN_IMAGE_GET_LINE (src_image, src_x_t, src_y_t, pix_type,             \
                           src_stride, src_line, 1);                          \
    blt_rotated_90_##suffix (dst_line, dst_stride, src_line, src_stride,      \
                             width, height);                                  \
}                                                                             \
                                                                              \
static void                                                                   \
fast_composite_rotate_270_##suffix (pixman_implementation_t *imp,             \
                                    pixman_composite_info_t *info)            \
{                                                                             \
    PIXMAN_COMPOSITE_ARGS (info);                                             \
    pix_type       *dst_line;                                                 \
    pix_type       *src_line;                                                 \
    int             dst_stride, src_stride;                                   \
    int             src_x_t, src_y_t;                                         \
                                                                              \
    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, pix_type,              \
                           dst_stride, dst_line, 1);                          \
    src_x_t = src_y + pixman_fixed_to_int (                                   \
                           src_image->common.transform->matrix[0][2] +       \
                           pixman_fixed_1 / 2 - pixman_fixed_e);              \
    src_y_t = -src_x + pixman_fixed_to_int (                                  \
                           src_image->common.transform->matrix[1][2] +       \
                           pixman_fixed_1 / 2 - pixman_fixed_e) - width;      \
    PIXMAN_IMAGE_GET_LINE (src_image, src_x_t, src_y_t, pix_type,             \
                           src_stride, src_line, 1);                          \
    blt_rotated_270_##suffix (dst_line, dst_stride, src_line, src_stride,     \
                              width, height);                                 \
}
FAST_SIMPLE_ROTATE (8, uint8_t)
FAST_SIMPLE_ROTATE (565, uint16_t)
FAST_SIMPLE_ROTATE (8888, uint32_t)
static const pixman_fast_path_t c_fast_paths[] =
{
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, fast_composite_over_n_8_0565),
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, b5g6r5, fast_composite_over_n_8_0565),
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, r8g8b8, fast_composite_over_n_8_0888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, b8g8r8, fast_composite_over_n_8_0888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, fast_composite_over_n_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, fast_composite_over_n_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, fast_composite_over_n_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, fast_composite_over_n_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a1, a8r8g8b8, fast_composite_over_n_1_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a1, x8r8g8b8, fast_composite_over_n_1_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a1, a8b8g8r8, fast_composite_over_n_1_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a1, x8b8g8r8, fast_composite_over_n_1_8888),
    PIXMAN_STD_FAST_PATH (OVER, solid, a1, r5g6b5, fast_composite_over_n_1_0565),
    PIXMAN_STD_FAST_PATH (OVER, solid, a1, b5g6r5, fast_composite_over_n_1_0565),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, fast_composite_over_n_8888_8888_ca),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, fast_composite_over_n_8888_8888_ca),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, r5g6b5, fast_composite_over_n_8888_0565_ca),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, fast_composite_over_n_8888_8888_ca),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, fast_composite_over_n_8888_8888_ca),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, b5g6r5, fast_composite_over_n_8888_0565_ca),
    PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, x8r8g8b8, fast_composite_over_x888_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, a8r8g8b8, fast_composite_over_x888_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, x8b8g8r8, fast_composite_over_x888_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, a8b8g8r8, fast_composite_over_x888_8_8888),
    PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, fast_composite_over_8888_8888),
    PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, fast_composite_over_8888_8888),
    PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, r5g6b5, fast_composite_over_8888_0565),
    PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, fast_composite_over_8888_8888),
    PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, fast_composite_over_8888_8888),
    PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, fast_composite_over_8888_0565),
    PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, fast_composite_add_8888_8888),
    PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, fast_composite_add_8888_8888),
    PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, fast_composite_add_8_8),
    PIXMAN_STD_FAST_PATH (ADD, a1, null, a1, fast_composite_add_1000_1000),
    PIXMAN_STD_FAST_PATH_CA (ADD, solid, a8r8g8b8, a8r8g8b8, fast_composite_add_n_8888_8888_ca),
    PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8, fast_composite_add_n_8_8),
    PIXMAN_STD_FAST_PATH (SRC, solid, null, a8r8g8b8, fast_composite_solid_fill),
    PIXMAN_STD_FAST_PATH (SRC, solid, null, x8r8g8b8, fast_composite_solid_fill),
    PIXMAN_STD_FAST_PATH (SRC, solid, null, a8b8g8r8, fast_composite_solid_fill),
    PIXMAN_STD_FAST_PATH (SRC, solid, null, x8b8g8r8, fast_composite_solid_fill),
    PIXMAN_STD_FAST_PATH (SRC, solid, null, a1, fast_composite_solid_fill),
    PIXMAN_STD_FAST_PATH (SRC, solid, null, a8, fast_composite_solid_fill),
    PIXMAN_STD_FAST_PATH (SRC, solid, null, r5g6b5, fast_composite_solid_fill),
    PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, a8r8g8b8, fast_composite_src_x888_8888),
    PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, a8b8g8r8, fast_composite_src_x888_8888),
    PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, a8r8g8b8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, x8b8g8r8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, a8b8g8r8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, b8g8r8a8, null, b8g8r8x8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, b8g8r8a8, null, b8g8r8a8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, b8g8r8x8, null, b8g8r8x8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, r5g6b5, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, b5g6r5, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, r8g8b8, null, r8g8b8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, b8g8r8, null, b8g8r8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, x1r5g5b5, null, x1r5g5b5, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, a1r5g5b5, null, x1r5g5b5, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, a8, null, a8, fast_composite_src_memcpy),
    PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, r5g6b5, fast_composite_src_x888_0565),
    PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, r5g6b5, fast_composite_src_x888_0565),
    PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, b5g6r5, fast_composite_src_x888_0565),
    PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, b5g6r5, fast_composite_src_x888_0565),
    PIXMAN_STD_FAST_PATH (IN, a8, null, a8, fast_composite_in_8_8),
    PIXMAN_STD_FAST_PATH (IN, solid, a8, a8, fast_composite_in_n_8_8),
    SIMPLE_NEAREST_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, 8888_8888),
    SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, 8888_8888),
    SIMPLE_NEAREST_FAST_PATH (SRC, x8b8g8r8, x8b8g8r8, 8888_8888),
    SIMPLE_NEAREST_FAST_PATH (SRC, a8b8g8r8, x8b8g8r8, 8888_8888),

    SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, 8888_8888),
    SIMPLE_NEAREST_FAST_PATH (SRC, a8b8g8r8, a8b8g8r8, 8888_8888),

    SIMPLE_NEAREST_FAST_PATH (SRC, x8r8g8b8, r5g6b5, 8888_565),
    SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, r5g6b5, 8888_565),

    SIMPLE_NEAREST_FAST_PATH (SRC, r5g6b5, r5g6b5, 565_565),

    SIMPLE_NEAREST_FAST_PATH_COVER (SRC, x8r8g8b8, a8r8g8b8, x888_8888),
    SIMPLE_NEAREST_FAST_PATH_COVER (SRC, x8b8g8r8, a8b8g8r8, x888_8888),
    SIMPLE_NEAREST_FAST_PATH_PAD (SRC, x8r8g8b8, a8r8g8b8, x888_8888),
    SIMPLE_NEAREST_FAST_PATH_PAD (SRC, x8b8g8r8, a8b8g8r8, x888_8888),
    SIMPLE_NEAREST_FAST_PATH_NORMAL (SRC, x8r8g8b8, a8r8g8b8, x888_8888),
    SIMPLE_NEAREST_FAST_PATH_NORMAL (SRC, x8b8g8r8, a8b8g8r8, x888_8888),

    SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, 8888_8888),
    SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, 8888_8888),
    SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, 8888_8888),
    SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, 8888_8888),

    SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, r5g6b5, 8888_565),
#define NEAREST_FAST_PATH(op,s,d)		\
    {   PIXMAN_OP_ ## op,			\
	PIXMAN_ ## s, SCALED_NEAREST_FLAGS,	\
	PIXMAN_null, 0,				\
	PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,	\
	fast_composite_scaled_nearest,		\
    }
    NEAREST_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8),
    NEAREST_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8),
    NEAREST_FAST_PATH (SRC, x8b8g8r8, x8b8g8r8),
    NEAREST_FAST_PATH (SRC, a8b8g8r8, x8b8g8r8),

    NEAREST_FAST_PATH (SRC, x8r8g8b8, a8r8g8b8),
    NEAREST_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8),
    NEAREST_FAST_PATH (SRC, x8b8g8r8, a8b8g8r8),
    NEAREST_FAST_PATH (SRC, a8b8g8r8, a8b8g8r8),

    NEAREST_FAST_PATH (OVER, x8r8g8b8, x8r8g8b8),
    NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8),
    NEAREST_FAST_PATH (OVER, x8b8g8r8, x8b8g8r8),
    NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8),

    NEAREST_FAST_PATH (OVER, x8r8g8b8, a8r8g8b8),
    NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8),
    NEAREST_FAST_PATH (OVER, x8b8g8r8, a8b8g8r8),
    NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8),
#define SIMPLE_ROTATE_FLAGS(angle)			\
    (FAST_PATH_ROTATE_ ## angle ## _TRANSFORM	|	\
     FAST_PATH_NEAREST_FILTER			|	\
     FAST_PATH_SAMPLES_COVER_CLIP_NEAREST	|	\
     FAST_PATH_STANDARD_FLAGS)
#define SIMPLE_ROTATE_FAST_PATH(op,s,d,suffix)		\
    {   PIXMAN_OP_ ## op,				\
	PIXMAN_ ## s, SIMPLE_ROTATE_FLAGS (90),		\
	PIXMAN_null, 0,					\
	PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,		\
	fast_composite_rotate_90_##suffix,		\
    },							\
    {   PIXMAN_OP_ ## op,				\
	PIXMAN_ ## s, SIMPLE_ROTATE_FLAGS (270),	\
	PIXMAN_null, 0,					\
	PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,		\
	fast_composite_rotate_270_##suffix,		\
    }
    SIMPLE_ROTATE_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, 8888),
    SIMPLE_ROTATE_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, 8888),
    SIMPLE_ROTATE_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, 8888),
    SIMPLE_ROTATE_FAST_PATH (SRC, r5g6b5, r5g6b5, 565),
    SIMPLE_ROTATE_FAST_PATH (SRC, a8, a8, 8),
    /* Simple repeat fast path entry. */
    {   PIXMAN_OP_any,
	PIXMAN_any,
	(FAST_PATH_STANDARD_FLAGS | FAST_PATH_ID_TRANSFORM | FAST_PATH_BITS_IMAGE |
	 FAST_PATH_NORMAL_REPEAT),
	PIXMAN_any, 0,
	PIXMAN_any, FAST_PATH_STD_DEST_FLAGS,
	fast_composite_tiled_repeat
    },

    {   PIXMAN_OP_NONE },
};
#ifdef WORDS_BIGENDIAN
#define A1_FILL_MASK(n, offs) (((1U << (n)) - 1) << (32 - (offs) - (n)))
#else
#define A1_FILL_MASK(n, offs) (((1U << (n)) - 1) << (offs))
#endif
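
/* A1_FILL_MASK (n, offs) builds a run of n one-bits starting at bit
 * offset offs within a 32-bit word.  On little endian, for example,
 * A1_FILL_MASK (3, 4) == ((1 << 3) - 1) << 4 == 0x70; big endian counts
 * offsets from the other end of the word.
 */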
static force_inline void
pixman_fill1_line (uint32_t *dst, int offs, int width, int v)
{
    if (offs)
    {
	int leading_pixels = 32 - offs;
	if (leading_pixels >= width)
	{
	    if (v)
		*dst |= A1_FILL_MASK (width, offs);
	    else
		*dst &= ~A1_FILL_MASK (width, offs);
	    return;
	}
	else
	{
	    if (v)
		*dst++ |= A1_FILL_MASK (leading_pixels, offs);
	    else
		*dst++ &= ~A1_FILL_MASK (leading_pixels, offs);
	    width -= leading_pixels;
	}
    }
    while (width >= 32)
    {
	if (v)
	    *dst++ = 0xFFFFFFFF;
	else
	    *dst++ = 0;
	width -= 32;
    }
    if (width > 0)
    {
	if (v)
	    *dst |= A1_FILL_MASK (width, 0);
	else
	    *dst &= ~A1_FILL_MASK (width, 0);
    }
}
static void
pixman_fill1 (uint32_t *bits,
              int       stride,
              int       x,
              int       y,
              int       width,
              int       height,
              uint32_t  xor)
{
    uint32_t *dst = bits + y * stride + (x >> 5);
    int offs = x & 31;

    if (xor & 1)
    {
	while (height--)
	{
	    pixman_fill1_line (dst, offs, width, 1);
	    dst += stride;
	}
    }
    else
    {
	while (height--)
	{
	    pixman_fill1_line (dst, offs, width, 0);
	    dst += stride;
	}
    }
}
static void
pixman_fill8 (uint32_t *bits,
              int       stride,
              int       x,
              int       y,
              int       width,
              int       height,
              uint32_t  xor)
{
    int byte_stride = stride * (int) sizeof (uint32_t);
    uint8_t *dst = (uint8_t *) bits;
    uint8_t v = xor & 0xff;
    int i;

    dst = dst + y * byte_stride + x;

    while (height--)
    {
	for (i = 0; i < width; ++i)
	    dst[i] = v;

	dst += byte_stride;
    }
}
static void
pixman_fill16 (uint32_t *bits,
               int       stride,
               int       x,
               int       y,
               int       width,
               int       height,
               uint32_t  xor)
{
    int short_stride =
	(stride * (int)sizeof (uint32_t)) / (int)sizeof (uint16_t);
    uint16_t *dst = (uint16_t *)bits;
    uint16_t v = xor & 0xffff;
    int i;

    dst = dst + y * short_stride + x;

    while (height--)
    {
	for (i = 0; i < width; ++i)
	    dst[i] = v;

	dst += short_stride;
    }
}
static void
pixman_fill32 (uint32_t *bits,
               int       stride,
               int       x,
               int       y,
               int       width,
               int       height,
               uint32_t  xor)
{
    int i;

    bits = bits + y * stride + x;

    while (height--)
    {
	for (i = 0; i < width; ++i)
	    bits[i] = xor;

	bits += stride;
    }
}
static pixman_bool_t
fast_path_fill (pixman_implementation_t *imp,
                uint32_t *               bits,
                int                      stride,
                int                      bpp,
                int                      x,
                int                      y,
                int                      width,
                int                      height,
                uint32_t                 xor)
{
    switch (bpp)
    {
    case 1:
	pixman_fill1 (bits, stride, x, y, width, height, xor);
	break;

    case 8:
	pixman_fill8 (bits, stride, x, y, width, height, xor);
	break;

    case 16:
	pixman_fill16 (bits, stride, x, y, width, height, xor);
	break;

    case 32:
	pixman_fill32 (bits, stride, x, y, width, height, xor);
	break;

    default:
	return _pixman_implementation_fill (
	    imp->delegate, bits, stride, bpp, x, y, width, height, xor);
	break;
    }

    return TRUE;
}
pixman_implementation_t *
_pixman_implementation_create_fast_path (pixman_implementation_t *fallback)
{
    pixman_implementation_t *imp = _pixman_implementation_create (fallback, c_fast_paths);

    imp->fill = fast_path_fill;

    return imp;
}