1 /* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */
3 * Copyright © 2000 SuSE, Inc.
4 * Copyright © 2007 Red Hat, Inc.
6 * Permission to use, copy, modify, distribute, and sell this software and its
7 * documentation for any purpose is hereby granted without fee, provided that
8 * the above copyright notice appear in all copies and that both that
9 * copyright notice and this permission notice appear in supporting
10 * documentation, and that the name of SuSE not be used in advertising or
11 * publicity pertaining to distribution of the software without specific,
12 * written prior permission. SuSE makes no representations about the
13 * suitability of this software for any purpose. It is provided "as is"
14 * without express or implied warranty.
16 * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
18 * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
19 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
20 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
21 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 * Author: Keith Packard, SuSE, Inc.
31 #include "pixman-private.h"
32 #include "pixman-combine32.h"
34 static force_inline uint32_t
/* Fetch a 24-bit (3-byte) pixel that may live at an odd address: the access
 * is split into 8-bit and 16-bit loads so no misaligned 16-bit read happens.
 * Byte layout of the assembled value depends on WORDS_BIGENDIAN. */
37     if (((unsigned long)a) & 1)
39 #ifdef WORDS_BIGENDIAN
40 	return (*a << 16) | (*(uint16_t *)(a + 1));
42 	return *a | (*(uint16_t *)(a + 1) << 8);
47 #ifdef WORDS_BIGENDIAN
48 	return (*(uint16_t *)a << 8) | *(a + 2);
50 	return *(uint16_t *)a | (*(a + 2) << 16);
55 static force_inline void
/* Store the low 24 bits of v to a possibly odd address; mirrors fetch_24 by
 * splitting the store into 8-bit and 16-bit writes, laid out per
 * WORDS_BIGENDIAN. */
59     if (((unsigned long)a) & 1)
61 #ifdef WORDS_BIGENDIAN
62 	*a = (uint8_t) (v >> 16);
63 	*(uint16_t *)(a + 1) = (uint16_t) (v);
66 	*(uint16_t *)(a + 1) = (uint16_t) (v >> 8);
71 #ifdef WORDS_BIGENDIAN
72 	*(uint16_t *)a = (uint16_t)(v >> 8);
73 	*(a + 2) = (uint8_t)v;
75 	*(uint16_t *)a = (uint16_t)v;
76 	*(a + 2) = (uint8_t)(v >> 16);
81 static force_inline uint32_t
/* Porter-Duff OVER for one premultiplied 8888 pixel:
 * result = src + dest * (255 - alpha(src)).  'a' is the inverted source
 * alpha, taken from the top byte of ~src. */
85     uint32_t a = ~src >> 24;
87     UN8x4_MUL_UN8_ADD_UN8x4 (dest, a, src);
/* OVER an x888 source through an a8 mask onto an 8888 destination.
 * The x-channel of the source is forced opaque before compositing. */
109 fast_composite_over_x888_8_8888 (pixman_implementation_t *imp,
111                                  pixman_image_t * src_image,
112                                  pixman_image_t * mask_image,
113                                  pixman_image_t * dst_image,
123     uint32_t *src, *src_line;
124     uint32_t *dst, *dst_line;
125     uint8_t *mask, *mask_line;
126     int src_stride, mask_stride, dst_stride;
131     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
132     PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
133     PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
138 	src_line += src_stride;
140 	dst_line += dst_stride;
142 	mask_line += mask_stride;
	    /* Treat the x888 source pixel as fully opaque. */
150 		s = *src | 0xff000000;
159 		    *dst = over (d, *dst);
/* IN with a solid source, a8 mask, and a8 destination:
 * dst = dst * (mask * alpha(src)).  Two scan loops are visible; presumably
 * one handles the fully-opaque source case and the other the general case
 * (NOTE(review): confirm against the elided branch condition). */
169 fast_composite_in_n_8_8 (pixman_implementation_t *imp,
171                          pixman_image_t * src_image,
172                          pixman_image_t * mask_image,
173                          pixman_image_t * dest_image,
184     uint8_t *dst_line, *dst;
185     uint8_t *mask_line, *mask, m;
186     int dst_stride, mask_stride;
190     src = _pixman_image_get_solid (src_image, dest_image->bits.format);
194     PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
195     PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
202 	    dst_line += dst_stride;
204 	    mask_line += mask_stride;
214 		    *dst = MUL_UN8 (m, *dst, t);
225 	    dst_line += dst_stride;
227 	    mask_line += mask_stride;
		/* General case: scale the mask by the source alpha first. */
233 		m = MUL_UN8 (m, srca, t);
238 		*dst = MUL_UN8 (m, *dst, t);
/* IN with an a8 source and a8 destination (no mask):
 * dst = dst * src, per pixel, using the rounding multiply MUL_UN8. */
247 fast_composite_in_8_8 (pixman_implementation_t *imp,
249                        pixman_image_t * src_image,
250                        pixman_image_t * mask_image,
251                        pixman_image_t * dest_image,
261     uint8_t *dst_line, *dst;
262     uint8_t *src_line, *src;
263     int dst_stride, src_stride;
268     PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
269     PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
274 	dst_line += dst_stride;
276 	src_line += src_stride;
286 	    *dst = MUL_UN8 (s, *dst, t);
/* OVER with a solid source through an a8 mask onto an 8888 destination.
 * A fully-set mask byte composites the solid color directly; otherwise the
 * source is first attenuated by the mask (visible in the d-then-over path). */
294 fast_composite_over_n_8_8888 (pixman_implementation_t *imp,
296                               pixman_image_t * src_image,
297                               pixman_image_t * mask_image,
298                               pixman_image_t * dst_image,
309     uint32_t *dst_line, *dst, d;
310     uint8_t *mask_line, *mask, m;
311     int dst_stride, mask_stride;
314     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
320     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
321     PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
326 	dst_line += dst_stride;
328 	mask_line += mask_stride;
		/* Opaque mask byte: composite the solid source directly. */
339 		*dst = over (src, *dst);
344 		*dst = over (d, *dst);
/* Component-alpha ADD with a solid source and an 8888 per-component mask:
 * per pixel, s = src * mask (componentwise) saturating-added to the dest. */
352 fast_composite_add_n_8888_8888_ca (pixman_implementation_t *imp,
354                                    pixman_image_t * src_image,
355                                    pixman_image_t * mask_image,
356                                    pixman_image_t * dst_image,
366     uint32_t src, srca, s;
367     uint32_t *dst_line, *dst, d;
368     uint32_t *mask_line, *mask, ma;
369     int dst_stride, mask_stride;
372     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
378     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
379     PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
384 	dst_line += dst_stride;
386 	mask_line += mask_stride;
		/* s = s * ma + d, componentwise with saturation. */
398 		UN8x4_MUL_UN8x4_ADD_UN8x4 (s, ma, d);
/* Component-alpha OVER with a solid source and an 8888 mask onto 8888 dest.
 * A fully-set mask word uses plain over(); otherwise the source is scaled by
 * the mask, the mask by the source alpha, and the dest blended with the
 * (presumably inverted — elided here, confirm) mask before adding. */
409 fast_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
411                                     pixman_image_t * src_image,
412                                     pixman_image_t * mask_image,
413                                     pixman_image_t * dst_image,
423     uint32_t src, srca, s;
424     uint32_t *dst_line, *dst, d;
425     uint32_t *mask_line, *mask, ma;
426     int dst_stride, mask_stride;
429     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
435     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
436     PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
441 	dst_line += dst_stride;
443 	mask_line += mask_stride;
449 	    if (ma == 0xffffffff)
454 		    *dst = over (src, *dst);
461 		UN8x4_MUL_UN8x4 (s, ma);
462 		UN8x4_MUL_UN8 (ma, srca);
464 		UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, s);
/* OVER with a solid source through an a8 mask onto a 24-bit (0888) dest.
 * 3-byte destination pixels are accessed via fetch_24/store_24. */
475 fast_composite_over_n_8_0888 (pixman_implementation_t *imp,
477                               pixman_image_t * src_image,
478                               pixman_image_t * mask_image,
479                               pixman_image_t * dst_image,
490     uint8_t *dst_line, *dst;
492     uint8_t *mask_line, *mask, m;
493     int dst_stride, mask_stride;
496     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
502     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 3);
503     PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
508 	dst_line += dst_stride;
510 	mask_line += mask_stride;
		/* General mask value: in() the source by m, then over the dest. */
531 		d = over (in (src, m), fetch_24 (dst));
/* OVER with a solid source through an a8 mask onto an r5g6b5 dest.
 * Each destination pixel is widened to 8888, blended, then narrowed back. */
540 fast_composite_over_n_8_0565 (pixman_implementation_t *imp,
542                               pixman_image_t * src_image,
543                               pixman_image_t * mask_image,
544                               pixman_image_t * dst_image,
555     uint16_t *dst_line, *dst;
557     uint8_t *mask_line, *mask, m;
558     int dst_stride, mask_stride;
561     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
567     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
568     PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
573 	dst_line += dst_stride;
575 	mask_line += mask_stride;
		/* Opaque mask byte: blend the solid source directly. */
590 		d = over (src, CONVERT_0565_TO_0888 (d));
592 		*dst = CONVERT_8888_TO_0565 (d);
597 		d = over (in (src, m), CONVERT_0565_TO_0888 (d));
598 		*dst = CONVERT_8888_TO_0565 (d);
/* Component-alpha OVER with a solid source and 8888 mask onto r5g6b5 dest.
 * src16 caches the solid color pre-converted to 565 for the opaque-mask
 * fast case; otherwise the dest is widened to 8888, blended componentwise,
 * and narrowed back. */
606 fast_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
608                                     pixman_image_t * src_image,
609                                     pixman_image_t * mask_image,
610                                     pixman_image_t * dst_image,
620     uint32_t src, srca, s;
622     uint16_t *dst_line, *dst;
624     uint32_t *mask_line, *mask, ma;
625     int dst_stride, mask_stride;
628     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
634     src16 = CONVERT_8888_TO_0565 (src);
636     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
637     PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
642 	dst_line += dst_stride;
644 	mask_line += mask_stride;
650 	    if (ma == 0xffffffff)
659 		    d = over (src, CONVERT_0565_TO_0888 (d));
660 		    *dst = CONVERT_8888_TO_0565 (d);
666 		d = CONVERT_0565_TO_0888 (d);
670 		UN8x4_MUL_UN8x4 (s, ma);
671 		UN8x4_MUL_UN8 (ma, srca);
673 		UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, s);
675 		*dst = CONVERT_8888_TO_0565 (d);
/* OVER with an a8r8g8b8/a8b8g8r8 source onto an 8888 destination, no mask:
 * straightforward per-pixel over(). */
683 fast_composite_over_8888_8888 (pixman_implementation_t *imp,
685                                pixman_image_t * src_image,
686                                pixman_image_t * mask_image,
687                                pixman_image_t * dst_image,
697     uint32_t *dst_line, *dst;
698     uint32_t *src_line, *src, s;
699     int dst_stride, src_stride;
703     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
704     PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
709 	dst_line += dst_stride;
711 	src_line += src_stride;
721 		*dst = over (s, *dst);
/* SRC copy from x888 to 8888: copy each pixel and force the alpha byte
 * to 0xff. */
728 fast_composite_src_x888_8888 (pixman_implementation_t *imp,
730                               pixman_image_t * src_image,
731                               pixman_image_t * mask_image,
732                               pixman_image_t * dst_image,
742     uint32_t *dst_line, *dst;
743     uint32_t *src_line, *src;
744     int dst_stride, src_stride;
747     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
748     PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
753 	dst_line += dst_stride;
755 	src_line += src_stride;
759 	    *dst++ = (*src++) | 0xff000000;
/* OVER with an 8888 source onto a 24-bit (0888) destination; 3-byte dest
 * pixels go through fetch_24/store_24. */
765 fast_composite_over_8888_0888 (pixman_implementation_t *imp,
767                                pixman_image_t * src_image,
768                                pixman_image_t * mask_image,
769                                pixman_image_t * dst_image,
779     uint8_t *dst_line, *dst;
781     uint32_t *src_line, *src, s;
783     int dst_stride, src_stride;
786     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 3);
787     PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
792 	dst_line += dst_stride;
794 	src_line += src_stride;
806 		d = over (s, fetch_24 (dst));
/* OVER with an 8888 source onto an r5g6b5 destination: widen the dest pixel
 * to 8888, blend, narrow back to 565. */
817 fast_composite_over_8888_0565 (pixman_implementation_t *imp,
819                                pixman_image_t * src_image,
820                                pixman_image_t * mask_image,
821                                pixman_image_t * dst_image,
831     uint16_t *dst_line, *dst;
833     uint32_t *src_line, *src, s;
835     int dst_stride, src_stride;
838     PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
839     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
844 	dst_line += dst_stride;
846 	src_line += src_stride;
862 		d = over (s, CONVERT_0565_TO_0888 (d));
864 		*dst = CONVERT_8888_TO_0565 (d);
/* SRC copy from x888/8888 to r5g6b5: per-pixel format conversion only
 * (alpha, if any, is discarded by the 565 conversion). */
872 fast_composite_src_x888_0565 (pixman_implementation_t *imp,
874                               pixman_image_t * src_image,
875                               pixman_image_t * mask_image,
876                               pixman_image_t * dst_image,
886     uint16_t *dst_line, *dst;
887     uint32_t *src_line, *src, s;
888     int dst_stride, src_stride;
891     PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
892     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
897 	dst_line += dst_stride;
899 	src_line += src_stride;
905 	    *dst = CONVERT_8888_TO_0565 (s);
/* Saturating ADD of an a8 source onto an a8 destination. */
912 fast_composite_add_8000_8000 (pixman_implementation_t *imp,
914                               pixman_image_t * src_image,
915                               pixman_image_t * mask_image,
916                               pixman_image_t * dst_image,
926     uint8_t *dst_line, *dst;
927     uint8_t *src_line, *src;
928     int dst_stride, src_stride;
933     PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
934     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
939 	dst_line += dst_stride;
941 	src_line += src_stride;
		/* Branchless saturation: if t overflowed past 0xff, (t >> 8) is 1
		 * and (0 - 1) ORs in all bits, clamping the result to 0xff. */
953 		s = t | (0 - (t >> 8));
/* Saturating componentwise ADD of an 8888 source onto an 8888 destination. */
963 fast_composite_add_8888_8888 (pixman_implementation_t *imp,
965                               pixman_image_t * src_image,
966                               pixman_image_t * mask_image,
967                               pixman_image_t * dst_image,
977     uint32_t *dst_line, *dst;
978     uint32_t *src_line, *src;
979     int dst_stride, src_stride;
983     PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
984     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
989 	dst_line += dst_stride;
991 	src_line += src_stride;
1003 		UN8x4_ADD_UN8x4 (s, d);
/* ADD with a solid source through an a8 mask onto an a8 destination:
 * per pixel, dst = saturate(dst + mask * alpha(src)). */
1013 fast_composite_add_n_8_8 (pixman_implementation_t *imp,
1015                           pixman_image_t * src_image,
1016                           pixman_image_t * mask_image,
1017                           pixman_image_t * dst_image,
1027     uint8_t *dst_line, *dst;
1028     uint8_t *mask_line, *mask;
1029     int dst_stride, mask_stride;
1034     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
1035     PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
1036     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
1042 	dst_line += dst_stride;
1044 	mask_line += mask_stride;
		/* Scale mask by source alpha, then saturating-add to dest. */
1057 		m = MUL_UN8 (sa, a, tmp);
1058 		r = ADD_UN8 (m, d, tmp);
/* Helpers for addressing single bits inside a1 masks stored as uint32_t
 * words.  Bit order within a word depends on endianness, so the mask
 * creation/advance direction is selected by WORDS_BIGENDIAN. */
1065 #ifdef WORDS_BIGENDIAN
1066 #define CREATE_BITMASK(n) (0x80000000 >> (n))
1067 #define UPDATE_BITMASK(n) ((n) >> 1)
1069 #define CREATE_BITMASK(n) (1 << (n))
1070 #define UPDATE_BITMASK(n) ((n) << 1)
/* Test/set bit n of the bit array starting at word pointer p
 * (n >> 5 selects the word, n & 31 the bit within it).
 * NOTE(review): the trailing ';' after SET_BIT's do/while(0) defeats the
 * usual swallow-the-semicolon idiom — confirm no if/else caller is bitten. */
1073 #define TEST_BIT(p, n) \
1074     (*((p) + ((n) >> 5)) & CREATE_BITMASK ((n) & 31))
1075 #define SET_BIT(p, n) \
1076     do { *((p) + ((n) >> 5)) |= CREATE_BITMASK ((n) & 31); } while (0);
/* ADD of an a1 source onto an a1 destination: for 1-bit alpha, ADD reduces
 * to a bitwise OR, done here one bit at a time with TEST_BIT/SET_BIT.
 * Note that x-coordinates are folded into the bit index rather than into
 * the line pointers (GET_LINE is called with x = 0). */
1079 fast_composite_add_1000_1000 (pixman_implementation_t *imp,
1081                               pixman_image_t * src_image,
1082                               pixman_image_t * mask_image,
1083                               pixman_image_t * dst_image,
1093     uint32_t *dst_line, *dst;
1094     uint32_t *src_line, *src;
1095     int dst_stride, src_stride;
1098     PIXMAN_IMAGE_GET_LINE (src_image, 0, src_y, uint32_t,
1099                            src_stride, src_line, 1);
1100     PIXMAN_IMAGE_GET_LINE (dst_image, 0, dest_y, uint32_t,
1101                            dst_stride, dst_line, 1);
1106 	dst_line += dst_stride;
1108 	src_line += src_stride;
1114 	     * TODO: improve performance by processing uint32_t data instead
1115 	     * of individual bits
1117 	    if (TEST_BIT (src, src_x + w))
1118 		SET_BIT (dst, dest_x + w);
/* OVER with a solid source through an a1 mask onto an 8888 destination.
 * Mask bits are read 32 at a time into 'bitcache' and walked with
 * CREATE_BITMASK/UPDATE_BITMASK; a set bit writes the source.  Two scan
 * loops are visible — presumably an opaque-source store path and a general
 * over() path (NOTE(review): the selecting condition is elided; confirm). */
1124 fast_composite_over_n_1_8888 (pixman_implementation_t *imp,
1126                               pixman_image_t * src_image,
1127                               pixman_image_t * mask_image,
1128                               pixman_image_t * dst_image,
1139     uint32_t *dst, *dst_line;
1140     uint32_t *mask, *mask_line;
1141     int mask_stride, dst_stride;
1142     uint32_t bitcache, bitmask;
1148     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
1153     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t,
1154                            dst_stride, dst_line, 1);
1155     PIXMAN_IMAGE_GET_LINE (mask_image, 0, mask_y, uint32_t,
1156                            mask_stride, mask_line, 1);
     /* Advance to the word containing the first mask bit. */
1157     mask_line += mask_x >> 5;
1164 	    dst_line += dst_stride;
1166 	    mask_line += mask_stride;
1170 	    bitmask = CREATE_BITMASK (mask_x & 31);
1177 		    bitmask = CREATE_BITMASK (0);
1179 		if (bitcache & bitmask)
1181 		bitmask = UPDATE_BITMASK (bitmask);
1191 	    dst_line += dst_stride;
1193 	    mask_line += mask_stride;
1197 	    bitmask = CREATE_BITMASK (mask_x & 31);
1204 		    bitmask = CREATE_BITMASK (0);
1206 		if (bitcache & bitmask)
1207 		    *dst = over (src, *dst);
1208 		bitmask = UPDATE_BITMASK (bitmask);
/* OVER with a solid source through an a1 mask onto an r5g6b5 destination.
 * Same bitcache/bitmask walk as fast_composite_over_n_1_8888; src565 caches
 * the solid color pre-converted to 565 for the opaque store path. */
1216 fast_composite_over_n_1_0565 (pixman_implementation_t *imp,
1218                               pixman_image_t * src_image,
1219                               pixman_image_t * mask_image,
1220                               pixman_image_t * dst_image,
1231     uint16_t *dst, *dst_line;
1232     uint32_t *mask, *mask_line;
1233     int mask_stride, dst_stride;
1234     uint32_t bitcache, bitmask;
1242     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
1247     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t,
1248                            dst_stride, dst_line, 1);
1249     PIXMAN_IMAGE_GET_LINE (mask_image, 0, mask_y, uint32_t,
1250                            mask_stride, mask_line, 1);
     /* Advance to the word containing the first mask bit. */
1251     mask_line += mask_x >> 5;
1255     src565 = CONVERT_8888_TO_0565 (src);
1259 	    dst_line += dst_stride;
1261 	    mask_line += mask_stride;
1265 	    bitmask = CREATE_BITMASK (mask_x & 31);
1272 		    bitmask = CREATE_BITMASK (0);
1274 		if (bitcache & bitmask)
1276 		bitmask = UPDATE_BITMASK (bitmask);
1286 	    dst_line += dst_stride;
1288 	    mask_line += mask_stride;
1292 	    bitmask = CREATE_BITMASK (mask_x & 31);
1299 		    bitmask = CREATE_BITMASK (0);
1301 		if (bitcache & bitmask)
1303 		    d = over (src, CONVERT_0565_TO_0888 (*dst));
1304 		    *dst = CONVERT_8888_TO_0565 (d);
1306 		bitmask = UPDATE_BITMASK (bitmask);
/* SRC with a solid source and no mask: convert the solid color to the
 * destination format (a8 and 565 handled specially) and delegate the whole
 * rectangle to pixman_fill(). */
1318 fast_composite_solid_fill (pixman_implementation_t *imp,
1320                            pixman_image_t * src_image,
1321                            pixman_image_t * mask_image,
1322                            pixman_image_t * dst_image,
1334     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
1336     if (dst_image->bits.format == PIXMAN_a8)
1340     else if (dst_image->bits.format == PIXMAN_r5g6b5 ||
1341              dst_image->bits.format == PIXMAN_b5g6r5)
1343 	src = CONVERT_8888_TO_0565 (src);
1346     pixman_fill (dst_image->bits.bits, dst_image->bits.rowstride,
1347                  PIXMAN_FORMAT_BPP (dst_image->bits.format),
/* SRC copy between images of identical layout: compute byte addresses from
 * the uint32 rowstrides and memcpy one row's worth of bytes per scanline. */
1354 fast_composite_src_memcpy (pixman_implementation_t *imp,
1356                            pixman_image_t * src_image,
1357                            pixman_image_t * mask_image,
1358                            pixman_image_t * dst_image,
1368     int bpp = PIXMAN_FORMAT_BPP (dst_image->bits.format) / 8;
1369     uint32_t n_bytes = width * bpp;
1370     int dst_stride, src_stride;
     /* rowstride is in uint32_t units; convert to bytes. */
1374     src_stride = src_image->bits.rowstride * 4;
1375     dst_stride = dst_image->bits.rowstride * 4;
1377     src = (uint8_t *)src_image->bits.bits + src_y * src_stride + src_x * bpp;
1378     dst = (uint8_t *)dst_image->bits.bits + dest_y * dst_stride + dest_x * bpp;
1382 	memcpy (dst, src, n_bytes);
1389 static force_inline pixman_bool_t
/* Map coordinate *c into [0, size) according to the repeat mode.
 * NONE leaves out-of-range coordinates unmapped (returns false for them);
 * NORMAL wraps, PAD clamps, and the remaining (reflect) case folds *c
 * through MOD(2*size) and mirrors the upper half. */
1390 repeat (pixman_repeat_t repeat, int *c, int size)
1392     if (repeat == PIXMAN_REPEAT_NONE)
1394 	if (*c < 0 || *c >= size)
1397     else if (repeat == PIXMAN_REPEAT_NORMAL)
1404     else if (repeat == PIXMAN_REPEAT_PAD)
1406 	*c = CLIP (*c, 0, size - 1);
1410 	*c = MOD (*c, size * 2);
1412 	    *c = size * 2 - *c - 1;
1417 /* A macroified version of specialized nearest scalers for some
1418 * common 8888 and 565 formats. It supports SRC and OVER ops.
1420 * There are two repeat versions, one that handles repeat normal,
1421 * and one without repeat handling that only works if the src region
1422 * used is completely covered by the pre-repeated source samples.
1424 * The loops are unrolled to process two pixels per iteration for better
1425 * performance on most CPU architectures (superscalar processors
1426 * can issue several operations simultaneously, other processors can hide
1427 * instructions latencies by pipelining operations). Unrolling more
1428 * does not make much sense because the compiler will start running out
1429 * of spare registers soon.
/* Alpha extraction used by the FAST_NEAREST OVER paths: the a8r8g8b8 alpha
 * is the top byte; 565 has no alpha channel so it is reported as opaque. */
1432 #define GET_8888_ALPHA(s) ((s) >> 24)
1433 /* This is not actually used since we don't have an OVER with
1434    565 source, but it is needed to build. */
1435 #define GET_0565_ALPHA(s) 0xff
/* Each expansion of this macro defines one function named
 * fast_composite_scaled_nearest_<scale_func_name>_<OP> specialized for a
 * (source format, dest format, operator, repeat mode) combination; see the
 * design notes in the comment block above.  Only SRC/OVER and NONE/NORMAL
 * repeat are supported — other combinations bail out early.  The main loop
 * is unrolled two pixels per iteration (s1/s2), with a single-pixel tail. */
1437 #define FAST_NEAREST(scale_func_name, SRC_FORMAT, DST_FORMAT, \
1438                      src_type_t, dst_type_t, OP, repeat_mode) \
1440 fast_composite_scaled_nearest_ ## scale_func_name ## _ ## OP (pixman_implementation_t *imp, \
1442                                                               pixman_image_t * src_image, \
1443                                                               pixman_image_t * mask_image, \
1444                                                               pixman_image_t * dst_image, \
1454     dst_type_t *dst_line; \
1455     src_type_t *src_first_line; \
1457     src_type_t s1, s2; \
1461     pixman_fixed_t orig_vx; \
1462     pixman_fixed_t max_vx, max_vy; \
1463     pixman_vector_t v; \
1464     pixman_fixed_t vx, vy; \
1465     pixman_fixed_t unit_x, unit_y; \
1469     int src_stride, dst_stride; \
1471     if (PIXMAN_OP_ ## OP != PIXMAN_OP_SRC && PIXMAN_OP_ ## OP != PIXMAN_OP_OVER) \
1474     if (PIXMAN_REPEAT_ ## repeat_mode != PIXMAN_REPEAT_NORMAL && \
1475         PIXMAN_REPEAT_ ## repeat_mode != PIXMAN_REPEAT_NONE) \
1480     PIXMAN_IMAGE_GET_LINE (dst_image, dst_x, dst_y, dst_type_t, dst_stride, dst_line, 1); \
1481     /* pass in 0 instead of src_x and src_y because src_x and src_y need to be \
1482      * transformed from destination space to source space */ \
1483     PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, src_type_t, src_stride, src_first_line, 1); \
1485     /* reference point is the center of the pixel */ \
1486     v.vector[0] = pixman_int_to_fixed (src_x) + pixman_fixed_1 / 2; \
1487     v.vector[1] = pixman_int_to_fixed (src_y) + pixman_fixed_1 / 2; \
1488     v.vector[2] = pixman_fixed_1; \
1490     if (!pixman_transform_point_3d (src_image->common.transform, &v)) \
1493     unit_x = src_image->common.transform->matrix[0][0]; \
1494     unit_y = src_image->common.transform->matrix[1][1]; \
1496     /* Round down to closest integer, ensuring that 0.5 rounds to 0, not 1 */ \
1497     v.vector[0] -= pixman_fixed_e; \
1498     v.vector[1] -= pixman_fixed_e; \
1503     if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \
1505 	/* Clamp repeating positions inside the actual samples */ \
1506 	max_vx = src_image->bits.width << 16; \
1507 	max_vy = src_image->bits.height << 16; \
1509 	repeat (PIXMAN_REPEAT_NORMAL, &vx, max_vx); \
1510 	repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); \
1515     while (--height >= 0) \
1518 	dst_line += dst_stride; \
1522 	if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \
1523 	    repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); \
1525 	src = src_first_line + src_stride * y; \
1529 	while ((w -= 2) >= 0) \
1533 	    if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \
1535 		/* This works because we know that unit_x is positive */ \
1536 		while (vx >= max_vx) \
1543 	    if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \
1545 		/* This works because we know that unit_x is positive */ \
1546 		while (vx >= max_vx) \
1551 	    if (PIXMAN_OP_ ## OP == PIXMAN_OP_OVER) \
1553 		a1 = GET_ ## SRC_FORMAT ## _ALPHA(s1); \
1554 		a2 = GET_ ## SRC_FORMAT ## _ALPHA(s2); \
1558 		    *dst = CONVERT_ ## SRC_FORMAT ## _TO_ ## DST_FORMAT (s1); \
1562 		    d = CONVERT_ ## DST_FORMAT ## _TO_8888 (*dst); \
1563 		    s1 = CONVERT_ ## SRC_FORMAT ## _TO_8888 (s1); \
1565 		    UN8x4_MUL_UN8_ADD_UN8x4 (d, a1, s1); \
1566 		    *dst = CONVERT_8888_TO_ ## DST_FORMAT (d); \
1572 		    *dst = CONVERT_ ## SRC_FORMAT ## _TO_ ## DST_FORMAT (s2); \
1576 		    d = CONVERT_## DST_FORMAT ## _TO_8888 (*dst); \
1577 		    s2 = CONVERT_## SRC_FORMAT ## _TO_8888 (s2); \
1579 		    UN8x4_MUL_UN8_ADD_UN8x4 (d, a2, s2); \
1580 		    *dst = CONVERT_8888_TO_ ## DST_FORMAT (d); \
1584 	    else /* PIXMAN_OP_SRC */ \
1586 		*dst++ = CONVERT_ ## SRC_FORMAT ## _TO_ ## DST_FORMAT (s1); \
1587 		*dst++ = CONVERT_ ## SRC_FORMAT ## _TO_ ## DST_FORMAT (s2); \
1595 	    if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \
1597 		/* This works because we know that unit_x is positive */ \
1598 		while (vx >= max_vx) \
1603 	    if (PIXMAN_OP_ ## OP == PIXMAN_OP_OVER) \
1605 		a1 = GET_ ## SRC_FORMAT ## _ALPHA(s1); \
1609 		    *dst = CONVERT_ ## SRC_FORMAT ## _TO_ ## DST_FORMAT (s1); \
1613 		    d = CONVERT_## DST_FORMAT ## _TO_8888 (*dst); \
1614 		    s1 = CONVERT_ ## SRC_FORMAT ## _TO_8888 (s1); \
1616 		    UN8x4_MUL_UN8_ADD_UN8x4 (d, a1, s1); \
1617 		    *dst = CONVERT_8888_TO_ ## DST_FORMAT (d); \
1621 	    else /* PIXMAN_OP_SRC */ \
1623 		*dst++ = CONVERT_ ## SRC_FORMAT ## _TO_ ## DST_FORMAT (s1); \
/* Instantiate the nearest-neighbour scalers for every supported
 * (source, dest, op, repeat) combination referenced by c_fast_paths. */
1629 FAST_NEAREST (8888_8888_none, 8888, 8888, uint32_t, uint32_t, SRC, NONE);
1630 FAST_NEAREST (8888_8888_normal, 8888, 8888, uint32_t, uint32_t, SRC, NORMAL);
1631 FAST_NEAREST (8888_8888_none, 8888, 8888, uint32_t, uint32_t, OVER, NONE);
1632 FAST_NEAREST (8888_8888_normal, 8888, 8888, uint32_t, uint32_t, OVER, NORMAL);
1633 FAST_NEAREST (8888_565_none, 8888, 0565, uint32_t, uint16_t, SRC, NONE);
1634 FAST_NEAREST (8888_565_normal, 8888, 0565, uint32_t, uint16_t, SRC, NORMAL);
1635 FAST_NEAREST (565_565_none, 0565, 0565, uint16_t, uint16_t, SRC, NONE);
1636 FAST_NEAREST (565_565_normal, 0565, 0565, uint16_t, uint16_t, SRC, NORMAL);
1637 FAST_NEAREST (8888_565_none, 8888, 0565, uint32_t, uint16_t, OVER, NONE);
1638 FAST_NEAREST (8888_565_normal, 8888, 0565, uint32_t, uint16_t, OVER, NORMAL);
1640 static force_inline uint32_t
/* Fetch the source pixel at column x after applying the repeat mode.
 * x8r8g8b8 pixels get their alpha byte forced to opaque.  When repeat()
 * rejects the coordinate (REPEAT_NONE out of bounds), the elided else
 * branch presumably yields transparent black — confirm. */
1641 fetch_nearest (pixman_repeat_t src_repeat,
1642                pixman_format_code_t format,
1643                uint32_t *src, int x, int src_width)
1645     if (repeat (src_repeat, &x, src_width))
1647 	if (format == PIXMAN_x8r8g8b8)
1648 	    return *(src + x) | 0xff000000;
1658 static force_inline void
/* OVER-combine one pixel in place: *dst = s + *dst * (255 - alpha(s)). */
1659 combine_over (uint32_t s, uint32_t *dst)
1663 	uint8_t ia = 0xff - (s >> 24);
1666 	UN8x4_MUL_UN8_ADD_UN8x4 (*dst, ia, s);
1672 static force_inline void
/* SRC-combine one pixel — presumably just stores s into *dst
 * (NOTE(review): body not visible here; confirm). */
1673 combine_src (uint32_t s, uint32_t *dst)
/* Generic nearest-neighbour scaled compositing for 8888 formats, handling
 * any repeat mode via repeat()/fetch_nearest.  Transforms the destination
 * origin into source space, then walks each destination row advancing by
 * the transform's diagonal units (unit_x/unit_y).  The x loop is unrolled
 * two pixels at a time with a one-pixel tail.  Rows that REPEAT_NONE maps
 * outside the source are cleared (SRC) or skipped (OVER). */
1679 fast_composite_scaled_nearest (pixman_implementation_t *imp,
1681                                pixman_image_t * src_image,
1682                                pixman_image_t * mask_image,
1683                                pixman_image_t * dst_image,
1695     int dst_stride, src_stride;
1696     int src_width, src_height;
1697     pixman_repeat_t src_repeat;
1698     pixman_fixed_t unit_x, unit_y;
1699     pixman_format_code_t src_format;
1703     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
1704     /* pass in 0 instead of src_x and src_y because src_x and src_y need to be
1705      * transformed from destination space to source space
1707     PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, uint32_t, src_stride, src_line, 1);
1709     /* reference point is the center of the pixel */
1710     v.vector[0] = pixman_int_to_fixed (src_x) + pixman_fixed_1 / 2;
1711     v.vector[1] = pixman_int_to_fixed (src_y) + pixman_fixed_1 / 2;
1712     v.vector[2] = pixman_fixed_1;
1714     if (!pixman_transform_point_3d (src_image->common.transform, &v))
1717     unit_x = src_image->common.transform->matrix[0][0];
1718     unit_y = src_image->common.transform->matrix[1][1];
1720     /* Round down to closest integer, ensuring that 0.5 rounds to 0, not 1 */
1721     v.vector[0] -= pixman_fixed_e;
1722     v.vector[1] -= pixman_fixed_e;
1724     src_height = src_image->bits.height;
1725     src_width = src_image->bits.width;
1726     src_repeat = src_image->common.repeat;
1727     src_format = src_image->bits.format;
1732 	pixman_fixed_t vx = v.vector[0];
1733 	int y = pixman_fixed_to_int (vy);
1734 	uint32_t *dst = dst_line;
1736 	dst_line += dst_stride;
1738 	/* adjust the y location by a unit vector in the y direction
1739 	 * this is equivalent to transforming y+1 of the destination point to source space */
1742 	if (!repeat (src_repeat, &y, src_height))
	    /* Row maps outside the source under REPEAT_NONE. */
1744 	    if (op == PIXMAN_OP_SRC)
1745 		memset (dst, 0, sizeof (*dst) * width);
1751 	    uint32_t *src = src_line + y * src_stride;
1758 		x1 = pixman_fixed_to_int (vx);
1761 		x2 = pixman_fixed_to_int (vx);
1766 		s1 = fetch_nearest (src_repeat, src_format, src, x1, src_width);
1767 		s2 = fetch_nearest (src_repeat, src_format, src, x2, src_width);
1769 		if (op == PIXMAN_OP_OVER)
1771 		    combine_over (s1, dst++);
1772 		    combine_over (s2, dst++);
1776 		    combine_src (s1, dst++);
1777 		    combine_src (s2, dst++);
	    /* Odd-width tail: one remaining pixel. */
1786 		x = pixman_fixed_to_int (vx);
1789 		s = fetch_nearest (src_repeat, src_format, src, x, src_width);
1791 		if (op == PIXMAN_OP_OVER)
1792 		    combine_over (s, dst++);
1794 		    combine_src (s, dst++);
/* Table of C fast paths, matched in order against (op, src format, mask
 * format, dest format) by the implementation framework; the helper macros
 * defined inside the initializer expand to one or two table entries for the
 * nearest-neighbour scalers instantiated above. */
1800 static const pixman_fast_path_t c_fast_paths[] =
1802     PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, fast_composite_over_n_8_0565),
1803     PIXMAN_STD_FAST_PATH (OVER, solid, a8, b5g6r5, fast_composite_over_n_8_0565),
1804     PIXMAN_STD_FAST_PATH (OVER, solid, a8, r8g8b8, fast_composite_over_n_8_0888),
1805     PIXMAN_STD_FAST_PATH (OVER, solid, a8, b8g8r8, fast_composite_over_n_8_0888),
1806     PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, fast_composite_over_n_8_8888),
1807     PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, fast_composite_over_n_8_8888),
1808     PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, fast_composite_over_n_8_8888),
1809     PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, fast_composite_over_n_8_8888),
1810     PIXMAN_STD_FAST_PATH (OVER, solid, a1, a8r8g8b8, fast_composite_over_n_1_8888),
1811     PIXMAN_STD_FAST_PATH (OVER, solid, a1, x8r8g8b8, fast_composite_over_n_1_8888),
1812     PIXMAN_STD_FAST_PATH (OVER, solid, a1, a8b8g8r8, fast_composite_over_n_1_8888),
1813     PIXMAN_STD_FAST_PATH (OVER, solid, a1, x8b8g8r8, fast_composite_over_n_1_8888),
1814     PIXMAN_STD_FAST_PATH (OVER, solid, a1, r5g6b5, fast_composite_over_n_1_0565),
1815     PIXMAN_STD_FAST_PATH (OVER, solid, a1, b5g6r5, fast_composite_over_n_1_0565),
1816     PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, fast_composite_over_n_8888_8888_ca),
1817     PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, fast_composite_over_n_8888_8888_ca),
1818     PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, r5g6b5, fast_composite_over_n_8888_0565_ca),
1819     PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, fast_composite_over_n_8888_8888_ca),
1820     PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, fast_composite_over_n_8888_8888_ca),
1821     PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, b5g6r5, fast_composite_over_n_8888_0565_ca),
1822     PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, x8r8g8b8, fast_composite_over_x888_8_8888),
1823     PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, a8r8g8b8, fast_composite_over_x888_8_8888),
1824     PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, x8b8g8r8, fast_composite_over_x888_8_8888),
1825     PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, a8b8g8r8, fast_composite_over_x888_8_8888),
1826     PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, fast_composite_over_8888_8888),
1827     PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, fast_composite_over_8888_8888),
1828     PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, r5g6b5, fast_composite_over_8888_0565),
1829     PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, fast_composite_over_8888_8888),
1830     PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, fast_composite_over_8888_8888),
1831     PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, fast_composite_over_8888_0565),
1832     PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, fast_composite_add_8888_8888),
1833     PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, fast_composite_add_8888_8888),
1834     PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, fast_composite_add_8000_8000),
1835     PIXMAN_STD_FAST_PATH (ADD, a1, null, a1, fast_composite_add_1000_1000),
1836     PIXMAN_STD_FAST_PATH_CA (ADD, solid, a8r8g8b8, a8r8g8b8, fast_composite_add_n_8888_8888_ca),
1837     PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8, fast_composite_add_n_8_8),
1838     PIXMAN_STD_FAST_PATH (SRC, solid, null, a8r8g8b8, fast_composite_solid_fill),
1839     PIXMAN_STD_FAST_PATH (SRC, solid, null, x8r8g8b8, fast_composite_solid_fill),
1840     PIXMAN_STD_FAST_PATH (SRC, solid, null, a8b8g8r8, fast_composite_solid_fill),
1841     PIXMAN_STD_FAST_PATH (SRC, solid, null, x8b8g8r8, fast_composite_solid_fill),
1842     PIXMAN_STD_FAST_PATH (SRC, solid, null, a8, fast_composite_solid_fill),
1843     PIXMAN_STD_FAST_PATH (SRC, solid, null, r5g6b5, fast_composite_solid_fill),
1844     PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, a8r8g8b8, fast_composite_src_x888_8888),
1845     PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, a8b8g8r8, fast_composite_src_x888_8888),
1846     PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, fast_composite_src_memcpy),
1847     PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, a8r8g8b8, fast_composite_src_memcpy),
1848     PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, fast_composite_src_memcpy),
1849     PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, x8b8g8r8, fast_composite_src_memcpy),
1850     PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, a8b8g8r8, fast_composite_src_memcpy),
1851     PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, fast_composite_src_memcpy),
1852     PIXMAN_STD_FAST_PATH (SRC, b8g8r8a8, null, b8g8r8x8, fast_composite_src_memcpy),
1853     PIXMAN_STD_FAST_PATH (SRC, b8g8r8a8, null, b8g8r8a8, fast_composite_src_memcpy),
1854     PIXMAN_STD_FAST_PATH (SRC, b8g8r8x8, null, b8g8r8x8, fast_composite_src_memcpy),
1855     PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, r5g6b5, fast_composite_src_memcpy),
1856     PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, b5g6r5, fast_composite_src_memcpy),
1857     PIXMAN_STD_FAST_PATH (SRC, r8g8b8, null, r8g8b8, fast_composite_src_memcpy),
1858     PIXMAN_STD_FAST_PATH (SRC, b8g8r8, null, b8g8r8, fast_composite_src_memcpy),
1859     PIXMAN_STD_FAST_PATH (SRC, x1r5g5b5, null, x1r5g5b5, fast_composite_src_memcpy),
1860     PIXMAN_STD_FAST_PATH (SRC, a1r5g5b5, null, x1r5g5b5, fast_composite_src_memcpy),
1861     PIXMAN_STD_FAST_PATH (SRC, a8, null, a8, fast_composite_src_memcpy),
1862     PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, r5g6b5, fast_composite_src_x888_0565),
1863     PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, r5g6b5, fast_composite_src_x888_0565),
1864     PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, b5g6r5, fast_composite_src_x888_0565),
1865     PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, b5g6r5, fast_composite_src_x888_0565),
1866     PIXMAN_STD_FAST_PATH (IN, a8, null, a8, fast_composite_in_8_8),
1867     PIXMAN_STD_FAST_PATH (IN, solid, a8, a8, fast_composite_in_n_8_8),
1869 #define SCALED_NEAREST_FLAGS \
1870     (FAST_PATH_SCALE_TRANSFORM | \
1871      FAST_PATH_NO_ALPHA_MAP | \
1872      FAST_PATH_NEAREST_FILTER | \
1873      FAST_PATH_NO_ACCESSORS | \
1874      FAST_PATH_NO_WIDE_FORMAT)
1876 #define HAS_NORMAL_REPEAT_FLAGS \
1877     (FAST_PATH_NO_REFLECT_REPEAT | \
1878      FAST_PATH_NO_PAD_REPEAT | \
1879      FAST_PATH_NO_NONE_REPEAT)
1881 #define SIMPLE_NEAREST_FAST_PATH(op,s,d,func) \
1882     { PIXMAN_OP_ ## op, \
1884       SCALED_NEAREST_FLAGS | HAS_NORMAL_REPEAT_FLAGS | FAST_PATH_16BIT_SAFE | FAST_PATH_X_UNIT_POSITIVE, \
1886       PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
1887       fast_composite_scaled_nearest_ ## func ## _normal ## _ ## op, \
1889     { PIXMAN_OP_ ## op, \
1891       SCALED_NEAREST_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP, \
1893       PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
1894       fast_composite_scaled_nearest_ ## func ## _none ## _ ## op, \
1896     SIMPLE_NEAREST_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, 8888_8888),
1897     SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, 8888_8888),
1898     SIMPLE_NEAREST_FAST_PATH (SRC, x8b8g8r8, x8b8g8r8, 8888_8888),
1899     SIMPLE_NEAREST_FAST_PATH (SRC, a8b8g8r8, x8b8g8r8, 8888_8888),
1901     SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, 8888_8888),
1902     SIMPLE_NEAREST_FAST_PATH (SRC, a8b8g8r8, a8b8g8r8, 8888_8888),
1904     SIMPLE_NEAREST_FAST_PATH (SRC, x8r8g8b8, r5g6b5, 8888_565),
1905     SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, r5g6b5, 8888_565),
1907     SIMPLE_NEAREST_FAST_PATH (SRC, r5g6b5, r5g6b5, 565_565),
1909     SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, 8888_8888),
1910     SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, 8888_8888),
1911     SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, 8888_8888),
1912     SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, 8888_8888),
1914     SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, r5g6b5, 8888_565),
1916 #define NEAREST_FAST_PATH(op,s,d) \
1917     { PIXMAN_OP_ ## op, \
1918       PIXMAN_ ## s, SCALED_NEAREST_FLAGS, \
1920       PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
1921       fast_composite_scaled_nearest, \
1924     NEAREST_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8),
1925     NEAREST_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8),
1926     NEAREST_FAST_PATH (SRC, x8b8g8r8, x8b8g8r8),
1927     NEAREST_FAST_PATH (SRC, a8b8g8r8, x8b8g8r8),
1929     NEAREST_FAST_PATH (SRC, x8r8g8b8, a8r8g8b8),
1930     NEAREST_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8),
1931     NEAREST_FAST_PATH (SRC, x8b8g8r8, a8b8g8r8),
1932     NEAREST_FAST_PATH (SRC, a8b8g8r8, a8b8g8r8),
1934     NEAREST_FAST_PATH (OVER, x8r8g8b8, x8r8g8b8),
1935     NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8),
1936     NEAREST_FAST_PATH (OVER, x8b8g8r8, x8b8g8r8),
1937     NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8),
1939     NEAREST_FAST_PATH (OVER, x8r8g8b8, a8r8g8b8),
1940     NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8),
1941     NEAREST_FAST_PATH (OVER, x8b8g8r8, a8b8g8r8),
1942     NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8),
/* Fill a rectangle of an 8 bpp buffer with the low byte of xor.
 * 'stride' is in uint32_t units, hence the byte_stride conversion. */
1948 pixman_fill8 (uint32_t *bits,
1956     int byte_stride = stride * (int) sizeof (uint32_t);
1957     uint8_t *dst = (uint8_t *) bits;
1958     uint8_t v = xor & 0xff;
1961     dst = dst + y * byte_stride + x;
1965 	for (i = 0; i < width; ++i)
/* Fill a rectangle of a 16 bpp buffer with the low 16 bits of xor;
 * the uint32_t stride is rescaled to uint16_t units. */
1973 pixman_fill16 (uint32_t *bits,
1982         (stride * (int)sizeof (uint32_t)) / (int)sizeof (uint16_t);
1983     uint16_t *dst = (uint16_t *)bits;
1984     uint16_t v = xor & 0xffff;
1987     dst = dst + y * short_stride + x;
1991 	for (i = 0; i < width; ++i)
1994 	dst += short_stride;
/* Fill a rectangle of a 32 bpp buffer with xor; stride is already in
 * uint32_t units so no rescaling is needed. */
1999 pixman_fill32 (uint32_t *bits,
2009     bits = bits + y * stride + x;
2013 	for (i = 0; i < width; ++i)
2020 static pixman_bool_t
/* Implementation fill entry point: dispatch on bits-per-pixel to the
 * specialized fillers above, delegating any other depth to the next
 * implementation in the chain. */
2021 fast_path_fill (pixman_implementation_t *imp,
2034 	pixman_fill8 (bits, stride, x, y, width, height, xor);
2038 	pixman_fill16 (bits, stride, x, y, width, height, xor);
2042 	pixman_fill32 (bits, stride, x, y, width, height, xor);
	/* Unsupported bpp: fall through to the delegate implementation. */
2046 	return _pixman_implementation_fill (
2047 	    imp->delegate, bits, stride, bpp, x, y, width, height, xor);
2054 pixman_implementation_t *
2055 _pixman_implementation_create_fast_path (void)
2057 pixman_implementation_t *general = _pixman_implementation_create_general ();
2058 pixman_implementation_t *imp = _pixman_implementation_create (general, c_fast_paths);
2060 imp->fill = fast_path_fill;