/*
 * Copyright © 2004, 2005 Red Hat, Inc.
 * Copyright © 2004 Nicholas Miell
 * Copyright © 2005 Trolltech AS
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that
 * copyright notice and this permission notice appear in supporting
 * documentation, and that the name of Red Hat not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  Red Hat makes no representations about the
 * suitability of this software for any purpose.  It is provided "as is"
 * without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
 * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
 * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
 * SOFTWARE.
 *
 * Author:  Søren Sandmann (sandmann@redhat.com)
 * Minor Improvements: Nicholas Miell (nmiell@gmail.com)
 * MMX code paths for fbcompose.c by Lars Knoll (lars@trolltech.com)
 *
 * Based on work by Owen Taylor
 */
#if defined USE_X86_MMX || defined USE_ARM_IWMMXT

#include "pixman-private.h"
#include "pixman-combine32.h"

#define CHECKPOINT() error_f ("at %s %d\n", __FUNCTION__, __LINE__)
/* Empty the multimedia state.  For some reason, ARM's mmintrin.h doesn't provide this.  */
extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_empty (void)
{
}
/* Notes about writing mmx code
 *
 * give memory operands as the second operand.  If you give it as the
 * first, gcc will first load it into a register, then use that
 * register
 *
 *   ie. use
 *
 *         _mm_mullo_pi16 (x, mmx_constant);
 *
 *   not
 *
 *         _mm_mullo_pi16 (mmx_constant, x);
 *
 * Also try to minimize dependencies.  I.e., when you need a value, try
 * to calculate it from a value that was calculated as early as
 * possible.
 */
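/* For instance (an illustrative sketch, not part of the original notes):
 * when both halves of a pixel are needed, issuing the two independent
 * unpacks first,
 *
 *     __m64 lo = _mm_unpacklo_pi8 (v, _mm_setzero_si64 ());
 *     __m64 hi = _mm_unpackhi_pi8 (v, _mm_setzero_si64 ());
 *
 * lets them execute in parallel, whereas chaining each computation off
 * the previous result serializes the pipeline.
 */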
/* --------------- MMX primitives ------------------------------------- */

#ifdef USE_ARM_IWMMXT
typedef uint64_t mmxdatafield;
#else
typedef __m64 mmxdatafield;
/* If __m64 is defined as a struct or union, define M64_MEMBER to be the
   name of the member used to access the data */
# if defined(_MSC_VER)
#  define M64_MEMBER m64_u64
# elif defined(__SUNPRO_C)
#  define M64_MEMBER l_
# endif
#endif
typedef struct
{
    mmxdatafield mmx_4x00ff;
    mmxdatafield mmx_4x0080;
    mmxdatafield mmx_565_rgb;
    mmxdatafield mmx_565_unpack_multiplier;
    mmxdatafield mmx_565_r;
    mmxdatafield mmx_565_g;
    mmxdatafield mmx_565_b;
    mmxdatafield mmx_mask_0;
    mmxdatafield mmx_mask_1;
    mmxdatafield mmx_mask_2;
    mmxdatafield mmx_mask_3;
    mmxdatafield mmx_full_alpha;
    mmxdatafield mmx_ffff0000ffff0000;
    mmxdatafield mmx_0000ffff00000000;
    mmxdatafield mmx_000000000000ffff;
} mmx_data_t;
#if defined(_MSC_VER)
# define MMXDATA_INIT(field, val) { val ## UI64 }
#elif defined(M64_MEMBER)       /* __m64 is a struct, not an integral type */
# define MMXDATA_INIT(field, val) field = { val ## ULL }
#else                           /* __m64 is an integral type */
# define MMXDATA_INIT(field, val) field = val ## ULL
#endif
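/* For instance, on the integral-__m64 branch
 *
 *     MMXDATA_INIT (.mmx_4x00ff, 0x00ff00ff00ff00ff)
 *
 * expands to the designated initializer
 *
 *     .mmx_4x00ff = 0x00ff00ff00ff00ffULL
 *
 * so the table below initializes each field with a 64-bit constant in
 * whichever representation __m64 has on this compiler.
 */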
static const mmx_data_t c =
{
    MMXDATA_INIT (.mmx_4x00ff,                   0x00ff00ff00ff00ff),
    MMXDATA_INIT (.mmx_4x0080,                   0x0080008000800080),
    MMXDATA_INIT (.mmx_565_rgb,                  0x000001f0003f001f),
    MMXDATA_INIT (.mmx_565_unpack_multiplier,    0x0000008404100840),
    MMXDATA_INIT (.mmx_565_r,                    0x000000f800000000),
    MMXDATA_INIT (.mmx_565_g,                    0x0000000000fc0000),
    MMXDATA_INIT (.mmx_565_b,                    0x00000000000000f8),
    MMXDATA_INIT (.mmx_mask_0,                   0xffffffffffff0000),
    MMXDATA_INIT (.mmx_mask_1,                   0xffffffff0000ffff),
    MMXDATA_INIT (.mmx_mask_2,                   0xffff0000ffffffff),
    MMXDATA_INIT (.mmx_mask_3,                   0x0000ffffffffffff),
    MMXDATA_INIT (.mmx_full_alpha,               0x00ff000000000000),
    MMXDATA_INIT (.mmx_ffff0000ffff0000,         0xffff0000ffff0000),
    MMXDATA_INIT (.mmx_0000ffff00000000,         0x0000ffff00000000),
    MMXDATA_INIT (.mmx_000000000000ffff,         0x000000000000ffff),
};
#ifdef USE_ARM_IWMMXT
# define MC(x) to_m64 (c.mmx_ ## x)
#elif defined(_MSC_VER)
# define MC(x) ((__m64)c.mmx_ ## x)
#else
# define MC(x) c.mmx_ ## x
#endif
static force_inline __m64
to_m64 (uint64_t x)
{
#ifdef USE_X86_MMX
    return _mm_cvtsi64_m64 (x);
#elif defined M64_MEMBER        /* __m64 is a struct, not an integral type */
    __m64 res;

    res.M64_MEMBER = x;
    return res;
#else                           /* __m64 is an integral type */
    return (__m64)x;
#endif
}

static force_inline uint64_t
to_uint64 (__m64 x)
{
#ifdef USE_X86_MMX
    return _mm_cvtm64_si64 (x);
#elif defined M64_MEMBER        /* __m64 is a struct, not an integral type */
    uint64_t res = x.M64_MEMBER;
    return res;
#else                           /* __m64 is an integral type */
    return (uint64_t)x;
#endif
}
static force_inline __m64
shift (__m64 v, int s)
{
    if (s > 0)
        return _mm_slli_si64 (v, s);
    else if (s < 0)
        return _mm_srli_si64 (v, -s);
    else
        return v;
}

static force_inline __m64
negate (__m64 mask)
{
    return _mm_xor_si64 (mask, MC (4x00ff));
}
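/* pix_multiply below computes (a * b) / 255 per 16-bit lane, using the
 * standard bias-and-fold trick: with x = a * b,
 *
 *     ((x + 0x80) + ((x + 0x80) >> 8)) >> 8
 *
 * equals x / 255 rounded to nearest; e.g. a = b = 0xff gives x = 0xfe01
 * and the sequence yields 0xff. */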
static force_inline __m64
pix_multiply (__m64 a, __m64 b)
{
    __m64 res;

    res = _mm_mullo_pi16 (a, b);
    res = _mm_adds_pu16 (res, MC (4x0080));
    res = _mm_adds_pu16 (res, _mm_srli_pi16 (res, 8));
    res = _mm_srli_pi16 (res, 8);

    return res;
}
static force_inline __m64
pix_add (__m64 a, __m64 b)
{
    return _mm_adds_pu8 (a, b);
}

static force_inline __m64
expand_alpha (__m64 pixel)
{
    __m64 t1, t2;

    t1 = shift (pixel, -48);
    t2 = shift (t1, 16);
    t1 = _mm_or_si64 (t1, t2);
    t2 = shift (t1, 32);
    t1 = _mm_or_si64 (t1, t2);

    return t1;
}
static force_inline __m64
expand_alpha_rev (__m64 pixel)
{
    __m64 t1, t2;

    /* move alpha to low 16 bits and zero the rest */
    t1 = shift (pixel, 48);
    t1 = shift (t1, -48);

    t2 = shift (t1, 16);
    t1 = _mm_or_si64 (t1, t2);
    t2 = shift (t1, 32);
    t1 = _mm_or_si64 (t1, t2);

    return t1;
}
static force_inline __m64
invert_colors (__m64 pixel)
{
    __m64 x, y, z;

    x = y = z = pixel;
    x = _mm_and_si64 (x, MC (ffff0000ffff0000));
    y = _mm_and_si64 (y, MC (000000000000ffff));
    z = _mm_and_si64 (z, MC (0000ffff00000000));

    y = shift (y, 32);
    z = shift (z, -32);

    x = _mm_or_si64 (x, y);
    x = _mm_or_si64 (x, z);

    return x;
}
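/* The over() primitive below is the usual premultiplied OVER operator,
 * dest' = src + (1 - src.alpha) * dest: negate() supplies the
 * (255 - srca) factor and the saturating add clamps the result. */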
static force_inline __m64
over (__m64 src, __m64 srca, __m64 dest)
{
    return _mm_adds_pu8 (src, pix_multiply (dest, negate (srca)));
}
static force_inline __m64
over_rev_non_pre (__m64 src, __m64 dest)
{
    __m64 srca = expand_alpha (src);
    __m64 srcfaaa = _mm_or_si64 (srca, MC (full_alpha));

    return over (pix_multiply (invert_colors (src), srcfaaa), srca, dest);
}
static force_inline __m64
in (__m64 src, __m64 mask)
{
    return pix_multiply (src, mask);
}

static force_inline __m64
in_over_full_src_alpha (__m64 src, __m64 mask, __m64 dest)
{
    src = _mm_or_si64 (src, MC (full_alpha));

    return over (in (src, mask), mask, dest);
}
#ifndef _MSC_VER
static force_inline __m64
in_over (__m64 src, __m64 srca, __m64 mask, __m64 dest)
{
    return over (in (src, mask), pix_multiply (srca, mask), dest);
}

#else

#define in_over(src, srca, mask, dest)                                  \
    over (in (src, mask), pix_multiply (srca, mask), dest)

#endif
/* Elemental unaligned loads */

static __inline__ __m64 ldq_u(uint64_t *p)
{
#ifdef USE_X86_MMX
    /* x86's alignment restrictions are very relaxed. */
    return *(__m64 *)p;
#elif defined USE_ARM_IWMMXT
    int align = (uintptr_t)p & 7;
    __m64 *aligned_p;
    if (align == 0)
        return *(__m64 *)p;
    aligned_p = (__m64 *)((uintptr_t)p & ~7);
    return (__m64) _mm_align_si64 (aligned_p[0], aligned_p[1], align);
#else
    struct __una_u64 { uint64_t x __attribute__((packed)); };
    const struct __una_u64 *ptr = (const struct __una_u64 *) p;
    return (__m64) ptr->x;
#endif
}
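/* The packed-struct idiom above is the portable GCC way to express an
 * unaligned load: wrapping the field in a struct declared
 * __attribute__((packed)) forces the compiler to emit whatever byte
 * access sequence the target needs instead of assuming natural
 * alignment.  ldl_u below is the same idiom for 32-bit loads. */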
static __inline__ uint32_t ldl_u(uint32_t *p)
{
#ifdef USE_X86_MMX
    /* x86's alignment restrictions are very relaxed. */
    return *p;
#else
    struct __una_u32 { uint32_t x __attribute__((packed)); };
    const struct __una_u32 *ptr = (const struct __una_u32 *) p;
    return ptr->x;
#endif
}
static force_inline __m64
load8888 (uint32_t v)
{
    return _mm_unpacklo_pi8 (_mm_cvtsi32_si64 (v), _mm_setzero_si64 ());
}

static force_inline __m64
pack8888 (__m64 lo, __m64 hi)
{
    return _mm_packs_pu16 (lo, hi);
}

static force_inline uint32_t
store8888 (__m64 v)
{
    return _mm_cvtsi64_si32 (pack8888 (v, _mm_setzero_si64 ()));
}
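/* load8888 zero-extends the four bytes of an a8r8g8b8 pixel into four
 * 16-bit lanes: e.g. 0x80ff0010 yields lanes 0x0080, 0x00ff, 0x0000,
 * 0x0010 (a, r, g, b from high to low).  store8888 packs the lanes back
 * with unsigned saturation, so the pair round-trips any in-range pixel. */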
/* Expand 16 bits positioned at @pos (0-3) of an MMX register into
 * 00rr00gg00bb
 *
 * --- Expanding 565 in the low word ---
 *
 * m = (m << (32 - 3)) | (m << (16 - 5)) | m;
 * m = m & (01f0003f001f);
 * m = m * (008404100840);
 * m = m >> 8;
 *
 * Note the trick here - the top word is shifted by another nibble to
 * avoid it bumping into the middle word
 */
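/* Worked example: expanding the pure-red 565 pixel 0xf800 (r = 31).
 * After the shifts and the 01f0003f001f mask, the red word holds
 * 0x01f0 = 31 << 4.  Multiplying by the 0x0084 word of the unpack
 * multiplier gives 31 * 2112 = 0xffc0, and the final >> 8 leaves 0xff,
 * i.e. the 5-bit channel is scaled to 8 bits (31 -> 255). */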
static force_inline __m64
expand565 (__m64 pixel, int pos)
{
    __m64 p = pixel;
    __m64 t1, t2;

    /* move pixel to low 16 bit and zero the rest */
    p = shift (shift (p, (3 - pos) * 16), -48);

    t1 = shift (p, 36 - 11);
    t2 = shift (p, 16 - 5);

    p = _mm_or_si64 (t1, p);
    p = _mm_or_si64 (t2, p);
    p = _mm_and_si64 (p, MC (565_rgb));

    pixel = _mm_mullo_pi16 (p, MC (565_unpack_multiplier));
    return _mm_srli_pi16 (pixel, 8);
}
static force_inline __m64
expand8888 (__m64 in, int pos)
{
    if (pos == 0)
        return _mm_unpacklo_pi8 (in, _mm_setzero_si64 ());
    else
        return _mm_unpackhi_pi8 (in, _mm_setzero_si64 ());
}

static force_inline __m64
expandx888 (__m64 in, int pos)
{
    return _mm_or_si64 (expand8888 (in, pos), MC (full_alpha));
}
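/* pack_565 below does the reverse conversion: the expanded 8-bit r/g/b
 * words are masked down to 5/6/5 bits, shifted into place, and ORed
 * into the 16-bit slot @pos of @target, which is first cleared with the
 * matching mmx_mask_N constant. */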
static force_inline __m64
pack_565 (__m64 pixel, __m64 target, int pos)
{
    __m64 p = pixel;
    __m64 t = target;
    __m64 r, g, b;

    r = _mm_and_si64 (p, MC (565_r));
    g = _mm_and_si64 (p, MC (565_g));
    b = _mm_and_si64 (p, MC (565_b));

    r = shift (r, -(32 - 8) + pos * 16);
    g = shift (g, -(16 - 3) + pos * 16);
    b = shift (b, -(0 + 3) + pos * 16);

    if (pos == 0)
        t = _mm_and_si64 (t, MC (mask_0));
    else if (pos == 1)
        t = _mm_and_si64 (t, MC (mask_1));
    else if (pos == 2)
        t = _mm_and_si64 (t, MC (mask_2));
    else if (pos == 3)
        t = _mm_and_si64 (t, MC (mask_3));

    p = _mm_or_si64 (r, t);
    p = _mm_or_si64 (g, p);

    return _mm_or_si64 (b, p);
}
#ifndef _MSC_VER

static force_inline __m64
pix_add_mul (__m64 x, __m64 a, __m64 y, __m64 b)
{
    x = pix_multiply (x, a);
    y = pix_multiply (y, b);

    return pix_add (x, y);
}

#else

#define pix_add_mul(x, a, y, b)                                         \
    ( x = pix_multiply (x, a),                                          \
      y = pix_multiply (y, b),                                          \
      pix_add (x, y) )

#endif
/* --------------- MMX code paths for fbcompose.c --------------------- */

static force_inline uint32_t
combine (const uint32_t *src, const uint32_t *mask)
{
    uint32_t ssrc = *src;

    if (mask)
    {
        __m64 m = load8888 (*mask);
        __m64 s = load8888 (ssrc);

        m = expand_alpha (m);
        s = pix_multiply (s, m);

        ssrc = store8888 (s);
    }

    return ssrc;
}
static void
mmx_combine_over_u (pixman_implementation_t *imp,
                    pixman_op_t               op,
                    uint32_t *                dest,
                    const uint32_t *          src,
                    const uint32_t *          mask,
                    int                       width)
{
    const uint32_t *end = dest + width;

        uint32_t ssrc = combine (src, mask);
        uint32_t a = ssrc >> 24;

            sa = expand_alpha (s);
            *dest = store8888 (over (s, sa, load8888 (*dest)));

static void
mmx_combine_over_reverse_u (pixman_implementation_t *imp,
                            pixman_op_t               op,
                            uint32_t *                dest,
                            const uint32_t *          src,
                            const uint32_t *          mask,
                            int                       width)
{
    const uint32_t *end = dest + width;

        uint32_t s = combine (src, mask);

        d = load8888 (*dest);
        da = expand_alpha (d);
        *dest = store8888 (over (d, da, load8888 (s)));
static void
mmx_combine_in_u (pixman_implementation_t *imp,
                  pixman_op_t               op,
                  uint32_t *                dest,
                  const uint32_t *          src,
                  const uint32_t *          mask,
                  int                       width)
{
    const uint32_t *end = dest + width;

        x = load8888 (combine (src, mask));
        a = load8888 (*dest);
        a = expand_alpha (a);
        x = pix_multiply (x, a);

        *dest = store8888 (x);

static void
mmx_combine_in_reverse_u (pixman_implementation_t *imp,
                          pixman_op_t               op,
                          uint32_t *                dest,
                          const uint32_t *          src,
                          const uint32_t *          mask,
                          int                       width)
{
    const uint32_t *end = dest + width;

        x = load8888 (*dest);
        a = load8888 (combine (src, mask));
        a = expand_alpha (a);
        x = pix_multiply (x, a);
        *dest = store8888 (x);
static void
mmx_combine_out_u (pixman_implementation_t *imp,
                   pixman_op_t               op,
                   uint32_t *                dest,
                   const uint32_t *          src,
                   const uint32_t *          mask,
                   int                       width)
{
    const uint32_t *end = dest + width;

        x = load8888 (combine (src, mask));
        a = load8888 (*dest);
        a = expand_alpha (a);

        x = pix_multiply (x, a);
        *dest = store8888 (x);

static void
mmx_combine_out_reverse_u (pixman_implementation_t *imp,
                           pixman_op_t               op,
                           uint32_t *                dest,
                           const uint32_t *          src,
                           const uint32_t *          mask,
                           int                       width)
{
    const uint32_t *end = dest + width;

        x = load8888 (*dest);
        a = load8888 (combine (src, mask));
        a = expand_alpha (a);

        x = pix_multiply (x, a);

        *dest = store8888 (x);
static void
mmx_combine_atop_u (pixman_implementation_t *imp,
                    pixman_op_t               op,
                    uint32_t *                dest,
                    const uint32_t *          src,
                    const uint32_t *          mask,
                    int                       width)
{
    const uint32_t *end = dest + width;

        s = load8888 (combine (src, mask));
        d = load8888 (*dest);
        sia = expand_alpha (s);
        da = expand_alpha (d);
        s = pix_add_mul (s, da, d, sia);
        *dest = store8888 (s);

static void
mmx_combine_atop_reverse_u (pixman_implementation_t *imp,
                            pixman_op_t               op,
                            uint32_t *                dest,
                            const uint32_t *          src,
                            const uint32_t *          mask,
                            int                       width)
{
        s = load8888 (combine (src, mask));
        d = load8888 (*dest);
        sa = expand_alpha (s);
        dia = expand_alpha (d);

        s = pix_add_mul (s, dia, d, sa);
        *dest = store8888 (s);
static void
mmx_combine_xor_u (pixman_implementation_t *imp,
                   pixman_op_t               op,
                   uint32_t *                dest,
                   const uint32_t *          src,
                   const uint32_t *          mask,
                   int                       width)
{
    const uint32_t *end = dest + width;

        __m64 s, dia, d, sia;

        s = load8888 (combine (src, mask));
        d = load8888 (*dest);
        sia = expand_alpha (s);
        dia = expand_alpha (d);

        s = pix_add_mul (s, dia, d, sia);
        *dest = store8888 (s);

static void
mmx_combine_add_u (pixman_implementation_t *imp,
                   pixman_op_t               op,
                   uint32_t *                dest,
                   const uint32_t *          src,
                   const uint32_t *          mask,
                   int                       width)
{
    const uint32_t *end = dest + width;

        s = load8888 (combine (src, mask));
        d = load8888 (*dest);

        *dest = store8888 (s);
static void
mmx_combine_saturate_u (pixman_implementation_t *imp,
                        pixman_op_t               op,
                        uint32_t *                dest,
                        const uint32_t *          src,
                        const uint32_t *          mask,
                        int                       width)
{
    const uint32_t *end = dest + width;

        uint32_t s = combine (src, mask);

        __m64 ms = load8888 (s);
        __m64 md = load8888 (d);
        uint32_t sa = s >> 24;
        uint32_t da = ~d >> 24;
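        /* da is the headroom left in the destination: 255 minus the
         * destination alpha.  When the source alpha exceeds it, the
         * source is scaled by da/sa (via DIV_UN8 below) so that the
         * saturating add cannot overflow the destination. */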
            __m64 msa = load8888 (DIV_UN8 (da, sa) << 24);
            msa = expand_alpha (msa);
            ms = pix_multiply (ms, msa);

        md = pix_add (md, ms);
        *dest = store8888 (md);
static void
mmx_combine_src_ca (pixman_implementation_t *imp,
                    pixman_op_t               op,
                    uint32_t *                dest,
                    const uint32_t *          src,
                    const uint32_t *          mask,
                    int                       width)
{
    const uint32_t *end = src + width;

        __m64 a = load8888 (*mask);
        __m64 s = load8888 (*src);

        s = pix_multiply (s, a);
        *dest = store8888 (s);
static void
mmx_combine_over_ca (pixman_implementation_t *imp,
                     pixman_op_t               op,
                     uint32_t *                dest,
                     const uint32_t *          src,
                     const uint32_t *          mask,
                     int                       width)
{
    const uint32_t *end = src + width;

        __m64 a = load8888 (*mask);
        __m64 s = load8888 (*src);
        __m64 d = load8888 (*dest);
        __m64 sa = expand_alpha (s);

        *dest = store8888 (in_over (s, sa, a, d));

static void
mmx_combine_over_reverse_ca (pixman_implementation_t *imp,
                             pixman_op_t               op,
                             uint32_t *                dest,
                             const uint32_t *          src,
                             const uint32_t *          mask,
                             int                       width)
{
    const uint32_t *end = src + width;

        __m64 a = load8888 (*mask);
        __m64 s = load8888 (*src);
        __m64 d = load8888 (*dest);
        __m64 da = expand_alpha (d);

        *dest = store8888 (over (d, da, in (s, a)));
static void
mmx_combine_in_ca (pixman_implementation_t *imp,
                   pixman_op_t               op,
                   uint32_t *                dest,
                   const uint32_t *          src,
                   const uint32_t *          mask,
                   int                       width)
{
    const uint32_t *end = src + width;

        __m64 a = load8888 (*mask);
        __m64 s = load8888 (*src);
        __m64 d = load8888 (*dest);
        __m64 da = expand_alpha (d);

        s = pix_multiply (s, a);
        s = pix_multiply (s, da);
        *dest = store8888 (s);

static void
mmx_combine_in_reverse_ca (pixman_implementation_t *imp,
                           pixman_op_t               op,
                           uint32_t *                dest,
                           const uint32_t *          src,
                           const uint32_t *          mask,
                           int                       width)
{
    const uint32_t *end = src + width;

        __m64 a = load8888 (*mask);
        __m64 s = load8888 (*src);
        __m64 d = load8888 (*dest);
        __m64 sa = expand_alpha (s);

        a = pix_multiply (a, sa);
        d = pix_multiply (d, a);
        *dest = store8888 (d);
static void
mmx_combine_out_ca (pixman_implementation_t *imp,
                    pixman_op_t               op,
                    uint32_t *                dest,
                    const uint32_t *          src,
                    const uint32_t *          mask,
                    int                       width)
{
    const uint32_t *end = src + width;

        __m64 a = load8888 (*mask);
        __m64 s = load8888 (*src);
        __m64 d = load8888 (*dest);
        __m64 da = expand_alpha (d);

        s = pix_multiply (s, a);
        s = pix_multiply (s, da);
        *dest = store8888 (s);

static void
mmx_combine_out_reverse_ca (pixman_implementation_t *imp,
                            pixman_op_t               op,
                            uint32_t *                dest,
                            const uint32_t *          src,
                            const uint32_t *          mask,
                            int                       width)
{
    const uint32_t *end = src + width;

        __m64 a = load8888 (*mask);
        __m64 s = load8888 (*src);
        __m64 d = load8888 (*dest);
        __m64 sa = expand_alpha (s);

        a = pix_multiply (a, sa);

        d = pix_multiply (d, a);
        *dest = store8888 (d);
static void
mmx_combine_atop_ca (pixman_implementation_t *imp,
                     pixman_op_t               op,
                     uint32_t *                dest,
                     const uint32_t *          src,
                     const uint32_t *          mask,
                     int                       width)
{
    const uint32_t *end = src + width;

        __m64 a = load8888 (*mask);
        __m64 s = load8888 (*src);
        __m64 d = load8888 (*dest);
        __m64 da = expand_alpha (d);
        __m64 sa = expand_alpha (s);

        s = pix_multiply (s, a);
        a = pix_multiply (a, sa);

        d = pix_add_mul (d, a, s, da);
        *dest = store8888 (d);

static void
mmx_combine_atop_reverse_ca (pixman_implementation_t *imp,
                             pixman_op_t               op,
                             uint32_t *                dest,
                             const uint32_t *          src,
                             const uint32_t *          mask,
                             int                       width)
{
    const uint32_t *end = src + width;

        __m64 a = load8888 (*mask);
        __m64 s = load8888 (*src);
        __m64 d = load8888 (*dest);
        __m64 da = expand_alpha (d);
        __m64 sa = expand_alpha (s);

        s = pix_multiply (s, a);
        a = pix_multiply (a, sa);

        d = pix_add_mul (d, a, s, da);
        *dest = store8888 (d);
static void
mmx_combine_xor_ca (pixman_implementation_t *imp,
                    pixman_op_t               op,
                    uint32_t *                dest,
                    const uint32_t *          src,
                    const uint32_t *          mask,
                    int                       width)
{
    const uint32_t *end = src + width;

        __m64 a = load8888 (*mask);
        __m64 s = load8888 (*src);
        __m64 d = load8888 (*dest);
        __m64 da = expand_alpha (d);
        __m64 sa = expand_alpha (s);

        s = pix_multiply (s, a);
        a = pix_multiply (a, sa);

        d = pix_add_mul (d, a, s, da);
        *dest = store8888 (d);

static void
mmx_combine_add_ca (pixman_implementation_t *imp,
                    pixman_op_t               op,
                    uint32_t *                dest,
                    const uint32_t *          src,
                    const uint32_t *          mask,
                    int                       width)
{
    const uint32_t *end = src + width;

        __m64 a = load8888 (*mask);
        __m64 s = load8888 (*src);
        __m64 d = load8888 (*dest);

        s = pix_multiply (s, a);

        *dest = store8888 (d);
/* ------------- MMX code paths called from fbpict.c -------------------- */

static void
mmx_composite_over_n_8888 (pixman_implementation_t *imp,
                           pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t *dst_line, *dst;

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);

    vsrc = load8888 (src);
    vsrca = expand_alpha (vsrc);

        dst_line += dst_stride;
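        /* Typical structure of these loops: handle single pixels until
         * dst is 8-byte aligned, then process two pixels per __m64
         * write, then mop up any remaining odd pixel.  The same
         * head/body/tail pattern recurs throughout this file. */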
        while (w && (unsigned long)dst & 7)
            *dst = store8888 (over (vsrc, vsrca, load8888 (*dst)));

            vdest = *(__m64 *)dst;

            dest0 = over (vsrc, vsrca, expand8888 (vdest, 0));
            dest1 = over (vsrc, vsrca, expand8888 (vdest, 1));

            *(__m64 *)dst = pack8888 (dest0, dest1);

            *dst = store8888 (over (vsrc, vsrca, load8888 (*dst)));
static void
mmx_composite_over_n_0565 (pixman_implementation_t *imp,
                           pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint16_t *dst_line, *dst;

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);

    vsrc = load8888 (src);
    vsrca = expand_alpha (vsrc);

        dst_line += dst_stride;

        while (w && (unsigned long)dst & 7)
            __m64 vdest = expand565 (to_m64 (d), 0);

            vdest = pack_565 (over (vsrc, vsrca, vdest), vdest, 0);
            *dst = to_uint64 (vdest);

            vdest = *(__m64 *)dst;

            vdest = pack_565 (over (vsrc, vsrca, expand565 (vdest, 0)), vdest, 0);
            vdest = pack_565 (over (vsrc, vsrca, expand565 (vdest, 1)), vdest, 1);
            vdest = pack_565 (over (vsrc, vsrca, expand565 (vdest, 2)), vdest, 2);
            vdest = pack_565 (over (vsrc, vsrca, expand565 (vdest, 3)), vdest, 3);

            *(__m64 *)dst = vdest;

            __m64 vdest = expand565 (to_m64 (d), 0);

            vdest = pack_565 (over (vsrc, vsrca, vdest), vdest, 0);
            *dst = to_uint64 (vdest);
static void
mmx_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
                                   pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t *mask_line;
    int dst_stride, mask_stride;

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);

    vsrc = load8888 (src);
    vsrca = expand_alpha (vsrc);

        uint32_t *p = (uint32_t *)mask_line;
        uint32_t *q = (uint32_t *)dst_line;

        while (twidth && (unsigned long)q & 7)
            uint32_t m = *(uint32_t *)p;

                __m64 vdest = load8888 (*q);
                vdest = in_over (vsrc, vsrca, load8888 (m), vdest);
                *q = store8888 (vdest);

                __m64 vdest = *(__m64 *)q;

                dest0 = in_over (vsrc, vsrca, load8888 (m0),
                                 expand8888 (vdest, 0));
                dest1 = in_over (vsrc, vsrca, load8888 (m1),
                                 expand8888 (vdest, 1));

                *(__m64 *)q = pack8888 (dest0, dest1);

            uint32_t m = *(uint32_t *)p;

                __m64 vdest = load8888 (*q);
                vdest = in_over (vsrc, vsrca, load8888 (m), vdest);
                *q = store8888 (vdest);

        dst_line += dst_stride;
        mask_line += mask_stride;
static void
mmx_composite_over_8888_n_8888 (pixman_implementation_t *imp,
                                pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t *dst_line, *dst;
    uint32_t *src_line, *src;
    int dst_stride, src_stride;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);

    mask = _pixman_image_get_solid (imp, mask_image, dest_image->bits.format);
    mask &= 0xff000000;
    mask = mask | mask >> 8 | mask >> 16 | mask >> 24;
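    /* The solid mask has been reduced to its alpha byte; the shifts
     * above broadcast that alpha into all four channels, so vmask can
     * be used as a per-component mask by in_over(). */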
    vmask = load8888 (mask);

        dst_line += dst_stride;
        src_line += src_stride;

        while (w && (unsigned long)dst & 7)
            __m64 s = load8888 (*src);
            __m64 d = load8888 (*dst);

            *dst = store8888 (in_over (s, expand_alpha (s), vmask, d));

            __m64 vs = ldq_u((uint64_t *)src);
            __m64 vd = *(__m64 *)dst;
            __m64 vsrc0 = expand8888 (vs, 0);
            __m64 vsrc1 = expand8888 (vs, 1);

            *(__m64 *)dst = pack8888 (
                in_over (vsrc0, expand_alpha (vsrc0), vmask, expand8888 (vd, 0)),
                in_over (vsrc1, expand_alpha (vsrc1), vmask, expand8888 (vd, 1)));

            __m64 s = load8888 (*src);
            __m64 d = load8888 (*dst);

            *dst = store8888 (in_over (s, expand_alpha (s), vmask, d));
static void
mmx_composite_over_x888_n_8888 (pixman_implementation_t *imp,
                                pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t *dst_line, *dst;
    uint32_t *src_line, *src;
    int dst_stride, src_stride;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
    mask = _pixman_image_get_solid (imp, mask_image, dest_image->bits.format);

    mask &= 0xff000000;
    mask = mask | mask >> 8 | mask >> 16 | mask >> 24;
    vmask = load8888 (mask);

        dst_line += dst_stride;
        src_line += src_stride;

        while (w && (unsigned long)dst & 7)
            __m64 s = load8888 (*src | 0xff000000);
            __m64 d = load8888 (*dst);

            *dst = store8888 (in_over (s, srca, vmask, d));
            __m64 vd0 = *(__m64 *)(dst + 0);
            __m64 vd1 = *(__m64 *)(dst + 2);
            __m64 vd2 = *(__m64 *)(dst + 4);
            __m64 vd3 = *(__m64 *)(dst + 6);
            __m64 vd4 = *(__m64 *)(dst + 8);
            __m64 vd5 = *(__m64 *)(dst + 10);
            __m64 vd6 = *(__m64 *)(dst + 12);
            __m64 vd7 = *(__m64 *)(dst + 14);

            __m64 vs0 = ldq_u((uint64_t *)(src + 0));
            __m64 vs1 = ldq_u((uint64_t *)(src + 2));
            __m64 vs2 = ldq_u((uint64_t *)(src + 4));
            __m64 vs3 = ldq_u((uint64_t *)(src + 6));
            __m64 vs4 = ldq_u((uint64_t *)(src + 8));
            __m64 vs5 = ldq_u((uint64_t *)(src + 10));
            __m64 vs6 = ldq_u((uint64_t *)(src + 12));
            __m64 vs7 = ldq_u((uint64_t *)(src + 14));

            vd0 = pack8888 (
                in_over (expandx888 (vs0, 0), srca, vmask, expand8888 (vd0, 0)),
                in_over (expandx888 (vs0, 1), srca, vmask, expand8888 (vd0, 1)));

            vd1 = pack8888 (
                in_over (expandx888 (vs1, 0), srca, vmask, expand8888 (vd1, 0)),
                in_over (expandx888 (vs1, 1), srca, vmask, expand8888 (vd1, 1)));

            vd2 = pack8888 (
                in_over (expandx888 (vs2, 0), srca, vmask, expand8888 (vd2, 0)),
                in_over (expandx888 (vs2, 1), srca, vmask, expand8888 (vd2, 1)));

            vd3 = pack8888 (
                in_over (expandx888 (vs3, 0), srca, vmask, expand8888 (vd3, 0)),
                in_over (expandx888 (vs3, 1), srca, vmask, expand8888 (vd3, 1)));

            vd4 = pack8888 (
                in_over (expandx888 (vs4, 0), srca, vmask, expand8888 (vd4, 0)),
                in_over (expandx888 (vs4, 1), srca, vmask, expand8888 (vd4, 1)));

            vd5 = pack8888 (
                in_over (expandx888 (vs5, 0), srca, vmask, expand8888 (vd5, 0)),
                in_over (expandx888 (vs5, 1), srca, vmask, expand8888 (vd5, 1)));

            vd6 = pack8888 (
                in_over (expandx888 (vs6, 0), srca, vmask, expand8888 (vd6, 0)),
                in_over (expandx888 (vs6, 1), srca, vmask, expand8888 (vd6, 1)));

            vd7 = pack8888 (
                in_over (expandx888 (vs7, 0), srca, vmask, expand8888 (vd7, 0)),
                in_over (expandx888 (vs7, 1), srca, vmask, expand8888 (vd7, 1)));

            *(__m64 *)(dst + 0) = vd0;
            *(__m64 *)(dst + 2) = vd1;
            *(__m64 *)(dst + 4) = vd2;
            *(__m64 *)(dst + 6) = vd3;
            *(__m64 *)(dst + 8) = vd4;
            *(__m64 *)(dst + 10) = vd5;
            *(__m64 *)(dst + 12) = vd6;
            *(__m64 *)(dst + 14) = vd7;

            __m64 s = load8888 (*src | 0xff000000);
            __m64 d = load8888 (*dst);

            *dst = store8888 (in_over (s, srca, vmask, d));
static void
mmx_composite_over_8888_8888 (pixman_implementation_t *imp,
                              pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t *dst_line, *dst;
    uint32_t *src_line, *src;
    int dst_stride, src_stride;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);

        dst_line += dst_stride;
        src_line += src_stride;

            sa = expand_alpha (ms);
            *dst = store8888 (over (ms, sa, load8888 (*dst)));
static void
mmx_composite_over_8888_0565 (pixman_implementation_t *imp,
                              pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint16_t *dst_line, *dst;
    uint32_t *src_line, *src;
    int dst_stride, src_stride;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);

    assert (src_image->drawable == mask_image->drawable);

        dst_line += dst_stride;
        src_line += src_stride;

        while (w && (unsigned long)dst & 7)
            __m64 vsrc = load8888 (*src);
            __m64 vdest = expand565 (to_m64 (d), 0);

            vdest = pack_565 (
                over (vsrc, expand_alpha (vsrc), vdest), vdest, 0);

            *dst = to_uint64 (vdest);

            __m64 vsrc0, vsrc1, vsrc2, vsrc3;

            vsrc0 = load8888 (*(src + 0));
            vsrc1 = load8888 (*(src + 1));
            vsrc2 = load8888 (*(src + 2));
            vsrc3 = load8888 (*(src + 3));

            vdest = *(__m64 *)dst;

            vdest = pack_565 (over (vsrc0, expand_alpha (vsrc0), expand565 (vdest, 0)), vdest, 0);
            vdest = pack_565 (over (vsrc1, expand_alpha (vsrc1), expand565 (vdest, 1)), vdest, 1);
            vdest = pack_565 (over (vsrc2, expand_alpha (vsrc2), expand565 (vdest, 2)), vdest, 2);
            vdest = pack_565 (over (vsrc3, expand_alpha (vsrc3), expand565 (vdest, 3)), vdest, 3);

            *(__m64 *)dst = vdest;

            __m64 vsrc = load8888 (*src);
            __m64 vdest = expand565 (to_m64 (d), 0);

            vdest = pack_565 (over (vsrc, expand_alpha (vsrc), vdest), vdest, 0);

            *dst = to_uint64 (vdest);
static void
mmx_composite_over_n_8_8888 (pixman_implementation_t *imp,
                             pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t *dst_line, *dst;
    uint8_t *mask_line, *mask;
    int dst_stride, mask_stride;

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);

    srcsrc = (uint64_t)src << 32 | src;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);

    vsrc = load8888 (src);
    vsrca = expand_alpha (vsrc);

        dst_line += dst_stride;
        mask_line += mask_stride;

        while (w && (unsigned long)dst & 7)
                __m64 vdest = in_over (vsrc, vsrca,
                                       expand_alpha_rev (to_m64 (m)),
                                       load8888 (*dst));

                *dst = store8888 (vdest);

            if (srca == 0xff && (m0 & m1) == 0xff)
                *(uint64_t *)dst = srcsrc;

                vdest = *(__m64 *)dst;

                dest0 = in_over (vsrc, vsrca, expand_alpha_rev (to_m64 (m0)),
                                 expand8888 (vdest, 0));
                dest1 = in_over (vsrc, vsrca, expand_alpha_rev (to_m64 (m1)),
                                 expand8888 (vdest, 1));

                *(__m64 *)dst = pack8888 (dest0, dest1);

                __m64 vdest = load8888 (*dst);

                vdest = in_over (
                    vsrc, vsrca, expand_alpha_rev (to_m64 (m)), vdest);
                *dst = store8888 (vdest);
static pixman_bool_t
pixman_fill_mmx (uint32_t *bits,
                 int       stride,
                 int       bpp,
                 int       x,
                 int       y,
                 int       width,
                 int       height,
                 uint32_t  xor)
{
    uint64_t fill;
    __m64 vfill;
    uint32_t byte_width;
    uint8_t *byte_line;

#if defined __GNUC__ && defined USE_X86_MMX
    __m64 v1, v2, v3, v4, v5, v6, v7;
#endif

    if (bpp != 16 && bpp != 32 && bpp != 8)
        return FALSE;

    if (bpp == 8)
    {
        stride = stride * (int) sizeof (uint32_t) / 1;
        byte_line = (uint8_t *)(((uint8_t *)bits) + stride * y + x);
        byte_width = width;
        xor = (xor & 0xff) * 0x01010101;
    }
    else if (bpp == 16)
    {
        stride = stride * (int) sizeof (uint32_t) / 2;
        byte_line = (uint8_t *)(((uint16_t *)bits) + stride * y + x);
        byte_width = 2 * width;
        xor = (xor & 0xffff) * 0x00010001;
    }
    else
    {
        stride = stride * (int) sizeof (uint32_t) / 4;
        byte_line = (uint8_t *)(((uint32_t *)bits) + stride * y + x);
        byte_width = 4 * width;
    }

    fill = ((uint64_t)xor << 32) | xor;
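    /* Replicate the (already channel-replicated) 32-bit xor value into
     * both halves of a 64-bit word: together with the multiplications
     * above, this yields the fill pattern for 8, 16 or 32 bpp in a
     * single __m64 register. */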
    vfill = to_m64 (fill);

#if defined __GNUC__ && defined USE_X86_MMX
    __asm__ (
        "movq %7, %0\n"
        "movq %7, %1\n"
        "movq %7, %2\n"
        "movq %7, %3\n"
        "movq %7, %4\n"
        "movq %7, %5\n"
        "movq %7, %6\n"
        : "=&y" (v1), "=&y" (v2), "=&y" (v3),
          "=&y" (v4), "=&y" (v5), "=&y" (v6), "=y" (v7)
        : "y" (vfill));
#endif

        uint8_t *d = byte_line;

        byte_line += stride;

        while (w >= 1 && ((unsigned long)d & 1))
            *(uint8_t *)d = (xor & 0xff);

        while (w >= 2 && ((unsigned long)d & 3))
            *(uint16_t *)d = xor;

        while (w >= 4 && ((unsigned long)d & 7))
            *(uint32_t *)d = xor;

#if defined __GNUC__ && defined USE_X86_MMX
            __asm__ (
                "movq %1,   (%0)\n"
                "movq %2,  8(%0)\n"
                "movq %3, 16(%0)\n"
                "movq %4, 24(%0)\n"
                "movq %5, 32(%0)\n"
                "movq %6, 40(%0)\n"
                "movq %7, 48(%0)\n"
                "movq %8, 56(%0)\n"
                :
                : "r" (d),
                  "y" (vfill), "y" (v1), "y" (v2), "y" (v3),
                  "y" (v4), "y" (v5), "y" (v6), "y" (v7)
                : "memory");
#else
            *(__m64*) (d + 0) = vfill;
            *(__m64*) (d + 8) = vfill;
            *(__m64*) (d + 16) = vfill;
            *(__m64*) (d + 24) = vfill;
            *(__m64*) (d + 32) = vfill;
            *(__m64*) (d + 40) = vfill;
            *(__m64*) (d + 48) = vfill;
            *(__m64*) (d + 56) = vfill;
#endif

            *(uint32_t *)d = xor;

            *(uint16_t *)d = xor;

            *(uint8_t *)d = (xor & 0xff);

    return TRUE;
}
static void
mmx_composite_src_n_8_8888 (pixman_implementation_t *imp,
                            pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t *dst_line, *dst;
    uint8_t *mask_line, *mask;
    int dst_stride, mask_stride;

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);

        pixman_fill_mmx (dest_image->bits.bits, dest_image->bits.rowstride,
                         PIXMAN_FORMAT_BPP (dest_image->bits.format),
                         dest_x, dest_y, width, height, 0);

    srcsrc = (uint64_t)src << 32 | src;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);

    vsrc = load8888 (src);

        dst_line += dst_stride;
        mask_line += mask_stride;

        while (w && (unsigned long)dst & 7)
                __m64 vdest = in (vsrc, expand_alpha_rev (to_m64 (m)));

                *dst = store8888 (vdest);

            if (srca == 0xff && (m0 & m1) == 0xff)
                *(uint64_t *)dst = srcsrc;

                dest0 = in (vsrc, expand_alpha_rev (to_m64 (m0)));
                dest1 = in (vsrc, expand_alpha_rev (to_m64 (m1)));

                *(__m64 *)dst = pack8888 (dest0, dest1);

                *(uint64_t *)dst = 0;

                __m64 vdest = load8888 (*dst);

                vdest = in (vsrc, expand_alpha_rev (to_m64 (m)));
                *dst = store8888 (vdest);
static void
mmx_composite_over_n_8_0565 (pixman_implementation_t *imp,
                             pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint16_t *dst_line, *dst;
    uint8_t *mask_line, *mask;
    int dst_stride, mask_stride;
    __m64 vsrc, vsrca, tmp;
    uint64_t srcsrcsrcsrc, src16;

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);

    vsrc = load8888 (src);
    vsrca = expand_alpha (vsrc);

    tmp = pack_565 (vsrc, _mm_setzero_si64 (), 0);
    src16 = to_uint64 (tmp);

    srcsrcsrcsrc =
        (uint64_t)src16 << 48 | (uint64_t)src16 << 32 |
        (uint64_t)src16 << 16 | (uint64_t)src16;

        dst_line += dst_stride;
        mask_line += mask_stride;

        while (w && (unsigned long)dst & 7)
                __m64 vd = to_m64 (d);
                __m64 vdest = in_over (
                    vsrc, vsrca, expand_alpha_rev (to_m64 (m)), expand565 (vd, 0));

                vd = pack_565 (vdest, _mm_setzero_si64 (), 0);
                *dst = to_uint64 (vd);

            uint64_t m0, m1, m2, m3;

            if (srca == 0xff && (m0 & m1 & m2 & m3) == 0xff)
            {
                *(uint64_t *)dst = srcsrcsrcsrc;
            }
            else if (m0 | m1 | m2 | m3)
            {
                __m64 vm0, vm1, vm2, vm3;

                vdest = *(__m64 *)dst;

                vdest = pack_565 (in_over (vsrc, vsrca, expand_alpha_rev (vm0),
                                           expand565 (vdest, 0)), vdest, 0);

                vdest = pack_565 (in_over (vsrc, vsrca, expand_alpha_rev (vm1),
                                           expand565 (vdest, 1)), vdest, 1);

                vdest = pack_565 (in_over (vsrc, vsrca, expand_alpha_rev (vm2),
                                           expand565 (vdest, 2)), vdest, 2);

                vdest = pack_565 (in_over (vsrc, vsrca, expand_alpha_rev (vm3),
                                           expand565 (vdest, 3)), vdest, 3);

                *(__m64 *)dst = vdest;
            }

                __m64 vd = to_m64 (d);
                __m64 vdest = in_over (vsrc, vsrca, expand_alpha_rev (to_m64 (m)),
                                       expand565 (vd, 0));
                vd = pack_565 (vdest, _mm_setzero_si64 (), 0);
                *dst = to_uint64 (vd);
static void
mmx_composite_over_pixbuf_0565 (pixman_implementation_t *imp,
                                pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint16_t *dst_line, *dst;
    uint32_t *src_line, *src;
    int dst_stride, src_stride;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);

    assert (src_image->drawable == mask_image->drawable);

        dst_line += dst_stride;
        src_line += src_stride;

        while (w && (unsigned long)dst & 7)
            __m64 vsrc = load8888 (*src);
            __m64 vdest = expand565 (to_m64 (d), 0);

            vdest = pack_565 (over_rev_non_pre (vsrc, vdest), vdest, 0);

            *dst = to_uint64 (vdest);

            uint32_t s0, s1, s2, s3;
            unsigned char a0, a1, a2, a3;

            if ((a0 & a1 & a2 & a3) == 0xFF)
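                /* All four source pixels are opaque: skip the blend and
                 * just color-swap each one down to 565. */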
            {
                __m64 vdest;

                vdest = pack_565 (invert_colors (load8888 (s0)), _mm_setzero_si64 (), 0);
                vdest = pack_565 (invert_colors (load8888 (s1)), vdest, 1);
                vdest = pack_565 (invert_colors (load8888 (s2)), vdest, 2);
                vdest = pack_565 (invert_colors (load8888 (s3)), vdest, 3);

                *(__m64 *)dst = vdest;
            }
            else if (s0 | s1 | s2 | s3)
            {
                __m64 vdest = *(__m64 *)dst;

                vdest = pack_565 (over_rev_non_pre (load8888 (s0), expand565 (vdest, 0)), vdest, 0);
                vdest = pack_565 (over_rev_non_pre (load8888 (s1), expand565 (vdest, 1)), vdest, 1);
                vdest = pack_565 (over_rev_non_pre (load8888 (s2), expand565 (vdest, 2)), vdest, 2);
                vdest = pack_565 (over_rev_non_pre (load8888 (s3), expand565 (vdest, 3)), vdest, 3);

                *(__m64 *)dst = vdest;
            }

            __m64 vsrc = load8888 (*src);
            __m64 vdest = expand565 (to_m64 (d), 0);

            vdest = pack_565 (over_rev_non_pre (vsrc, vdest), vdest, 0);

            *dst = to_uint64 (vdest);
static void
mmx_composite_over_pixbuf_8888 (pixman_implementation_t *imp,
                                pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t *dst_line, *dst;
    uint32_t *src_line, *src;
    int dst_stride, src_stride;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);

    assert (src_image->drawable == mask_image->drawable);

        dst_line += dst_stride;
        src_line += src_stride;

        while (w && (unsigned long)dst & 7)
            __m64 s = load8888 (*src);
            __m64 d = load8888 (*dst);

            *dst = store8888 (over_rev_non_pre (s, d));

            unsigned char a0, a1;

            if ((a0 & a1) == 0xFF)
            {
                d0 = invert_colors (load8888 (s0));
                d1 = invert_colors (load8888 (s1));

                *(__m64 *)dst = pack8888 (d0, d1);
            }
            else
            {
                __m64 vdest = *(__m64 *)dst;

                d0 = over_rev_non_pre (load8888 (s0), expand8888 (vdest, 0));
                d1 = over_rev_non_pre (load8888 (s1), expand8888 (vdest, 1));

                *(__m64 *)dst = pack8888 (d0, d1);
            }

            __m64 s = load8888 (*src);
            __m64 d = load8888 (*dst);

            *dst = store8888 (over_rev_non_pre (s, d));
static void
mmx_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
                                   pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t *mask_line;
    int dst_stride, mask_stride;

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);

    vsrc = load8888 (src);
    vsrca = expand_alpha (vsrc);

        uint32_t *p = (uint32_t *)mask_line;
        uint16_t *q = (uint16_t *)dst_line;

        while (twidth && ((unsigned long)q & 7))
            uint32_t m = *(uint32_t *)p;

                __m64 vdest = expand565 (to_m64 (d), 0);
                vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m), vdest), vdest, 0);
                *q = to_uint64 (vdest);

            uint32_t m0, m1, m2, m3;

            if ((m0 | m1 | m2 | m3))
            {
                __m64 vdest = *(__m64 *)q;

                vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m0), expand565 (vdest, 0)), vdest, 0);
                vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m1), expand565 (vdest, 1)), vdest, 1);
                vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m2), expand565 (vdest, 2)), vdest, 2);
                vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m3), expand565 (vdest, 3)), vdest, 3);

                *(__m64 *)q = vdest;
            }

                __m64 vdest = expand565 (to_m64 (d), 0);
                vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m), vdest), vdest, 0);
                *q = to_uint64 (vdest);

        mask_line += mask_stride;
        dst_line += dst_stride;
static void
mmx_composite_in_n_8_8 (pixman_implementation_t *imp,
                        pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint8_t *dst_line, *dst;
    uint8_t *mask_line, *mask;
    int dst_stride, mask_stride;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);

    vsrc = load8888 (src);
    vsrca = expand_alpha (vsrc);

        dst_line += dst_stride;
        mask_line += mask_stride;

        while (w && (unsigned long)dst & 7)
            m = MUL_UN8 (sa, a, tmp);
            d = MUL_UN8 (m, d, tmp);

            vmask = load8888 (ldl_u((uint32_t *)mask));
            vdest = load8888 (*(uint32_t *)dst);

            *(uint32_t *)dst = store8888 (in (in (vsrca, vmask), vdest));

            m = MUL_UN8 (sa, a, tmp);
            d = MUL_UN8 (m, d, tmp);
static void
mmx_composite_in_8_8 (pixman_implementation_t *imp,
                      pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint8_t *dst_line, *dst;
    uint8_t *src_line, *src;
    int src_stride, dst_stride;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);

        dst_line += dst_stride;
        src_line += src_stride;

        while (w && (unsigned long)dst & 3)
            *dst = MUL_UN8 (s, d, tmp);

            uint32_t *s = (uint32_t *)src;
            uint32_t *d = (uint32_t *)dst;

            *d = store8888 (in (load8888 (ldl_u((uint32_t *)s)), load8888 (*d)));

            *dst = MUL_UN8 (s, d, tmp);
static void
mmx_composite_add_n_8_8 (pixman_implementation_t *imp,
                         pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint8_t *dst_line, *dst;
    uint8_t *mask_line, *mask;
    int dst_stride, mask_stride;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);

    src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);

    vsrc = load8888 (src);
    vsrca = expand_alpha (vsrc);

        dst_line += dst_stride;
        mask_line += mask_stride;

        while (w && (unsigned long)dst & 3)
            m = MUL_UN8 (sa, a, tmp);
            r = ADD_UN8 (m, d, tmp);

            vmask = load8888 (ldl_u((uint32_t *)mask));
            vdest = load8888 (*(uint32_t *)dst);

            *(uint32_t *)dst = store8888 (_mm_adds_pu8 (in (vsrca, vmask), vdest));

            m = MUL_UN8 (sa, a, tmp);
            r = ADD_UN8 (m, d, tmp);
static void
mmx_composite_add_8_8 (pixman_implementation_t *imp,
                       pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint8_t *dst_line, *dst;
    uint8_t *src_line, *src;
    int dst_stride, src_stride;

    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);

        dst_line += dst_stride;
        src_line += src_stride;

        while (w && (unsigned long)dst & 7)
            s = t | (0 - (t >> 8));
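            /* Branchless clamp of the 9-bit sum t (computed just above,
             * elided here): if t overflowed 8 bits, t >> 8 is 1, so
             * 0 - (t >> 8) is all ones and the OR saturates s to 0xff;
             * otherwise it is 0 and s equals t. */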
        *(__m64*)dst = _mm_adds_pu8 (ldq_u((uint64_t *)src), *(__m64*)dst);

            s = t | (0 - (t >> 8));
static void
mmx_composite_add_8888_8888 (pixman_implementation_t *imp,
                             pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t *dst_line, *dst;
    uint32_t *src_line, *src;
    int dst_stride, src_stride;

    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);

        dst_line += dst_stride;
        src_line += src_stride;

        while (w && (unsigned long)dst & 7)
            *dst = _mm_cvtsi64_si32 (_mm_adds_pu8 (_mm_cvtsi32_si64 (*src),
                                                   _mm_cvtsi32_si64 (*dst)));

            dst64 = _mm_adds_pu8 (ldq_u((uint64_t *)src), *(__m64*)dst);
            *(uint64_t*)dst = to_uint64 (dst64);

            *dst = _mm_cvtsi64_si32 (_mm_adds_pu8 (_mm_cvtsi32_si64 (*src),
                                                   _mm_cvtsi32_si64 (*dst)));
static pixman_bool_t
pixman_blt_mmx (uint32_t *src_bits,
                uint32_t *dst_bits,
                int       src_stride,
                int       dst_stride,
                int       src_bpp,
                int       dst_bpp,
                int       src_x,
                int       src_y,
                int       dest_x,
                int       dest_y,
                int       width,
                int       height)
{
    uint8_t * src_bytes;
    uint8_t * dst_bytes;
    int byte_width;

    if (src_bpp != dst_bpp)
        return FALSE;

    if (src_bpp == 16)
    {
        src_stride = src_stride * (int) sizeof (uint32_t) / 2;
        dst_stride = dst_stride * (int) sizeof (uint32_t) / 2;
        src_bytes = (uint8_t *)(((uint16_t *)src_bits) + src_stride * (src_y) + (src_x));
        dst_bytes = (uint8_t *)(((uint16_t *)dst_bits) + dst_stride * (dest_y) + (dest_x));
        byte_width = 2 * width;
    }
    else if (src_bpp == 32)
    {
        src_stride = src_stride * (int) sizeof (uint32_t) / 4;
        dst_stride = dst_stride * (int) sizeof (uint32_t) / 4;
        src_bytes = (uint8_t *)(((uint32_t *)src_bits) + src_stride * (src_y) + (src_x));
        dst_bytes = (uint8_t *)(((uint32_t *)dst_bits) + dst_stride * (dest_y) + (dest_x));
        byte_width = 4 * width;
    }

        uint8_t *s = src_bytes;
        uint8_t *d = dst_bytes;
        src_bytes += src_stride;
        dst_bytes += dst_stride;

        while (w >= 1 && ((unsigned long)d & 1))
            *(uint8_t *)d = *(uint8_t *)s;

        while (w >= 2 && ((unsigned long)d & 3))
            *(uint16_t *)d = *(uint16_t *)s;

        while (w >= 4 && ((unsigned long)d & 7))
            *(uint32_t *)d = ldl_u((uint32_t *)s);
#if (defined (__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))) && defined USE_X86_MMX
            __asm__ (
                "movq   (%1),   %%mm0\n"
                "movq  8(%1),   %%mm1\n"
                "movq 16(%1),   %%mm2\n"
                "movq 24(%1),   %%mm3\n"
                "movq 32(%1),   %%mm4\n"
                "movq 40(%1),   %%mm5\n"
                "movq 48(%1),   %%mm6\n"
                "movq 56(%1),   %%mm7\n"

                "movq %%mm0,    (%0)\n"
                "movq %%mm1,   8(%0)\n"
                "movq %%mm2,  16(%0)\n"
                "movq %%mm3,  24(%0)\n"
                "movq %%mm4,  32(%0)\n"
                "movq %%mm5,  40(%0)\n"
                "movq %%mm6,  48(%0)\n"
                "movq %%mm7,  56(%0)\n"
                :
                : "r" (d), "r" (s)
                : "memory",
                  "%mm0", "%mm1", "%mm2", "%mm3",
                  "%mm4", "%mm5", "%mm6", "%mm7");
#else
            __m64 v0 = ldq_u((uint64_t *)(s + 0));
            __m64 v1 = ldq_u((uint64_t *)(s + 8));
            __m64 v2 = ldq_u((uint64_t *)(s + 16));
            __m64 v3 = ldq_u((uint64_t *)(s + 24));
            __m64 v4 = ldq_u((uint64_t *)(s + 32));
            __m64 v5 = ldq_u((uint64_t *)(s + 40));
            __m64 v6 = ldq_u((uint64_t *)(s + 48));
            __m64 v7 = ldq_u((uint64_t *)(s + 56));
            *(__m64 *)(d + 0)  = v0;
            *(__m64 *)(d + 8)  = v1;
            *(__m64 *)(d + 16) = v2;
            *(__m64 *)(d + 24) = v3;
            *(__m64 *)(d + 32) = v4;
            *(__m64 *)(d + 40) = v5;
            *(__m64 *)(d + 48) = v6;
            *(__m64 *)(d + 56) = v7;
#endif

        *(uint32_t *)d = ldl_u((uint32_t *)s);

        *(uint16_t *)d = *(uint16_t *)s;

    return TRUE;
}
static void
mmx_composite_copy_area (pixman_implementation_t *imp,
                         pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);

    pixman_blt_mmx (src_image->bits.bits,
                    dest_image->bits.bits,
                    src_image->bits.rowstride,
                    dest_image->bits.rowstride,
                    PIXMAN_FORMAT_BPP (src_image->bits.format),
                    PIXMAN_FORMAT_BPP (dest_image->bits.format),
                    src_x, src_y, dest_x, dest_y, width, height);
}
static void
mmx_composite_over_x888_8_8888 (pixman_implementation_t *imp,
                                pixman_composite_info_t *info)
{
    PIXMAN_COMPOSITE_ARGS (info);
    uint32_t *src, *src_line;
    uint32_t *dst, *dst_line;
    uint8_t *mask, *mask_line;
    int src_stride, mask_stride, dst_stride;

    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);

        src_line += src_stride;
        dst_line += dst_stride;
        mask_line += mask_stride;

            __m64 s = load8888 (*src | 0xff000000);

                *dst = store8888 (s);

                __m64 sa = expand_alpha (s);
                __m64 vm = expand_alpha_rev (to_m64 (m));
                __m64 vdest = in_over (s, sa, vm, load8888 (*dst));

                *dst = store8888 (vdest);
static const pixman_fast_path_t mmx_fast_paths[] =
{
    PIXMAN_STD_FAST_PATH    (OVER, solid,    a8,       r5g6b5,   mmx_composite_over_n_8_0565       ),
    PIXMAN_STD_FAST_PATH    (OVER, solid,    a8,       b5g6r5,   mmx_composite_over_n_8_0565       ),
    PIXMAN_STD_FAST_PATH    (OVER, solid,    a8,       a8r8g8b8, mmx_composite_over_n_8_8888       ),
    PIXMAN_STD_FAST_PATH    (OVER, solid,    a8,       x8r8g8b8, mmx_composite_over_n_8_8888       ),
    PIXMAN_STD_FAST_PATH    (OVER, solid,    a8,       a8b8g8r8, mmx_composite_over_n_8_8888       ),
    PIXMAN_STD_FAST_PATH    (OVER, solid,    a8,       x8b8g8r8, mmx_composite_over_n_8_8888       ),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid,    a8r8g8b8, a8r8g8b8, mmx_composite_over_n_8888_8888_ca ),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid,    a8r8g8b8, x8r8g8b8, mmx_composite_over_n_8888_8888_ca ),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid,    a8r8g8b8, r5g6b5,   mmx_composite_over_n_8888_0565_ca ),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid,    a8b8g8r8, a8b8g8r8, mmx_composite_over_n_8888_8888_ca ),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid,    a8b8g8r8, x8b8g8r8, mmx_composite_over_n_8888_8888_ca ),
    PIXMAN_STD_FAST_PATH_CA (OVER, solid,    a8b8g8r8, b5g6r5,   mmx_composite_over_n_8888_0565_ca ),
    PIXMAN_STD_FAST_PATH    (OVER, pixbuf,   pixbuf,   a8r8g8b8, mmx_composite_over_pixbuf_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, pixbuf,   pixbuf,   x8r8g8b8, mmx_composite_over_pixbuf_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, pixbuf,   pixbuf,   r5g6b5,   mmx_composite_over_pixbuf_0565    ),
    PIXMAN_STD_FAST_PATH    (OVER, rpixbuf,  rpixbuf,  a8b8g8r8, mmx_composite_over_pixbuf_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, rpixbuf,  rpixbuf,  x8b8g8r8, mmx_composite_over_pixbuf_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, rpixbuf,  rpixbuf,  b5g6r5,   mmx_composite_over_pixbuf_0565    ),
    PIXMAN_STD_FAST_PATH    (OVER, x8r8g8b8, solid,    a8r8g8b8, mmx_composite_over_x888_n_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, x8r8g8b8, solid,    x8r8g8b8, mmx_composite_over_x888_n_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, x8b8g8r8, solid,    a8b8g8r8, mmx_composite_over_x888_n_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, x8b8g8r8, solid,    x8b8g8r8, mmx_composite_over_x888_n_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, a8r8g8b8, solid,    a8r8g8b8, mmx_composite_over_8888_n_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, a8r8g8b8, solid,    x8r8g8b8, mmx_composite_over_8888_n_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, a8b8g8r8, solid,    a8b8g8r8, mmx_composite_over_8888_n_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, a8b8g8r8, solid,    x8b8g8r8, mmx_composite_over_8888_n_8888    ),

    /* FIXME: This code is commented out since it's apparently
     * not actually faster than the generic code.
     */
    PIXMAN_STD_FAST_PATH    (OVER, x8r8g8b8, a8,       x8r8g8b8, mmx_composite_over_x888_8_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, x8r8g8b8, a8,       a8r8g8b8, mmx_composite_over_x888_8_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, x8b8g8r8, a8,       x8b8g8r8, mmx_composite_over_x888_8_8888    ),
    PIXMAN_STD_FAST_PATH    (OVER, x8b8g8r8, a8,       a8b8g8r8, mmx_composite_over_x888_8_8888    ),

    PIXMAN_STD_FAST_PATH    (OVER, solid,    null,     a8r8g8b8, mmx_composite_over_n_8888         ),
    PIXMAN_STD_FAST_PATH    (OVER, solid,    null,     x8r8g8b8, mmx_composite_over_n_8888         ),
    PIXMAN_STD_FAST_PATH    (OVER, solid,    null,     r5g6b5,   mmx_composite_over_n_0565         ),
    PIXMAN_STD_FAST_PATH    (OVER, x8r8g8b8, null,     x8r8g8b8, mmx_composite_copy_area           ),
    PIXMAN_STD_FAST_PATH    (OVER, x8b8g8r8, null,     x8b8g8r8, mmx_composite_copy_area           ),

    PIXMAN_STD_FAST_PATH    (OVER, a8r8g8b8, null,     a8r8g8b8, mmx_composite_over_8888_8888      ),
    PIXMAN_STD_FAST_PATH    (OVER, a8r8g8b8, null,     x8r8g8b8, mmx_composite_over_8888_8888      ),
    PIXMAN_STD_FAST_PATH    (OVER, a8r8g8b8, null,     r5g6b5,   mmx_composite_over_8888_0565      ),
    PIXMAN_STD_FAST_PATH    (OVER, a8b8g8r8, null,     a8b8g8r8, mmx_composite_over_8888_8888      ),
    PIXMAN_STD_FAST_PATH    (OVER, a8b8g8r8, null,     x8b8g8r8, mmx_composite_over_8888_8888      ),
    PIXMAN_STD_FAST_PATH    (OVER, a8b8g8r8, null,     b5g6r5,   mmx_composite_over_8888_0565      ),

    PIXMAN_STD_FAST_PATH    (ADD,  a8r8g8b8, null,     a8r8g8b8, mmx_composite_add_8888_8888       ),
    PIXMAN_STD_FAST_PATH    (ADD,  a8b8g8r8, null,     a8b8g8r8, mmx_composite_add_8888_8888       ),
    PIXMAN_STD_FAST_PATH    (ADD,  a8,       null,     a8,       mmx_composite_add_8_8             ),
    PIXMAN_STD_FAST_PATH    (ADD,  solid,    a8,       a8,       mmx_composite_add_n_8_8           ),

    PIXMAN_STD_FAST_PATH    (SRC,  solid,    a8,       a8r8g8b8, mmx_composite_src_n_8_8888        ),
    PIXMAN_STD_FAST_PATH    (SRC,  solid,    a8,       x8r8g8b8, mmx_composite_src_n_8_8888        ),
    PIXMAN_STD_FAST_PATH    (SRC,  solid,    a8,       a8b8g8r8, mmx_composite_src_n_8_8888        ),
    PIXMAN_STD_FAST_PATH    (SRC,  solid,    a8,       x8b8g8r8, mmx_composite_src_n_8_8888        ),
    PIXMAN_STD_FAST_PATH    (SRC,  a8r8g8b8, null,     a8r8g8b8, mmx_composite_copy_area           ),
    PIXMAN_STD_FAST_PATH    (SRC,  a8b8g8r8, null,     a8b8g8r8, mmx_composite_copy_area           ),
    PIXMAN_STD_FAST_PATH    (SRC,  a8r8g8b8, null,     x8r8g8b8, mmx_composite_copy_area           ),
    PIXMAN_STD_FAST_PATH    (SRC,  a8b8g8r8, null,     x8b8g8r8, mmx_composite_copy_area           ),
    PIXMAN_STD_FAST_PATH    (SRC,  x8r8g8b8, null,     x8r8g8b8, mmx_composite_copy_area           ),
    PIXMAN_STD_FAST_PATH    (SRC,  x8b8g8r8, null,     x8b8g8r8, mmx_composite_copy_area           ),
    PIXMAN_STD_FAST_PATH    (SRC,  r5g6b5,   null,     r5g6b5,   mmx_composite_copy_area           ),
    PIXMAN_STD_FAST_PATH    (SRC,  b5g6r5,   null,     b5g6r5,   mmx_composite_copy_area           ),

    PIXMAN_STD_FAST_PATH    (IN,   a8,       null,     a8,       mmx_composite_in_8_8              ),
    PIXMAN_STD_FAST_PATH    (IN,   solid,    a8,       a8,       mmx_composite_in_n_8_8            ),

    { PIXMAN_OP_NONE },
};
static pixman_bool_t
mmx_blt (pixman_implementation_t *imp,
         uint32_t *               src_bits,
         uint32_t *               dst_bits,
         int                      src_stride,
         int                      dst_stride,
         int                      src_bpp,
         int                      dst_bpp,
         int                      src_x,
         int                      src_y,
         int                      dest_x,
         int                      dest_y,
         int                      width,
         int                      height)
{
    if (!pixman_blt_mmx (
            src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
            src_x, src_y, dest_x, dest_y, width, height))
    {
        return _pixman_implementation_blt (
            imp->delegate,
            src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
            src_x, src_y, dest_x, dest_y, width, height);
    }

    return TRUE;
}
static pixman_bool_t
mmx_fill (pixman_implementation_t *imp,
          uint32_t *               bits,
          int                      stride,
          int                      bpp,
          int                      x,
          int                      y,
          int                      width,
          int                      height,
          uint32_t                 xor)
{
    if (!pixman_fill_mmx (bits, stride, bpp, x, y, width, height, xor))
    {
        return _pixman_implementation_fill (
            imp->delegate, bits, stride, bpp, x, y, width, height, xor);
    }

    return TRUE;
}
pixman_implementation_t *
_pixman_implementation_create_mmx (pixman_implementation_t *fallback)
{
    pixman_implementation_t *imp = _pixman_implementation_create (fallback, mmx_fast_paths);

    imp->combine_32[PIXMAN_OP_OVER] = mmx_combine_over_u;
    imp->combine_32[PIXMAN_OP_OVER_REVERSE] = mmx_combine_over_reverse_u;
    imp->combine_32[PIXMAN_OP_IN] = mmx_combine_in_u;
    imp->combine_32[PIXMAN_OP_IN_REVERSE] = mmx_combine_in_reverse_u;
    imp->combine_32[PIXMAN_OP_OUT] = mmx_combine_out_u;
    imp->combine_32[PIXMAN_OP_OUT_REVERSE] = mmx_combine_out_reverse_u;
    imp->combine_32[PIXMAN_OP_ATOP] = mmx_combine_atop_u;
    imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = mmx_combine_atop_reverse_u;
    imp->combine_32[PIXMAN_OP_XOR] = mmx_combine_xor_u;
    imp->combine_32[PIXMAN_OP_ADD] = mmx_combine_add_u;
    imp->combine_32[PIXMAN_OP_SATURATE] = mmx_combine_saturate_u;

    imp->combine_32_ca[PIXMAN_OP_SRC] = mmx_combine_src_ca;
    imp->combine_32_ca[PIXMAN_OP_OVER] = mmx_combine_over_ca;
    imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = mmx_combine_over_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_IN] = mmx_combine_in_ca;
    imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = mmx_combine_in_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_OUT] = mmx_combine_out_ca;
    imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = mmx_combine_out_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_ATOP] = mmx_combine_atop_ca;
    imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = mmx_combine_atop_reverse_ca;
    imp->combine_32_ca[PIXMAN_OP_XOR] = mmx_combine_xor_ca;
    imp->combine_32_ca[PIXMAN_OP_ADD] = mmx_combine_add_ca;

    imp->blt = mmx_blt;
    imp->fill = mmx_fill;

    return imp;
}

#endif /* USE_X86_MMX || USE_ARM_IWMMXT */