2 * Copyright © 2004, 2005 Red Hat, Inc.
3 * Copyright © 2004 Nicholas Miell
4 * Copyright © 2005 Trolltech AS
6 * Permission to use, copy, modify, distribute, and sell this software and its
7 * documentation for any purpose is hereby granted without fee, provided that
8 * the above copyright notice appear in all copies and that both that
9 * copyright notice and this permission notice appear in supporting
10 * documentation, and that the name of Red Hat not be used in advertising or
11 * publicity pertaining to distribution of the software without specific,
12 * written prior permission. Red Hat makes no representations about the
13 * suitability of this software for any purpose. It is provided "as is"
14 * without express or implied warranty.
16 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
17 * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
18 * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
19 * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
20 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
21 * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
22 * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
25 * Author: Søren Sandmann (sandmann@redhat.com)
26 * Minor Improvements: Nicholas Miell (nmiell@gmail.com)
27 * MMX code paths for fbcompose.c by Lars Knoll (lars@trolltech.com)
29 * Based on work by Owen Taylor
36 #if defined USE_X86_MMX || defined USE_ARM_IWMMXT
39 #include "pixman-private.h"
40 #include "pixman-combine32.h"
45 #define CHECKPOINT() error_f ("at %s %d\n", __FUNCTION__, __LINE__)
51 /* Empty the multimedia state. For some reason, ARM's mmintrin.h doesn't provide this. */
52 extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
60 # if (defined(__SUNPRO_C) || defined(_MSC_VER))
61 # include <xmmintrin.h>
63 /* We have to compile with -msse to use xmmintrin.h, but that causes SSE
64 * instructions to be generated that we don't want. Just duplicate the
65 * functions we want to use. */
66 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
67 _mm_mulhi_pu16 (__m64 __A, __m64 __B)
69 asm ("pmulhuw %1, %0\n\t"
77 extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
78 _mm_shuffle_pi16 (__m64 __A, int8_t const __N)
82 asm ("pshufw %2, %1, %0\n\t"
84 : "y" (__A), "K" (__N)
90 # define _mm_shuffle_pi16(A, N) \
91 ((__m64) __builtin_ia32_pshufw ((__v4hi)(__m64)(A), (int)(N)))
97 #define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
98 (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))
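/* For example, _MM_SHUFFLE (3, 3, 3, 3) evaluates to 0xff, so
 *
 *     _mm_shuffle_pi16 (pix, _MM_SHUFFLE (3, 3, 3, 3));
 *
 * copies the top 16-bit lane into all four lanes.  For a pixel expanded to
 * 00aa00rr00gg00bb this replicates the alpha channel, which is what
 * expand_alpha () below relies on.
 */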
101 /* Notes about writing mmx code
103 * give memory operands as the second operand. If you give one as the
104 * first operand, gcc will first load it into a register and use that register.
109 *         _mm_mullo_pi16 (x, mmx_constant);
113 *         _mm_mullo_pi16 (mmx_constant, x);
115 * Also try to minimize dependencies, i.e. when you need a value, try
116 * to calculate it from a value that was calculated as early as possible.
120 /* --------------- MMX primitives ------------------------------------- */
122 /* If __m64 is defined as a struct or union, then define M64_MEMBER to be
123 * the name of the member used to access the data.
124 * If __m64 requires using the mm_cvt* intrinsic functions to convert between
125 * uint64_t and __m64 values, then define USE_CVT_INTRINSICS.
126 * If __m64 and uint64_t values can just be cast to each other directly,
127 * then define USE_M64_CASTS.
130 # define M64_MEMBER m64_u64
132 # define USE_CVT_INTRINSICS
133 #elif defined(__GNUC__)
134 # define USE_M64_CASTS
135 #elif defined(__SUNPRO_C)
136 # if (__SUNPRO_C >= 0x5120) && !defined(__NOVECTORSIZE__)
137 /* Solaris Studio 12.3 (Sun C 5.12) introduces __attribute__(__vector_size__)
138 * support, and defaults to using it to define __m64, unless __NOVECTORSIZE__
139 * is defined. When __m64 is a vector type, the mm_cvt* intrinsics are required.
141 # define USE_CVT_INTRINSICS
143 /* For Studio 12.2 or older, or when __attribute__(__vector_size__) is
144 * disabled, __m64 is defined as a struct containing "unsigned long long l_".
146 # define M64_MEMBER l_
150 #if defined(USE_M64_CASTS) || defined(USE_CVT_INTRINSICS)
151 typedef uint64_t mmxdatafield;
153 typedef __m64 mmxdatafield;
158 mmxdatafield mmx_4x00ff;
159 mmxdatafield mmx_4x0080;
160 mmxdatafield mmx_565_rgb;
161 mmxdatafield mmx_565_unpack_multiplier;
162 mmxdatafield mmx_565_r;
163 mmxdatafield mmx_565_g;
164 mmxdatafield mmx_565_b;
165 mmxdatafield mmx_mask_0;
166 mmxdatafield mmx_mask_1;
167 mmxdatafield mmx_mask_2;
168 mmxdatafield mmx_mask_3;
169 mmxdatafield mmx_full_alpha;
170 mmxdatafield mmx_4x0101;
173 #if defined(_MSC_VER)
174 # define MMXDATA_INIT(field, val) { val ## UI64 }
175 #elif defined(M64_MEMBER) /* __m64 is a struct, not an integral type */
176 # define MMXDATA_INIT(field, val) field = { val ## ULL }
177 #else /* mmxdatafield is an integral type */
178 # define MMXDATA_INIT(field, val) field = val ## ULL
181 static const mmx_data_t c =
183 MMXDATA_INIT (.mmx_4x00ff, 0x00ff00ff00ff00ff),
184 MMXDATA_INIT (.mmx_4x0080, 0x0080008000800080),
185 MMXDATA_INIT (.mmx_565_rgb, 0x000001f0003f001f),
186 MMXDATA_INIT (.mmx_565_unpack_multiplier, 0x0000008404100840),
187 MMXDATA_INIT (.mmx_565_r, 0x000000f800000000),
188 MMXDATA_INIT (.mmx_565_g, 0x0000000000fc0000),
189 MMXDATA_INIT (.mmx_565_b, 0x00000000000000f8),
190 MMXDATA_INIT (.mmx_mask_0, 0xffffffffffff0000),
191 MMXDATA_INIT (.mmx_mask_1, 0xffffffff0000ffff),
192 MMXDATA_INIT (.mmx_mask_2, 0xffff0000ffffffff),
193 MMXDATA_INIT (.mmx_mask_3, 0x0000ffffffffffff),
194 MMXDATA_INIT (.mmx_full_alpha, 0x00ff000000000000),
195 MMXDATA_INIT (.mmx_4x0101, 0x0101010101010101),
198 #ifdef USE_CVT_INTRINSICS
199 # define MC(x) to_m64 (c.mmx_ ## x)
200 #elif defined(USE_M64_CASTS)
201 # define MC(x) ((__m64)c.mmx_ ## x)
203 # define MC(x) c.mmx_ ## x
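/* For example, MC (4x00ff) yields the 0x00ff00ff00ff00ff constant above as
 * an __m64, via whichever of the three access methods was selected. */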
206 static force_inline __m64
209 #ifdef USE_CVT_INTRINSICS
210 return _mm_cvtsi64_m64 (x);
211 #elif defined M64_MEMBER /* __m64 is a struct, not an integral type */
216 #else /* USE_M64_CASTS */
221 static force_inline uint64_t
224 #ifdef USE_CVT_INTRINSICS
225 return _mm_cvtm64_si64 (x);
226 #elif defined M64_MEMBER /* __m64 is a struct, not an integral type */
227 uint64_t res = x.M64_MEMBER;
229 #else /* USE_M64_CASTS */
234 static force_inline __m64
239 return _mm_slli_si64 (v, s);
241 return _mm_srli_si64 (v, -s);
246 static force_inline __m64
249 return _mm_xor_si64 (mask, MC (4x00ff));
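/* pix_multiply () multiplies two sets of four 8-bit channels and divides by
 * 255 with correct rounding, using the usual trick:
 *
 *     ((x * a + 0x80) * 0x0101) >> 16
 *
 * _mm_mullo_pi16 forms x * a (which fits in 16 bits), _mm_adds_pu16 adds the
 * 0x80 rounding bias, and _mm_mulhi_pu16 by 0x0101 takes the top 16 bits,
 * i.e. the >> 16, for all four lanes at once.
 */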
252 static force_inline __m64
253 pix_multiply (__m64 a, __m64 b)
257 res = _mm_mullo_pi16 (a, b);
258 res = _mm_adds_pu16 (res, MC (4x0080));
259 res = _mm_mulhi_pu16 (res, MC (4x0101));
264 static force_inline __m64
265 pix_add (__m64 a, __m64 b)
267 return _mm_adds_pu8 (a, b);
270 static force_inline __m64
271 expand_alpha (__m64 pixel)
273 return _mm_shuffle_pi16 (pixel, _MM_SHUFFLE (3, 3, 3, 3));
276 static force_inline __m64
277 expand_alpha_rev (__m64 pixel)
279 return _mm_shuffle_pi16 (pixel, _MM_SHUFFLE (0, 0, 0, 0));
282 static force_inline __m64
283 invert_colors (__m64 pixel)
285 return _mm_shuffle_pi16 (pixel, _MM_SHUFFLE (3, 0, 1, 2));
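/* over () is the premultiplied Porter-Duff OVER operator:
 *
 *     dest' = src + dest * (255 - srca) / 255        (per channel)
 *
 * where srca is the source alpha replicated into every lane (expand_alpha),
 * and the saturating add clamps at 255 instead of wrapping.
 */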
288 static force_inline __m64
293 return _mm_adds_pu8 (src, pix_multiply (dest, negate (srca)));
296 static force_inline __m64
297 over_rev_non_pre (__m64 src, __m64 dest)
299 __m64 srca = expand_alpha (src);
300 __m64 srcfaaa = _mm_or_si64 (srca, MC (full_alpha));
302 return over (pix_multiply (invert_colors (src), srcfaaa), srca, dest);
305 static force_inline __m64
306 in (__m64 src, __m64 mask)
308 return pix_multiply (src, mask);
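/* in_over (src, srca, mask, dest) computes (src IN mask) OVER dest: the
 * source is multiplied by the mask per channel, the replicated source alpha
 * is multiplied by the mask to form the effective alpha, and the result is
 * composited over dest.  Most of the masked fast paths below are built on
 * this helper. */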
312 static force_inline __m64
313 in_over (__m64 src, __m64 srca, __m64 mask, __m64 dest)
315 return over (in (src, mask), pix_multiply (srca, mask), dest);
320 #define in_over(src, srca, mask, dest) \
321 over (in (src, mask), pix_multiply (srca, mask), dest)
325 /* Elemental unaligned loads */
327 static force_inline __m64 ldq_u(uint64_t *p)
330 /* x86's alignment restrictions are very relaxed. */
332 #elif defined USE_ARM_IWMMXT
333 int align = (uintptr_t)p & 7;
337 aligned_p = (__m64 *)((uintptr_t)p & ~7);
338 return (__m64) _mm_align_si64 (aligned_p[0], aligned_p[1], align);
340 struct __una_u64 { uint64_t x __attribute__((packed)); };
341 const struct __una_u64 *ptr = (const struct __una_u64 *) p;
342 return (__m64) ptr->x;
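/* The __attribute__((packed)) wrapper used above tells the compiler that the
 * field may sit at any address, so it emits whatever unaligned-safe load
 * sequence the target needs instead of assuming 8-byte alignment. */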
346 static force_inline uint32_t ldl_u(const uint32_t *p)
349 /* x86's alignment restrictions are very relaxed. */
352 struct __una_u32 { uint32_t x __attribute__((packed)); };
353 const struct __una_u32 *ptr = (const struct __una_u32 *) p;
358 static force_inline __m64
359 load8888 (const uint32_t *v)
361 return _mm_unpacklo_pi8 (_mm_cvtsi32_si64 (*v), _mm_setzero_si64 ());
364 static force_inline __m64
365 load8888u (const uint32_t *v)
367 uint32_t l = ldl_u(v);
371 static force_inline __m64
372 pack8888 (__m64 lo, __m64 hi)
374 return _mm_packs_pu16 (lo, hi);
377 static force_inline void
378 store (uint32_t *dest, __m64 v)
380 *dest = _mm_cvtsi64_si32 (v);
383 static force_inline void
384 store8888 (uint32_t *dest, __m64 v)
386 v = pack8888 (v, _mm_setzero_si64());
390 /* Expand 16 bits positioned at @pos (0-3) of an mmx register into
394 * --- Expanding 565 in the low word ---
396 * m = (m << (32 - 3)) | (m << (16 - 5)) | m;
397 * m = m & (01f0003f001f);
398 * m = m * (008404100840);
401 * Note the trick here - the top word is shifted by another nibble to
402 * avoid it bumping into the middle word
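 *
 * For example, expanding 0xf800 (pure red in r5g6b5): the red field lands in
 * the third 16-bit lane, the multiply spreads its 5 bits across the top of
 * that lane (r * 0x840 == (r << 11) | (r << 6)), and the final >> 8 leaves
 * 0x000000ff00000000, i.e. 00aa00rr00gg00bb with rr == 0xff and aa == 0.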
404 static force_inline __m64
405 expand565 (__m64 pixel, int pos)
410 /* move the pixel to the low 16 bits and zero the rest */
411 p = shift (shift (p, (3 - pos) * 16), -48);
413 t1 = shift (p, 36 - 11);
414 t2 = shift (p, 16 - 5);
416 p = _mm_or_si64 (t1, p);
417 p = _mm_or_si64 (t2, p);
418 p = _mm_and_si64 (p, MC (565_rgb));
420 pixel = _mm_mullo_pi16 (p, MC (565_unpack_multiplier));
421 return _mm_srli_pi16 (pixel, 8);
424 static force_inline __m64
425 expand8888 (__m64 in, int pos)
428 return _mm_unpacklo_pi8 (in, _mm_setzero_si64 ());
430 return _mm_unpackhi_pi8 (in, _mm_setzero_si64 ());
433 static force_inline __m64
434 expandx888 (__m64 in, int pos)
436 return _mm_or_si64 (expand8888 (in, pos), MC (full_alpha));
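/* pack_565 () converts one expanded 00aa00rr00gg00bb pixel back to r5g6b5 and
 * deposits it into 16-bit slot pos of target, using mmx_mask_0..3 to keep the
 * other three slots intact.  The shifts place the top 5/6/5 bits of each
 * channel at bit offsets 11/5/0 within the selected slot. */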
439 static force_inline __m64
440 pack_565 (__m64 pixel, __m64 target, int pos)
446 r = _mm_and_si64 (p, MC (565_r));
447 g = _mm_and_si64 (p, MC (565_g));
448 b = _mm_and_si64 (p, MC (565_b));
450 r = shift (r, -(32 - 8) + pos * 16);
451 g = shift (g, -(16 - 3) + pos * 16);
452 b = shift (b, -(0 + 3) + pos * 16);
455 t = _mm_and_si64 (t, MC (mask_0));
457 t = _mm_and_si64 (t, MC (mask_1));
459 t = _mm_and_si64 (t, MC (mask_2));
461 t = _mm_and_si64 (t, MC (mask_3));
463 p = _mm_or_si64 (r, t);
464 p = _mm_or_si64 (g, p);
466 return _mm_or_si64 (b, p);
471 static force_inline __m64
472 pix_add_mul (__m64 x, __m64 a, __m64 y, __m64 b)
474 x = pix_multiply (x, a);
475 y = pix_multiply (y, b);
477 return pix_add (x, y);
482 #define pix_add_mul(x, a, y, b) \
483 ( x = pix_multiply (x, a), \
484 y = pix_multiply (y, b), \
489 /* --------------- MMX code paths for fbcompose.c --------------------- */
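/* combine () handles the optional mask for the mmx_combine_*_u ("unified")
 * combiners below: when a mask is present, the source pixel is multiplied by
 * the mask's alpha before the operator itself is applied. */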
491 static force_inline uint32_t
492 combine (const uint32_t *src, const uint32_t *mask)
494 uint32_t ssrc = *src;
498 __m64 m = load8888 (mask);
499 __m64 s = load8888 (&ssrc);
501 m = expand_alpha (m);
502 s = pix_multiply (s, m);
504 store8888 (&ssrc, s);
511 mmx_combine_over_u (pixman_implementation_t *imp,
514 const uint32_t * src,
515 const uint32_t * mask,
518 const uint32_t *end = dest + width;
522 uint32_t ssrc = combine (src, mask);
523 uint32_t a = ssrc >> 24;
532 s = load8888 (&ssrc);
533 sa = expand_alpha (s);
534 store8888 (dest, over (s, sa, load8888 (dest)));
546 mmx_combine_over_reverse_u (pixman_implementation_t *imp,
549 const uint32_t * src,
550 const uint32_t * mask,
553 const uint32_t *end = dest + width;
558 uint32_t s = combine (src, mask);
561 da = expand_alpha (d);
562 store8888 (dest, over (d, da, load8888 (&s)));
573 mmx_combine_in_u (pixman_implementation_t *imp,
576 const uint32_t * src,
577 const uint32_t * mask,
580 const uint32_t *end = dest + width;
585 uint32_t ssrc = combine (src, mask);
587 x = load8888 (&ssrc);
589 a = expand_alpha (a);
590 x = pix_multiply (x, a);
603 mmx_combine_in_reverse_u (pixman_implementation_t *imp,
606 const uint32_t * src,
607 const uint32_t * mask,
610 const uint32_t *end = dest + width;
615 uint32_t ssrc = combine (src, mask);
618 a = load8888 (&ssrc);
619 a = expand_alpha (a);
620 x = pix_multiply (x, a);
632 mmx_combine_out_u (pixman_implementation_t *imp,
635 const uint32_t * src,
636 const uint32_t * mask,
639 const uint32_t *end = dest + width;
644 uint32_t ssrc = combine (src, mask);
646 x = load8888 (&ssrc);
648 a = expand_alpha (a);
650 x = pix_multiply (x, a);
662 mmx_combine_out_reverse_u (pixman_implementation_t *imp,
665 const uint32_t * src,
666 const uint32_t * mask,
669 const uint32_t *end = dest + width;
674 uint32_t ssrc = combine (src, mask);
677 a = load8888 (&ssrc);
678 a = expand_alpha (a);
680 x = pix_multiply (x, a);
693 mmx_combine_atop_u (pixman_implementation_t *imp,
696 const uint32_t * src,
697 const uint32_t * mask,
700 const uint32_t *end = dest + width;
705 uint32_t ssrc = combine (src, mask);
707 s = load8888 (&ssrc);
709 sia = expand_alpha (s);
711 da = expand_alpha (d);
712 s = pix_add_mul (s, da, d, sia);
724 mmx_combine_atop_reverse_u (pixman_implementation_t *imp,
727 const uint32_t * src,
728 const uint32_t * mask,
738 uint32_t ssrc = combine (src, mask);
740 s = load8888 (&ssrc);
742 sa = expand_alpha (s);
743 dia = expand_alpha (d);
745 s = pix_add_mul (s, dia, d, sa);
757 mmx_combine_xor_u (pixman_implementation_t *imp,
760 const uint32_t * src,
761 const uint32_t * mask,
764 const uint32_t *end = dest + width;
768 __m64 s, dia, d, sia;
769 uint32_t ssrc = combine (src, mask);
771 s = load8888 (&ssrc);
773 sia = expand_alpha (s);
774 dia = expand_alpha (d);
777 s = pix_add_mul (s, dia, d, sia);
789 mmx_combine_add_u (pixman_implementation_t *imp,
792 const uint32_t * src,
793 const uint32_t * mask,
796 const uint32_t *end = dest + width;
801 uint32_t ssrc = combine (src, mask);
803 s = load8888 (&ssrc);
817 mmx_combine_saturate_u (pixman_implementation_t *imp,
820 const uint32_t * src,
821 const uint32_t * mask,
824 const uint32_t *end = dest + width;
828 uint32_t s = combine (src, mask);
830 __m64 ms = load8888 (&s);
831 __m64 md = load8888 (&d);
832 uint32_t sa = s >> 24;
833 uint32_t da = ~d >> 24;
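/* da is the headroom left in the destination (255 minus its alpha).  When the
 * source alpha exceeds it, the source is scaled by da/sa below so that its
 * contribution roughly fits the remaining range; pix_add saturates anyway. */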
837 uint32_t quot = DIV_UN8 (da, sa) << 24;
838 __m64 msa = load8888 (&quot);
839 msa = expand_alpha (msa);
840 ms = pix_multiply (ms, msa);
843 md = pix_add (md, ms);
844 store8888 (dest, md);
855 mmx_combine_src_ca (pixman_implementation_t *imp,
858 const uint32_t * src,
859 const uint32_t * mask,
862 const uint32_t *end = src + width;
866 __m64 a = load8888 (mask);
867 __m64 s = load8888 (src);
869 s = pix_multiply (s, a);
880 mmx_combine_over_ca (pixman_implementation_t *imp,
883 const uint32_t * src,
884 const uint32_t * mask,
887 const uint32_t *end = src + width;
891 __m64 a = load8888 (mask);
892 __m64 s = load8888 (src);
893 __m64 d = load8888 (dest);
894 __m64 sa = expand_alpha (s);
896 store8888 (dest, in_over (s, sa, a, d));
906 mmx_combine_over_reverse_ca (pixman_implementation_t *imp,
909 const uint32_t * src,
910 const uint32_t * mask,
913 const uint32_t *end = src + width;
917 __m64 a = load8888 (mask);
918 __m64 s = load8888 (src);
919 __m64 d = load8888 (dest);
920 __m64 da = expand_alpha (d);
922 store8888 (dest, over (d, da, in (s, a)));
932 mmx_combine_in_ca (pixman_implementation_t *imp,
935 const uint32_t * src,
936 const uint32_t * mask,
939 const uint32_t *end = src + width;
943 __m64 a = load8888 (mask);
944 __m64 s = load8888 (src);
945 __m64 d = load8888 (dest);
946 __m64 da = expand_alpha (d);
948 s = pix_multiply (s, a);
949 s = pix_multiply (s, da);
960 mmx_combine_in_reverse_ca (pixman_implementation_t *imp,
963 const uint32_t * src,
964 const uint32_t * mask,
967 const uint32_t *end = src + width;
971 __m64 a = load8888 (mask);
972 __m64 s = load8888 (src);
973 __m64 d = load8888 (dest);
974 __m64 sa = expand_alpha (s);
976 a = pix_multiply (a, sa);
977 d = pix_multiply (d, a);
988 mmx_combine_out_ca (pixman_implementation_t *imp,
991 const uint32_t * src,
992 const uint32_t * mask,
995 const uint32_t *end = src + width;
999 __m64 a = load8888 (mask);
1000 __m64 s = load8888 (src);
1001 __m64 d = load8888 (dest);
1002 __m64 da = expand_alpha (d);
1005 s = pix_multiply (s, a);
1006 s = pix_multiply (s, da);
1007 store8888 (dest, s);
1017 mmx_combine_out_reverse_ca (pixman_implementation_t *imp,
1020 const uint32_t * src,
1021 const uint32_t * mask,
1024 const uint32_t *end = src + width;
1028 __m64 a = load8888 (mask);
1029 __m64 s = load8888 (src);
1030 __m64 d = load8888 (dest);
1031 __m64 sa = expand_alpha (s);
1033 a = pix_multiply (a, sa);
1035 d = pix_multiply (d, a);
1036 store8888 (dest, d);
1046 mmx_combine_atop_ca (pixman_implementation_t *imp,
1049 const uint32_t * src,
1050 const uint32_t * mask,
1053 const uint32_t *end = src + width;
1057 __m64 a = load8888 (mask);
1058 __m64 s = load8888 (src);
1059 __m64 d = load8888 (dest);
1060 __m64 da = expand_alpha (d);
1061 __m64 sa = expand_alpha (s);
1063 s = pix_multiply (s, a);
1064 a = pix_multiply (a, sa);
1066 d = pix_add_mul (d, a, s, da);
1067 store8888 (dest, d);
1077 mmx_combine_atop_reverse_ca (pixman_implementation_t *imp,
1080 const uint32_t * src,
1081 const uint32_t * mask,
1084 const uint32_t *end = src + width;
1088 __m64 a = load8888 (mask);
1089 __m64 s = load8888 (src);
1090 __m64 d = load8888 (dest);
1091 __m64 da = expand_alpha (d);
1092 __m64 sa = expand_alpha (s);
1094 s = pix_multiply (s, a);
1095 a = pix_multiply (a, sa);
1097 d = pix_add_mul (d, a, s, da);
1098 store8888 (dest, d);
1108 mmx_combine_xor_ca (pixman_implementation_t *imp,
1111 const uint32_t * src,
1112 const uint32_t * mask,
1115 const uint32_t *end = src + width;
1119 __m64 a = load8888 (mask);
1120 __m64 s = load8888 (src);
1121 __m64 d = load8888 (dest);
1122 __m64 da = expand_alpha (d);
1123 __m64 sa = expand_alpha (s);
1125 s = pix_multiply (s, a);
1126 a = pix_multiply (a, sa);
1129 d = pix_add_mul (d, a, s, da);
1130 store8888 (dest, d);
1140 mmx_combine_add_ca (pixman_implementation_t *imp,
1143 const uint32_t * src,
1144 const uint32_t * mask,
1147 const uint32_t *end = src + width;
1151 __m64 a = load8888 (mask);
1152 __m64 s = load8888 (src);
1153 __m64 d = load8888 (dest);
1155 s = pix_multiply (s, a);
1157 store8888 (dest, d);
1166 /* ------------- MMX code paths called from fbpict.c -------------------- */
1169 mmx_composite_over_n_8888 (pixman_implementation_t *imp,
1170 pixman_composite_info_t *info)
1172 PIXMAN_COMPOSITE_ARGS (info);
1174 uint32_t *dst_line, *dst;
1181 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
1186 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
1188 vsrc = load8888 (&src);
1189 vsrca = expand_alpha (vsrc);
1194 dst_line += dst_stride;
1199 while (w && (unsigned long)dst & 7)
1201 store8888 (dst, over (vsrc, vsrca, load8888 (dst)));
1212 vdest = *(__m64 *)dst;
1214 dest0 = over (vsrc, vsrca, expand8888 (vdest, 0));
1215 dest1 = over (vsrc, vsrca, expand8888 (vdest, 1));
1217 *(__m64 *)dst = pack8888 (dest0, dest1);
1227 store8888 (dst, over (vsrc, vsrca, load8888 (dst)));
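/* Most of the composite fast paths below share the shape of the loop above:
 * handle leading pixels one at a time until dst is 8-byte aligned, process
 * two (or more) pixels per __m64 in the main loop, then finish the trailing
 * pixels with scalar-width loads and stores. */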
1235 mmx_composite_over_n_0565 (pixman_implementation_t *imp,
1236 pixman_composite_info_t *info)
1238 PIXMAN_COMPOSITE_ARGS (info);
1240 uint16_t *dst_line, *dst;
1247 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
1252 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
1254 vsrc = load8888 (&src);
1255 vsrca = expand_alpha (vsrc);
1260 dst_line += dst_stride;
1265 while (w && (unsigned long)dst & 7)
1268 __m64 vdest = expand565 (to_m64 (d), 0);
1270 vdest = pack_565 (over (vsrc, vsrca, vdest), vdest, 0);
1271 *dst = to_uint64 (vdest);
1281 vdest = *(__m64 *)dst;
1283 vdest = pack_565 (over (vsrc, vsrca, expand565 (vdest, 0)), vdest, 0);
1284 vdest = pack_565 (over (vsrc, vsrca, expand565 (vdest, 1)), vdest, 1);
1285 vdest = pack_565 (over (vsrc, vsrca, expand565 (vdest, 2)), vdest, 2);
1286 vdest = pack_565 (over (vsrc, vsrca, expand565 (vdest, 3)), vdest, 3);
1288 *(__m64 *)dst = vdest;
1299 __m64 vdest = expand565 (to_m64 (d), 0);
1301 vdest = pack_565 (over (vsrc, vsrca, vdest), vdest, 0);
1302 *dst = to_uint64 (vdest);
1313 mmx_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
1314 pixman_composite_info_t *info)
1316 PIXMAN_COMPOSITE_ARGS (info);
1319 uint32_t *mask_line;
1320 int dst_stride, mask_stride;
1325 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
1330 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
1331 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
1333 vsrc = load8888 (&src);
1334 vsrca = expand_alpha (vsrc);
1339 uint32_t *p = (uint32_t *)mask_line;
1340 uint32_t *q = (uint32_t *)dst_line;
1342 while (twidth && (unsigned long)q & 7)
1344 uint32_t m = *(uint32_t *)p;
1348 __m64 vdest = load8888 (q);
1349 vdest = in_over (vsrc, vsrca, load8888 (&m), vdest);
1350 store8888 (q, vdest);
1367 __m64 vdest = *(__m64 *)q;
1369 dest0 = in_over (vsrc, vsrca, load8888 (&m0),
1370 expand8888 (vdest, 0));
1371 dest1 = in_over (vsrc, vsrca, load8888 (&m1),
1372 expand8888 (vdest, 1));
1374 *(__m64 *)q = pack8888 (dest0, dest1);
1384 uint32_t m = *(uint32_t *)p;
1388 __m64 vdest = load8888 (q);
1389 vdest = in_over (vsrc, vsrca, load8888 (&m), vdest);
1390 store8888 (q, vdest);
1398 dst_line += dst_stride;
1399 mask_line += mask_stride;
1406 mmx_composite_over_8888_n_8888 (pixman_implementation_t *imp,
1407 pixman_composite_info_t *info)
1409 PIXMAN_COMPOSITE_ARGS (info);
1410 uint32_t *dst_line, *dst;
1411 uint32_t *src_line, *src;
1414 int dst_stride, src_stride;
1419 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
1420 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
1422 mask = _pixman_image_get_solid (imp, mask_image, dest_image->bits.format);
1424 mask = mask | mask >> 8 | mask >> 16 | mask >> 24;
1425 vmask = load8888 (&mask);
1430 dst_line += dst_stride;
1432 src_line += src_stride;
1435 while (w && (unsigned long)dst & 7)
1437 __m64 s = load8888 (src);
1438 __m64 d = load8888 (dst);
1440 store8888 (dst, in_over (s, expand_alpha (s), vmask, d));
1449 __m64 vs = ldq_u((uint64_t *)src);
1450 __m64 vd = *(__m64 *)dst;
1451 __m64 vsrc0 = expand8888 (vs, 0);
1452 __m64 vsrc1 = expand8888 (vs, 1);
1454 *(__m64 *)dst = pack8888 (
1455 in_over (vsrc0, expand_alpha (vsrc0), vmask, expand8888 (vd, 0)),
1456 in_over (vsrc1, expand_alpha (vsrc1), vmask, expand8888 (vd, 1)));
1465 __m64 s = load8888 (src);
1466 __m64 d = load8888 (dst);
1468 store8888 (dst, in_over (s, expand_alpha (s), vmask, d));
1476 mmx_composite_over_x888_n_8888 (pixman_implementation_t *imp,
1477 pixman_composite_info_t *info)
1479 PIXMAN_COMPOSITE_ARGS (info);
1480 uint32_t *dst_line, *dst;
1481 uint32_t *src_line, *src;
1484 int dst_stride, src_stride;
1490 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
1491 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
1492 mask = _pixman_image_get_solid (imp, mask_image, dest_image->bits.format);
1495 mask = mask | mask >> 8 | mask >> 16 | mask >> 24;
1496 vmask = load8888 (&mask);
1502 dst_line += dst_stride;
1504 src_line += src_stride;
1507 while (w && (unsigned long)dst & 7)
1509 uint32_t ssrc = *src | 0xff000000;
1510 __m64 s = load8888 (&ssrc);
1511 __m64 d = load8888 (dst);
1513 store8888 (dst, in_over (s, srca, vmask, d));
1522 __m64 vd0 = *(__m64 *)(dst + 0);
1523 __m64 vd1 = *(__m64 *)(dst + 2);
1524 __m64 vd2 = *(__m64 *)(dst + 4);
1525 __m64 vd3 = *(__m64 *)(dst + 6);
1526 __m64 vd4 = *(__m64 *)(dst + 8);
1527 __m64 vd5 = *(__m64 *)(dst + 10);
1528 __m64 vd6 = *(__m64 *)(dst + 12);
1529 __m64 vd7 = *(__m64 *)(dst + 14);
1531 __m64 vs0 = ldq_u((uint64_t *)(src + 0));
1532 __m64 vs1 = ldq_u((uint64_t *)(src + 2));
1533 __m64 vs2 = ldq_u((uint64_t *)(src + 4));
1534 __m64 vs3 = ldq_u((uint64_t *)(src + 6));
1535 __m64 vs4 = ldq_u((uint64_t *)(src + 8));
1536 __m64 vs5 = ldq_u((uint64_t *)(src + 10));
1537 __m64 vs6 = ldq_u((uint64_t *)(src + 12));
1538 __m64 vs7 = ldq_u((uint64_t *)(src + 14));
1541 in_over (expandx888 (vs0, 0), srca, vmask, expand8888 (vd0, 0)),
1542 in_over (expandx888 (vs0, 1), srca, vmask, expand8888 (vd0, 1)));
1545 in_over (expandx888 (vs1, 0), srca, vmask, expand8888 (vd1, 0)),
1546 in_over (expandx888 (vs1, 1), srca, vmask, expand8888 (vd1, 1)));
1549 in_over (expandx888 (vs2, 0), srca, vmask, expand8888 (vd2, 0)),
1550 in_over (expandx888 (vs2, 1), srca, vmask, expand8888 (vd2, 1)));
1553 in_over (expandx888 (vs3, 0), srca, vmask, expand8888 (vd3, 0)),
1554 in_over (expandx888 (vs3, 1), srca, vmask, expand8888 (vd3, 1)));
1557 in_over (expandx888 (vs4, 0), srca, vmask, expand8888 (vd4, 0)),
1558 in_over (expandx888 (vs4, 1), srca, vmask, expand8888 (vd4, 1)));
1561 in_over (expandx888 (vs5, 0), srca, vmask, expand8888 (vd5, 0)),
1562 in_over (expandx888 (vs5, 1), srca, vmask, expand8888 (vd5, 1)));
1565 in_over (expandx888 (vs6, 0), srca, vmask, expand8888 (vd6, 0)),
1566 in_over (expandx888 (vs6, 1), srca, vmask, expand8888 (vd6, 1)));
1569 in_over (expandx888 (vs7, 0), srca, vmask, expand8888 (vd7, 0)),
1570 in_over (expandx888 (vs7, 1), srca, vmask, expand8888 (vd7, 1)));
1572 *(__m64 *)(dst + 0) = vd0;
1573 *(__m64 *)(dst + 2) = vd1;
1574 *(__m64 *)(dst + 4) = vd2;
1575 *(__m64 *)(dst + 6) = vd3;
1576 *(__m64 *)(dst + 8) = vd4;
1577 *(__m64 *)(dst + 10) = vd5;
1578 *(__m64 *)(dst + 12) = vd6;
1579 *(__m64 *)(dst + 14) = vd7;
1588 uint32_t ssrc = *src | 0xff000000;
1589 __m64 s = load8888 (&ssrc);
1590 __m64 d = load8888 (dst);
1592 store8888 (dst, in_over (s, srca, vmask, d));
1604 mmx_composite_over_8888_8888 (pixman_implementation_t *imp,
1605 pixman_composite_info_t *info)
1607 PIXMAN_COMPOSITE_ARGS (info);
1608 uint32_t *dst_line, *dst;
1609 uint32_t *src_line, *src;
1611 int dst_stride, src_stride;
1617 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
1618 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
1623 dst_line += dst_stride;
1625 src_line += src_stride;
1641 sa = expand_alpha (ms);
1642 store8888 (dst, over (ms, sa, load8888 (dst)));
1652 mmx_composite_over_8888_0565 (pixman_implementation_t *imp,
1653 pixman_composite_info_t *info)
1655 PIXMAN_COMPOSITE_ARGS (info);
1656 uint16_t *dst_line, *dst;
1657 uint32_t *src_line, *src;
1658 int dst_stride, src_stride;
1663 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
1664 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
1668 assert (src_image->drawable == mask_image->drawable);
1674 dst_line += dst_stride;
1676 src_line += src_stride;
1681 while (w && (unsigned long)dst & 7)
1683 __m64 vsrc = load8888 (src);
1685 __m64 vdest = expand565 (to_m64 (d), 0);
1688 over (vsrc, expand_alpha (vsrc), vdest), vdest, 0);
1690 *dst = to_uint64 (vdest);
1701 __m64 vsrc0, vsrc1, vsrc2, vsrc3;
1704 vsrc0 = load8888 ((src + 0));
1705 vsrc1 = load8888 ((src + 1));
1706 vsrc2 = load8888 ((src + 2));
1707 vsrc3 = load8888 ((src + 3));
1709 vdest = *(__m64 *)dst;
1711 vdest = pack_565 (over (vsrc0, expand_alpha (vsrc0), expand565 (vdest, 0)), vdest, 0);
1712 vdest = pack_565 (over (vsrc1, expand_alpha (vsrc1), expand565 (vdest, 1)), vdest, 1);
1713 vdest = pack_565 (over (vsrc2, expand_alpha (vsrc2), expand565 (vdest, 2)), vdest, 2);
1714 vdest = pack_565 (over (vsrc3, expand_alpha (vsrc3), expand565 (vdest, 3)), vdest, 3);
1716 *(__m64 *)dst = vdest;
1727 __m64 vsrc = load8888 (src);
1729 __m64 vdest = expand565 (to_m64 (d), 0);
1731 vdest = pack_565 (over (vsrc, expand_alpha (vsrc), vdest), vdest, 0);
1733 *dst = to_uint64 (vdest);
1745 mmx_composite_over_n_8_8888 (pixman_implementation_t *imp,
1746 pixman_composite_info_t *info)
1748 PIXMAN_COMPOSITE_ARGS (info);
1750 uint32_t *dst_line, *dst;
1751 uint8_t *mask_line, *mask;
1752 int dst_stride, mask_stride;
1759 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
1765 srcsrc = (uint64_t)src << 32 | src;
1767 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
1768 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
1770 vsrc = load8888 (&src);
1771 vsrca = expand_alpha (vsrc);
1776 dst_line += dst_stride;
1778 mask_line += mask_stride;
1783 while (w && (unsigned long)dst & 7)
1789 __m64 vdest = in_over (vsrc, vsrca,
1790 expand_alpha_rev (to_m64 (m)),
1793 store8888 (dst, vdest);
1810 if (srca == 0xff && (m0 & m1) == 0xff)
1812 *(uint64_t *)dst = srcsrc;
1819 vdest = *(__m64 *)dst;
1821 dest0 = in_over (vsrc, vsrca, expand_alpha_rev (to_m64 (m0)),
1822 expand8888 (vdest, 0));
1823 dest1 = in_over (vsrc, vsrca, expand_alpha_rev (to_m64 (m1)),
1824 expand8888 (vdest, 1));
1826 *(__m64 *)dst = pack8888 (dest0, dest1);
1842 __m64 vdest = load8888 (dst);
1845 vsrc, vsrca, expand_alpha_rev (to_m64 (m)), vdest);
1846 store8888 (dst, vdest);
1855 pixman_fill_mmx (uint32_t *bits,
1866 uint32_t byte_width;
1869 #if defined __GNUC__ && defined USE_X86_MMX
1870 __m64 v1, v2, v3, v4, v5, v6, v7;
1873 if (bpp != 16 && bpp != 32 && bpp != 8)
1878 stride = stride * (int) sizeof (uint32_t) / 1;
1879 byte_line = (uint8_t *)(((uint8_t *)bits) + stride * y + x);
1882 xor = (xor & 0xff) * 0x01010101;
1886 stride = stride * (int) sizeof (uint32_t) / 2;
1887 byte_line = (uint8_t *)(((uint16_t *)bits) + stride * y + x);
1888 byte_width = 2 * width;
1890 xor = (xor & 0xffff) * 0x00010001;
1894 stride = stride * (int) sizeof (uint32_t) / 4;
1895 byte_line = (uint8_t *)(((uint32_t *)bits) + stride * y + x);
1896 byte_width = 4 * width;
1900 fill = ((uint64_t)xor << 32) | xor;
1901 vfill = to_m64 (fill);
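/* At this point xor holds the fill value replicated to 32 bits (a bpp == 8
 * fill of 0x12 becomes 0x12121212, for instance) and vfill holds the same
 * pattern widened to 64 bits for the 8-byte stores in the inner loop. */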
1903 #if defined __GNUC__ && defined USE_X86_MMX
1912 : "=&y" (v1), "=&y" (v2), "=&y" (v3),
1913 "=&y" (v4), "=&y" (v5), "=&y" (v6), "=y" (v7)
1920 uint8_t *d = byte_line;
1922 byte_line += stride;
1925 if (w >= 1 && ((unsigned long)d & 1))
1927 *(uint8_t *)d = (xor & 0xff);
1932 if (w >= 2 && ((unsigned long)d & 3))
1934 *(uint16_t *)d = xor;
1939 while (w >= 4 && ((unsigned long)d & 7))
1941 *(uint32_t *)d = xor;
1949 #if defined __GNUC__ && defined USE_X86_MMX
1961 "y" (vfill), "y" (v1), "y" (v2), "y" (v3),
1962 "y" (v4), "y" (v5), "y" (v6), "y" (v7)
1965 *(__m64*) (d + 0) = vfill;
1966 *(__m64*) (d + 8) = vfill;
1967 *(__m64*) (d + 16) = vfill;
1968 *(__m64*) (d + 24) = vfill;
1969 *(__m64*) (d + 32) = vfill;
1970 *(__m64*) (d + 40) = vfill;
1971 *(__m64*) (d + 48) = vfill;
1972 *(__m64*) (d + 56) = vfill;
1980 *(uint32_t *)d = xor;
1987 *(uint16_t *)d = xor;
1993 *(uint8_t *)d = (xor & 0xff);
2005 mmx_composite_src_n_8_8888 (pixman_implementation_t *imp,
2006 pixman_composite_info_t *info)
2008 PIXMAN_COMPOSITE_ARGS (info);
2010 uint32_t *dst_line, *dst;
2011 uint8_t *mask_line, *mask;
2012 int dst_stride, mask_stride;
2019 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
2024 pixman_fill_mmx (dest_image->bits.bits, dest_image->bits.rowstride,
2025 PIXMAN_FORMAT_BPP (dest_image->bits.format),
2026 dest_x, dest_y, width, height, 0);
2030 srcsrc = (uint64_t)src << 32 | src;
2032 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
2033 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
2035 vsrc = load8888 (&src);
2040 dst_line += dst_stride;
2042 mask_line += mask_stride;
2047 while (w && (unsigned long)dst & 7)
2053 __m64 vdest = in (vsrc, expand_alpha_rev (to_m64 (m)));
2055 store8888 (dst, vdest);
2075 if (srca == 0xff && (m0 & m1) == 0xff)
2077 *(uint64_t *)dst = srcsrc;
2083 dest0 = in (vsrc, expand_alpha_rev (to_m64 (m0)));
2084 dest1 = in (vsrc, expand_alpha_rev (to_m64 (m1)));
2086 *(__m64 *)dst = pack8888 (dest0, dest1);
2090 *(uint64_t *)dst = 0;
2106 __m64 vdest = load8888 (dst);
2108 vdest = in (vsrc, expand_alpha_rev (to_m64 (m)));
2109 store8888 (dst, vdest);
2122 mmx_composite_over_n_8_0565 (pixman_implementation_t *imp,
2123 pixman_composite_info_t *info)
2125 PIXMAN_COMPOSITE_ARGS (info);
2127 uint16_t *dst_line, *dst;
2128 uint8_t *mask_line, *mask;
2129 int dst_stride, mask_stride;
2131 __m64 vsrc, vsrca, tmp;
2132 uint64_t srcsrcsrcsrc, src16;
2136 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
2142 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
2143 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
2145 vsrc = load8888 (&src);
2146 vsrca = expand_alpha (vsrc);
2148 tmp = pack_565 (vsrc, _mm_setzero_si64 (), 0);
2149 src16 = to_uint64 (tmp);
2152 (uint64_t)src16 << 48 | (uint64_t)src16 << 32 |
2153 (uint64_t)src16 << 16 | (uint64_t)src16;
2158 dst_line += dst_stride;
2160 mask_line += mask_stride;
2165 while (w && (unsigned long)dst & 7)
2172 __m64 vd = to_m64 (d);
2173 __m64 vdest = in_over (
2174 vsrc, vsrca, expand_alpha_rev (to_m64 (m)), expand565 (vd, 0));
2176 vd = pack_565 (vdest, _mm_setzero_si64 (), 0);
2177 *dst = to_uint64 (vd);
2189 uint64_t m0, m1, m2, m3;
2195 if (srca == 0xff && (m0 & m1 & m2 & m3) == 0xff)
2197 *(uint64_t *)dst = srcsrcsrcsrc;
2199 else if (m0 | m1 | m2 | m3)
2202 __m64 vm0, vm1, vm2, vm3;
2204 vdest = *(__m64 *)dst;
2207 vdest = pack_565 (in_over (vsrc, vsrca, expand_alpha_rev (vm0),
2208 expand565 (vdest, 0)), vdest, 0);
2210 vdest = pack_565 (in_over (vsrc, vsrca, expand_alpha_rev (vm1),
2211 expand565 (vdest, 1)), vdest, 1);
2213 vdest = pack_565 (in_over (vsrc, vsrca, expand_alpha_rev (vm2),
2214 expand565 (vdest, 2)), vdest, 2);
2216 vdest = pack_565 (in_over (vsrc, vsrca, expand_alpha_rev (vm3),
2217 expand565 (vdest, 3)), vdest, 3);
2219 *(__m64 *)dst = vdest;
2236 __m64 vd = to_m64 (d);
2237 __m64 vdest = in_over (vsrc, vsrca, expand_alpha_rev (to_m64 (m)),
2239 vd = pack_565 (vdest, _mm_setzero_si64 (), 0);
2240 *dst = to_uint64 (vd);
2253 mmx_composite_over_pixbuf_0565 (pixman_implementation_t *imp,
2254 pixman_composite_info_t *info)
2256 PIXMAN_COMPOSITE_ARGS (info);
2257 uint16_t *dst_line, *dst;
2258 uint32_t *src_line, *src;
2259 int dst_stride, src_stride;
2264 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
2265 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
2269 assert (src_image->drawable == mask_image->drawable);
2275 dst_line += dst_stride;
2277 src_line += src_stride;
2282 while (w && (unsigned long)dst & 7)
2284 __m64 vsrc = load8888 (src);
2286 __m64 vdest = expand565 (to_m64 (d), 0);
2288 vdest = pack_565 (over_rev_non_pre (vsrc, vdest), vdest, 0);
2290 *dst = to_uint64 (vdest);
2301 uint32_t s0, s1, s2, s3;
2302 unsigned char a0, a1, a2, a3;
2314 if ((a0 & a1 & a2 & a3) == 0xFF)
2317 vdest = pack_565 (invert_colors (load8888 (&s0)), _mm_setzero_si64 (), 0);
2318 vdest = pack_565 (invert_colors (load8888 (&s1)), vdest, 1);
2319 vdest = pack_565 (invert_colors (load8888 (&s2)), vdest, 2);
2320 vdest = pack_565 (invert_colors (load8888 (&s3)), vdest, 3);
2322 *(__m64 *)dst = vdest;
2324 else if (s0 | s1 | s2 | s3)
2326 __m64 vdest = *(__m64 *)dst;
2328 vdest = pack_565 (over_rev_non_pre (load8888 (&s0), expand565 (vdest, 0)), vdest, 0);
2329 vdest = pack_565 (over_rev_non_pre (load8888 (&s1), expand565 (vdest, 1)), vdest, 1);
2330 vdest = pack_565 (over_rev_non_pre (load8888 (&s2), expand565 (vdest, 2)), vdest, 2);
2331 vdest = pack_565 (over_rev_non_pre (load8888 (&s3), expand565 (vdest, 3)), vdest, 3);
2333 *(__m64 *)dst = vdest;
2345 __m64 vsrc = load8888 (src);
2347 __m64 vdest = expand565 (to_m64 (d), 0);
2349 vdest = pack_565 (over_rev_non_pre (vsrc, vdest), vdest, 0);
2351 *dst = to_uint64 (vdest);
2363 mmx_composite_over_pixbuf_8888 (pixman_implementation_t *imp,
2364 pixman_composite_info_t *info)
2366 PIXMAN_COMPOSITE_ARGS (info);
2367 uint32_t *dst_line, *dst;
2368 uint32_t *src_line, *src;
2369 int dst_stride, src_stride;
2374 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
2375 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
2379 assert (src_image->drawable == mask_image->drawable);
2385 dst_line += dst_stride;
2387 src_line += src_stride;
2390 while (w && (unsigned long)dst & 7)
2392 __m64 s = load8888 (src);
2393 __m64 d = load8888 (dst);
2395 store8888 (dst, over_rev_non_pre (s, d));
2405 unsigned char a0, a1;
2414 if ((a0 & a1) == 0xFF)
2416 d0 = invert_colors (load8888 (&s0));
2417 d1 = invert_colors (load8888 (&s1));
2419 *(__m64 *)dst = pack8888 (d0, d1);
2423 __m64 vdest = *(__m64 *)dst;
2425 d0 = over_rev_non_pre (load8888 (&s0), expand8888 (vdest, 0));
2426 d1 = over_rev_non_pre (load8888 (&s1), expand8888 (vdest, 1));
2428 *(__m64 *)dst = pack8888 (d0, d1);
2438 __m64 s = load8888 (src);
2439 __m64 d = load8888 (dst);
2441 store8888 (dst, over_rev_non_pre (s, d));
2449 mmx_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
2450 pixman_composite_info_t *info)
2452 PIXMAN_COMPOSITE_ARGS (info);
2455 uint32_t *mask_line;
2456 int dst_stride, mask_stride;
2461 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
2466 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
2467 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
2469 vsrc = load8888 (&src);
2470 vsrca = expand_alpha (vsrc);
2475 uint32_t *p = (uint32_t *)mask_line;
2476 uint16_t *q = (uint16_t *)dst_line;
2478 while (twidth && ((unsigned long)q & 7))
2480 uint32_t m = *(uint32_t *)p;
2485 __m64 vdest = expand565 (to_m64 (d), 0);
2486 vdest = pack_565 (in_over (vsrc, vsrca, load8888 (&m), vdest), vdest, 0);
2487 *q = to_uint64 (vdest);
2497 uint32_t m0, m1, m2, m3;
2504 if ((m0 | m1 | m2 | m3))
2506 __m64 vdest = *(__m64 *)q;
2508 vdest = pack_565 (in_over (vsrc, vsrca, load8888 (&m0), expand565 (vdest, 0)), vdest, 0);
2509 vdest = pack_565 (in_over (vsrc, vsrca, load8888 (&m1), expand565 (vdest, 1)), vdest, 1);
2510 vdest = pack_565 (in_over (vsrc, vsrca, load8888 (&m2), expand565 (vdest, 2)), vdest, 2);
2511 vdest = pack_565 (in_over (vsrc, vsrca, load8888 (&m3), expand565 (vdest, 3)), vdest, 3);
2513 *(__m64 *)q = vdest;
2528 __m64 vdest = expand565 (to_m64 (d), 0);
2529 vdest = pack_565 (in_over (vsrc, vsrca, load8888 (&m), vdest), vdest, 0);
2530 *q = to_uint64 (vdest);
2538 mask_line += mask_stride;
2539 dst_line += dst_stride;
2546 mmx_composite_in_n_8_8 (pixman_implementation_t *imp,
2547 pixman_composite_info_t *info)
2549 PIXMAN_COMPOSITE_ARGS (info);
2550 uint8_t *dst_line, *dst;
2551 uint8_t *mask_line, *mask;
2552 int dst_stride, mask_stride;
2558 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
2559 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
2561 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
2565 vsrc = load8888 (&src);
2566 vsrca = expand_alpha (vsrc);
2571 dst_line += dst_stride;
2573 mask_line += mask_stride;
2576 while (w && (unsigned long)dst & 7)
2585 m = MUL_UN8 (sa, a, tmp);
2586 d = MUL_UN8 (m, d, tmp);
2597 vmask = load8888u ((uint32_t *)mask);
2598 vdest = load8888 ((uint32_t *)dst);
2600 store8888 ((uint32_t *)dst, in (in (vsrca, vmask), vdest));
2616 m = MUL_UN8 (sa, a, tmp);
2617 d = MUL_UN8 (m, d, tmp);
2627 mmx_composite_in_8_8 (pixman_implementation_t *imp,
2628 pixman_composite_info_t *info)
2630 PIXMAN_COMPOSITE_ARGS (info);
2631 uint8_t *dst_line, *dst;
2632 uint8_t *src_line, *src;
2633 int src_stride, dst_stride;
2636 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
2637 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
2642 dst_line += dst_stride;
2644 src_line += src_stride;
2647 while (w && (unsigned long)dst & 3)
2655 *dst = MUL_UN8 (s, d, tmp);
2664 uint32_t *s = (uint32_t *)src;
2665 uint32_t *d = (uint32_t *)dst;
2667 store8888 (d, in (load8888u (s), load8888 (d)));
2682 *dst = MUL_UN8 (s, d, tmp);
2693 mmx_composite_add_n_8_8 (pixman_implementation_t *imp,
2694 pixman_composite_info_t *info)
2696 PIXMAN_COMPOSITE_ARGS (info);
2697 uint8_t *dst_line, *dst;
2698 uint8_t *mask_line, *mask;
2699 int dst_stride, mask_stride;
2705 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
2706 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
2708 src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
2715 vsrc = load8888 (&src);
2716 vsrca = expand_alpha (vsrc);
2721 dst_line += dst_stride;
2723 mask_line += mask_stride;
2726 while (w && (unsigned long)dst & 3)
2736 m = MUL_UN8 (sa, a, tmp);
2737 r = ADD_UN8 (m, d, tmp);
2748 vmask = load8888u ((uint32_t *)mask);
2749 vdest = load8888 ((uint32_t *)dst);
2751 store8888 ((uint32_t *)dst, _mm_adds_pu8 (in (vsrca, vmask), vdest));
2768 m = MUL_UN8 (sa, a, tmp);
2769 r = ADD_UN8 (m, d, tmp);
2779 mmx_composite_add_8_8 (pixman_implementation_t *imp,
2780 pixman_composite_info_t *info)
2782 PIXMAN_COMPOSITE_ARGS (info);
2783 uint8_t *dst_line, *dst;
2784 uint8_t *src_line, *src;
2785 int dst_stride, src_stride;
2792 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
2793 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
2798 dst_line += dst_stride;
2800 src_line += src_stride;
2803 while (w && (unsigned long)dst & 7)
2808 s = t | (0 - (t >> 8));
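/* (t >> 8) is 1 exactly when the byte sum overflowed, so (0 - (t >> 8)) is
 * either 0 or all ones and the OR above clamps the stored byte to 0xff; this
 * is a branch-free scalar version of the saturating add in the vector loop. */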
2818 *(__m64*)dst = _mm_adds_pu8 (ldq_u((uint64_t *)src), *(__m64*)dst);
2829 s = t | (0 - (t >> 8));
2842 mmx_composite_add_8888_8888 (pixman_implementation_t *imp,
2843 pixman_composite_info_t *info)
2845 PIXMAN_COMPOSITE_ARGS (info);
2847 uint32_t *dst_line, *dst;
2848 uint32_t *src_line, *src;
2849 int dst_stride, src_stride;
2854 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
2855 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
2860 dst_line += dst_stride;
2862 src_line += src_stride;
2865 while (w && (unsigned long)dst & 7)
2867 store (dst, _mm_adds_pu8 (_mm_cvtsi32_si64 (*src),
2868 _mm_cvtsi32_si64 (*dst)));
2876 dst64 = _mm_adds_pu8 (ldq_u((uint64_t *)src), *(__m64*)dst);
2877 *(uint64_t*)dst = to_uint64 (dst64);
2885 store (dst, _mm_adds_pu8 (_mm_cvtsi32_si64 (*src),
2886 _mm_cvtsi32_si64 (*dst)));
2894 static pixman_bool_t
2895 pixman_blt_mmx (uint32_t *src_bits,
2908 uint8_t * src_bytes;
2909 uint8_t * dst_bytes;
2912 if (src_bpp != dst_bpp)
2917 src_stride = src_stride * (int) sizeof (uint32_t) / 2;
2918 dst_stride = dst_stride * (int) sizeof (uint32_t) / 2;
2919 src_bytes = (uint8_t *)(((uint16_t *)src_bits) + src_stride * (src_y) + (src_x));
2920 dst_bytes = (uint8_t *)(((uint16_t *)dst_bits) + dst_stride * (dest_y) + (dest_x));
2921 byte_width = 2 * width;
2925 else if (src_bpp == 32)
2927 src_stride = src_stride * (int) sizeof (uint32_t) / 4;
2928 dst_stride = dst_stride * (int) sizeof (uint32_t) / 4;
2929 src_bytes = (uint8_t *)(((uint32_t *)src_bits) + src_stride * (src_y) + (src_x));
2930 dst_bytes = (uint8_t *)(((uint32_t *)dst_bits) + dst_stride * (dest_y) + (dest_x));
2931 byte_width = 4 * width;
2943 uint8_t *s = src_bytes;
2944 uint8_t *d = dst_bytes;
2945 src_bytes += src_stride;
2946 dst_bytes += dst_stride;
2949 if (w >= 1 && ((unsigned long)d & 1))
2951 *(uint8_t *)d = *(uint8_t *)s;
2957 if (w >= 2 && ((unsigned long)d & 3))
2959 *(uint16_t *)d = *(uint16_t *)s;
2965 while (w >= 4 && ((unsigned long)d & 7))
2967 *(uint32_t *)d = ldl_u((uint32_t *)s);
2976 #if (defined (__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))) && defined USE_X86_MMX
2978 "movq (%1), %%mm0\n"
2979 "movq 8(%1), %%mm1\n"
2980 "movq 16(%1), %%mm2\n"
2981 "movq 24(%1), %%mm3\n"
2982 "movq 32(%1), %%mm4\n"
2983 "movq 40(%1), %%mm5\n"
2984 "movq 48(%1), %%mm6\n"
2985 "movq 56(%1), %%mm7\n"
2987 "movq %%mm0, (%0)\n"
2988 "movq %%mm1, 8(%0)\n"
2989 "movq %%mm2, 16(%0)\n"
2990 "movq %%mm3, 24(%0)\n"
2991 "movq %%mm4, 32(%0)\n"
2992 "movq %%mm5, 40(%0)\n"
2993 "movq %%mm6, 48(%0)\n"
2994 "movq %%mm7, 56(%0)\n"
2998 "%mm0", "%mm1", "%mm2", "%mm3",
2999 "%mm4", "%mm5", "%mm6", "%mm7");
3001 __m64 v0 = ldq_u((uint64_t *)(s + 0));
3002 __m64 v1 = ldq_u((uint64_t *)(s + 8));
3003 __m64 v2 = ldq_u((uint64_t *)(s + 16));
3004 __m64 v3 = ldq_u((uint64_t *)(s + 24));
3005 __m64 v4 = ldq_u((uint64_t *)(s + 32));
3006 __m64 v5 = ldq_u((uint64_t *)(s + 40));
3007 __m64 v6 = ldq_u((uint64_t *)(s + 48));
3008 __m64 v7 = ldq_u((uint64_t *)(s + 56));
3009 *(__m64 *)(d + 0) = v0;
3010 *(__m64 *)(d + 8) = v1;
3011 *(__m64 *)(d + 16) = v2;
3012 *(__m64 *)(d + 24) = v3;
3013 *(__m64 *)(d + 32) = v4;
3014 *(__m64 *)(d + 40) = v5;
3015 *(__m64 *)(d + 48) = v6;
3016 *(__m64 *)(d + 56) = v7;
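/* The inline assembly and the ldq_u/__m64 sequence above do the same job:
 * copy 64 bytes per iteration through eight MMX registers.  The asm variant
 * is selected for GCC and Sun Studio 12+ x86 builds (see the #if above), the
 * intrinsic variant everywhere else. */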
3025 *(uint32_t *)d = ldl_u((uint32_t *)s);
3033 *(uint16_t *)d = *(uint16_t *)s;
3046 mmx_composite_copy_area (pixman_implementation_t *imp,
3047 pixman_composite_info_t *info)
3049 PIXMAN_COMPOSITE_ARGS (info);
3051 pixman_blt_mmx (src_image->bits.bits,
3052 dest_image->bits.bits,
3053 src_image->bits.rowstride,
3054 dest_image->bits.rowstride,
3055 PIXMAN_FORMAT_BPP (src_image->bits.format),
3056 PIXMAN_FORMAT_BPP (dest_image->bits.format),
3057 src_x, src_y, dest_x, dest_y, width, height);
3061 mmx_composite_over_x888_8_8888 (pixman_implementation_t *imp,
3062 pixman_composite_info_t *info)
3064 PIXMAN_COMPOSITE_ARGS (info);
3065 uint32_t *src, *src_line;
3066 uint32_t *dst, *dst_line;
3067 uint8_t *mask, *mask_line;
3068 int src_stride, mask_stride, dst_stride;
3071 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3072 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
3073 PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
3078 src_line += src_stride;
3080 dst_line += dst_stride;
3082 mask_line += mask_stride;
3092 uint32_t ssrc = *src | 0xff000000;
3093 __m64 s = load8888 (&ssrc);
3101 __m64 sa = expand_alpha (s);
3102 __m64 vm = expand_alpha_rev (to_m64 (m));
3103 __m64 vdest = in_over (s, sa, vm, load8888 (dst));
3105 store8888 (dst, vdest);
3118 static const pixman_fast_path_t mmx_fast_paths[] =
3120 PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, mmx_composite_over_n_8_0565 ),
3121 PIXMAN_STD_FAST_PATH (OVER, solid, a8, b5g6r5, mmx_composite_over_n_8_0565 ),
3122 PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, mmx_composite_over_n_8_8888 ),
3123 PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, mmx_composite_over_n_8_8888 ),
3124 PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, mmx_composite_over_n_8_8888 ),
3125 PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, mmx_composite_over_n_8_8888 ),
3126 PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, mmx_composite_over_n_8888_8888_ca ),
3127 PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, mmx_composite_over_n_8888_8888_ca ),
3128 PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, r5g6b5, mmx_composite_over_n_8888_0565_ca ),
3129 PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, mmx_composite_over_n_8888_8888_ca ),
3130 PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, mmx_composite_over_n_8888_8888_ca ),
3131 PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, b5g6r5, mmx_composite_over_n_8888_0565_ca ),
3132 PIXMAN_STD_FAST_PATH (OVER, pixbuf, pixbuf, a8r8g8b8, mmx_composite_over_pixbuf_8888 ),
3133 PIXMAN_STD_FAST_PATH (OVER, pixbuf, pixbuf, x8r8g8b8, mmx_composite_over_pixbuf_8888 ),
3134 PIXMAN_STD_FAST_PATH (OVER, pixbuf, pixbuf, r5g6b5, mmx_composite_over_pixbuf_0565 ),
3135 PIXMAN_STD_FAST_PATH (OVER, rpixbuf, rpixbuf, a8b8g8r8, mmx_composite_over_pixbuf_8888 ),
3136 PIXMAN_STD_FAST_PATH (OVER, rpixbuf, rpixbuf, x8b8g8r8, mmx_composite_over_pixbuf_8888 ),
3137 PIXMAN_STD_FAST_PATH (OVER, rpixbuf, rpixbuf, b5g6r5, mmx_composite_over_pixbuf_0565 ),
3138 PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, solid, a8r8g8b8, mmx_composite_over_x888_n_8888 ),
3139 PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, solid, x8r8g8b8, mmx_composite_over_x888_n_8888 ),
3140 PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, solid, a8b8g8r8, mmx_composite_over_x888_n_8888 ),
3141 PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, solid, x8b8g8r8, mmx_composite_over_x888_n_8888 ),
3142 PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, a8r8g8b8, mmx_composite_over_8888_n_8888 ),
3143 PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, x8r8g8b8, mmx_composite_over_8888_n_8888 ),
3144 PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, solid, a8b8g8r8, mmx_composite_over_8888_n_8888 ),
3145 PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, solid, x8b8g8r8, mmx_composite_over_8888_n_8888 ),
3146 PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, x8r8g8b8, mmx_composite_over_x888_8_8888 ),
3147 PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, a8r8g8b8, mmx_composite_over_x888_8_8888 ),
3148 PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, x8b8g8r8, mmx_composite_over_x888_8_8888 ),
3149 PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, a8b8g8r8, mmx_composite_over_x888_8_8888 ),
3150 PIXMAN_STD_FAST_PATH (OVER, solid, null, a8r8g8b8, mmx_composite_over_n_8888 ),
3151 PIXMAN_STD_FAST_PATH (OVER, solid, null, x8r8g8b8, mmx_composite_over_n_8888 ),
3152 PIXMAN_STD_FAST_PATH (OVER, solid, null, r5g6b5, mmx_composite_over_n_0565 ),
3153 PIXMAN_STD_FAST_PATH (OVER, solid, null, b5g6r5, mmx_composite_over_n_0565 ),
3154 PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, null, x8r8g8b8, mmx_composite_copy_area ),
3155 PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, null, x8b8g8r8, mmx_composite_copy_area ),
3157 PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, mmx_composite_over_8888_8888 ),
3158 PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, mmx_composite_over_8888_8888 ),
3159 PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, r5g6b5, mmx_composite_over_8888_0565 ),
3160 PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, mmx_composite_over_8888_8888 ),
3161 PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, mmx_composite_over_8888_8888 ),
3162 PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, mmx_composite_over_8888_0565 ),
3164 PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, mmx_composite_add_8888_8888 ),
3165 PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, mmx_composite_add_8888_8888 ),
3166 PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, mmx_composite_add_8_8 ),
3167 PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8, mmx_composite_add_n_8_8 ),
3169 PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8r8g8b8, mmx_composite_src_n_8_8888 ),
3170 PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8r8g8b8, mmx_composite_src_n_8_8888 ),
3171 PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8b8g8r8, mmx_composite_src_n_8_8888 ),
3172 PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8b8g8r8, mmx_composite_src_n_8_8888 ),
3173 PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, a8r8g8b8, mmx_composite_copy_area ),
3174 PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, a8b8g8r8, mmx_composite_copy_area ),
3175 PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, mmx_composite_copy_area ),
3176 PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, x8b8g8r8, mmx_composite_copy_area ),
3177 PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, mmx_composite_copy_area ),
3178 PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, mmx_composite_copy_area ),
3179 PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, r5g6b5, mmx_composite_copy_area ),
3180 PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, b5g6r5, mmx_composite_copy_area ),
3182 PIXMAN_STD_FAST_PATH (IN, a8, null, a8, mmx_composite_in_8_8 ),
3183 PIXMAN_STD_FAST_PATH (IN, solid, a8, a8, mmx_composite_in_n_8_8 ),
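/* Each entry maps (operator, source format, mask format, destination format)
 * to one of the routines above; the routine names spell out the same
 * combination, e.g. mmx_composite_over_n_8_0565 handles OVER with a solid
 * ("n") source, an a8 mask and a 565 destination. */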
3188 static pixman_bool_t
3189 mmx_blt (pixman_implementation_t *imp,
3190 uint32_t * src_bits,
3191 uint32_t * dst_bits,
3203 if (!pixman_blt_mmx (
3204 src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
3205 src_x, src_y, dest_x, dest_y, width, height))
3208 return _pixman_implementation_blt (
3210 src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
3211 src_x, src_y, dest_x, dest_y, width, height);
3217 static pixman_bool_t
3218 mmx_fill (pixman_implementation_t *imp,
3228 if (!pixman_fill_mmx (bits, stride, bpp, x, y, width, height, xor))
3230 return _pixman_implementation_fill (
3231 imp->delegate, bits, stride, bpp, x, y, width, height, xor);
3237 pixman_implementation_t *
3238 _pixman_implementation_create_mmx (pixman_implementation_t *fallback)
3240 pixman_implementation_t *imp = _pixman_implementation_create (fallback, mmx_fast_paths);
3242 imp->combine_32[PIXMAN_OP_OVER] = mmx_combine_over_u;
3243 imp->combine_32[PIXMAN_OP_OVER_REVERSE] = mmx_combine_over_reverse_u;
3244 imp->combine_32[PIXMAN_OP_IN] = mmx_combine_in_u;
3245 imp->combine_32[PIXMAN_OP_IN_REVERSE] = mmx_combine_in_reverse_u;
3246 imp->combine_32[PIXMAN_OP_OUT] = mmx_combine_out_u;
3247 imp->combine_32[PIXMAN_OP_OUT_REVERSE] = mmx_combine_out_reverse_u;
3248 imp->combine_32[PIXMAN_OP_ATOP] = mmx_combine_atop_u;
3249 imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = mmx_combine_atop_reverse_u;
3250 imp->combine_32[PIXMAN_OP_XOR] = mmx_combine_xor_u;
3251 imp->combine_32[PIXMAN_OP_ADD] = mmx_combine_add_u;
3252 imp->combine_32[PIXMAN_OP_SATURATE] = mmx_combine_saturate_u;
3254 imp->combine_32_ca[PIXMAN_OP_SRC] = mmx_combine_src_ca;
3255 imp->combine_32_ca[PIXMAN_OP_OVER] = mmx_combine_over_ca;
3256 imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = mmx_combine_over_reverse_ca;
3257 imp->combine_32_ca[PIXMAN_OP_IN] = mmx_combine_in_ca;
3258 imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = mmx_combine_in_reverse_ca;
3259 imp->combine_32_ca[PIXMAN_OP_OUT] = mmx_combine_out_ca;
3260 imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = mmx_combine_out_reverse_ca;
3261 imp->combine_32_ca[PIXMAN_OP_ATOP] = mmx_combine_atop_ca;
3262 imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = mmx_combine_atop_reverse_ca;
3263 imp->combine_32_ca[PIXMAN_OP_XOR] = mmx_combine_xor_ca;
3264 imp->combine_32_ca[PIXMAN_OP_ADD] = mmx_combine_add_ca;
3267 imp->fill = mmx_fill;
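/* Whatever the MMX paths cannot handle falls through to the fallback
 * implementation passed in: mmx_blt () and mmx_fill () above delegate
 * explicitly when the MMX routines reject a request, and compositing
 * operations that match no entry in mmx_fast_paths are handled further down
 * the implementation chain. */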
3272 #endif /* USE_X86_MMX || USE_ARM_IWMMXT */