/*
 * Copyright © 2004, 2005 Red Hat, Inc.
 * Copyright © 2004 Nicholas Miell
 * Copyright © 2005 Trolltech AS
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that
 * copyright notice and this permission notice appear in supporting
 * documentation, and that the name of Red Hat not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  Red Hat makes no representations about the
 * suitability of this software for any purpose.  It is provided "as is"
 * without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
 * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
 * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
 * SOFTWARE.
 *
 * Author:  Søren Sandmann (sandmann@redhat.com)
 * Minor Improvements: Nicholas Miell (nmiell@gmail.com)
 * MMX code paths for fbcompose.c by Lars Knoll (lars@trolltech.com)
 *
 * Based on work by Owen Taylor
 */
40 #include "pixman-mmx.h"
44 #define READ(img,x) *(x)
45 #define WRITE(img,ptr,v) (*(ptr) = (v));
50 #define CHECKPOINT() ErrorF ("at %s %d\n", __FUNCTION__, __LINE__)
/* Notes about writing mmx code
 *
 * give memory operands as the second operand. If you give it as the
 * first, gcc will first load it into a register, then use that
 * register
 *
 *   ie. use
 *
 *         _mm_mullo_pi16 (x, mmx_constant);
 *
 *   not
 *
 *         _mm_mullo_pi16 (mmx_constant, x);
 *
 * Also try to minimize dependencies. i.e. when you need a value, try
 * to calculate it from a value that was calculated as early as
 * possible.
 */
/* --------------- MMX primitives ------------------------------------- */
#ifdef _MSC_VER
/* MSVC cannot use __m64 in static initializers, so keep raw 64-bit ints. */
typedef uint64_t mmxdatafield;
#else
typedef __m64 mmxdatafield;
#endif

/* If __m64 is defined as a struct or union, define M64_MEMBER to be the
   name of the member used to access the data */
#ifdef _MSC_VER
# define M64_MEMBER m64_u64
#elif defined(__SUNPRO_C)
# define M64_MEMBER l_
#endif
91 mmxdatafield mmx_4x00ff;
92 mmxdatafield mmx_4x0080;
93 mmxdatafield mmx_565_rgb;
94 mmxdatafield mmx_565_unpack_multiplier;
95 mmxdatafield mmx_565_r;
96 mmxdatafield mmx_565_g;
97 mmxdatafield mmx_565_b;
98 mmxdatafield mmx_mask_0;
99 mmxdatafield mmx_mask_1;
100 mmxdatafield mmx_mask_2;
101 mmxdatafield mmx_mask_3;
102 mmxdatafield mmx_full_alpha;
103 mmxdatafield mmx_ffff0000ffff0000;
104 mmxdatafield mmx_0000ffff00000000;
105 mmxdatafield mmx_000000000000ffff;
#if defined(_MSC_VER)
# define MMXDATA_INIT(field, val) { val##UI64 }
#elif defined(M64_MEMBER)       /* __m64 is a struct, not an integral type */
# define MMXDATA_INIT(field, val) field = { val##ULL }
#else                           /* __m64 is an integral type */
# define MMXDATA_INIT(field, val) field = val##ULL
#endif
116 static const MMXData c =
118 MMXDATA_INIT(.mmx_4x00ff, 0x00ff00ff00ff00ff),
119 MMXDATA_INIT(.mmx_4x0080, 0x0080008000800080),
120 MMXDATA_INIT(.mmx_565_rgb, 0x000001f0003f001f),
121 MMXDATA_INIT(.mmx_565_unpack_multiplier, 0x0000008404100840),
122 MMXDATA_INIT(.mmx_565_r, 0x000000f800000000),
123 MMXDATA_INIT(.mmx_565_g, 0x0000000000fc0000),
124 MMXDATA_INIT(.mmx_565_b, 0x00000000000000f8),
125 MMXDATA_INIT(.mmx_mask_0, 0xffffffffffff0000),
126 MMXDATA_INIT(.mmx_mask_1, 0xffffffff0000ffff),
127 MMXDATA_INIT(.mmx_mask_2, 0xffff0000ffffffff),
128 MMXDATA_INIT(.mmx_mask_3, 0x0000ffffffffffff),
129 MMXDATA_INIT(.mmx_full_alpha, 0x00ff000000000000),
130 MMXDATA_INIT(.mmx_ffff0000ffff0000, 0xffff0000ffff0000),
131 MMXDATA_INIT(.mmx_0000ffff00000000, 0x0000ffff00000000),
132 MMXDATA_INIT(.mmx_000000000000ffff, 0x000000000000ffff),
/* Fetch constant table entry @x as an __m64 value. */
#ifdef _MSC_VER
# define MC(x) M64(c.mmx_##x)
#elif defined(M64_MEMBER)
# define MC(x) ((__m64)c.mmx_##x)
#else
# define MC(x) c.mmx_##x
#endif
145 static force_inline __m64
149 return _mm_cvtsi64_m64 (x);
150 #elif defined M64_MEMBER /* __m64 is a struct, not an integral type */
155 #else /* __m64 is an integral type */
160 static force_inline uint64_t
164 return _mm_cvtm64_si64 (x);
165 #elif defined M64_MEMBER /* __m64 is a struct, not an integral type */
166 uint64_t res = x.M64_MEMBER;
168 #else /* __m64 is an integral type */
173 static force_inline __m64
174 shift (__m64 v, int s)
177 return _mm_slli_si64 (v, s);
179 return _mm_srli_si64 (v, -s);
184 static force_inline __m64
187 return _mm_xor_si64 (mask, MC(4x00ff));
190 static force_inline __m64
191 pix_multiply (__m64 a, __m64 b)
195 res = _mm_mullo_pi16 (a, b);
196 res = _mm_adds_pu16 (res, MC(4x0080));
197 res = _mm_adds_pu16 (res, _mm_srli_pi16 (res, 8));
198 res = _mm_srli_pi16 (res, 8);
203 static force_inline __m64
204 pix_add (__m64 a, __m64 b)
206 return _mm_adds_pu8 (a, b);
209 static force_inline __m64
210 expand_alpha (__m64 pixel)
214 t1 = shift (pixel, -48);
216 t1 = _mm_or_si64 (t1, t2);
218 t1 = _mm_or_si64 (t1, t2);
223 static force_inline __m64
224 expand_alpha_rev (__m64 pixel)
228 /* move alpha to low 16 bits and zero the rest */
229 t1 = shift (pixel, 48);
230 t1 = shift (t1, -48);
233 t1 = _mm_or_si64 (t1, t2);
235 t1 = _mm_or_si64 (t1, t2);
240 static force_inline __m64
241 invert_colors (__m64 pixel)
247 x = _mm_and_si64 (x, MC(ffff0000ffff0000));
248 y = _mm_and_si64 (y, MC(000000000000ffff));
249 z = _mm_and_si64 (z, MC(0000ffff00000000));
254 x = _mm_or_si64 (x, y);
255 x = _mm_or_si64 (x, z);
260 static force_inline __m64
261 over (__m64 src, __m64 srca, __m64 dest)
263 return _mm_adds_pu8 (src, pix_multiply(dest, negate(srca)));
266 static force_inline __m64
267 over_rev_non_pre (__m64 src, __m64 dest)
269 __m64 srca = expand_alpha (src);
270 __m64 srcfaaa = _mm_or_si64 (srca, MC(full_alpha));
272 return over(pix_multiply(invert_colors(src), srcfaaa), srca, dest);
275 static force_inline __m64
279 return pix_multiply (src, mask);
282 static force_inline __m64
283 in_over_full_src_alpha (__m64 src, __m64 mask, __m64 dest)
285 src = _mm_or_si64 (src, MC(full_alpha));
287 return over(in (src, mask), mask, dest);
291 static force_inline __m64
297 return over(in(src, mask), pix_multiply(srca, mask), dest);
300 #define in_over(src, srca, mask, dest) over(in(src, mask), pix_multiply(srca, mask), dest)
303 static force_inline __m64
304 load8888 (uint32_t v)
306 return _mm_unpacklo_pi8 (_mm_cvtsi32_si64 (v), _mm_setzero_si64());
309 static force_inline __m64
310 pack8888 (__m64 lo, __m64 hi)
312 return _mm_packs_pu16 (lo, hi);
315 static force_inline uint32_t
318 return _mm_cvtsi64_si32(pack8888(v, _mm_setzero_si64()));
/* Expand 16 bits positioned at @pos (0-3) of a mmx register into
 * 00RR00GG00BB
 *
 * --- Expanding 565 in the low word ---
 *
 * m = (m << (32 - 3)) | (m << (16 - 5)) | m;
 * m = m & (01f0003f001f);
 * m = m * (008404100840);
 * m = m >> 8;
 *
 * Note the trick here - the top word is shifted by another nibble to
 * avoid it bumping into the middle word
 */
335 static force_inline __m64
336 expand565 (__m64 pixel, int pos)
341 /* move pixel to low 16 bit and zero the rest */
342 p = shift (shift (p, (3 - pos) * 16), -48);
344 t1 = shift (p, 36 - 11);
345 t2 = shift (p, 16 - 5);
347 p = _mm_or_si64 (t1, p);
348 p = _mm_or_si64 (t2, p);
349 p = _mm_and_si64 (p, MC(565_rgb));
351 pixel = _mm_mullo_pi16 (p, MC(565_unpack_multiplier));
352 return _mm_srli_pi16 (pixel, 8);
355 static force_inline __m64
356 expand8888 (__m64 in, int pos)
359 return _mm_unpacklo_pi8 (in, _mm_setzero_si64());
361 return _mm_unpackhi_pi8 (in, _mm_setzero_si64());
364 static force_inline __m64
365 expandx888 (__m64 in, int pos)
367 return _mm_or_si64 (expand8888 (in, pos), MC(full_alpha));
370 static force_inline __m64
371 pack565 (__m64 pixel, __m64 target, int pos)
377 r = _mm_and_si64 (p, MC(565_r));
378 g = _mm_and_si64 (p, MC(565_g));
379 b = _mm_and_si64 (p, MC(565_b));
381 r = shift (r, - (32 - 8) + pos * 16);
382 g = shift (g, - (16 - 3) + pos * 16);
383 b = shift (b, - (0 + 3) + pos * 16);
386 t = _mm_and_si64 (t, MC(mask_0));
388 t = _mm_and_si64 (t, MC(mask_1));
390 t = _mm_and_si64 (t, MC(mask_2));
392 t = _mm_and_si64 (t, MC(mask_3));
394 p = _mm_or_si64 (r, t);
395 p = _mm_or_si64 (g, p);
397 return _mm_or_si64 (b, p);
401 static force_inline __m64
402 pix_add_mul (__m64 x, __m64 a, __m64 y, __m64 b)
404 x = _mm_mullo_pi16 (x, a);
405 y = _mm_mullo_pi16 (y, b);
406 x = _mm_adds_pu16 (x, MC(4x0080));
407 x = _mm_adds_pu16 (x, y);
408 x = _mm_adds_pu16 (x, _mm_srli_pi16 (x, 8));
409 x = _mm_srli_pi16 (x, 8);
414 #define pix_add_mul(x, a, y, b) \
415 ( x = _mm_mullo_pi16 (x, a), \
416 y = _mm_mullo_pi16 (y, b), \
417 x = _mm_adds_pu16 (x, MC(4x0080)), \
418 x = _mm_adds_pu16 (x, y), \
419 x = _mm_adds_pu16 (x, _mm_srli_pi16 (x, 8)), \
420 _mm_srli_pi16 (x, 8) )
/* --------------- MMX code patch for fbcompose.c --------------------- */
426 mmxCombineMaskU (uint32_t *src, const uint32_t *mask, int width)
428 const uint32_t *end = mask + width;
430 uint32_t mmask = *mask;
431 uint32_t maska = mmask >> 24;
434 } else if (maska != 0xff) {
435 __m64 a = load8888(mmask);
436 __m64 s = load8888(*src);
438 s = pix_multiply(s, a);
447 static force_inline uint32_t
448 combine (const uint32_t *src, const uint32_t *mask)
450 uint32_t ssrc = *src;
454 __m64 m = load8888 (*mask);
455 __m64 s = load8888 (ssrc);
457 m = expand_alpha (m);
458 s = pix_multiply (s, m);
460 ssrc = store8888 (s);
467 mmxCombineOverU (uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
469 const uint32_t *end = dest + width;
472 uint32_t ssrc = combine (src, mask);
473 uint32_t a = ssrc >> 24;
479 sa = expand_alpha(s);
480 *dest = store8888(over(s, sa, load8888(*dest)));
491 mmxCombineOverReverseU (uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
493 const uint32_t *end = dest + width;
497 uint32_t s = combine (src, mask);
499 da = expand_alpha(d);
500 *dest = store8888(over (d, da, load8888(s)));
510 mmxCombineInU (uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
512 const uint32_t *end = dest + width;
516 x = load8888 (combine (src, mask));
519 x = pix_multiply(x, a);
520 *dest = store8888(x);
530 mmxCombineInReverseU (uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
532 const uint32_t *end = dest + width;
537 a = load8888(combine (src, mask));
539 x = pix_multiply(x, a);
540 *dest = store8888(x);
550 mmxCombineOutU (uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
552 const uint32_t *end = dest + width;
556 x = load8888(combine (src, mask));
560 x = pix_multiply(x, a);
561 *dest = store8888(x);
571 mmxCombineOutReverseU (uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
573 const uint32_t *end = dest + width;
578 a = load8888(combine (src, mask));
581 x = pix_multiply(x, a);
582 *dest = store8888(x);
592 mmxCombineAtopU (uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
594 const uint32_t *end = dest + width;
598 s = load8888(combine (src, mask));
600 sia = expand_alpha(s);
602 da = expand_alpha(d);
603 s = pix_add_mul (s, da, d, sia);
604 *dest = store8888(s);
614 mmxCombineAtopReverseU (uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
622 s = load8888(combine(src, mask));
624 sa = expand_alpha(s);
625 dia = expand_alpha(d);
627 s = pix_add_mul (s, dia, d, sa);
628 *dest = store8888(s);
638 mmxCombineXorU (uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
640 const uint32_t *end = dest + width;
643 __m64 s, dia, d, sia;
644 s = load8888(combine(src, mask));
646 sia = expand_alpha(s);
647 dia = expand_alpha(d);
650 s = pix_add_mul (s, dia, d, sia);
651 *dest = store8888(s);
661 mmxCombineAddU (uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
663 const uint32_t *end = dest + width;
666 s = load8888(combine(src,mask));
669 *dest = store8888(s);
679 mmxCombineSaturateU (uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
681 const uint32_t *end = dest + width;
683 uint32_t s = combine(src,mask);
685 __m64 ms = load8888(s);
686 __m64 md = load8888(d);
687 uint32_t sa = s >> 24;
688 uint32_t da = ~d >> 24;
691 __m64 msa = load8888(FbIntDiv(da, sa) << 24);
692 msa = expand_alpha(msa);
693 ms = pix_multiply(ms, msa);
695 md = pix_add(md, ms);
696 *dest = store8888(md);
707 mmxCombineSrcC (uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
709 const uint32_t *end = src + width;
711 __m64 a = load8888(*mask);
712 __m64 s = load8888(*src);
713 s = pix_multiply(s, a);
714 *dest = store8888(s);
723 mmxCombineOverC (uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
725 const uint32_t *end = src + width;
727 __m64 a = load8888(*mask);
728 __m64 s = load8888(*src);
729 __m64 d = load8888(*dest);
730 __m64 sa = expand_alpha(s);
732 *dest = store8888(in_over (s, sa, a, d));
742 mmxCombineOverReverseC (uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
744 const uint32_t *end = src + width;
746 __m64 a = load8888(*mask);
747 __m64 s = load8888(*src);
748 __m64 d = load8888(*dest);
749 __m64 da = expand_alpha(d);
751 *dest = store8888(over (d, da, in (s, a)));
762 mmxCombineInC (uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
764 const uint32_t *end = src + width;
766 __m64 a = load8888(*mask);
767 __m64 s = load8888(*src);
768 __m64 d = load8888(*dest);
769 __m64 da = expand_alpha(d);
770 s = pix_multiply(s, a);
771 s = pix_multiply(s, da);
772 *dest = store8888(s);
781 mmxCombineInReverseC (uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
783 const uint32_t *end = src + width;
785 __m64 a = load8888(*mask);
786 __m64 s = load8888(*src);
787 __m64 d = load8888(*dest);
788 __m64 sa = expand_alpha(s);
789 a = pix_multiply(a, sa);
790 d = pix_multiply(d, a);
791 *dest = store8888(d);
800 mmxCombineOutC (uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
802 const uint32_t *end = src + width;
804 __m64 a = load8888(*mask);
805 __m64 s = load8888(*src);
806 __m64 d = load8888(*dest);
807 __m64 da = expand_alpha(d);
809 s = pix_multiply(s, a);
810 s = pix_multiply(s, da);
811 *dest = store8888(s);
820 mmxCombineOutReverseC (uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
822 const uint32_t *end = src + width;
824 __m64 a = load8888(*mask);
825 __m64 s = load8888(*src);
826 __m64 d = load8888(*dest);
827 __m64 sa = expand_alpha(s);
828 a = pix_multiply(a, sa);
830 d = pix_multiply(d, a);
831 *dest = store8888(d);
840 mmxCombineAtopC (uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
842 const uint32_t *end = src + width;
844 __m64 a = load8888(*mask);
845 __m64 s = load8888(*src);
846 __m64 d = load8888(*dest);
847 __m64 da = expand_alpha(d);
848 __m64 sa = expand_alpha(s);
849 s = pix_multiply(s, a);
850 a = pix_multiply(a, sa);
852 d = pix_add_mul (d, a, s, da);
853 *dest = store8888(d);
862 mmxCombineAtopReverseC (uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
864 const uint32_t *end = src + width;
866 __m64 a = load8888(*mask);
867 __m64 s = load8888(*src);
868 __m64 d = load8888(*dest);
869 __m64 da = expand_alpha(d);
870 __m64 sa = expand_alpha(s);
871 s = pix_multiply(s, a);
872 a = pix_multiply(a, sa);
874 d = pix_add_mul (d, a, s, da);
875 *dest = store8888(d);
884 mmxCombineXorC (uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
886 const uint32_t *end = src + width;
888 __m64 a = load8888(*mask);
889 __m64 s = load8888(*src);
890 __m64 d = load8888(*dest);
891 __m64 da = expand_alpha(d);
892 __m64 sa = expand_alpha(s);
893 s = pix_multiply(s, a);
894 a = pix_multiply(a, sa);
897 d = pix_add_mul (d, a, s, da);
898 *dest = store8888(d);
907 mmxCombineAddC (uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
909 const uint32_t *end = src + width;
911 __m64 a = load8888(*mask);
912 __m64 s = load8888(*src);
913 __m64 d = load8888(*dest);
914 s = pix_multiply(s, a);
916 *dest = store8888(d);
925 fbComposeSetupMMX(void)
927 static pixman_bool_t initialized = FALSE;
932 /* check if we have MMX support and initialize accordingly */
933 if (pixman_have_mmx())
935 pixman_composeFunctions.combineU[PIXMAN_OP_OVER] = mmxCombineOverU;
936 pixman_composeFunctions.combineU[PIXMAN_OP_OVER_REVERSE] = mmxCombineOverReverseU;
937 pixman_composeFunctions.combineU[PIXMAN_OP_IN] = mmxCombineInU;
938 pixman_composeFunctions.combineU[PIXMAN_OP_IN_REVERSE] = mmxCombineInReverseU;
939 pixman_composeFunctions.combineU[PIXMAN_OP_OUT] = mmxCombineOutU;
940 pixman_composeFunctions.combineU[PIXMAN_OP_OUT_REVERSE] = mmxCombineOutReverseU;
941 pixman_composeFunctions.combineU[PIXMAN_OP_ATOP] = mmxCombineAtopU;
942 pixman_composeFunctions.combineU[PIXMAN_OP_ATOP_REVERSE] = mmxCombineAtopReverseU;
944 pixman_composeFunctions.combineU[PIXMAN_OP_XOR] = mmxCombineXorU;
945 pixman_composeFunctions.combineU[PIXMAN_OP_ADD] = mmxCombineAddU;
946 pixman_composeFunctions.combineU[PIXMAN_OP_SATURATE] = mmxCombineSaturateU;
949 pixman_composeFunctions.combineC[PIXMAN_OP_SRC] = mmxCombineSrcC;
950 pixman_composeFunctions.combineC[PIXMAN_OP_OVER] = mmxCombineOverC;
951 pixman_composeFunctions.combineC[PIXMAN_OP_OVER_REVERSE] = mmxCombineOverReverseC;
952 pixman_composeFunctions.combineC[PIXMAN_OP_IN] = mmxCombineInC;
953 pixman_composeFunctions.combineC[PIXMAN_OP_IN_REVERSE] = mmxCombineInReverseC;
954 pixman_composeFunctions.combineC[PIXMAN_OP_OUT] = mmxCombineOutC;
955 pixman_composeFunctions.combineC[PIXMAN_OP_OUT_REVERSE] = mmxCombineOutReverseC;
956 pixman_composeFunctions.combineC[PIXMAN_OP_ATOP] = mmxCombineAtopC;
957 pixman_composeFunctions.combineC[PIXMAN_OP_ATOP_REVERSE] = mmxCombineAtopReverseC;
958 pixman_composeFunctions.combineC[PIXMAN_OP_XOR] = mmxCombineXorC;
959 pixman_composeFunctions.combineC[PIXMAN_OP_ADD] = mmxCombineAddC;
961 pixman_composeFunctions.combineMaskU = mmxCombineMaskU;
/* ------------------ MMX code paths called from fbpict.c ----------------------- */
971 fbCompositeSolid_nx8888mmx (pixman_op_t op,
972 pixman_image_t * pSrc,
973 pixman_image_t * pMask,
974 pixman_image_t * pDst,
985 uint32_t *dstLine, *dst;
992 fbComposeGetSolid(pSrc, src, pDst->bits.format);
997 fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
999 vsrc = load8888 (src);
1000 vsrca = expand_alpha (vsrc);
1005 dstLine += dstStride;
1010 while (w && (unsigned long)dst & 7)
1012 *dst = store8888(over(vsrc, vsrca, load8888(*dst)));
1023 vdest = *(__m64 *)dst;
1025 dest0 = over(vsrc, vsrca, expand8888(vdest, 0));
1026 dest1 = over(vsrc, vsrca, expand8888(vdest, 1));
1028 *(__m64 *)dst = pack8888(dest0, dest1);
1038 *dst = store8888(over(vsrc, vsrca, load8888(*dst)));
1049 fbCompositeSolid_nx0565mmx (pixman_op_t op,
1050 pixman_image_t * pSrc,
1051 pixman_image_t * pMask,
1052 pixman_image_t * pDst,
1063 uint16_t *dstLine, *dst;
1070 fbComposeGetSolid(pSrc, src, pDst->bits.format);
1075 fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
1077 vsrc = load8888 (src);
1078 vsrca = expand_alpha (vsrc);
1083 dstLine += dstStride;
1088 while (w && (unsigned long)dst & 7)
1091 __m64 vdest = expand565 (M64(d), 0);
1092 vdest = pack565(over(vsrc, vsrca, vdest), vdest, 0);
1093 *dst = UINT64(vdest);
1103 vdest = *(__m64 *)dst;
1105 vdest = pack565 (over(vsrc, vsrca, expand565(vdest, 0)), vdest, 0);
1106 vdest = pack565 (over(vsrc, vsrca, expand565(vdest, 1)), vdest, 1);
1107 vdest = pack565 (over(vsrc, vsrca, expand565(vdest, 2)), vdest, 2);
1108 vdest = pack565 (over(vsrc, vsrca, expand565(vdest, 3)), vdest, 3);
1110 *(__m64 *)dst = vdest;
1121 __m64 vdest = expand565 (M64(d), 0);
1122 vdest = pack565(over(vsrc, vsrca, vdest), vdest, 0);
1123 *dst = UINT64(vdest);
1134 fbCompositeSolidMask_nx8888x8888Cmmx (pixman_op_t op,
1135 pixman_image_t * pSrc,
1136 pixman_image_t * pMask,
1137 pixman_image_t * pDst,
1150 int dstStride, maskStride;
1155 fbComposeGetSolid(pSrc, src, pDst->bits.format);
1161 fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
1162 fbComposeGetStart (pMask, xMask, yMask, uint32_t, maskStride, maskLine, 1);
1164 vsrc = load8888(src);
1165 vsrca = expand_alpha(vsrc);
1170 uint32_t *p = (uint32_t *)maskLine;
1171 uint32_t *q = (uint32_t *)dstLine;
1173 while (twidth && (unsigned long)q & 7)
1175 uint32_t m = *(uint32_t *)p;
1179 __m64 vdest = load8888(*q);
1180 vdest = in_over(vsrc, vsrca, load8888(m), vdest);
1181 *q = store8888(vdest);
1198 __m64 vdest = *(__m64 *)q;
1200 dest0 = in_over(vsrc, vsrca, load8888(m0),
1201 expand8888 (vdest, 0));
1202 dest1 = in_over(vsrc, vsrca, load8888(m1),
1203 expand8888 (vdest, 1));
1205 *(__m64 *)q = pack8888(dest0, dest1);
1215 uint32_t m = *(uint32_t *)p;
1219 __m64 vdest = load8888(*q);
1220 vdest = in_over(vsrc, vsrca, load8888(m), vdest);
1221 *q = store8888(vdest);
1229 dstLine += dstStride;
1230 maskLine += maskStride;
1237 fbCompositeSrc_8888x8x8888mmx (pixman_op_t op,
1238 pixman_image_t * pSrc,
1239 pixman_image_t * pMask,
1240 pixman_image_t * pDst,
1250 uint32_t *dstLine, *dst;
1251 uint32_t *srcLine, *src;
1254 int dstStride, srcStride;
1260 fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
1261 fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
1263 fbComposeGetSolid (pMask, mask, pDst->bits.format);
1264 mask = mask | mask >> 8 | mask >> 16 | mask >> 24;
1265 vmask = load8888 (mask);
1271 dstLine += dstStride;
1273 srcLine += srcStride;
1276 while (w && (unsigned long)dst & 7)
1278 __m64 s = load8888 (*src);
1279 __m64 d = load8888 (*dst);
1281 *dst = store8888 (in_over (s, expand_alpha (s), vmask, d));
1290 __m64 vs = *(__m64 *)src;
1291 __m64 vd = *(__m64 *)dst;
1292 __m64 vsrc0 = expand8888 (vs, 0);
1293 __m64 vsrc1 = expand8888 (vs, 1);
1295 *(__m64 *)dst = pack8888 (
1296 in_over (vsrc0, expand_alpha (vsrc0), vmask, expand8888 (vd, 0)),
1297 in_over (vsrc1, expand_alpha (vsrc1), vmask, expand8888 (vd, 1)));
1306 __m64 s = load8888 (*src);
1307 __m64 d = load8888 (*dst);
1309 *dst = store8888 (in_over (s, expand_alpha (s), vmask, d));
1321 fbCompositeSrc_x888xnx8888mmx (pixman_op_t op,
1322 pixman_image_t * pSrc,
1323 pixman_image_t * pMask,
1324 pixman_image_t * pDst,
1334 uint32_t *dstLine, *dst;
1335 uint32_t *srcLine, *src;
1338 int dstStride, srcStride;
1344 fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
1345 fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
1346 fbComposeGetSolid (pMask, mask, pDst->bits.format);
1348 mask = mask | mask >> 8 | mask >> 16 | mask >> 24;
1349 vmask = load8888 (mask);
1355 dstLine += dstStride;
1357 srcLine += srcStride;
1360 while (w && (unsigned long)dst & 7)
1362 __m64 s = load8888 (*src | 0xff000000);
1363 __m64 d = load8888 (*dst);
1365 *dst = store8888 (in_over (s, srca, vmask, d));
1374 __m64 vd0 = *(__m64 *)(dst + 0);
1375 __m64 vd1 = *(__m64 *)(dst + 2);
1376 __m64 vd2 = *(__m64 *)(dst + 4);
1377 __m64 vd3 = *(__m64 *)(dst + 6);
1378 __m64 vd4 = *(__m64 *)(dst + 8);
1379 __m64 vd5 = *(__m64 *)(dst + 10);
1380 __m64 vd6 = *(__m64 *)(dst + 12);
1381 __m64 vd7 = *(__m64 *)(dst + 14);
1383 __m64 vs0 = *(__m64 *)(src + 0);
1384 __m64 vs1 = *(__m64 *)(src + 2);
1385 __m64 vs2 = *(__m64 *)(src + 4);
1386 __m64 vs3 = *(__m64 *)(src + 6);
1387 __m64 vs4 = *(__m64 *)(src + 8);
1388 __m64 vs5 = *(__m64 *)(src + 10);
1389 __m64 vs6 = *(__m64 *)(src + 12);
1390 __m64 vs7 = *(__m64 *)(src + 14);
1393 in_over (expandx888 (vs0, 0), srca, vmask, expand8888 (vd0, 0)),
1394 in_over (expandx888 (vs0, 1), srca, vmask, expand8888 (vd0, 1)));
1397 in_over (expandx888 (vs1, 0), srca, vmask, expand8888 (vd1, 0)),
1398 in_over (expandx888 (vs1, 1), srca, vmask, expand8888 (vd1, 1)));
1401 in_over (expandx888 (vs2, 0), srca, vmask, expand8888 (vd2, 0)),
1402 in_over (expandx888 (vs2, 1), srca, vmask, expand8888 (vd2, 1)));
1405 in_over (expandx888 (vs3, 0), srca, vmask, expand8888 (vd3, 0)),
1406 in_over (expandx888 (vs3, 1), srca, vmask, expand8888 (vd3, 1)));
1409 in_over (expandx888 (vs4, 0), srca, vmask, expand8888 (vd4, 0)),
1410 in_over (expandx888 (vs4, 1), srca, vmask, expand8888 (vd4, 1)));
1413 in_over (expandx888 (vs5, 0), srca, vmask, expand8888 (vd5, 0)),
1414 in_over (expandx888 (vs5, 1), srca, vmask, expand8888 (vd5, 1)));
1417 in_over (expandx888 (vs6, 0), srca, vmask, expand8888 (vd6, 0)),
1418 in_over (expandx888 (vs6, 1), srca, vmask, expand8888 (vd6, 1)));
1421 in_over (expandx888 (vs7, 0), srca, vmask, expand8888 (vd7, 0)),
1422 in_over (expandx888 (vs7, 1), srca, vmask, expand8888 (vd7, 1)));
1424 *(__m64 *)(dst + 0) = vd0;
1425 *(__m64 *)(dst + 2) = vd1;
1426 *(__m64 *)(dst + 4) = vd2;
1427 *(__m64 *)(dst + 6) = vd3;
1428 *(__m64 *)(dst + 8) = vd4;
1429 *(__m64 *)(dst + 10) = vd5;
1430 *(__m64 *)(dst + 12) = vd6;
1431 *(__m64 *)(dst + 14) = vd7;
1440 __m64 s = load8888 (*src | 0xff000000);
1441 __m64 d = load8888 (*dst);
1443 *dst = store8888 (in_over (s, srca, vmask, d));
1455 fbCompositeSrc_8888x8888mmx (pixman_op_t op,
1456 pixman_image_t * pSrc,
1457 pixman_image_t * pMask,
1458 pixman_image_t * pDst,
1468 uint32_t *dstLine, *dst;
1469 uint32_t *srcLine, *src;
1471 int dstStride, srcStride;
1477 fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
1478 fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
1483 dstLine += dstStride;
1485 srcLine += srcStride;
1497 sa = expand_alpha(ms);
1498 *dst = store8888(over(ms, sa, load8888(*dst)));
1507 fbCompositeSrc_8888x0565mmx (pixman_op_t op,
1508 pixman_image_t * pSrc,
1509 pixman_image_t * pMask,
1510 pixman_image_t * pDst,
1520 uint16_t *dstLine, *dst;
1521 uint32_t *srcLine, *src;
1522 int dstStride, srcStride;
1527 fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
1528 fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
1532 assert (pSrc->pDrawable == pMask->pDrawable);
1538 dstLine += dstStride;
1540 srcLine += srcStride;
1545 while (w && (unsigned long)dst & 7)
1547 __m64 vsrc = load8888 (*src);
1549 __m64 vdest = expand565 (M64(d), 0);
1551 vdest = pack565(over(vsrc, expand_alpha(vsrc), vdest), vdest, 0);
1553 *dst = UINT64(vdest);
1564 __m64 vsrc0, vsrc1, vsrc2, vsrc3;
1567 vsrc0 = load8888(*(src + 0));
1568 vsrc1 = load8888(*(src + 1));
1569 vsrc2 = load8888(*(src + 2));
1570 vsrc3 = load8888(*(src + 3));
1572 vdest = *(__m64 *)dst;
1574 vdest = pack565(over(vsrc0, expand_alpha(vsrc0), expand565(vdest, 0)), vdest, 0);
1575 vdest = pack565(over(vsrc1, expand_alpha(vsrc1), expand565(vdest, 1)), vdest, 1);
1576 vdest = pack565(over(vsrc2, expand_alpha(vsrc2), expand565(vdest, 2)), vdest, 2);
1577 vdest = pack565(over(vsrc3, expand_alpha(vsrc3), expand565(vdest, 3)), vdest, 3);
1579 *(__m64 *)dst = vdest;
1590 __m64 vsrc = load8888 (*src);
1592 __m64 vdest = expand565 (M64(d), 0);
1594 vdest = pack565(over(vsrc, expand_alpha(vsrc), vdest), vdest, 0);
1596 *dst = UINT64(vdest);
1608 fbCompositeSolidMask_nx8x8888mmx (pixman_op_t op,
1609 pixman_image_t * pSrc,
1610 pixman_image_t * pMask,
1611 pixman_image_t * pDst,
1622 uint32_t *dstLine, *dst;
1623 uint8_t *maskLine, *mask;
1624 int dstStride, maskStride;
1631 fbComposeGetSolid(pSrc, src, pDst->bits.format);
1637 srcsrc = (uint64_t)src << 32 | src;
1639 fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
1640 fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
1642 vsrc = load8888 (src);
1643 vsrca = expand_alpha (vsrc);
1648 dstLine += dstStride;
1650 maskLine += maskStride;
1655 while (w && (unsigned long)dst & 7)
1661 __m64 vdest = in_over(vsrc, vsrca, expand_alpha_rev (M64(m)), load8888(*dst));
1662 *dst = store8888(vdest);
1678 if (srca == 0xff && (m0 & m1) == 0xff)
1680 *(uint64_t *)dst = srcsrc;
1687 vdest = *(__m64 *)dst;
1689 dest0 = in_over(vsrc, vsrca, expand_alpha_rev (M64(m0)), expand8888(vdest, 0));
1690 dest1 = in_over(vsrc, vsrca, expand_alpha_rev (M64(m1)), expand8888(vdest, 1));
1692 *(__m64 *)dst = pack8888(dest0, dest1);
1708 __m64 vdest = load8888(*dst);
1709 vdest = in_over(vsrc, vsrca, expand_alpha_rev (M64(m)), vdest);
1710 *dst = store8888(vdest);
1723 pixman_fill_mmx (uint32_t *bits,
1734 uint32_t byte_width;
1737 __m64 v1, v2, v3, v4, v5, v6, v7;
1740 if (bpp != 16 && bpp != 32 && bpp != 8)
1743 if (bpp == 16 && (xor >> 16 != (xor & 0xffff)))
1747 ((xor >> 16 != (xor & 0xffff)) ||
1748 (xor >> 24 != (xor & 0x00ff) >> 16)))
1755 stride = stride * (int) sizeof (uint32_t) / 1;
1756 byte_line = (uint8_t *)(((uint8_t *)bits) + stride * y + x);
1762 stride = stride * (int) sizeof (uint32_t) / 2;
1763 byte_line = (uint8_t *)(((uint16_t *)bits) + stride * y + x);
1764 byte_width = 2 * width;
1769 stride = stride * (int) sizeof (uint32_t) / 4;
1770 byte_line = (uint8_t *)(((uint32_t *)bits) + stride * y + x);
1771 byte_width = 4 * width;
1775 fill = ((uint64_t)xor << 32) | xor;
1787 : "=y" (v1), "=y" (v2), "=y" (v3),
1788 "=y" (v4), "=y" (v5), "=y" (v6), "=y" (v7)
1795 uint8_t *d = byte_line;
1796 byte_line += stride;
1799 while (w >= 1 && ((unsigned long)d & 1))
1801 *(uint8_t *)d = (xor & 0xff);
1806 while (w >= 2 && ((unsigned long)d & 3))
1808 *(uint16_t *)d = xor;
1813 while (w >= 4 && ((unsigned long)d & 7))
1815 *(uint32_t *)d = xor;
1835 "y" (vfill), "y" (v1), "y" (v2), "y" (v3),
1836 "y" (v4), "y" (v5), "y" (v6), "y" (v7)
1839 *(__m64*) (d + 0) = vfill;
1840 *(__m64*) (d + 8) = vfill;
1841 *(__m64*) (d + 16) = vfill;
1842 *(__m64*) (d + 24) = vfill;
1843 *(__m64*) (d + 32) = vfill;
1844 *(__m64*) (d + 40) = vfill;
1845 *(__m64*) (d + 48) = vfill;
1846 *(__m64*) (d + 56) = vfill;
1854 *(uint32_t *)d = xor;
1861 *(uint16_t *)d = xor;
1867 *(uint8_t *)d = (xor & 0xff);
1879 fbCompositeSolidMaskSrc_nx8x8888mmx (pixman_op_t op,
1880 pixman_image_t * pSrc,
1881 pixman_image_t * pMask,
1882 pixman_image_t * pDst,
1893 uint32_t *dstLine, *dst;
1894 uint8_t *maskLine, *mask;
1895 int dstStride, maskStride;
1902 fbComposeGetSolid(pSrc, src, pDst->bits.format);
1907 pixman_fill_mmx (pDst->bits.bits, pDst->bits.rowstride, PIXMAN_FORMAT_BPP (pDst->bits.format),
1908 xDst, yDst, width, height, 0);
1912 srcsrc = (uint64_t)src << 32 | src;
1914 fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
1915 fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
1917 vsrc = load8888 (src);
1918 vsrca = expand_alpha (vsrc);
1923 dstLine += dstStride;
1925 maskLine += maskStride;
1930 while (w && (unsigned long)dst & 7)
1936 __m64 vdest = in(vsrc, expand_alpha_rev (M64(m)));
1937 *dst = store8888(vdest);
1957 if (srca == 0xff && (m0 & m1) == 0xff)
1959 *(uint64_t *)dst = srcsrc;
1966 vdest = *(__m64 *)dst;
1968 dest0 = in(vsrc, expand_alpha_rev (M64(m0)));
1969 dest1 = in(vsrc, expand_alpha_rev (M64(m1)));
1971 *(__m64 *)dst = pack8888(dest0, dest1);
1975 *(uint64_t *)dst = 0;
1991 __m64 vdest = load8888(*dst);
1992 vdest = in(vsrc, expand_alpha_rev (M64(m)));
1993 *dst = store8888(vdest);
/*
 * Composite a solid (n) source through an a8 mask onto an r5g6b5 (0565)
 * destination.  (Some original lines are not visible in this chunk.)
 */
2010 fbCompositeSolidMask_nx8x0565mmx (pixman_op_t op,
2011 pixman_image_t * pSrc,
2012 pixman_image_t * pMask,
2013 pixman_image_t * pDst,
2024 uint16_t *dstLine, *dst;
2025 uint8_t *maskLine, *mask;
2026 int dstStride, maskStride;
2028 __m64 vsrc, vsrca, tmp;
2029 uint64_t srcsrcsrcsrc, src16;
2033 fbComposeGetSolid(pSrc, src, pDst->bits.format);
2039 fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
2040 fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
/* Expand the solid colour once, outside the loops. */
2042 vsrc = load8888 (src);
2043 vsrca = expand_alpha (vsrc);
/* Pre-pack the source to 565 and replicate it into all four 16-bit
 * lanes of a 64-bit word for the fully-opaque fast path below. */
2045 tmp = pack565(vsrc, _mm_setzero_si64(), 0);
2046 src16 = UINT64(tmp);
2048 srcsrcsrcsrc = (uint64_t)src16 << 48 | (uint64_t)src16 << 32 |
2049 (uint64_t)src16 << 16 | (uint64_t)src16;
2054 dstLine += dstStride;
2056 maskLine += maskStride;
/* Head loop: process single pixels until dst is 8-byte aligned. */
2061 while (w && (unsigned long)dst & 7)
2069 __m64 vdest = in_over(vsrc, vsrca, expand_alpha_rev (M64 (m)), expand565(vd, 0));
2070 vd = pack565(vdest, _mm_setzero_si64(), 0);
/* Main loop: four 565 pixels (one 64-bit word) at a time. */
2083 uint64_t m0, m1, m2, m3;
/* All four mask bytes opaque + opaque source: plain store. */
2089 if (srca == 0xff && (m0 & m1 & m2 & m3) == 0xff)
2091 *(uint64_t *)dst = srcsrcsrcsrc;
/* At least one mask byte non-zero: blend each pixel separately. */
2093 else if (m0 | m1 | m2 | m3)
2096 __m64 vm0, vm1, vm2, vm3;
2098 vdest = *(__m64 *)dst;
2101 vdest = pack565(in_over(vsrc, vsrca, expand_alpha_rev(vm0), expand565(vdest, 0)), vdest, 0);
2103 vdest = pack565(in_over(vsrc, vsrca, expand_alpha_rev(vm1), expand565(vdest, 1)), vdest, 1);
2105 vdest = pack565(in_over(vsrc, vsrca, expand_alpha_rev(vm2), expand565(vdest, 2)), vdest, 2);
2107 vdest = pack565(in_over(vsrc, vsrca, expand_alpha_rev(vm3), expand565(vdest, 3)), vdest, 3);
2109 *(__m64 *)dst = vdest;
/* Tail loop: remaining (< 4) pixels, one at a time. */
2127 __m64 vdest = in_over(vsrc, vsrca, expand_alpha_rev (M64(m)), expand565(vd, 0));
2128 vd = pack565(vdest, _mm_setzero_si64(), 0);
/*
 * Composite a non-premultiplied reversed-channel-order (ABGR, see the
 * "8888RevNP" note below) a8b8g8r8 source over an r5g6b5 destination.
 * (Some original lines are not visible in this chunk.)
 */
2142 fbCompositeSrc_8888RevNPx0565mmx (pixman_op_t op,
2143 pixman_image_t * pSrc,
2144 pixman_image_t * pMask,
2145 pixman_image_t * pDst,
2155 uint16_t *dstLine, *dst;
2156 uint32_t *srcLine, *src;
2157 int dstStride, srcStride;
2162 fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
2163 fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
/* This path only handles the case where the mask aliases the source
 * drawable (component-alpha-free "unified" src/mask). */
2167 assert (pSrc->pDrawable == pMask->pDrawable);
2173 dstLine += dstStride;
2175 srcLine += srcStride;
/* Head loop: single pixels until dst is 8-byte aligned. */
2180 while (w && (unsigned long)dst & 7)
2182 __m64 vsrc = load8888 (*src);
2184 __m64 vdest = expand565 (M64(d), 0);
2186 vdest = pack565(over_rev_non_pre(vsrc, vdest), vdest, 0);
2188 *dst = UINT64(vdest);
/* Main loop: four pixels per iteration. */
2199 uint32_t s0, s1, s2, s3;
2200 unsigned char a0, a1, a2, a3;
/* All four source pixels opaque: convert channel order and store,
 * no read of the destination needed. */
2212 if ((a0 & a1 & a2 & a3) == 0xFF)
2215 vdest = pack565(invert_colors(load8888(s0)), _mm_setzero_si64(), 0);
2216 vdest = pack565(invert_colors(load8888(s1)), vdest, 1);
2217 vdest = pack565(invert_colors(load8888(s2)), vdest, 2);
2218 vdest = pack565(invert_colors(load8888(s3)), vdest, 3);
2220 *(__m64 *)dst = vdest;
/* At least one pixel not fully transparent: full over blend. */
2222 else if (a0 | a1 | a2 | a3)
2224 __m64 vdest = *(__m64 *)dst;
2226 vdest = pack565(over_rev_non_pre(load8888(s0), expand565(vdest, 0)), vdest, 0);
2227 vdest = pack565(over_rev_non_pre(load8888(s1), expand565(vdest, 1)), vdest, 1);
2228 vdest = pack565(over_rev_non_pre(load8888(s2), expand565(vdest, 2)), vdest, 2);
2229 vdest = pack565(over_rev_non_pre(load8888(s3), expand565(vdest, 3)), vdest, 3);
2231 *(__m64 *)dst = vdest;
/* Tail loop: remaining pixels, one at a time. */
2243 __m64 vsrc = load8888 (*src);
2245 __m64 vdest = expand565 (M64(d), 0);
2247 vdest = pack565(over_rev_non_pre(vsrc, vdest), vdest, 0);
2249 *dst = UINT64(vdest);
2260 /* "8888RevNP" is GdkPixbuf's format: ABGR, non premultiplied */
/*
 * Composite a non-premultiplied ABGR ("8888RevNP", GdkPixbuf order)
 * source over an a8r8g8b8 destination.
 * (Some original lines are not visible in this chunk.)
 */
2263 fbCompositeSrc_8888RevNPx8888mmx (pixman_op_t op,
2264 pixman_image_t * pSrc,
2265 pixman_image_t * pMask,
2266 pixman_image_t * pDst,
2276 uint32_t *dstLine, *dst;
2277 uint32_t *srcLine, *src;
2278 int dstStride, srcStride;
2283 fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
2284 fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
/* Only the unified src/mask case is handled here. */
2288 assert (pSrc->pDrawable == pMask->pDrawable);
2294 dstLine += dstStride;
2296 srcLine += srcStride;
/* Head loop: single pixels until dst is 8-byte aligned. */
2299 while (w && (unsigned long)dst & 7)
2301 __m64 s = load8888 (*src);
2302 __m64 d = load8888 (*dst);
2304 *dst = store8888 (over_rev_non_pre (s, d));
/* Main loop: two 32-bit pixels (one 64-bit word) per iteration. */
2314 unsigned char a0, a1;
/* Both source pixels opaque: just swap channel order, skip the blend. */
2323 if ((a0 & a1) == 0xFF)
2325 d0 = invert_colors(load8888(s0));
2326 d1 = invert_colors(load8888(s1));
2328 *(__m64 *)dst = pack8888 (d0, d1);
2332 __m64 vdest = *(__m64 *)dst;
2334 d0 = over_rev_non_pre (load8888(s0), expand8888 (vdest, 0));
2335 d1 = over_rev_non_pre (load8888(s1), expand8888 (vdest, 1));
2337 *(__m64 *)dst = pack8888 (d0, d1);
/* Tail loop: remaining odd pixel. */
2347 __m64 s = load8888 (*src);
2348 __m64 d = load8888 (*dst);
2350 *dst = store8888 (over_rev_non_pre (s, d));
/*
 * Composite a solid source through an a8r8g8b8 component-alpha ("C")
 * mask onto an r5g6b5 destination.
 * (Some original lines are not visible in this chunk.)
 */
2362 fbCompositeSolidMask_nx8888x0565Cmmx (pixman_op_t op,
2363 pixman_image_t * pSrc,
2364 pixman_image_t * pMask,
2365 pixman_image_t * pDst,
2378 int dstStride, maskStride;
2383 fbComposeGetSolid(pSrc, src, pDst->bits.format);
2389 fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
2390 fbComposeGetStart (pMask, xMask, yMask, uint32_t, maskStride, maskLine, 1);
/* Expand the solid colour once, before the per-scanline loops. */
2392 vsrc = load8888 (src);
2393 vsrca = expand_alpha (vsrc);
2398 uint32_t *p = (uint32_t *)maskLine;
2399 uint16_t *q = (uint16_t *)dstLine;
/* Head loop: single pixels until the destination q is 8-byte aligned. */
2401 while (twidth && ((unsigned long)q & 7))
2403 uint32_t m = *(uint32_t *)p;
2408 __m64 vdest = expand565 (M64(d), 0);
2409 vdest = pack565 (in_over (vsrc, vsrca, load8888 (m), vdest), vdest, 0);
/* Main loop: four 565 pixels / four 32-bit mask words per iteration. */
2420 uint32_t m0, m1, m2, m3;
/* Skip the whole word when all four mask pixels are zero. */
2427 if ((m0 | m1 | m2 | m3))
2429 __m64 vdest = *(__m64 *)q;
2431 vdest = pack565(in_over(vsrc, vsrca, load8888(m0), expand565(vdest, 0)), vdest, 0);
2432 vdest = pack565(in_over(vsrc, vsrca, load8888(m1), expand565(vdest, 1)), vdest, 1);
2433 vdest = pack565(in_over(vsrc, vsrca, load8888(m2), expand565(vdest, 2)), vdest, 2);
2434 vdest = pack565(in_over(vsrc, vsrca, load8888(m3), expand565(vdest, 3)), vdest, 3);
2436 *(__m64 *)q = vdest;
/* Tail loop: remaining (< 4) pixels. */
2451 __m64 vdest = expand565(M64(d), 0);
2452 vdest = pack565 (in_over(vsrc, vsrca, load8888(m), vdest), vdest, 0);
/* Advance both images one scanline. */
2461 maskLine += maskStride;
2462 dstLine += dstStride;
/*
 * IN operator: solid source, a8 mask, a8 destination
 * (dst = src.alpha * mask * dst).
 * (Some original lines are not visible in this chunk.)
 */
2469 fbCompositeIn_nx8x8mmx (pixman_op_t op,
2470 pixman_image_t * pSrc,
2471 pixman_image_t * pMask,
2472 pixman_image_t * pDst,
2482 uint8_t *dstLine, *dst;
2483 uint8_t *maskLine, *mask;
2484 int dstStride, maskStride;
2490 fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
2491 fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
2493 fbComposeGetSolid(pSrc, src, pDst->bits.format);
2499 vsrc = load8888(src);
2500 vsrca = expand_alpha(vsrc);
2505 dstLine += dstStride;
2507 maskLine += maskStride;
/* NOTE(review): this tests the alignment of the pixman_image_t
 * pointers pDst/pSrc, but the 4-byte accesses below go through the
 * dst/mask *data* pointers.  It looks like the test should be on
 * dst and mask instead — confirm against upstream pixman. */
2510 if ((((unsigned long)pDst & 3) == 0) &&
2511 (((unsigned long)pSrc & 3) == 0))
/* Vector path: four a8 pixels packed into one 32-bit load/store. */
2521 vmask = load8888 (*(uint32_t *)mask);
2522 vdest = load8888 (*(uint32_t *)dst);
2524 *(uint32_t *)dst = store8888 (in (in (vsrca, vmask), vdest));
/* Scalar fallback: per-byte multiply via FbInU. */
2542 m = FbInU (sa, 0, a, tmp);
2543 r = FbInU (m, 0, d, tmp);
/*
 * IN operator: a8 source, no mask, a8 destination (dst = src * dst).
 * (Some original lines are not visible in this chunk.)
 */
2553 fbCompositeIn_8x8mmx (pixman_op_t op,
2554 pixman_image_t * pSrc,
2555 pixman_image_t * pMask,
2556 pixman_image_t * pDst,
2566 uint8_t *dstLine, *dst;
2567 uint8_t *srcLine, *src;
2568 int srcStride, dstStride;
2571 fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
2572 fbComposeGetStart (pSrc, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
2577 dstLine += dstStride;
2579 srcLine += srcStride;
/* NOTE(review): alignment is tested on the image struct pointers
 * pDst/pSrc, yet the 32-bit accesses below use the src/dst data
 * pointers.  Presumably the data pointers were intended — verify. */
2582 if ((((unsigned long)pDst & 3) == 0) &&
2583 (((unsigned long)pSrc & 3) == 0))
/* Vector path: four a8 pixels at once through a 32-bit word. */
2587 uint32_t *s = (uint32_t *)src;
2588 uint32_t *d = (uint32_t *)dst;
2590 *d = store8888 (in (load8888 (*s), load8888 (*d)));
/* Scalar fallback. */
2606 *dst = FbInU (s, 0, d, tmp);
/*
 * ADD operator: solid source through an a8 mask onto an a8 destination
 * (dst = saturate(dst + src.alpha * mask)).
 * (Some original lines are not visible in this chunk.)
 */
2617 fbCompositeSrcAdd_8888x8x8mmx (pixman_op_t op,
2618 pixman_image_t * pSrc,
2619 pixman_image_t * pMask,
2620 pixman_image_t * pDst,
2630 uint8_t *dstLine, *dst;
2631 uint8_t *maskLine, *mask;
2632 int dstStride, maskStride;
2638 fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
2639 fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
2641 fbComposeGetSolid(pSrc, src, pDst->bits.format);
2647 vsrc = load8888(src);
2648 vsrca = expand_alpha(vsrc);
2653 dstLine += dstStride;
2655 maskLine += maskStride;
/* NOTE(review): same pattern as the IN paths above — alignment is
 * tested on the pMask/pDst struct pointers, while the 32-bit loads
 * below dereference the mask/dst data pointers.  Confirm. */
2658 if ((((unsigned long)pMask & 3) == 0) &&
2659 (((unsigned long)pDst & 3) == 0))
/* Vector path: saturating byte-wise add of four pixels at once. */
2663 __m64 vmask = load8888 (*(uint32_t *)mask);
2664 __m64 vdest = load8888 (*(uint32_t *)dst);
2666 *(uint32_t *)dst = store8888 (_mm_adds_pu8 (in (vsrca, vmask), vdest));
/* Scalar fallback: modulate then saturating add. */
2684 m = FbInU (sa, 0, a, tmp);
2685 r = FbAdd (m, d, 0, tmp);
/*
 * ADD operator: a8 source onto a8 destination
 * (dst = saturate(dst + src)).
 * (Some original lines are not visible in this chunk.)
 */
2695 fbCompositeSrcAdd_8000x8000mmx (pixman_op_t op,
2696 pixman_image_t * pSrc,
2697 pixman_image_t * pMask,
2698 pixman_image_t * pDst,
2708 uint8_t *dstLine, *dst;
2709 uint8_t *srcLine, *src;
2710 int dstStride, srcStride;
2717 fbComposeGetStart (pSrc, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
2718 fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
2723 dstLine += dstStride;
2725 srcLine += srcStride;
/* Head loop: scalar pixels until dst is 8-byte aligned. */
2728 while (w && (unsigned long)dst & 7)
/* Branch-free 8-bit saturation: if the 16-bit sum t overflowed a
 * byte, (t >> 8) is 1 and (0 - 1) ORs in 0xff. */
2733 s = t | (0 - (t >> 8));
/* Main loop: eight a8 pixels per saturating 64-bit MMX add. */
2743 *(__m64*)dst = _mm_adds_pu8(*(__m64*)src, *(__m64*)dst);
/* Tail loop: same branch-free saturation as the head loop. */
2754 s = t | (0 - (t >> 8));
/*
 * ADD operator: a8r8g8b8 source onto a8r8g8b8 destination
 * (per-channel saturating add).
 * (Some original lines are not visible in this chunk.)
 */
2767 fbCompositeSrcAdd_8888x8888mmx (pixman_op_t op,
2768 pixman_image_t * pSrc,
2769 pixman_image_t * pMask,
2770 pixman_image_t * pDst,
2781 uint32_t *dstLine, *dst;
2782 uint32_t *srcLine, *src;
2783 int dstStride, srcStride;
2788 fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
2789 fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
2794 dstLine += dstStride;
2796 srcLine += srcStride;
/* Head loop: one 32-bit pixel at a time until dst is 8-byte aligned. */
2799 while (w && (unsigned long)dst & 7)
2801 *dst = _mm_cvtsi64_si32(_mm_adds_pu8(_mm_cvtsi32_si64(*src),
2802 _mm_cvtsi32_si64(*dst)));
/* Main loop: two pixels per 64-bit saturating add. */
2810 dst64 = _mm_adds_pu8(*(__m64*)src, *(__m64*)dst);
2811 *(uint64_t*)dst = UINT64(dst64);
/* Tail: remaining odd pixel. */
2819 *dst = _mm_cvtsi64_si32(_mm_adds_pu8(_mm_cvtsi32_si64(*src),
2820 _mm_cvtsi32_si64(*dst)));
/*
 * Straight rectangular copy (blt) accelerated with MMX.  Handles only
 * equal source/destination bpp of 16 or 32; returns failure otherwise
 * so the caller can fall back.  Strides arrive in uint32_t units and
 * are converted to per-format pixel units below.
 * (Some original lines are not visible in this chunk.)
 */
2829 pixman_blt_mmx (uint32_t *src_bits,
2835 int src_x, int src_y,
2836 int dst_x, int dst_y,
2837 int width, int height)
2839 uint8_t * src_bytes;
2840 uint8_t * dst_bytes;
/* Format conversion between different bpp is not supported here. */
2843 if (src_bpp != dst_bpp)
/* 16 bpp: strides converted from uint32_t units to uint16_t units. */
2848 src_stride = src_stride * (int) sizeof (uint32_t) / 2;
2849 dst_stride = dst_stride * (int) sizeof (uint32_t) / 2;
2850 src_bytes = (uint8_t *)(((uint16_t *)src_bits) + src_stride * (src_y) + (src_x));
2851 dst_bytes = (uint8_t *)(((uint16_t *)dst_bits) + dst_stride * (dst_y) + (dst_x));
2852 byte_width = 2 * width;
/* 32 bpp: strides converted to uint32_t units. */
2855 } else if (src_bpp == 32) {
2856 src_stride = src_stride * (int) sizeof (uint32_t) / 4;
2857 dst_stride = dst_stride * (int) sizeof (uint32_t) / 4;
2858 src_bytes = (uint8_t *)(((uint32_t *)src_bits) + src_stride * (src_y) + (src_x));
2859 dst_bytes = (uint8_t *)(((uint32_t *)dst_bits) + dst_stride * (dst_y) + (dst_x));
2860 byte_width = 4 * width;
/* Per-scanline copy. */
2870 uint8_t *s = src_bytes;
2871 uint8_t *d = dst_bytes;
2872 src_bytes += src_stride;
2873 dst_bytes += dst_stride;
/* Align the destination: 16-bit copies up to 4-byte alignment... */
2876 while (w >= 2 && ((unsigned long)d & 3))
2878 *(uint16_t *)d = *(uint16_t *)s;
/* ...then 32-bit copies up to 8-byte alignment. */
2884 while (w >= 4 && ((unsigned long)d & 7))
2886 *(uint32_t *)d = *(uint32_t *)s;
/* Bulk path: copy 64 bytes per iteration through the eight MMX
 * registers.  Inline asm is used where supported; note the source is
 * operand %1 and the destination %0. */
2895 #if defined (__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))
2897 "movq (%1), %%mm0\n"
2898 "movq 8(%1), %%mm1\n"
2899 "movq 16(%1), %%mm2\n"
2900 "movq 24(%1), %%mm3\n"
2901 "movq 32(%1), %%mm4\n"
2902 "movq 40(%1), %%mm5\n"
2903 "movq 48(%1), %%mm6\n"
2904 "movq 56(%1), %%mm7\n"
2906 "movq %%mm0, (%0)\n"
2907 "movq %%mm1, 8(%0)\n"
2908 "movq %%mm2, 16(%0)\n"
2909 "movq %%mm3, 24(%0)\n"
2910 "movq %%mm4, 32(%0)\n"
2911 "movq %%mm5, 40(%0)\n"
2912 "movq %%mm6, 48(%0)\n"
2913 "movq %%mm7, 56(%0)\n"
/* All eight MMX registers are clobbered by the asm above. */
2917 "%mm0", "%mm1", "%mm2", "%mm3",
2918 "%mm4", "%mm5", "%mm6", "%mm7");
/* Intrinsics fallback for compilers without the asm path. */
2920 __m64 v0 = *(__m64 *)(s + 0);
2921 __m64 v1 = *(__m64 *)(s + 8);
2922 __m64 v2 = *(__m64 *)(s + 16);
2923 __m64 v3 = *(__m64 *)(s + 24);
2924 __m64 v4 = *(__m64 *)(s + 32);
2925 __m64 v5 = *(__m64 *)(s + 40);
2926 __m64 v6 = *(__m64 *)(s + 48);
2927 __m64 v7 = *(__m64 *)(s + 56);
2928 *(__m64 *)(d + 0) = v0;
2929 *(__m64 *)(d + 8) = v1;
2930 *(__m64 *)(d + 16) = v2;
2931 *(__m64 *)(d + 24) = v3;
2932 *(__m64 *)(d + 32) = v4;
2933 *(__m64 *)(d + 40) = v5;
2934 *(__m64 *)(d + 48) = v6;
2935 *(__m64 *)(d + 56) = v7;
/* Tail: finish with 32-bit then 16-bit copies. */
2944 *(uint32_t *)d = *(uint32_t *)s;
2952 *(uint16_t *)d = *(uint16_t *)s;
/*
 * SRC-operator copy of a rectangle: thin wrapper that forwards the
 * image bits, strides, and bpp of both images to pixman_blt_mmx.
 * (Some original lines are not visible in this chunk.)
 */
2965 fbCompositeCopyAreammx (pixman_op_t op,
2966 pixman_image_t * pSrc,
2967 pixman_image_t * pMask,
2968 pixman_image_t * pDst,
2978 pixman_blt_mmx (pSrc->bits.bits,
2980 pSrc->bits.rowstride,
2981 pDst->bits.rowstride,
2982 PIXMAN_FORMAT_BPP (pSrc->bits.format),
2983 PIXMAN_FORMAT_BPP (pDst->bits.format),
2984 xSrc, ySrc, xDst, yDst, width, height);
/*
 * OVER operator: x8r8g8b8 source (alpha channel ignored, treated as
 * opaque) through an a8 mask onto an a8r8g8b8 destination.
 * (Some original lines are not visible in this chunk.)
 */
2988 fbCompositeOver_x888x8x8888mmx (pixman_op_t op,
2989 pixman_image_t * pSrc,
2990 pixman_image_t * pMask,
2991 pixman_image_t * pDst,
3001 uint32_t *src, *srcLine;
3002 uint32_t *dst, *dstLine;
3003 uint8_t *mask, *maskLine;
3004 int srcStride, maskStride, dstStride;
3007 fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
3008 fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
3009 fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
3014 srcLine += srcStride;
3016 dstLine += dstStride;
3018 maskLine += maskStride;
/* Force the (undefined) x-channel to opaque alpha before blending. */
3028 __m64 s = load8888 (*src | 0xff000000);
/* Fully-opaque mask byte: straight store of the (opaque) source. */
3031 *dst = store8888 (s);
/* Partial mask: full in_over blend with the expanded mask alpha. */
3034 __m64 sa = expand_alpha (s);
3035 __m64 vm = expand_alpha_rev (M64(m));
3036 __m64 vdest = in_over(s, sa, vm, load8888 (*dst));
3038 *dst = store8888 (vdest);
3053 #endif /* USE_MMX */