1 /*
2  * Copyright © 2008 Rodrigo Kumpera
3  * Copyright © 2008 André Tupinambá
4  *
5  * Permission to use, copy, modify, distribute, and sell this software and its
6  * documentation for any purpose is hereby granted without fee, provided that
7  * the above copyright notice appear in all copies and that both that
8  * copyright notice and this permission notice appear in supporting
9  * documentation, and that the name of Red Hat not be used in advertising or
10  * publicity pertaining to distribution of the software without specific,
11  * written prior permission.  Red Hat makes no representations about the
12  * suitability of this software for any purpose.  It is provided "as is"
13  * without express or implied warranty.
14  *
15  * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
16  * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
17  * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
18  * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
19  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
20  * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
21  * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
22  * SOFTWARE.
23  *
24  * Author:  Rodrigo Kumpera (kumpera@gmail.com)
25  *          André Tupinambá (andrelrt@gmail.com)
26  *
27  * Based on work by Owen Taylor and Søren Sandmann
28  */
29 #ifdef HAVE_CONFIG_H
30 #include <config.h>
31 #endif
32
33 #include <mmintrin.h>
34 #include <xmmintrin.h> /* for _mm_shuffle_pi16 and _MM_SHUFFLE */
35 #include <emmintrin.h> /* for SSE2 intrinsics */
36 #include "pixman-private.h"
37 #include "pixman-combine32.h"
38
39 #ifdef USE_SSE2
40
41 /* --------------------------------------------------------------------
42  * Locals
43  */
44
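/* The mask constants below are filled in when the SSE2 fast paths are
 * initialized.  The numeric names give the value each 16-bit lane is
 * expected to hold, e.g. mask_0080 holds 0x0080 and mask_0101 holds
 * 0x0101 in every lane; together they implement the rounded division
 * by 255 used throughout this file.
 */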
45 static __m64 mask_x0080;
46 static __m64 mask_x00ff;
47 static __m64 mask_x0101;
48 static __m64 mask_x_alpha;
49
50 static __m64 mask_x565_rgb;
51 static __m64 mask_x565_unpack;
52
53 static __m128i mask_0080;
54 static __m128i mask_00ff;
55 static __m128i mask_0101;
56 static __m128i mask_ffff;
57 static __m128i mask_ff000000;
58 static __m128i mask_alpha;
59
60 static __m128i mask_565_r;
61 static __m128i mask_565_g1, mask_565_g2;
62 static __m128i mask_565_b;
63 static __m128i mask_red;
64 static __m128i mask_green;
65 static __m128i mask_blue;
66
67 static __m128i mask_565_fix_rb;
68 static __m128i mask_565_fix_g;
69
70 /* ----------------------------------------------------------------------
71  * SSE2 Inlines
72  */
73 static force_inline __m128i
74 unpack_32_1x128 (uint32_t data)
75 {
76     return _mm_unpacklo_epi8 (_mm_cvtsi32_si128 (data), _mm_setzero_si128 ());
77 }
78
79 static force_inline void
80 unpack_128_2x128 (__m128i data, __m128i* data_lo, __m128i* data_hi)
81 {
82     *data_lo = _mm_unpacklo_epi8 (data, _mm_setzero_si128 ());
83     *data_hi = _mm_unpackhi_epi8 (data, _mm_setzero_si128 ());
84 }
85
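/* Expand four 565 pixels, one per 32-bit lane, to 8888.  Each channel is
 * shifted into its 8888 position and its top bits are then OR-ed back
 * into the freed low bits (the mask_565_fix_* steps), so a maximal 5- or
 * 6-bit channel expands to a full 0xff rather than 0xf8/0xfc.
 */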
86 static force_inline __m128i
87 unpack_565_to_8888 (__m128i lo)
88 {
89     __m128i r, g, b, rb, t;
90
91     r = _mm_and_si128 (_mm_slli_epi32 (lo, 8), mask_red);
92     g = _mm_and_si128 (_mm_slli_epi32 (lo, 5), mask_green);
93     b = _mm_and_si128 (_mm_slli_epi32 (lo, 3), mask_blue);
94
95     rb = _mm_or_si128 (r, b);
96     t  = _mm_and_si128 (rb, mask_565_fix_rb);
97     t  = _mm_srli_epi32 (t, 5);
98     rb = _mm_or_si128 (rb, t);
99
100     t  = _mm_and_si128 (g, mask_565_fix_g);
101     t  = _mm_srli_epi32 (t, 6);
102     g  = _mm_or_si128 (g, t);
103
104     return _mm_or_si128 (rb, g);
105 }
106
107 static force_inline void
108 unpack_565_128_4x128 (__m128i  data,
109                       __m128i* data0,
110                       __m128i* data1,
111                       __m128i* data2,
112                       __m128i* data3)
113 {
114     __m128i lo, hi;
115
116     lo = _mm_unpacklo_epi16 (data, _mm_setzero_si128 ());
117     hi = _mm_unpackhi_epi16 (data, _mm_setzero_si128 ());
118
119     lo = unpack_565_to_8888 (lo);
120     hi = unpack_565_to_8888 (hi);
121
122     unpack_128_2x128 (lo, data0, data1);
123     unpack_128_2x128 (hi, data2, data3);
124 }
125
126 static force_inline uint16_t
127 pack_565_32_16 (uint32_t pixel)
128 {
129     return (uint16_t) (((pixel >> 8) & 0xf800) |
130                        ((pixel >> 5) & 0x07e0) |
131                        ((pixel >> 3) & 0x001f));
132 }
133
134 static force_inline __m128i
135 pack_2x128_128 (__m128i lo, __m128i hi)
136 {
137     return _mm_packus_epi16 (lo, hi);
138 }
139
140 static force_inline __m128i
141 pack_565_2x128_128 (__m128i lo, __m128i hi)
142 {
143     __m128i data;
144     __m128i r, g1, g2, b;
145
146     data = pack_2x128_128 (lo, hi);
147
148     r  = _mm_and_si128 (data, mask_565_r);
149     g1 = _mm_and_si128 (_mm_slli_epi32 (data, 3), mask_565_g1);
150     g2 = _mm_and_si128 (_mm_srli_epi32 (data, 5), mask_565_g2);
151     b  = _mm_and_si128 (_mm_srli_epi32 (data, 3), mask_565_b);
152
153     return _mm_or_si128 (_mm_or_si128 (_mm_or_si128 (r, g1), g2), b);
154 }
155
156 static force_inline __m128i
157 pack_565_4x128_128 (__m128i* xmm0, __m128i* xmm1, __m128i* xmm2, __m128i* xmm3)
158 {
159     return _mm_packus_epi16 (pack_565_2x128_128 (*xmm0, *xmm1),
160                              pack_565_2x128_128 (*xmm2, *xmm3));
161 }
162
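/* The tests below look at four packed ARGB pixels at a time.
 * _mm_movemask_epi8 yields one bit per byte; masking with 0x8888 keeps
 * bits 3, 7, 11 and 15, i.e. the alpha byte of each pixel, so is_opaque
 * and is_transparent inspect only the alpha channels while is_zero
 * requires every byte to be zero.
 */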
163 static force_inline int
164 is_opaque (__m128i x)
165 {
166     __m128i ffs = _mm_cmpeq_epi8 (x, x);
167
168     return (_mm_movemask_epi8 (_mm_cmpeq_epi8 (x, ffs)) & 0x8888) == 0x8888;
169 }
170
171 static force_inline int
172 is_zero (__m128i x)
173 {
174     return _mm_movemask_epi8 (
175         _mm_cmpeq_epi8 (x, _mm_setzero_si128 ())) == 0xffff;
176 }
177
178 static force_inline int
179 is_transparent (__m128i x)
180 {
181     return (_mm_movemask_epi8 (
182                 _mm_cmpeq_epi8 (x, _mm_setzero_si128 ())) & 0x8888) == 0x8888;
183 }
184
185 static force_inline __m128i
186 expand_pixel_32_1x128 (uint32_t data)
187 {
188     return _mm_shuffle_epi32 (unpack_32_1x128 (data), _MM_SHUFFLE (1, 0, 1, 0));
189 }
190
191 static force_inline __m128i
192 expand_alpha_1x128 (__m128i data)
193 {
194     return _mm_shufflehi_epi16 (_mm_shufflelo_epi16 (data,
195                                                      _MM_SHUFFLE (3, 3, 3, 3)),
196                                 _MM_SHUFFLE (3, 3, 3, 3));
197 }
198
199 static force_inline void
200 expand_alpha_2x128 (__m128i  data_lo,
201                     __m128i  data_hi,
202                     __m128i* alpha_lo,
203                     __m128i* alpha_hi)
204 {
205     __m128i lo, hi;
206
207     lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE (3, 3, 3, 3));
208     hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE (3, 3, 3, 3));
209
210     *alpha_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE (3, 3, 3, 3));
211     *alpha_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE (3, 3, 3, 3));
212 }
213
214 static force_inline void
215 expand_alpha_rev_2x128 (__m128i  data_lo,
216                         __m128i  data_hi,
217                         __m128i* alpha_lo,
218                         __m128i* alpha_hi)
219 {
220     __m128i lo, hi;
221
222     lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE (0, 0, 0, 0));
223     hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE (0, 0, 0, 0));
224     *alpha_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE (0, 0, 0, 0));
225     *alpha_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE (0, 0, 0, 0));
226 }
227
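/* Per-channel multiply of 8-bit values held in 16-bit lanes, with a
 * rounded division by 255: for t = x * a, (t + 0x80) * 0x0101 >> 16 is
 * exactly ((t + 0x80) + ((t + 0x80) >> 8)) >> 8, i.e. t / 255 rounded to
 * nearest.  mask_0080 and mask_0101 are assumed to hold 0x0080 and
 * 0x0101 in every 16-bit lane.
 */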
228 static force_inline void
229 pix_multiply_2x128 (__m128i* data_lo,
230                     __m128i* data_hi,
231                     __m128i* alpha_lo,
232                     __m128i* alpha_hi,
233                     __m128i* ret_lo,
234                     __m128i* ret_hi)
235 {
236     __m128i lo, hi;
237
238     lo = _mm_mullo_epi16 (*data_lo, *alpha_lo);
239     hi = _mm_mullo_epi16 (*data_hi, *alpha_hi);
240     lo = _mm_adds_epu16 (lo, mask_0080);
241     hi = _mm_adds_epu16 (hi, mask_0080);
242     *ret_lo = _mm_mulhi_epu16 (lo, mask_0101);
243     *ret_hi = _mm_mulhi_epu16 (hi, mask_0101);
244 }
245
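/* Computes (src * alpha_dst + dst * alpha_src) / 255 per channel, using
 * the same rounding trick as pix_multiply_2x128 but with a single 0x80
 * rounding term for the sum of the two products.  This is the building
 * block for the ATOP and XOR combiners further down.
 */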
246 static force_inline void
247 pix_add_multiply_2x128 (__m128i* src_lo,
248                         __m128i* src_hi,
249                         __m128i* alpha_dst_lo,
250                         __m128i* alpha_dst_hi,
251                         __m128i* dst_lo,
252                         __m128i* dst_hi,
253                         __m128i* alpha_src_lo,
254                         __m128i* alpha_src_hi,
255                         __m128i* ret_lo,
256                         __m128i* ret_hi)
257 {
258     __m128i lo, hi;
259     __m128i mul_lo, mul_hi;
260
261     lo = _mm_mullo_epi16 (*src_lo, *alpha_dst_lo);
262     hi = _mm_mullo_epi16 (*src_hi, *alpha_dst_hi);
263     mul_lo = _mm_mullo_epi16 (*dst_lo, *alpha_src_lo);
264     mul_hi = _mm_mullo_epi16 (*dst_hi, *alpha_src_hi);
265     lo = _mm_adds_epu16 (lo, mask_0080);
266     hi = _mm_adds_epu16 (hi, mask_0080);
267     lo = _mm_adds_epu16 (lo, mul_lo);
268     hi = _mm_adds_epu16 (hi, mul_hi);
269     *ret_lo = _mm_mulhi_epu16 (lo, mask_0101);
270     *ret_hi = _mm_mulhi_epu16 (hi, mask_0101);
271 }
272
273 static force_inline void
274 negate_2x128 (__m128i  data_lo,
275               __m128i  data_hi,
276               __m128i* neg_lo,
277               __m128i* neg_hi)
278 {
279     *neg_lo = _mm_xor_si128 (data_lo, mask_00ff);
280     *neg_hi = _mm_xor_si128 (data_hi, mask_00ff);
281 }
282
283 static force_inline void
284 invert_colors_2x128 (__m128i  data_lo,
285                      __m128i  data_hi,
286                      __m128i* inv_lo,
287                      __m128i* inv_hi)
288 {
289     __m128i lo, hi;
290
291     lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE (3, 0, 1, 2));
292     hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE (3, 0, 1, 2));
293     *inv_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE (3, 0, 1, 2));
294     *inv_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE (3, 0, 1, 2));
295 }
296
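/* Porter-Duff OVER on two sets of four unpacked pixels:
 * dst = src + dst * (255 - alpha) / 255 per channel, where alpha is the
 * already expanded source alpha and the final add saturates.
 */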
297 static force_inline void
298 over_2x128 (__m128i* src_lo,
299             __m128i* src_hi,
300             __m128i* alpha_lo,
301             __m128i* alpha_hi,
302             __m128i* dst_lo,
303             __m128i* dst_hi)
304 {
305     __m128i t1, t2;
306
307     negate_2x128 (*alpha_lo, *alpha_hi, &t1, &t2);
308
309     pix_multiply_2x128 (dst_lo, dst_hi, &t1, &t2, dst_lo, dst_hi);
310
311     *dst_lo = _mm_adds_epu8 (*src_lo, *dst_lo);
312     *dst_hi = _mm_adds_epu8 (*src_hi, *dst_hi);
313 }
314
315 static force_inline void
316 over_rev_non_pre_2x128 (__m128i  src_lo,
317                         __m128i  src_hi,
318                         __m128i* dst_lo,
319                         __m128i* dst_hi)
320 {
321     __m128i lo, hi;
322     __m128i alpha_lo, alpha_hi;
323
324     expand_alpha_2x128 (src_lo, src_hi, &alpha_lo, &alpha_hi);
325
326     lo = _mm_or_si128 (alpha_lo, mask_alpha);
327     hi = _mm_or_si128 (alpha_hi, mask_alpha);
328
329     invert_colors_2x128 (src_lo, src_hi, &src_lo, &src_hi);
330
331     pix_multiply_2x128 (&src_lo, &src_hi, &lo, &hi, &lo, &hi);
332
333     over_2x128 (&lo, &hi, &alpha_lo, &alpha_hi, dst_lo, dst_hi);
334 }
335
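/* IN followed by OVER: source and source alpha are first multiplied by
 * the (per-channel) mask, then composited over the destination,
 * i.e. dst = (src IN mask) OVER dst.
 */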
336 static force_inline void
337 in_over_2x128 (__m128i* src_lo,
338                __m128i* src_hi,
339                __m128i* alpha_lo,
340                __m128i* alpha_hi,
341                __m128i* mask_lo,
342                __m128i* mask_hi,
343                __m128i* dst_lo,
344                __m128i* dst_hi)
345 {
346     __m128i s_lo, s_hi;
347     __m128i a_lo, a_hi;
348
349     pix_multiply_2x128 (src_lo,   src_hi, mask_lo, mask_hi, &s_lo, &s_hi);
350     pix_multiply_2x128 (alpha_lo, alpha_hi, mask_lo, mask_hi, &a_lo, &a_hi);
351
352     over_2x128 (&s_lo, &s_hi, &a_lo, &a_hi, dst_lo, dst_hi);
353 }
354
355 static force_inline void
356 cache_prefetch (__m128i* addr)
357 {
358     _mm_prefetch (addr, _MM_HINT_T0);
359 }
360
361 static force_inline void
362 cache_prefetch_next (__m128i* addr)
363 {
364     _mm_prefetch (addr + 4, _MM_HINT_T0); /* 64 bytes ahead */
365 }
366
367 /* load 4 pixels from a 16-byte-aligned address */
368 static force_inline __m128i
369 load_128_aligned (__m128i* src)
370 {
371     return _mm_load_si128 (src);
372 }
373
374 /* load 4 pixels from an unaligned address */
375 static force_inline __m128i
376 load_128_unaligned (const __m128i* src)
377 {
378     return _mm_loadu_si128 (src);
379 }
380
381 /* save 4 pixels to a 16-byte-aligned address using a non-temporal
382  * (write-combining) store
383  */
384 static force_inline void
385 save_128_write_combining (__m128i* dst,
386                           __m128i  data)
387 {
388     _mm_stream_si128 (dst, data);
389 }
390
391 /* save 4 pixels to a 16-byte-aligned address */
392 static force_inline void
393 save_128_aligned (__m128i* dst,
394                   __m128i  data)
395 {
396     _mm_store_si128 (dst, data);
397 }
398
399 /* save 4 pixels to an unaligned address */
400 static force_inline void
401 save_128_unaligned (__m128i* dst,
402                     __m128i  data)
403 {
404     _mm_storeu_si128 (dst, data);
405 }
406
407 /* ------------------------------------------------------------------
408  * MMX inlines
409  */
410
411 static force_inline __m64
412 unpack_32_1x64 (uint32_t data)
413 {
414     return _mm_unpacklo_pi8 (_mm_cvtsi32_si64 (data), _mm_setzero_si64 ());
415 }
416
417 static force_inline __m64
418 expand_alpha_1x64 (__m64 data)
419 {
420     return _mm_shuffle_pi16 (data, _MM_SHUFFLE (3, 3, 3, 3));
421 }
422
423 static force_inline __m64
424 expand_alpha_rev_1x64 (__m64 data)
425 {
426     return _mm_shuffle_pi16 (data, _MM_SHUFFLE (0, 0, 0, 0));
427 }
428
429 static force_inline __m64
430 expand_pixel_8_1x64 (uint8_t data)
431 {
432     return _mm_shuffle_pi16 (
433         unpack_32_1x64 ((uint32_t)data), _MM_SHUFFLE (0, 0, 0, 0));
434 }
435
436 static force_inline __m64
437 pix_multiply_1x64 (__m64 data,
438                    __m64 alpha)
439 {
440     return _mm_mulhi_pu16 (_mm_adds_pu16 (_mm_mullo_pi16 (data, alpha),
441                                           mask_x0080),
442                            mask_x0101);
443 }
444
445 static force_inline __m64
446 pix_add_multiply_1x64 (__m64* src,
447                        __m64* alpha_dst,
448                        __m64* dst,
449                        __m64* alpha_src)
450 {
451     return _mm_mulhi_pu16 (
452         _mm_adds_pu16 (_mm_adds_pu16 (_mm_mullo_pi16 (*src, *alpha_dst),
453                                       mask_x0080),
454                        _mm_mullo_pi16 (*dst, *alpha_src)),
455         mask_x0101);
456 }
457
458 static force_inline __m64
459 negate_1x64 (__m64 data)
460 {
461     return _mm_xor_si64 (data, mask_x00ff);
462 }
463
464 static force_inline __m64
465 invert_colors_1x64 (__m64 data)
466 {
467     return _mm_shuffle_pi16 (data, _MM_SHUFFLE (3, 0, 1, 2));
468 }
469
470 static force_inline __m64
471 over_1x64 (__m64 src, __m64 alpha, __m64 dst)
472 {
473     return _mm_adds_pu8 (src, pix_multiply_1x64 (dst, negate_1x64 (alpha)));
474 }
475
476 static force_inline __m64
477 in_over_1x64 (__m64* src, __m64* alpha, __m64* mask, __m64* dst)
478 {
479     return over_1x64 (pix_multiply_1x64 (*src, *mask),
480                       pix_multiply_1x64 (*alpha, *mask),
481                       *dst);
482 }
483
484 static force_inline __m64
485 over_rev_non_pre_1x64 (__m64 src, __m64 dst)
486 {
487     __m64 alpha = expand_alpha_1x64 (src);
488
489     return over_1x64 (pix_multiply_1x64 (invert_colors_1x64 (src),
490                                          _mm_or_si64 (alpha, mask_x_alpha)),
491                       alpha,
492                       dst);
493 }
494
495 static force_inline uint32_t
496 pack_1x64_32 (__m64 data)
497 {
498     return _mm_cvtsi64_si32 (_mm_packs_pu16 (data, _mm_setzero_si64 ()));
499 }
500
501 /* Expand a 16-bit 565 pixel held in the low word of an MMX register into
502  *
503  *    00RR00GG00BB
504  *
505  * --- Expanding 565 in the low word ---
506  *
507  * m = (m << (32 - 3)) | (m << (16 - 5)) | m;
508  * m = m & 0x01f0003f001f;
509  * m = m * 0x008404100840;
510  * m = m >> 8;
511  *
512  * Note the trick here - the top word is shifted by another nibble to
513  * avoid it bumping into the middle word
514  */
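/* A worked example of the expansion above (illustrative only): the pure
 * red 565 pixel 0xf800 has r = 0x1f in bits 11-15.  After the shifts and
 * the AND, r occupies bits 4-8 of the third 16-bit word; multiplying
 * that word (0x01f0) by 0x0084 gives (r << 11) | (r << 6) = 0xffc0, and
 * the final >> 8 leaves 0xff, i.e. the fully expanded 0x00ff0000.
 */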
515 static force_inline __m64
516 expand565_16_1x64 (uint16_t pixel)
517 {
518     __m64 p;
519     __m64 t1, t2;
520
521     p = _mm_cvtsi32_si64 ((uint32_t) pixel);
522
523     t1 = _mm_slli_si64 (p, 36 - 11);
524     t2 = _mm_slli_si64 (p, 16 - 5);
525
526     p = _mm_or_si64 (t1, p);
527     p = _mm_or_si64 (t2, p);
528     p = _mm_and_si64 (p, mask_x565_rgb);
529     p = _mm_mullo_pi16 (p, mask_x565_unpack);
530
531     return _mm_srli_pi16 (p, 8);
532 }
533
534 /* ----------------------------------------------------------------------------
535  * Compose Core transformations
536  */
537 static force_inline uint32_t
538 core_combine_over_u_pixel_sse2 (uint32_t src, uint32_t dst)
539 {
540     uint8_t a;
541     __m64 ms;
542
543     a = src >> 24;
544
545     if (a == 0xff)
546     {
547         return src;
548     }
549     else if (src)
550     {
551         ms = unpack_32_1x64 (src);
552         return pack_1x64_32 (
553             over_1x64 (ms, expand_alpha_1x64 (ms), unpack_32_1x64 (dst)));
554     }
555
556     return dst;
557 }
558
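/* combine1 and combine4 fetch one or four source pixels and, when a mask
 * pointer is supplied, multiply them by the expanded mask alpha (the
 * unified-alpha case).  With a NULL mask the source is returned
 * unchanged.
 */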
559 static force_inline uint32_t
560 combine1 (const uint32_t *ps, const uint32_t *pm)
561 {
562     uint32_t s = *ps;
563
564     if (pm)
565     {
566         __m64 ms, mm;
567
568         mm = unpack_32_1x64 (*pm);
569         mm = expand_alpha_1x64 (mm);
570
571         ms = unpack_32_1x64 (s);
572         ms = pix_multiply_1x64 (ms, mm);
573
574         s = pack_1x64_32 (ms);
575     }
576
577     return s;
578 }
579
580 static force_inline __m128i
581 combine4 (const __m128i *ps, const __m128i *pm)
582 {
583     __m128i xmm_src_lo, xmm_src_hi;
584     __m128i xmm_msk_lo, xmm_msk_hi;
585     __m128i s;
586
587     if (pm)
588     {
589         xmm_msk_lo = load_128_unaligned (pm);
590
591         if (is_transparent (xmm_msk_lo))
592             return _mm_setzero_si128 ();
593     }
594
595     s = load_128_unaligned (ps);
596
597     if (pm)
598     {
599         unpack_128_2x128 (s, &xmm_src_lo, &xmm_src_hi);
600         unpack_128_2x128 (xmm_msk_lo, &xmm_msk_lo, &xmm_msk_hi);
601
602         expand_alpha_2x128 (xmm_msk_lo, xmm_msk_hi, &xmm_msk_lo, &xmm_msk_hi);
603
604         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
605                             &xmm_msk_lo, &xmm_msk_hi,
606                             &xmm_src_lo, &xmm_src_hi);
607
608         s = pack_2x128_128 (xmm_src_lo, xmm_src_hi);
609     }
610
611     return s;
612 }
613
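/* The core_combine_*_sse2 loops below all follow the same pattern:
 * process leading pixels one at a time until the destination pointer is
 * 16-byte aligned, then four pixels per iteration with aligned stores,
 * then the remaining (fewer than four) pixels one at a time again.
 */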
614 static force_inline void
615 core_combine_over_u_sse2 (uint32_t*       pd,
616                           const uint32_t* ps,
617                           const uint32_t* pm,
618                           int             w)
619 {
620     uint32_t s, d;
621
622     __m128i xmm_dst_lo, xmm_dst_hi;
623     __m128i xmm_src_lo, xmm_src_hi;
624     __m128i xmm_alpha_lo, xmm_alpha_hi;
625
626     /* call prefetch hint to optimize cache load*/
627     cache_prefetch ((__m128i*)ps);
628     cache_prefetch ((__m128i*)pd);
629     cache_prefetch ((__m128i*)pm);
630
631     /* Align dst on a 16-byte boundary */
632     while (w && ((unsigned long)pd & 15))
633     {
634         d = *pd;
635         s = combine1 (ps, pm);
636
637         *pd++ = core_combine_over_u_pixel_sse2 (s, d);
638         ps++;
639         if (pm)
640             pm++;
641         w--;
642     }
643
644     /* call prefetch hint to optimize cache load*/
645     cache_prefetch ((__m128i*)ps);
646     cache_prefetch ((__m128i*)pd);
647     cache_prefetch ((__m128i*)pm);
648
649     while (w >= 4)
650     {
651         /* fill cache line with next memory */
652         cache_prefetch_next ((__m128i*)ps);
653         cache_prefetch_next ((__m128i*)pd);
654         cache_prefetch_next ((__m128i*)pm);
655
656         /* I'm loading unaligned because I'm not sure about
657          * the address alignment.
658          */
659         xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
660
661         if (is_opaque (xmm_src_hi))
662         {
663             save_128_aligned ((__m128i*)pd, xmm_src_hi);
664         }
665         else if (!is_zero (xmm_src_hi))
666         {
667             xmm_dst_hi = load_128_aligned ((__m128i*) pd);
668
669             unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
670             unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
671
672             expand_alpha_2x128 (
673                 xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi);
674
675             over_2x128 (&xmm_src_lo, &xmm_src_hi,
676                         &xmm_alpha_lo, &xmm_alpha_hi,
677                         &xmm_dst_lo, &xmm_dst_hi);
678
679             /* rebuild the 4 pixel data and save */
680             save_128_aligned ((__m128i*)pd,
681                               pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
682         }
683
684         w -= 4;
685         ps += 4;
686         pd += 4;
687         if (pm)
688             pm += 4;
689     }
690
691     while (w)
692     {
693         d = *pd;
694         s = combine1 (ps, pm);
695
696         *pd++ = core_combine_over_u_pixel_sse2 (s, d);
697         ps++;
698         if (pm)
699             pm++;
700
701         w--;
702     }
703 }
704
705 static force_inline void
706 core_combine_over_reverse_u_sse2 (uint32_t*       pd,
707                                   const uint32_t* ps,
708                                   const uint32_t* pm,
709                                   int             w)
710 {
711     uint32_t s, d;
712
713     __m128i xmm_dst_lo, xmm_dst_hi;
714     __m128i xmm_src_lo, xmm_src_hi;
715     __m128i xmm_alpha_lo, xmm_alpha_hi;
716
717     /* call prefetch hint to optimize cache load*/
718     cache_prefetch ((__m128i*)ps);
719     cache_prefetch ((__m128i*)pd);
720     cache_prefetch ((__m128i*)pm);
721
722     /* Align dst on a 16-byte boundary */
723     while (w &&
724            ((unsigned long)pd & 15))
725     {
726         d = *pd;
727         s = combine1 (ps, pm);
728
729         *pd++ = core_combine_over_u_pixel_sse2 (d, s);
730         w--;
731         ps++;
732         if (pm)
733             pm++;
734     }
735
736     /* call prefetch hint to optimize cache load*/
737     cache_prefetch ((__m128i*)ps);
738     cache_prefetch ((__m128i*)pd);
739     cache_prefetch ((__m128i*)pm);
740
741     while (w >= 4)
742     {
743         /* fill cache line with next memory */
744         cache_prefetch_next ((__m128i*)ps);
745         cache_prefetch_next ((__m128i*)pd);
746         cache_prefetch_next ((__m128i*)pm);
747
748         /* I'm loading unaligned because I'm not sure
749          * about the address alignment.
750          */
751         xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
752         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
753
754         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
755         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
756
757         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
758                             &xmm_alpha_lo, &xmm_alpha_hi);
759
760         over_2x128 (&xmm_dst_lo, &xmm_dst_hi,
761                     &xmm_alpha_lo, &xmm_alpha_hi,
762                     &xmm_src_lo, &xmm_src_hi);
763
764         /* rebuild the 4 pixel data and save */
765         save_128_aligned ((__m128i*)pd,
766                           pack_2x128_128 (xmm_src_lo, xmm_src_hi));
767
768         w -= 4;
769         ps += 4;
770         pd += 4;
771
772         if (pm)
773             pm += 4;
774     }
775
776     while (w)
777     {
778         d = *pd;
779         s = combine1 (ps, pm);
780
781         *pd++ = core_combine_over_u_pixel_sse2 (d, s);
782         ps++;
783         w--;
784         if (pm)
785             pm++;
786     }
787 }
788
789 static force_inline uint32_t
790 core_combine_in_u_pixelsse2 (uint32_t src, uint32_t dst)
791 {
792     uint32_t maska = src >> 24;
793
794     if (maska == 0)
795     {
796         return 0;
797     }
798     else if (maska != 0xff)
799     {
800         return pack_1x64_32 (
801             pix_multiply_1x64 (unpack_32_1x64 (dst),
802                                expand_alpha_1x64 (unpack_32_1x64 (src))));
803     }
804
805     return dst;
806 }
807
808 static force_inline void
809 core_combine_in_u_sse2 (uint32_t*       pd,
810                         const uint32_t* ps,
811                         const uint32_t* pm,
812                         int             w)
813 {
814     uint32_t s, d;
815
816     __m128i xmm_src_lo, xmm_src_hi;
817     __m128i xmm_dst_lo, xmm_dst_hi;
818
819     /* call prefetch hint to optimize cache load*/
820     cache_prefetch ((__m128i*)ps);
821     cache_prefetch ((__m128i*)pd);
822     cache_prefetch ((__m128i*)pm);
823
824     while (w && ((unsigned long) pd & 15))
825     {
826         s = combine1 (ps, pm);
827         d = *pd;
828
829         *pd++ = core_combine_in_u_pixelsse2 (d, s);
830         w--;
831         ps++;
832         if (pm)
833             pm++;
834     }
835
836     /* call prefetch hint to optimize cache load*/
837     cache_prefetch ((__m128i*)ps);
838     cache_prefetch ((__m128i*)pd);
839     cache_prefetch ((__m128i*)pm);
840
841     while (w >= 4)
842     {
843         /* fill cache line with next memory */
844         cache_prefetch_next ((__m128i*)ps);
845         cache_prefetch_next ((__m128i*)pd);
846         cache_prefetch_next ((__m128i*)pm);
847
848         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
849         xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*) pm);
850
851         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
852         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
853
854         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
855         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
856                             &xmm_dst_lo, &xmm_dst_hi,
857                             &xmm_dst_lo, &xmm_dst_hi);
858
859         save_128_aligned ((__m128i*)pd,
860                           pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
861
862         ps += 4;
863         pd += 4;
864         w -= 4;
865         if (pm)
866             pm += 4;
867     }
868
869     while (w)
870     {
871         s = combine1 (ps, pm);
872         d = *pd;
873
874         *pd++ = core_combine_in_u_pixelsse2 (d, s);
875         w--;
876         ps++;
877         if (pm)
878             pm++;
879     }
880 }
881
882 static force_inline void
883 core_combine_reverse_in_u_sse2 (uint32_t*       pd,
884                                 const uint32_t* ps,
885                                 const uint32_t *pm,
886                                 int             w)
887 {
888     uint32_t s, d;
889
890     __m128i xmm_src_lo, xmm_src_hi;
891     __m128i xmm_dst_lo, xmm_dst_hi;
892
893     /* call prefetch hint to optimize cache load*/
894     cache_prefetch ((__m128i*)ps);
895     cache_prefetch ((__m128i*)pd);
896     cache_prefetch ((__m128i*)pm);
897
898     while (w && ((unsigned long) pd & 15))
899     {
900         s = combine1 (ps, pm);
901         d = *pd;
902
903         *pd++ = core_combine_in_u_pixelsse2 (s, d);
904         ps++;
905         w--;
906         if (pm)
907             pm++;
908     }
909
910     /* call prefetch hint to optimize cache load*/
911     cache_prefetch ((__m128i*)ps);
912     cache_prefetch ((__m128i*)pd);
913     cache_prefetch ((__m128i*)pm);
914
915     while (w >= 4)
916     {
917         /* fill cache line with next memory */
918         cache_prefetch_next ((__m128i*)ps);
919         cache_prefetch_next ((__m128i*)pd);
920         cache_prefetch_next ((__m128i*)pm);
921
922         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
923         xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*)pm);
924
925         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
926         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
927
928         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
929         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
930                             &xmm_src_lo, &xmm_src_hi,
931                             &xmm_dst_lo, &xmm_dst_hi);
932
933         save_128_aligned (
934             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
935
936         ps += 4;
937         pd += 4;
938         w -= 4;
939         if (pm)
940             pm += 4;
941     }
942
943     while (w)
944     {
945         s = combine1 (ps, pm);
946         d = *pd;
947
948         *pd++ = core_combine_in_u_pixelsse2 (s, d);
949         w--;
950         ps++;
951         if (pm)
952             pm++;
953     }
954 }
955
956 static force_inline void
957 core_combine_reverse_out_u_sse2 (uint32_t*       pd,
958                                  const uint32_t* ps,
959                                  const uint32_t* pm,
960                                  int             w)
961 {
962     /* call prefetch hint to optimize cache load*/
963     cache_prefetch ((__m128i*)ps);
964     cache_prefetch ((__m128i*)pd);
965     cache_prefetch ((__m128i*)pm);
966
967     while (w && ((unsigned long) pd & 15))
968     {
969         uint32_t s = combine1 (ps, pm);
970         uint32_t d = *pd;
971
972         *pd++ = pack_1x64_32 (
973             pix_multiply_1x64 (
974                 unpack_32_1x64 (d), negate_1x64 (
975                     expand_alpha_1x64 (unpack_32_1x64 (s)))));
976         
977         if (pm)
978             pm++;
979         ps++;
980         w--;
981     }
982
983     /* call prefetch hint to optimize cache load*/
984     cache_prefetch ((__m128i*)ps);
985     cache_prefetch ((__m128i*)pd);
986     cache_prefetch ((__m128i*)pm);
987
988     while (w >= 4)
989     {
990         __m128i xmm_src_lo, xmm_src_hi;
991         __m128i xmm_dst_lo, xmm_dst_hi;
992
993         /* fill cache line with next memory */
994         cache_prefetch_next ((__m128i*)ps);
995         cache_prefetch_next ((__m128i*)pd);
996         cache_prefetch_next ((__m128i*)pm);
997
998         xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
999         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
1000
1001         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1002         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1003
1004         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1005         negate_2x128       (xmm_src_lo, xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1006
1007         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
1008                             &xmm_src_lo, &xmm_src_hi,
1009                             &xmm_dst_lo, &xmm_dst_hi);
1010
1011         save_128_aligned (
1012             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1013
1014         ps += 4;
1015         pd += 4;
1016         if (pm)
1017             pm += 4;
1018
1019         w -= 4;
1020     }
1021
1022     while (w)
1023     {
1024         uint32_t s = combine1 (ps, pm);
1025         uint32_t d = *pd;
1026
1027         *pd++ = pack_1x64_32 (
1028             pix_multiply_1x64 (
1029                 unpack_32_1x64 (d), negate_1x64 (
1030                     expand_alpha_1x64 (unpack_32_1x64 (s)))));
1031         ps++;
1032         if (pm)
1033             pm++;
1034         w--;
1035     }
1036 }
1037
1038 static force_inline void
1039 core_combine_out_u_sse2 (uint32_t*       pd,
1040                          const uint32_t* ps,
1041                          const uint32_t* pm,
1042                          int             w)
1043 {
1044     /* call prefetch hint to optimize cache load*/
1045     cache_prefetch ((__m128i*)ps);
1046     cache_prefetch ((__m128i*)pd);
1047     cache_prefetch ((__m128i*)pm);
1048
1049     while (w && ((unsigned long) pd & 15))
1050     {
1051         uint32_t s = combine1 (ps, pm);
1052         uint32_t d = *pd;
1053
1054         *pd++ = pack_1x64_32 (
1055             pix_multiply_1x64 (
1056                 unpack_32_1x64 (s), negate_1x64 (
1057                     expand_alpha_1x64 (unpack_32_1x64 (d)))));
1058         w--;
1059         ps++;
1060         if (pm)
1061             pm++;
1062     }
1063
1064     /* call prefetch hint to optimize cache load*/
1065     cache_prefetch ((__m128i*)ps);
1066     cache_prefetch ((__m128i*)pd);
1067     cache_prefetch ((__m128i*)pm);
1068
1069     while (w >= 4)
1070     {
1071         __m128i xmm_src_lo, xmm_src_hi;
1072         __m128i xmm_dst_lo, xmm_dst_hi;
1073
1074         /* fill cache line with next memory */
1075         cache_prefetch_next ((__m128i*)ps);
1076         cache_prefetch_next ((__m128i*)pd);
1077         cache_prefetch_next ((__m128i*)pm);
1078
1079         xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*)pm);
1080         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
1081
1082         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1083         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1084
1085         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1086         negate_2x128       (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1087
1088         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
1089                             &xmm_dst_lo, &xmm_dst_hi,
1090                             &xmm_dst_lo, &xmm_dst_hi);
1091
1092         save_128_aligned (
1093             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1094
1095         ps += 4;
1096         pd += 4;
1097         w -= 4;
1098         if (pm)
1099             pm += 4;
1100     }
1101
1102     while (w)
1103     {
1104         uint32_t s = combine1 (ps, pm);
1105         uint32_t d = *pd;
1106
1107         *pd++ = pack_1x64_32 (
1108             pix_multiply_1x64 (
1109                 unpack_32_1x64 (s), negate_1x64 (
1110                     expand_alpha_1x64 (unpack_32_1x64 (d)))));
1111         w--;
1112         ps++;
1113         if (pm)
1114             pm++;
1115     }
1116 }
1117
1118 static force_inline uint32_t
1119 core_combine_atop_u_pixel_sse2 (uint32_t src,
1120                                 uint32_t dst)
1121 {
1122     __m64 s = unpack_32_1x64 (src);
1123     __m64 d = unpack_32_1x64 (dst);
1124
1125     __m64 sa = negate_1x64 (expand_alpha_1x64 (s));
1126     __m64 da = expand_alpha_1x64 (d);
1127
1128     return pack_1x64_32 (pix_add_multiply_1x64 (&s, &da, &d, &sa));
1129 }
1130
1131 static force_inline void
1132 core_combine_atop_u_sse2 (uint32_t*       pd,
1133                           const uint32_t* ps,
1134                           const uint32_t* pm,
1135                           int             w)
1136 {
1137     uint32_t s, d;
1138
1139     __m128i xmm_src_lo, xmm_src_hi;
1140     __m128i xmm_dst_lo, xmm_dst_hi;
1141     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
1142     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
1143
1144     /* call prefetch hint to optimize cache load*/
1145     cache_prefetch ((__m128i*)ps);
1146     cache_prefetch ((__m128i*)pd);
1147     cache_prefetch ((__m128i*)pm);
1148
1149     while (w && ((unsigned long) pd & 15))
1150     {
1151         s = combine1 (ps, pm);
1152         d = *pd;
1153
1154         *pd++ = core_combine_atop_u_pixel_sse2 (s, d);
1155         w--;
1156         ps++;
1157         if (pm)
1158             pm++;
1159     }
1160
1161     /* call prefetch hint to optimize cache load*/
1162     cache_prefetch ((__m128i*)ps);
1163     cache_prefetch ((__m128i*)pd);
1164     cache_prefetch ((__m128i*)pm);
1165
1166     while (w >= 4)
1167     {
1168         /* fill cache line with next memory */
1169         cache_prefetch_next ((__m128i*)ps);
1170         cache_prefetch_next ((__m128i*)pd);
1171         cache_prefetch_next ((__m128i*)pm);
1172
1173         xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
1174         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
1175
1176         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1177         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1178
1179         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
1180                             &xmm_alpha_src_lo, &xmm_alpha_src_hi);
1181         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
1182                             &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
1183
1184         negate_2x128 (xmm_alpha_src_lo, xmm_alpha_src_hi,
1185                       &xmm_alpha_src_lo, &xmm_alpha_src_hi);
1186
1187         pix_add_multiply_2x128 (
1188             &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
1189             &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi,
1190             &xmm_dst_lo, &xmm_dst_hi);
1191
1192         save_128_aligned (
1193             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1194
1195         ps += 4;
1196         pd += 4;
1197         w -= 4;
1198         if (pm)
1199             pm += 4;
1200     }
1201
1202     while (w)
1203     {
1204         s = combine1 (ps, pm);
1205         d = *pd;
1206
1207         *pd++ = core_combine_atop_u_pixel_sse2 (s, d);
1208         w--;
1209         ps++;
1210         if (pm)
1211             pm++;
1212     }
1213 }
1214
1215 static force_inline uint32_t
1216 core_combine_reverse_atop_u_pixel_sse2 (uint32_t src,
1217                                         uint32_t dst)
1218 {
1219     __m64 s = unpack_32_1x64 (src);
1220     __m64 d = unpack_32_1x64 (dst);
1221
1222     __m64 sa = expand_alpha_1x64 (s);
1223     __m64 da = negate_1x64 (expand_alpha_1x64 (d));
1224
1225     return pack_1x64_32 (pix_add_multiply_1x64 (&s, &da, &d, &sa));
1226 }
1227
1228 static force_inline void
1229 core_combine_reverse_atop_u_sse2 (uint32_t*       pd,
1230                                   const uint32_t* ps,
1231                                   const uint32_t* pm,
1232                                   int             w)
1233 {
1234     uint32_t s, d;
1235
1236     __m128i xmm_src_lo, xmm_src_hi;
1237     __m128i xmm_dst_lo, xmm_dst_hi;
1238     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
1239     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
1240
1241     /* call prefetch hint to optimize cache load*/
1242     cache_prefetch ((__m128i*)ps);
1243     cache_prefetch ((__m128i*)pd);
1244     cache_prefetch ((__m128i*)pm);
1245
1246     while (w && ((unsigned long) pd & 15))
1247     {
1248         s = combine1 (ps, pm);
1249         d = *pd;
1250
1251         *pd++ = core_combine_reverse_atop_u_pixel_sse2 (s, d);
1252         ps++;
1253         w--;
1254         if (pm)
1255             pm++;
1256     }
1257
1258     /* call prefetch hint to optimize cache load*/
1259     cache_prefetch ((__m128i*)ps);
1260     cache_prefetch ((__m128i*)pd);
1261     cache_prefetch ((__m128i*)pm);
1262
1263     while (w >= 4)
1264     {
1265         /* fill cache line with next memory */
1266         cache_prefetch_next ((__m128i*)ps);
1267         cache_prefetch_next ((__m128i*)pd);
1268         cache_prefetch_next ((__m128i*)pm);
1269
1270         xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
1271         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
1272
1273         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1274         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1275
1276         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
1277                             &xmm_alpha_src_lo, &xmm_alpha_src_hi);
1278         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
1279                             &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
1280
1281         negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi,
1282                       &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
1283
1284         pix_add_multiply_2x128 (
1285             &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
1286             &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi,
1287             &xmm_dst_lo, &xmm_dst_hi);
1288
1289         save_128_aligned (
1290             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1291
1292         ps += 4;
1293         pd += 4;
1294         w -= 4;
1295         if (pm)
1296             pm += 4;
1297     }
1298
1299     while (w)
1300     {
1301         s = combine1 (ps, pm);
1302         d = *pd;
1303
1304         *pd++ = core_combine_reverse_atop_u_pixel_sse2 (s, d);
1305         ps++;
1306         w--;
1307         if (pm)
1308             pm++;
1309     }
1310 }
1311
1312 static force_inline uint32_t
1313 core_combine_xor_u_pixel_sse2 (uint32_t src,
1314                                uint32_t dst)
1315 {
1316     __m64 s = unpack_32_1x64 (src);
1317     __m64 d = unpack_32_1x64 (dst);
1318
1319     __m64 neg_d = negate_1x64 (expand_alpha_1x64 (d));
1320     __m64 neg_s = negate_1x64 (expand_alpha_1x64 (s));
1321
1322     return pack_1x64_32 (pix_add_multiply_1x64 (&s, &neg_d, &d, &neg_s));
1323 }
1324
1325 static force_inline void
1326 core_combine_xor_u_sse2 (uint32_t*       dst,
1327                          const uint32_t* src,
1328                          const uint32_t *mask,
1329                          int             width)
1330 {
1331     int w = width;
1332     uint32_t s, d;
1333     uint32_t* pd = dst;
1334     const uint32_t* ps = src;
1335     const uint32_t* pm = mask;
1336
1337     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
1338     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
1339     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
1340     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
1341
1342     /* call prefetch hint to optimize cache load*/
1343     cache_prefetch ((__m128i*)ps);
1344     cache_prefetch ((__m128i*)pd);
1345     cache_prefetch ((__m128i*)pm);
1346
1347     while (w && ((unsigned long) pd & 15))
1348     {
1349         s = combine1 (ps, pm);
1350         d = *pd;
1351
1352         *pd++ = core_combine_xor_u_pixel_sse2 (s, d);
1353         w--;
1354         ps++;
1355         if (pm)
1356             pm++;
1357     }
1358
1359     /* call prefetch hint to optimize cache load*/
1360     cache_prefetch ((__m128i*)ps);
1361     cache_prefetch ((__m128i*)pd);
1362     cache_prefetch ((__m128i*)pm);
1363
1364     while (w >= 4)
1365     {
1366         /* fill cache line with next memory */
1367         cache_prefetch_next ((__m128i*)ps);
1368         cache_prefetch_next ((__m128i*)pd);
1369         cache_prefetch_next ((__m128i*)pm);
1370
1371         xmm_src = combine4 ((__m128i*) ps, (__m128i*) pm);
1372         xmm_dst = load_128_aligned ((__m128i*) pd);
1373
1374         unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
1375         unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
1376
1377         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
1378                             &xmm_alpha_src_lo, &xmm_alpha_src_hi);
1379         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
1380                             &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
1381
1382         negate_2x128 (xmm_alpha_src_lo, xmm_alpha_src_hi,
1383                       &xmm_alpha_src_lo, &xmm_alpha_src_hi);
1384         negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi,
1385                       &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
1386
1387         pix_add_multiply_2x128 (
1388             &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
1389             &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi,
1390             &xmm_dst_lo, &xmm_dst_hi);
1391
1392         save_128_aligned (
1393             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1394
1395         ps += 4;
1396         pd += 4;
1397         w -= 4;
1398         if (pm)
1399             pm += 4;
1400     }
1401
1402     while (w)
1403     {
1404         s = combine1 (ps, pm);
1405         d = *pd;
1406
1407         *pd++ = core_combine_xor_u_pixel_sse2 (s, d);
1408         w--;
1409         ps++;
1410         if (pm)
1411             pm++;
1412     }
1413 }
1414
1415 static force_inline void
1416 core_combine_add_u_sse2 (uint32_t*       dst,
1417                          const uint32_t* src,
1418                          const uint32_t* mask,
1419                          int             width)
1420 {
1421     int w = width;
1422     uint32_t s, d;
1423     uint32_t* pd = dst;
1424     const uint32_t* ps = src;
1425     const uint32_t* pm = mask;
1426
1427     /* call prefetch hint to optimize cache load*/
1428     cache_prefetch ((__m128i*)ps);
1429     cache_prefetch ((__m128i*)pd);
1430     cache_prefetch ((__m128i*)pm);
1431
1432     while (w && (unsigned long)pd & 15)
1433     {
1434         s = combine1 (ps, pm);
1435         d = *pd;
1436
1437         ps++;
1438         if (pm)
1439             pm++;
1440         *pd++ = _mm_cvtsi64_si32 (
1441             _mm_adds_pu8 (_mm_cvtsi32_si64 (s), _mm_cvtsi32_si64 (d)));
1442         w--;
1443     }
1444
1445     /* call prefetch hint to optimize cache load*/
1446     cache_prefetch ((__m128i*)ps);
1447     cache_prefetch ((__m128i*)pd);
1448     cache_prefetch ((__m128i*)pm);
1449
1450     while (w >= 4)
1451     {
1452         __m128i s;
1453
1454         /* fill cache line with next memory */
1455         cache_prefetch_next ((__m128i*)ps);
1456         cache_prefetch_next ((__m128i*)pd);
1457         cache_prefetch_next ((__m128i*)pm);
1458
1459         s = combine4 ((__m128i*)ps, (__m128i*)pm);
1460
1461         save_128_aligned (
1462             (__m128i*)pd, _mm_adds_epu8 (s, load_128_aligned  ((__m128i*)pd)));
1463
1464         pd += 4;
1465         ps += 4;
1466         if (pm)
1467             pm += 4;
1468         w -= 4;
1469     }
1470
1471     while (w--)
1472     {
1473         s = combine1 (ps, pm);
1474         d = *pd;
1475
1476         ps++;
1477         *pd++ = _mm_cvtsi64_si32 (
1478             _mm_adds_pu8 (_mm_cvtsi32_si64 (s), _mm_cvtsi32_si64 (d)));
1479         if (pm)
1480             pm++;
1481     }
1482 }
1483
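/* SATURATE for a single pixel: when the source alpha exceeds the
 * destination's remaining coverage (~dst >> 24), the source is first
 * scaled by DIV_UN8 (da, sa) so the saturating add cannot push the
 * destination alpha past 0xff.
 */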
1484 static force_inline uint32_t
1485 core_combine_saturate_u_pixel_sse2 (uint32_t src,
1486                                     uint32_t dst)
1487 {
1488     __m64 ms = unpack_32_1x64 (src);
1489     __m64 md = unpack_32_1x64 (dst);
1490     uint32_t sa = src >> 24;
1491     uint32_t da = ~dst >> 24;
1492
1493     if (sa > da)
1494     {
1495         ms = pix_multiply_1x64 (
1496             ms, expand_alpha_1x64 (unpack_32_1x64 (DIV_UN8 (da, sa) << 24)));
1497     }
1498
1499     return pack_1x64_32 (_mm_adds_pu16 (md, ms));
1500 }
1501
1502 static force_inline void
1503 core_combine_saturate_u_sse2 (uint32_t *      pd,
1504                               const uint32_t *ps,
1505                               const uint32_t *pm,
1506                               int             w)
1507 {
1508     uint32_t s, d;
1509
1510     uint32_t pack_cmp;
1511     __m128i xmm_src, xmm_dst;
1512
1513     /* call prefetch hint to optimize cache load*/
1514     cache_prefetch ((__m128i*)ps);
1515     cache_prefetch ((__m128i*)pd);
1516     cache_prefetch ((__m128i*)pm);
1517
1518     while (w && (unsigned long)pd & 15)
1519     {
1520         s = combine1 (ps, pm);
1521         d = *pd;
1522
1523         *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1524         w--;
1525         ps++;
1526         if (pm)
1527             pm++;
1528     }
1529
1530     /* call prefetch hint to optimize cache load*/
1531     cache_prefetch ((__m128i*)ps);
1532     cache_prefetch ((__m128i*)pd);
1533     cache_prefetch ((__m128i*)pm);
1534
1535     while (w >= 4)
1536     {
1537         /* fill cache line with next memory */
1538         cache_prefetch_next ((__m128i*)ps);
1539         cache_prefetch_next ((__m128i*)pd);
1540         cache_prefetch_next ((__m128i*)pm);
1541
1542         xmm_dst = load_128_aligned  ((__m128i*)pd);
1543         xmm_src = combine4 ((__m128i*)ps, (__m128i*)pm);
1544
1545         pack_cmp = _mm_movemask_epi8 (
1546             _mm_cmpgt_epi32 (
1547                 _mm_srli_epi32 (xmm_src, 24),
1548                 _mm_srli_epi32 (_mm_xor_si128 (xmm_dst, mask_ff000000), 24)));
1549
1550         /* if some src alpha is greater than the corresponding ~dst alpha */
1551         if (pack_cmp)
1552         {
1553             s = combine1 (ps++, pm);
1554             d = *pd;
1555             *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1556             if (pm)
1557                 pm++;
1558
1559             s = combine1 (ps++, pm);
1560             d = *pd;
1561             *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1562             if (pm)
1563                 pm++;
1564
1565             s = combine1 (ps++, pm);
1566             d = *pd;
1567             *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1568             if (pm)
1569                 pm++;
1570
1571             s = combine1 (ps++, pm);
1572             d = *pd;
1573             *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1574             if (pm)
1575                 pm++;
1576         }
1577         else
1578         {
1579             save_128_aligned ((__m128i*)pd, _mm_adds_epu8 (xmm_dst, xmm_src));
1580
1581             pd += 4;
1582             ps += 4;
1583             if (pm)
1584                 pm += 4;
1585         }
1586
1587         w -= 4;
1588     }
1589
1590     while (w--)
1591     {
1592         s = combine1 (ps, pm);
1593         d = *pd;
1594
1595         *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1596         ps++;
1597         if (pm)
1598             pm++;
1599     }
1600 }
1601
1602 static force_inline void
1603 core_combine_src_ca_sse2 (uint32_t*       pd,
1604                           const uint32_t* ps,
1605                           const uint32_t *pm,
1606                           int             w)
1607 {
1608     uint32_t s, m;
1609
1610     __m128i xmm_src_lo, xmm_src_hi;
1611     __m128i xmm_mask_lo, xmm_mask_hi;
1612     __m128i xmm_dst_lo, xmm_dst_hi;
1613
1614     /* call prefetch hint to optimize cache load*/
1615     cache_prefetch ((__m128i*)ps);
1616     cache_prefetch ((__m128i*)pd);
1617     cache_prefetch ((__m128i*)pm);
1618
1619     while (w && (unsigned long)pd & 15)
1620     {
1621         s = *ps++;
1622         m = *pm++;
1623         *pd++ = pack_1x64_32 (
1624             pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)));
1625         w--;
1626     }
1627
1628     /* call prefetch hint to optimize cache load*/
1629     cache_prefetch ((__m128i*)ps);
1630     cache_prefetch ((__m128i*)pd);
1631     cache_prefetch ((__m128i*)pm);
1632
1633     while (w >= 4)
1634     {
1635         /* fill cache line with next memory */
1636         cache_prefetch_next ((__m128i*)ps);
1637         cache_prefetch_next ((__m128i*)pd);
1638         cache_prefetch_next ((__m128i*)pm);
1639
1640         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1641         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1642
1643         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1644         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1645
1646         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
1647                             &xmm_mask_lo, &xmm_mask_hi,
1648                             &xmm_dst_lo, &xmm_dst_hi);
1649
1650         save_128_aligned (
1651             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1652
1653         ps += 4;
1654         pd += 4;
1655         pm += 4;
1656         w -= 4;
1657     }
1658
1659     while (w)
1660     {
1661         s = *ps++;
1662         m = *pm++;
1663         *pd++ = pack_1x64_32 (
1664             pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)));
1665         w--;
1666     }
1667 }
1668
1669 static force_inline uint32_t
1670 core_combine_over_ca_pixel_sse2 (uint32_t src,
1671                                  uint32_t mask,
1672                                  uint32_t dst)
1673 {
1674     __m64 s = unpack_32_1x64 (src);
1675     __m64 expAlpha = expand_alpha_1x64 (s);
1676     __m64 unpk_mask = unpack_32_1x64 (mask);
1677     __m64 unpk_dst  = unpack_32_1x64 (dst);
1678
1679     return pack_1x64_32 (in_over_1x64 (&s, &expAlpha, &unpk_mask, &unpk_dst));
1680 }
1681
1682 static force_inline void
1683 core_combine_over_ca_sse2 (uint32_t*       pd,
1684                            const uint32_t* ps,
1685                            const uint32_t *pm,
1686                            int             w)
1687 {
1688     uint32_t s, m, d;
1689
1690     __m128i xmm_alpha_lo, xmm_alpha_hi;
1691     __m128i xmm_src_lo, xmm_src_hi;
1692     __m128i xmm_dst_lo, xmm_dst_hi;
1693     __m128i xmm_mask_lo, xmm_mask_hi;
1694
1695     /* call prefetch hint to optimize cache load*/
1696     cache_prefetch ((__m128i*)ps);
1697     cache_prefetch ((__m128i*)pd);
1698     cache_prefetch ((__m128i*)pm);
1699
1700     while (w && (unsigned long)pd & 15)
1701     {
1702         s = *ps++;
1703         m = *pm++;
1704         d = *pd;
1705
1706         *pd++ = core_combine_over_ca_pixel_sse2 (s, m, d);
1707         w--;
1708     }
1709
1710     /* call prefetch hint to optimize cache load*/
1711     cache_prefetch ((__m128i*)ps);
1712     cache_prefetch ((__m128i*)pd);
1713     cache_prefetch ((__m128i*)pm);
1714
1715     while (w >= 4)
1716     {
1717         /* fill cache line with next memory */
1718         cache_prefetch_next ((__m128i*)ps);
1719         cache_prefetch_next ((__m128i*)pd);
1720         cache_prefetch_next ((__m128i*)pm);
1721
1722         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
1723         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1724         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1725
1726         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1727         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1728         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1729
1730         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
1731                             &xmm_alpha_lo, &xmm_alpha_hi);
1732
1733         in_over_2x128 (&xmm_src_lo, &xmm_src_hi,
1734                        &xmm_alpha_lo, &xmm_alpha_hi,
1735                        &xmm_mask_lo, &xmm_mask_hi,
1736                        &xmm_dst_lo, &xmm_dst_hi);
1737
1738         save_128_aligned (
1739             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1740
1741         ps += 4;
1742         pd += 4;
1743         pm += 4;
1744         w -= 4;
1745     }
1746
1747     while (w)
1748     {
1749         s = *ps++;
1750         m = *pm++;
1751         d = *pd;
1752
1753         *pd++ = core_combine_over_ca_pixel_sse2 (s, m, d);
1754         w--;
1755     }
1756 }
1757
1758 static force_inline uint32_t
1759 core_combine_over_reverse_ca_pixel_sse2 (uint32_t src,
1760                                          uint32_t mask,
1761                                          uint32_t dst)
1762 {
1763     __m64 d = unpack_32_1x64 (dst);
1764
1765     return pack_1x64_32 (
1766         over_1x64 (d, expand_alpha_1x64 (d),
1767                    pix_multiply_1x64 (unpack_32_1x64 (src),
1768                                       unpack_32_1x64 (mask))));
1769 }
1770
1771 static force_inline void
1772 core_combine_over_reverse_ca_sse2 (uint32_t*       pd,
1773                                    const uint32_t* ps,
1774                                    const uint32_t *pm,
1775                                    int             w)
1776 {
1777     uint32_t s, m, d;
1778
1779     __m128i xmm_alpha_lo, xmm_alpha_hi;
1780     __m128i xmm_src_lo, xmm_src_hi;
1781     __m128i xmm_dst_lo, xmm_dst_hi;
1782     __m128i xmm_mask_lo, xmm_mask_hi;
1783
1784     /* call prefetch hint to optimize cache load*/
1785     cache_prefetch ((__m128i*)ps);
1786     cache_prefetch ((__m128i*)pd);
1787     cache_prefetch ((__m128i*)pm);
1788
1789     while (w && (unsigned long)pd & 15)
1790     {
1791         s = *ps++;
1792         m = *pm++;
1793         d = *pd;
1794
1795         *pd++ = core_combine_over_reverse_ca_pixel_sse2 (s, m, d);
1796         w--;
1797     }
1798
1799     /* call prefetch hint to optimize cache load*/
1800     cache_prefetch ((__m128i*)ps);
1801     cache_prefetch ((__m128i*)pd);
1802     cache_prefetch ((__m128i*)pm);
1803
1804     while (w >= 4)
1805     {
1806         /* fill cache line with next memory */
1807         cache_prefetch_next ((__m128i*)ps);
1808         cache_prefetch_next ((__m128i*)pd);
1809         cache_prefetch_next ((__m128i*)pm);
1810
1811         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
1812         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1813         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1814
1815         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1816         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1817         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1818
1819         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
1820                             &xmm_alpha_lo, &xmm_alpha_hi);
1821         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
1822                             &xmm_mask_lo, &xmm_mask_hi,
1823                             &xmm_mask_lo, &xmm_mask_hi);
1824
1825         over_2x128 (&xmm_dst_lo, &xmm_dst_hi,
1826                     &xmm_alpha_lo, &xmm_alpha_hi,
1827                     &xmm_mask_lo, &xmm_mask_hi);
1828
1829         save_128_aligned (
1830             (__m128i*)pd, pack_2x128_128 (xmm_mask_lo, xmm_mask_hi));
1831
1832         ps += 4;
1833         pd += 4;
1834         pm += 4;
1835         w -= 4;
1836     }
1837
1838     while (w)
1839     {
1840         s = *ps++;
1841         m = *pm++;
1842         d = *pd;
1843
1844         *pd++ = core_combine_over_reverse_ca_pixel_sse2 (s, m, d);
1845         w--;
1846     }
1847 }
1848
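/*
 * Component-alpha IN:  dest = (s * m) * d.a  per channel; the masked
 * source is kept only where the destination has coverage.
 */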
1849 static force_inline void
1850 core_combine_in_ca_sse2 (uint32_t *      pd,
1851                          const uint32_t *ps,
1852                          const uint32_t *pm,
1853                          int             w)
1854 {
1855     uint32_t s, m, d;
1856
1857     __m128i xmm_alpha_lo, xmm_alpha_hi;
1858     __m128i xmm_src_lo, xmm_src_hi;
1859     __m128i xmm_dst_lo, xmm_dst_hi;
1860     __m128i xmm_mask_lo, xmm_mask_hi;
1861
1862     /* call prefetch hint to optimize cache load */
1863     cache_prefetch ((__m128i*)ps);
1864     cache_prefetch ((__m128i*)pd);
1865     cache_prefetch ((__m128i*)pm);
1866
1867     while (w && (unsigned long)pd & 15)
1868     {
1869         s = *ps++;
1870         m = *pm++;
1871         d = *pd;
1872
1873         *pd++ = pack_1x64_32 (
1874             pix_multiply_1x64 (
1875                 pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)),
1876                 expand_alpha_1x64 (unpack_32_1x64 (d))));
1877
1878         w--;
1879     }
1880
1881     /* call prefetch hint to optimize cache load */
1882     cache_prefetch ((__m128i*)ps);
1883     cache_prefetch ((__m128i*)pd);
1884     cache_prefetch ((__m128i*)pm);
1885
1886     while (w >= 4)
1887     {
1888         /* fill cache line with next memory */
1889         cache_prefetch_next ((__m128i*)ps);
1890         cache_prefetch_next ((__m128i*)pd);
1891         cache_prefetch_next ((__m128i*)pm);
1892
1893         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
1894         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1895         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1896
1897         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1898         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1899         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1900
1901         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
1902                             &xmm_alpha_lo, &xmm_alpha_hi);
1903
1904         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
1905                             &xmm_mask_lo, &xmm_mask_hi,
1906                             &xmm_dst_lo, &xmm_dst_hi);
1907
1908         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
1909                             &xmm_alpha_lo, &xmm_alpha_hi,
1910                             &xmm_dst_lo, &xmm_dst_hi);
1911
1912         save_128_aligned (
1913             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1914
1915         ps += 4;
1916         pd += 4;
1917         pm += 4;
1918         w -= 4;
1919     }
1920
1921     while (w)
1922     {
1923         s = *ps++;
1924         m = *pm++;
1925         d = *pd;
1926
1927         *pd++ = pack_1x64_32 (
1928             pix_multiply_1x64 (
1929                 pix_multiply_1x64 (
1930                     unpack_32_1x64 (s), unpack_32_1x64 (m)),
1931                 expand_alpha_1x64 (unpack_32_1x64 (d))));
1932
1933         w--;
1934     }
1935 }
1936
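/*
 * Component-alpha IN_REVERSE:  dest = d * (m * s.a)  per channel; the
 * destination is kept only where the masked source has coverage.
 */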
1937 static force_inline void
1938 core_combine_in_reverse_ca_sse2 (uint32_t *      pd,
1939                                  const uint32_t *ps,
1940                                  const uint32_t *pm,
1941                                  int             w)
1942 {
1943     uint32_t s, m, d;
1944
1945     __m128i xmm_alpha_lo, xmm_alpha_hi;
1946     __m128i xmm_src_lo, xmm_src_hi;
1947     __m128i xmm_dst_lo, xmm_dst_hi;
1948     __m128i xmm_mask_lo, xmm_mask_hi;
1949
1950     /* call prefetch hint to optimize cache load */
1951     cache_prefetch ((__m128i*)ps);
1952     cache_prefetch ((__m128i*)pd);
1953     cache_prefetch ((__m128i*)pm);
1954
1955     while (w && (unsigned long)pd & 15)
1956     {
1957         s = *ps++;
1958         m = *pm++;
1959         d = *pd;
1960
1961         *pd++ = pack_1x64_32 (
1962             pix_multiply_1x64 (
1963                 unpack_32_1x64 (d),
1964                 pix_multiply_1x64 (unpack_32_1x64 (m),
1965                                    expand_alpha_1x64 (unpack_32_1x64 (s)))));
1966         w--;
1967     }
1968
1969     /* call prefetch hint to optimize cache load */
1970     cache_prefetch ((__m128i*)ps);
1971     cache_prefetch ((__m128i*)pd);
1972     cache_prefetch ((__m128i*)pm);
1973
1974     while (w >= 4)
1975     {
1976         /* fill cache line with next memory */
1977         cache_prefetch_next ((__m128i*)ps);
1978         cache_prefetch_next ((__m128i*)pd);
1979         cache_prefetch_next ((__m128i*)pm);
1980
1981         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
1982         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1983         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1984
1985         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1986         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1987         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1988
1989         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
1990                             &xmm_alpha_lo, &xmm_alpha_hi);
1991         pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
1992                             &xmm_alpha_lo, &xmm_alpha_hi,
1993                             &xmm_alpha_lo, &xmm_alpha_hi);
1994
1995         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
1996                             &xmm_alpha_lo, &xmm_alpha_hi,
1997                             &xmm_dst_lo, &xmm_dst_hi);
1998
1999         save_128_aligned (
2000             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2001
2002         ps += 4;
2003         pd += 4;
2004         pm += 4;
2005         w -= 4;
2006     }
2007
2008     while (w)
2009     {
2010         s = *ps++;
2011         m = *pm++;
2012         d = *pd;
2013
2014         *pd++ = pack_1x64_32 (
2015             pix_multiply_1x64 (
2016                 unpack_32_1x64 (d),
2017                 pix_multiply_1x64 (unpack_32_1x64 (m),
2018                                    expand_alpha_1x64 (unpack_32_1x64 (s)))));
2019         w--;
2020     }
2021 }
2022
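/*
 * Component-alpha OUT:  dest = (s * m) * (1 - d.a)  per channel.
 */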
2023 static force_inline void
2024 core_combine_out_ca_sse2 (uint32_t *      pd,
2025                           const uint32_t *ps,
2026                           const uint32_t *pm,
2027                           int             w)
2028 {
2029     uint32_t s, m, d;
2030
2031     __m128i xmm_alpha_lo, xmm_alpha_hi;
2032     __m128i xmm_src_lo, xmm_src_hi;
2033     __m128i xmm_dst_lo, xmm_dst_hi;
2034     __m128i xmm_mask_lo, xmm_mask_hi;
2035
2036     /* call prefetch hint to optimize cache load */
2037     cache_prefetch ((__m128i*)ps);
2038     cache_prefetch ((__m128i*)pd);
2039     cache_prefetch ((__m128i*)pm);
2040
2041     while (w && (unsigned long)pd & 15)
2042     {
2043         s = *ps++;
2044         m = *pm++;
2045         d = *pd;
2046
2047         *pd++ = pack_1x64_32 (
2048             pix_multiply_1x64 (
2049                 pix_multiply_1x64 (
2050                     unpack_32_1x64 (s), unpack_32_1x64 (m)),
2051                 negate_1x64 (expand_alpha_1x64 (unpack_32_1x64 (d)))));
2052         w--;
2053     }
2054
2055     /* call prefetch hint to optimize cache load */
2056     cache_prefetch ((__m128i*)ps);
2057     cache_prefetch ((__m128i*)pd);
2058     cache_prefetch ((__m128i*)pm);
2059
2060     while (w >= 4)
2061     {
2062         /* fill cache line with next memory */
2063         cache_prefetch_next ((__m128i*)ps);
2064         cache_prefetch_next ((__m128i*)pd);
2065         cache_prefetch_next ((__m128i*)pm);
2066
2067         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
2068         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
2069         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
2070
2071         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
2072         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
2073         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2074
2075         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
2076                             &xmm_alpha_lo, &xmm_alpha_hi);
2077         negate_2x128 (xmm_alpha_lo, xmm_alpha_hi,
2078                       &xmm_alpha_lo, &xmm_alpha_hi);
2079
2080         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
2081                             &xmm_mask_lo, &xmm_mask_hi,
2082                             &xmm_dst_lo, &xmm_dst_hi);
2083         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
2084                             &xmm_alpha_lo, &xmm_alpha_hi,
2085                             &xmm_dst_lo, &xmm_dst_hi);
2086
2087         save_128_aligned (
2088             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2089
2090         ps += 4;
2091         pd += 4;
2092         pm += 4;
2093         w -= 4;
2094     }
2095
2096     while (w)
2097     {
2098         s = *ps++;
2099         m = *pm++;
2100         d = *pd;
2101
2102         *pd++ = pack_1x64_32 (
2103             pix_multiply_1x64 (
2104                 pix_multiply_1x64 (
2105                     unpack_32_1x64 (s), unpack_32_1x64 (m)),
2106                 negate_1x64 (expand_alpha_1x64 (unpack_32_1x64 (d)))));
2107
2108         w--;
2109     }
2110 }
2111
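/*
 * Component-alpha OUT_REVERSE:  dest = d * (1 - m * s.a)  per channel.
 */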
2112 static force_inline void
2113 core_combine_out_reverse_ca_sse2 (uint32_t *      pd,
2114                                   const uint32_t *ps,
2115                                   const uint32_t *pm,
2116                                   int             w)
2117 {
2118     uint32_t s, m, d;
2119
2120     __m128i xmm_alpha_lo, xmm_alpha_hi;
2121     __m128i xmm_src_lo, xmm_src_hi;
2122     __m128i xmm_dst_lo, xmm_dst_hi;
2123     __m128i xmm_mask_lo, xmm_mask_hi;
2124
2125     /* call prefetch hint to optimize cache load */
2126     cache_prefetch ((__m128i*)ps);
2127     cache_prefetch ((__m128i*)pd);
2128     cache_prefetch ((__m128i*)pm);
2129
2130     while (w && (unsigned long)pd & 15)
2131     {
2132         s = *ps++;
2133         m = *pm++;
2134         d = *pd;
2135
2136         *pd++ = pack_1x64_32 (
2137             pix_multiply_1x64 (
2138                 unpack_32_1x64 (d),
2139                 negate_1x64 (pix_multiply_1x64 (
2140                                  unpack_32_1x64 (m),
2141                                  expand_alpha_1x64 (unpack_32_1x64 (s))))));
2142         w--;
2143     }
2144
2145     /* call prefetch hint to optimize cache load */
2146     cache_prefetch ((__m128i*)ps);
2147     cache_prefetch ((__m128i*)pd);
2148     cache_prefetch ((__m128i*)pm);
2149
2150     while (w >= 4)
2151     {
2152         /* fill cache line with next memory */
2153         cache_prefetch_next ((__m128i*)ps);
2154         cache_prefetch_next ((__m128i*)pd);
2155         cache_prefetch_next ((__m128i*)pm);
2156
2157         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
2158         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
2159         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
2160
2161         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
2162         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
2163         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2164
2165         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
2166                             &xmm_alpha_lo, &xmm_alpha_hi);
2167
2168         pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
2169                             &xmm_alpha_lo, &xmm_alpha_hi,
2170                             &xmm_mask_lo, &xmm_mask_hi);
2171
2172         negate_2x128 (xmm_mask_lo, xmm_mask_hi,
2173                       &xmm_mask_lo, &xmm_mask_hi);
2174
2175         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
2176                             &xmm_mask_lo, &xmm_mask_hi,
2177                             &xmm_dst_lo, &xmm_dst_hi);
2178
2179         save_128_aligned (
2180             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2181
2182         ps += 4;
2183         pd += 4;
2184         pm += 4;
2185         w -= 4;
2186     }
2187
2188     while (w)
2189     {
2190         s = *ps++;
2191         m = *pm++;
2192         d = *pd;
2193
2194         *pd++ = pack_1x64_32 (
2195             pix_multiply_1x64 (
2196                 unpack_32_1x64 (d),
2197                 negate_1x64 (pix_multiply_1x64 (
2198                                  unpack_32_1x64 (m),
2199                                  expand_alpha_1x64 (unpack_32_1x64 (s))))));
2200         w--;
2201     }
2202 }
2203
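/*
 * Component-alpha ATOP:  dest = (s * m) * d.a + d * (1 - m * s.a)  per
 * channel.  pix_add_multiply_1x64/_2x128 computes the two products and
 * their saturated sum in one call.
 */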
2204 static force_inline uint32_t
2205 core_combine_atop_ca_pixel_sse2 (uint32_t src,
2206                                  uint32_t mask,
2207                                  uint32_t dst)
2208 {
2209     __m64 m = unpack_32_1x64 (mask);
2210     __m64 s = unpack_32_1x64 (src);
2211     __m64 d = unpack_32_1x64 (dst);
2212     __m64 sa = expand_alpha_1x64 (s);
2213     __m64 da = expand_alpha_1x64 (d);
2214
2215     s = pix_multiply_1x64 (s, m);
2216     m = negate_1x64 (pix_multiply_1x64 (m, sa));
2217
2218     return pack_1x64_32 (pix_add_multiply_1x64 (&d, &m, &s, &da));
2219 }
2220
2221 static force_inline void
2222 core_combine_atop_ca_sse2 (uint32_t *      pd,
2223                            const uint32_t *ps,
2224                            const uint32_t *pm,
2225                            int             w)
2226 {
2227     uint32_t s, m, d;
2228
2229     __m128i xmm_src_lo, xmm_src_hi;
2230     __m128i xmm_dst_lo, xmm_dst_hi;
2231     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
2232     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
2233     __m128i xmm_mask_lo, xmm_mask_hi;
2234
2235     /* call prefetch hint to optimize cache load */
2236     cache_prefetch ((__m128i*)ps);
2237     cache_prefetch ((__m128i*)pd);
2238     cache_prefetch ((__m128i*)pm);
2239
2240     while (w && (unsigned long)pd & 15)
2241     {
2242         s = *ps++;
2243         m = *pm++;
2244         d = *pd;
2245
2246         *pd++ = core_combine_atop_ca_pixel_sse2 (s, m, d);
2247         w--;
2248     }
2249
2250     /* call prefetch hint to optimize cache load */
2251     cache_prefetch ((__m128i*)ps);
2252     cache_prefetch ((__m128i*)pd);
2253     cache_prefetch ((__m128i*)pm);
2254
2255     while (w >= 4)
2256     {
2257         /* fill cache line with next memory */
2258         cache_prefetch_next ((__m128i*)ps);
2259         cache_prefetch_next ((__m128i*)pd);
2260         cache_prefetch_next ((__m128i*)pm);
2261
2262         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
2263         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
2264         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
2265
2266         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
2267         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
2268         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2269
2270         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
2271                             &xmm_alpha_src_lo, &xmm_alpha_src_hi);
2272         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
2273                             &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
2274
2275         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
2276                             &xmm_mask_lo, &xmm_mask_hi,
2277                             &xmm_src_lo, &xmm_src_hi);
2278         pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
2279                             &xmm_alpha_src_lo, &xmm_alpha_src_hi,
2280                             &xmm_mask_lo, &xmm_mask_hi);
2281
2282         negate_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2283
2284         pix_add_multiply_2x128 (
2285             &xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi,
2286             &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
2287             &xmm_dst_lo, &xmm_dst_hi);
2288
2289         save_128_aligned (
2290             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2291
2292         ps += 4;
2293         pd += 4;
2294         pm += 4;
2295         w -= 4;
2296     }
2297
2298     while (w)
2299     {
2300         s = *ps++;
2301         m = *pm++;
2302         d = *pd;
2303
2304         *pd++ = core_combine_atop_ca_pixel_sse2 (s, m, d);
2305         w--;
2306     }
2307 }
2308
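/*
 * Component-alpha ATOP_REVERSE:
 *
 *     dest = (s * m) * (1 - d.a) + d * (m * s.a)   per channel
 */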
2309 static force_inline uint32_t
2310 core_combine_reverse_atop_ca_pixel_sse2 (uint32_t src,
2311                                          uint32_t mask,
2312                                          uint32_t dst)
2313 {
2314     __m64 m = unpack_32_1x64 (mask);
2315     __m64 s = unpack_32_1x64 (src);
2316     __m64 d = unpack_32_1x64 (dst);
2317
2318     __m64 da = negate_1x64 (expand_alpha_1x64 (d));
2319     __m64 sa = expand_alpha_1x64 (s);
2320
2321     s = pix_multiply_1x64 (s, m);
2322     m = pix_multiply_1x64 (m, sa);
2323
2324     return pack_1x64_32 (pix_add_multiply_1x64 (&d, &m, &s, &da));
2325 }
2326
2327 static force_inline void
2328 core_combine_reverse_atop_ca_sse2 (uint32_t *      pd,
2329                                    const uint32_t *ps,
2330                                    const uint32_t *pm,
2331                                    int             w)
2332 {
2333     uint32_t s, m, d;
2334
2335     __m128i xmm_src_lo, xmm_src_hi;
2336     __m128i xmm_dst_lo, xmm_dst_hi;
2337     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
2338     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
2339     __m128i xmm_mask_lo, xmm_mask_hi;
2340
2341     /* call prefetch hint to optimize cache load */
2342     cache_prefetch ((__m128i*)ps);
2343     cache_prefetch ((__m128i*)pd);
2344     cache_prefetch ((__m128i*)pm);
2345
2346     while (w && (unsigned long)pd & 15)
2347     {
2348         s = *ps++;
2349         m = *pm++;
2350         d = *pd;
2351
2352         *pd++ = core_combine_reverse_atop_ca_pixel_sse2 (s, m, d);
2353         w--;
2354     }
2355
2356     /* call prefetch hint to optimize cache load */
2357     cache_prefetch ((__m128i*)ps);
2358     cache_prefetch ((__m128i*)pd);
2359     cache_prefetch ((__m128i*)pm);
2360
2361     while (w >= 4)
2362     {
2363         /* fill cache line with next memory */
2364         cache_prefetch_next ((__m128i*)ps);
2365         cache_prefetch_next ((__m128i*)pd);
2366         cache_prefetch_next ((__m128i*)pm);
2367
2368         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
2369         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
2370         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
2371
2372         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
2373         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
2374         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2375
2376         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
2377                             &xmm_alpha_src_lo, &xmm_alpha_src_hi);
2378         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
2379                             &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
2380
2381         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
2382                             &xmm_mask_lo, &xmm_mask_hi,
2383                             &xmm_src_lo, &xmm_src_hi);
2384         pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
2385                             &xmm_alpha_src_lo, &xmm_alpha_src_hi,
2386                             &xmm_mask_lo, &xmm_mask_hi);
2387
2388         negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi,
2389                       &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
2390
2391         pix_add_multiply_2x128 (
2392             &xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi,
2393             &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
2394             &xmm_dst_lo, &xmm_dst_hi);
2395
2396         save_128_aligned (
2397             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2398
2399         ps += 4;
2400         pd += 4;
2401         pm += 4;
2402         w -= 4;
2403     }
2404
2405     while (w)
2406     {
2407         s = *ps++;
2408         m = *pm++;
2409         d = *pd;
2410
2411         *pd++ = core_combine_reverse_atop_ca_pixel_sse2 (s, m, d);
2412         w--;
2413     }
2414 }
2415
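/*
 * Component-alpha XOR:
 *
 *     dest = (s * m) * (1 - d.a) + d * (1 - m * s.a)   per channel
 */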
2416 static force_inline uint32_t
2417 core_combine_xor_ca_pixel_sse2 (uint32_t src,
2418                                 uint32_t mask,
2419                                 uint32_t dst)
2420 {
2421     __m64 a = unpack_32_1x64 (mask);
2422     __m64 s = unpack_32_1x64 (src);
2423     __m64 d = unpack_32_1x64 (dst);
2424
2425     __m64 alpha_dst = negate_1x64 (pix_multiply_1x64 (
2426                                        a, expand_alpha_1x64 (s)));
2427     __m64 dest      = pix_multiply_1x64 (s, a);
2428     __m64 alpha_src = negate_1x64 (expand_alpha_1x64 (d));
2429
2430     return pack_1x64_32 (pix_add_multiply_1x64 (&d,
2431                                                 &alpha_dst,
2432                                                 &dest,
2433                                                 &alpha_src));
2434 }
2435
2436 static force_inline void
2437 core_combine_xor_ca_sse2 (uint32_t *      pd,
2438                           const uint32_t *ps,
2439                           const uint32_t *pm,
2440                           int             w)
2441 {
2442     uint32_t s, m, d;
2443
2444     __m128i xmm_src_lo, xmm_src_hi;
2445     __m128i xmm_dst_lo, xmm_dst_hi;
2446     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
2447     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
2448     __m128i xmm_mask_lo, xmm_mask_hi;
2449
2450     /* call prefetch hint to optimize cache load */
2451     cache_prefetch ((__m128i*)ps);
2452     cache_prefetch ((__m128i*)pd);
2453     cache_prefetch ((__m128i*)pm);
2454
2455     while (w && (unsigned long)pd & 15)
2456     {
2457         s = *ps++;
2458         m = *pm++;
2459         d = *pd;
2460
2461         *pd++ = core_combine_xor_ca_pixel_sse2 (s, m, d);
2462         w--;
2463     }
2464
2465     /* call prefetch hint to optimize cache load */
2466     cache_prefetch ((__m128i*)ps);
2467     cache_prefetch ((__m128i*)pd);
2468     cache_prefetch ((__m128i*)pm);
2469
2470     while (w >= 4)
2471     {
2472         /* fill cache line with next memory */
2473         cache_prefetch_next ((__m128i*)ps);
2474         cache_prefetch_next ((__m128i*)pd);
2475         cache_prefetch_next ((__m128i*)pm);
2476
2477         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
2478         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
2479         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
2480
2481         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
2482         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
2483         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2484
2485         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
2486                             &xmm_alpha_src_lo, &xmm_alpha_src_hi);
2487         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
2488                             &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
2489
2490         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
2491                             &xmm_mask_lo, &xmm_mask_hi,
2492                             &xmm_src_lo, &xmm_src_hi);
2493         pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
2494                             &xmm_alpha_src_lo, &xmm_alpha_src_hi,
2495                             &xmm_mask_lo, &xmm_mask_hi);
2496
2497         negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi,
2498                       &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
2499         negate_2x128 (xmm_mask_lo, xmm_mask_hi,
2500                       &xmm_mask_lo, &xmm_mask_hi);
2501
2502         pix_add_multiply_2x128 (
2503             &xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi,
2504             &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
2505             &xmm_dst_lo, &xmm_dst_hi);
2506
2507         save_128_aligned (
2508             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2509
2510         ps += 4;
2511         pd += 4;
2512         pm += 4;
2513         w -= 4;
2514     }
2515
2516     while (w)
2517     {
2518         s = *ps++;
2519         m = *pm++;
2520         d = *pd;
2521
2522         *pd++ = core_combine_xor_ca_pixel_sse2 (s, m, d);
2523         w--;
2524     }
2525 }
2526
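/*
 * Component-alpha ADD:  dest = clamp (s * m + d)  per channel, using the
 * unsigned saturating byte adds (_mm_adds_pu8 / _mm_adds_epu8).
 */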
2527 static force_inline void
2528 core_combine_add_ca_sse2 (uint32_t *      pd,
2529                           const uint32_t *ps,
2530                           const uint32_t *pm,
2531                           int             w)
2532 {
2533     uint32_t s, m, d;
2534
2535     __m128i xmm_src_lo, xmm_src_hi;
2536     __m128i xmm_dst_lo, xmm_dst_hi;
2537     __m128i xmm_mask_lo, xmm_mask_hi;
2538
2539     /* call prefetch hint to optimize cache load */
2540     cache_prefetch ((__m128i*)ps);
2541     cache_prefetch ((__m128i*)pd);
2542     cache_prefetch ((__m128i*)pm);
2543
2544     while (w && (unsigned long)pd & 15)
2545     {
2546         s = *ps++;
2547         m = *pm++;
2548         d = *pd;
2549
2550         *pd++ = pack_1x64_32 (
2551             _mm_adds_pu8 (pix_multiply_1x64 (unpack_32_1x64 (s),
2552                                              unpack_32_1x64 (m)),
2553                           unpack_32_1x64 (d)));
2554         w--;
2555     }
2556
2557     /* call prefetch hint to optimize cache load */
2558     cache_prefetch ((__m128i*)ps);
2559     cache_prefetch ((__m128i*)pd);
2560     cache_prefetch ((__m128i*)pm);
2561
2562     while (w >= 4)
2563     {
2564         /* fill cache line with next memory */
2565         cache_prefetch_next ((__m128i*)ps);
2566         cache_prefetch_next ((__m128i*)pd);
2567         cache_prefetch_next ((__m128i*)pm);
2568
2569         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
2570         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
2571         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
2572
2573         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
2574         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2575         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
2576
2577         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
2578                             &xmm_mask_lo, &xmm_mask_hi,
2579                             &xmm_src_lo, &xmm_src_hi);
2580
2581         save_128_aligned (
2582             (__m128i*)pd, pack_2x128_128 (
2583                 _mm_adds_epu8 (xmm_src_lo, xmm_dst_lo),
2584                 _mm_adds_epu8 (xmm_src_hi, xmm_dst_hi)));
2585
2586         ps += 4;
2587         pd += 4;
2588         pm += 4;
2589         w -= 4;
2590     }
2591
2592     while (w)
2593     {
2594         s = *ps++;
2595         m = *pm++;
2596         d = *pd;
2597
2598         *pd++ = pack_1x64_32 (
2599             _mm_adds_pu8 (pix_multiply_1x64 (unpack_32_1x64 (s),
2600                                              unpack_32_1x64 (m)),
2601                           unpack_32_1x64 (d)));
2602         w--;
2603     }
2604 }
2605
2606 /* ---------------------------------------------------
2607  * fb_compose_setup_SSE2
2608  */
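/*
 * The helpers below broadcast a constant into every 16-bit or 32-bit
 * lane of an MMX/SSE2 register.  They are used both by the setup code
 * that fills in the static masks declared at the top of this file and
 * directly by the fast paths further down, for example
 *
 *     xmm_mask = create_mask_16_128 (mask >> 24);
 *
 * which replicates the alpha byte of a solid mask into all eight 16-bit
 * lanes.
 */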
2609 static force_inline __m64
2610 create_mask_16_64 (uint16_t mask)
2611 {
2612     return _mm_set1_pi16 (mask);
2613 }
2614
2615 static force_inline __m128i
2616 create_mask_16_128 (uint16_t mask)
2617 {
2618     return _mm_set1_epi16 (mask);
2619 }
2620
2621 static force_inline __m64
2622 create_mask_2x32_64 (uint32_t mask0,
2623                      uint32_t mask1)
2624 {
2625     return _mm_set_pi32 (mask0, mask1);
2626 }
2627
2628 static force_inline __m128i
2629 create_mask_2x32_128 (uint32_t mask0,
2630                       uint32_t mask1)
2631 {
2632     return _mm_set_epi32 (mask0, mask1, mask0, mask1);
2633 }
2634
2635 /* SSE2 code patch for fbcompose.c */
2636
2637 static void
2638 sse2_combine_over_u (pixman_implementation_t *imp,
2639                      pixman_op_t              op,
2640                      uint32_t *               dst,
2641                      const uint32_t *         src,
2642                      const uint32_t *         mask,
2643                      int                      width)
2644 {
2645     core_combine_over_u_sse2 (dst, src, mask, width);
2646     _mm_empty ();
2647 }
2648
2649 static void
2650 sse2_combine_over_reverse_u (pixman_implementation_t *imp,
2651                              pixman_op_t              op,
2652                              uint32_t *               dst,
2653                              const uint32_t *         src,
2654                              const uint32_t *         mask,
2655                              int                      width)
2656 {
2657     core_combine_over_reverse_u_sse2 (dst, src, mask, width);
2658     _mm_empty ();
2659 }
2660
2661 static void
2662 sse2_combine_in_u (pixman_implementation_t *imp,
2663                    pixman_op_t              op,
2664                    uint32_t *               dst,
2665                    const uint32_t *         src,
2666                    const uint32_t *         mask,
2667                    int                      width)
2668 {
2669     core_combine_in_u_sse2 (dst, src, mask, width);
2670     _mm_empty ();
2671 }
2672
2673 static void
2674 sse2_combine_in_reverse_u (pixman_implementation_t *imp,
2675                            pixman_op_t              op,
2676                            uint32_t *               dst,
2677                            const uint32_t *         src,
2678                            const uint32_t *         mask,
2679                            int                      width)
2680 {
2681     core_combine_reverse_in_u_sse2 (dst, src, mask, width);
2682     _mm_empty ();
2683 }
2684
2685 static void
2686 sse2_combine_out_u (pixman_implementation_t *imp,
2687                     pixman_op_t              op,
2688                     uint32_t *               dst,
2689                     const uint32_t *         src,
2690                     const uint32_t *         mask,
2691                     int                      width)
2692 {
2693     core_combine_out_u_sse2 (dst, src, mask, width);
2694     _mm_empty ();
2695 }
2696
2697 static void
2698 sse2_combine_out_reverse_u (pixman_implementation_t *imp,
2699                             pixman_op_t              op,
2700                             uint32_t *               dst,
2701                             const uint32_t *         src,
2702                             const uint32_t *         mask,
2703                             int                      width)
2704 {
2705     core_combine_reverse_out_u_sse2 (dst, src, mask, width);
2706     _mm_empty ();
2707 }
2708
2709 static void
2710 sse2_combine_atop_u (pixman_implementation_t *imp,
2711                      pixman_op_t              op,
2712                      uint32_t *               dst,
2713                      const uint32_t *         src,
2714                      const uint32_t *         mask,
2715                      int                      width)
2716 {
2717     core_combine_atop_u_sse2 (dst, src, mask, width);
2718     _mm_empty ();
2719 }
2720
2721 static void
2722 sse2_combine_atop_reverse_u (pixman_implementation_t *imp,
2723                              pixman_op_t              op,
2724                              uint32_t *               dst,
2725                              const uint32_t *         src,
2726                              const uint32_t *         mask,
2727                              int                      width)
2728 {
2729     core_combine_reverse_atop_u_sse2 (dst, src, mask, width);
2730     _mm_empty ();
2731 }
2732
2733 static void
2734 sse2_combine_xor_u (pixman_implementation_t *imp,
2735                     pixman_op_t              op,
2736                     uint32_t *               dst,
2737                     const uint32_t *         src,
2738                     const uint32_t *         mask,
2739                     int                      width)
2740 {
2741     core_combine_xor_u_sse2 (dst, src, mask, width);
2742     _mm_empty ();
2743 }
2744
2745 static void
2746 sse2_combine_add_u (pixman_implementation_t *imp,
2747                     pixman_op_t              op,
2748                     uint32_t *               dst,
2749                     const uint32_t *         src,
2750                     const uint32_t *         mask,
2751                     int                      width)
2752 {
2753     core_combine_add_u_sse2 (dst, src, mask, width);
2754     _mm_empty ();
2755 }
2756
2757 static void
2758 sse2_combine_saturate_u (pixman_implementation_t *imp,
2759                          pixman_op_t              op,
2760                          uint32_t *               dst,
2761                          const uint32_t *         src,
2762                          const uint32_t *         mask,
2763                          int                      width)
2764 {
2765     core_combine_saturate_u_sse2 (dst, src, mask, width);
2766     _mm_empty ();
2767 }
2768
2769 static void
2770 sse2_combine_src_ca (pixman_implementation_t *imp,
2771                      pixman_op_t              op,
2772                      uint32_t *               dst,
2773                      const uint32_t *         src,
2774                      const uint32_t *         mask,
2775                      int                      width)
2776 {
2777     core_combine_src_ca_sse2 (dst, src, mask, width);
2778     _mm_empty ();
2779 }
2780
2781 static void
2782 sse2_combine_over_ca (pixman_implementation_t *imp,
2783                       pixman_op_t              op,
2784                       uint32_t *               dst,
2785                       const uint32_t *         src,
2786                       const uint32_t *         mask,
2787                       int                      width)
2788 {
2789     core_combine_over_ca_sse2 (dst, src, mask, width);
2790     _mm_empty ();
2791 }
2792
2793 static void
2794 sse2_combine_over_reverse_ca (pixman_implementation_t *imp,
2795                               pixman_op_t              op,
2796                               uint32_t *               dst,
2797                               const uint32_t *         src,
2798                               const uint32_t *         mask,
2799                               int                      width)
2800 {
2801     core_combine_over_reverse_ca_sse2 (dst, src, mask, width);
2802     _mm_empty ();
2803 }
2804
2805 static void
2806 sse2_combine_in_ca (pixman_implementation_t *imp,
2807                     pixman_op_t              op,
2808                     uint32_t *               dst,
2809                     const uint32_t *         src,
2810                     const uint32_t *         mask,
2811                     int                      width)
2812 {
2813     core_combine_in_ca_sse2 (dst, src, mask, width);
2814     _mm_empty ();
2815 }
2816
2817 static void
2818 sse2_combine_in_reverse_ca (pixman_implementation_t *imp,
2819                             pixman_op_t              op,
2820                             uint32_t *               dst,
2821                             const uint32_t *         src,
2822                             const uint32_t *         mask,
2823                             int                      width)
2824 {
2825     core_combine_in_reverse_ca_sse2 (dst, src, mask, width);
2826     _mm_empty ();
2827 }
2828
2829 static void
2830 sse2_combine_out_ca (pixman_implementation_t *imp,
2831                      pixman_op_t              op,
2832                      uint32_t *               dst,
2833                      const uint32_t *         src,
2834                      const uint32_t *         mask,
2835                      int                      width)
2836 {
2837     core_combine_out_ca_sse2 (dst, src, mask, width);
2838     _mm_empty ();
2839 }
2840
2841 static void
2842 sse2_combine_out_reverse_ca (pixman_implementation_t *imp,
2843                              pixman_op_t              op,
2844                              uint32_t *               dst,
2845                              const uint32_t *         src,
2846                              const uint32_t *         mask,
2847                              int                      width)
2848 {
2849     core_combine_out_reverse_ca_sse2 (dst, src, mask, width);
2850     _mm_empty ();
2851 }
2852
2853 static void
2854 sse2_combine_atop_ca (pixman_implementation_t *imp,
2855                       pixman_op_t              op,
2856                       uint32_t *               dst,
2857                       const uint32_t *         src,
2858                       const uint32_t *         mask,
2859                       int                      width)
2860 {
2861     core_combine_atop_ca_sse2 (dst, src, mask, width);
2862     _mm_empty ();
2863 }
2864
2865 static void
2866 sse2_combine_atop_reverse_ca (pixman_implementation_t *imp,
2867                               pixman_op_t              op,
2868                               uint32_t *               dst,
2869                               const uint32_t *         src,
2870                               const uint32_t *         mask,
2871                               int                      width)
2872 {
2873     core_combine_reverse_atop_ca_sse2 (dst, src, mask, width);
2874     _mm_empty ();
2875 }
2876
2877 static void
2878 sse2_combine_xor_ca (pixman_implementation_t *imp,
2879                      pixman_op_t              op,
2880                      uint32_t *               dst,
2881                      const uint32_t *         src,
2882                      const uint32_t *         mask,
2883                      int                      width)
2884 {
2885     core_combine_xor_ca_sse2 (dst, src, mask, width);
2886     _mm_empty ();
2887 }
2888
2889 static void
2890 sse2_combine_add_ca (pixman_implementation_t *imp,
2891                      pixman_op_t              op,
2892                      uint32_t *               dst,
2893                      const uint32_t *         src,
2894                      const uint32_t *         mask,
2895                      int                      width)
2896 {
2897     core_combine_add_ca_sse2 (dst, src, mask, width);
2898     _mm_empty ();
2899 }
2900
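/*
 * The sse2_combine_* functions above are thin wrappers that adapt the
 * core_combine_* helpers to the combiner signature used by
 * pixman_implementation_t and clear the MMX state with _mm_empty ()
 * before returning.  A rough sketch of how they end up being used --
 * the actual table assignments are made by the SSE2 setup code later in
 * this file, so the field names here are only illustrative:
 *
 *     imp->combine_32[PIXMAN_OP_OVER]    = sse2_combine_over_u;
 *     imp->combine_32_ca[PIXMAN_OP_OVER] = sse2_combine_over_ca;
 */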
2901 /* -------------------------------------------------------------------
2902  * composite_over_n_8888
2903  */
2904
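/*
 * Solid ("n") source composited OVER an 8888 destination.  Per pixel:
 *
 *     dest = src + dest * (1 - src.a)
 *
 * The solid source and its expanded alpha are hoisted out of the loops.
 */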
2905 static void
2906 sse2_composite_over_n_8888 (pixman_implementation_t *imp,
2907                             pixman_op_t              op,
2908                             pixman_image_t *         src_image,
2909                             pixman_image_t *         mask_image,
2910                             pixman_image_t *         dst_image,
2911                             int32_t                  src_x,
2912                             int32_t                  src_y,
2913                             int32_t                  mask_x,
2914                             int32_t                  mask_y,
2915                             int32_t                  dest_x,
2916                             int32_t                  dest_y,
2917                             int32_t                  width,
2918                             int32_t                  height)
2919 {
2920     uint32_t src;
2921     uint32_t    *dst_line, *dst, d;
2922     int w;
2923     int dst_stride;
2924     __m128i xmm_src, xmm_alpha;
2925     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
2926
2927     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
2928
2929     if (src == 0)
2930         return;
2931
2932     PIXMAN_IMAGE_GET_LINE (
2933         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
2934
2935     xmm_src = expand_pixel_32_1x128 (src);
2936     xmm_alpha = expand_alpha_1x128 (xmm_src);
2937
2938     while (height--)
2939     {
2940         dst = dst_line;
2941
2942         /* call prefetch hint to optimize cache load */
2943         cache_prefetch ((__m128i*)dst);
2944
2945         dst_line += dst_stride;
2946         w = width;
2947
2948         while (w && (unsigned long)dst & 15)
2949         {
2950             d = *dst;
2951             *dst++ = pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
2952                                               _mm_movepi64_pi64 (xmm_alpha),
2953                                               unpack_32_1x64 (d)));
2954             w--;
2955         }
2956
2957         cache_prefetch ((__m128i*)dst);
2958
2959         while (w >= 4)
2960         {
2961             /* fill cache line with next memory */
2962             cache_prefetch_next ((__m128i*)dst);
2963
2964             xmm_dst = load_128_aligned ((__m128i*)dst);
2965
2966             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
2967
2968             over_2x128 (&xmm_src, &xmm_src,
2969                         &xmm_alpha, &xmm_alpha,
2970                         &xmm_dst_lo, &xmm_dst_hi);
2971
2972             /* rebuild the 4 pixel data and save */
2973             save_128_aligned (
2974                 (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2975
2976             w -= 4;
2977             dst += 4;
2978         }
2979
2980         while (w)
2981         {
2982             d = *dst;
2983             *dst++ = pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
2984                                               _mm_movepi64_pi64 (xmm_alpha),
2985                                               unpack_32_1x64 (d)));
2986             w--;
2987         }
2988
2989     }
2990     _mm_empty ();
2991 }
2992
2993 /* ---------------------------------------------------------------------
2994  * composite_over_n_0565
2995  */
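/*
 * Same operation as above for an r5g6b5 destination: each 16-bit pixel
 * is expanded to 8888, blended with over_1x64/over_2x128 and packed back
 * to 565.  The wide loop handles eight 565 pixels per 128-bit load.
 */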
2996 static void
2997 sse2_composite_over_n_0565 (pixman_implementation_t *imp,
2998                             pixman_op_t              op,
2999                             pixman_image_t *         src_image,
3000                             pixman_image_t *         mask_image,
3001                             pixman_image_t *         dst_image,
3002                             int32_t                  src_x,
3003                             int32_t                  src_y,
3004                             int32_t                  mask_x,
3005                             int32_t                  mask_y,
3006                             int32_t                  dest_x,
3007                             int32_t                  dest_y,
3008                             int32_t                  width,
3009                             int32_t                  height)
3010 {
3011     uint32_t src;
3012     uint16_t    *dst_line, *dst, d;
3013     int w;
3014     int dst_stride;
3015     __m128i xmm_src, xmm_alpha;
3016     __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
3017
3018     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
3019
3020     if (src == 0)
3021         return;
3022
3023     PIXMAN_IMAGE_GET_LINE (
3024         dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
3025
3026     xmm_src = expand_pixel_32_1x128 (src);
3027     xmm_alpha = expand_alpha_1x128 (xmm_src);
3028
3029     while (height--)
3030     {
3031         dst = dst_line;
3032
3033         /* call prefetch hint to optimize cache load */
3034         cache_prefetch ((__m128i*)dst);
3035
3036         dst_line += dst_stride;
3037         w = width;
3038
3039         while (w && (unsigned long)dst & 15)
3040         {
3041             d = *dst;
3042
3043             *dst++ = pack_565_32_16 (
3044                 pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
3045                                          _mm_movepi64_pi64 (xmm_alpha),
3046                                          expand565_16_1x64 (d))));
3047             w--;
3048         }
3049
3050         /* call prefetch hint to optimize cache load */
3051         cache_prefetch ((__m128i*)dst);
3052
3053         while (w >= 8)
3054         {
3055             /* fill cache line with next memory */
3056             cache_prefetch_next ((__m128i*)dst);
3057
3058             xmm_dst = load_128_aligned ((__m128i*)dst);
3059
3060             unpack_565_128_4x128 (xmm_dst,
3061                                   &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
3062
3063             over_2x128 (&xmm_src, &xmm_src,
3064                         &xmm_alpha, &xmm_alpha,
3065                         &xmm_dst0, &xmm_dst1);
3066             over_2x128 (&xmm_src, &xmm_src,
3067                         &xmm_alpha, &xmm_alpha,
3068                         &xmm_dst2, &xmm_dst3);
3069
3070             xmm_dst = pack_565_4x128_128 (
3071                 &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
3072
3073             save_128_aligned ((__m128i*)dst, xmm_dst);
3074
3075             dst += 8;
3076             w -= 8;
3077         }
3078
3079         while (w--)
3080         {
3081             d = *dst;
3082             *dst++ = pack_565_32_16 (
3083                 pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
3084                                          _mm_movepi64_pi64 (xmm_alpha),
3085                                          expand565_16_1x64 (d))));
3086         }
3087     }
3088
3089     _mm_empty ();
3090 }
3091
3092 /* ---------------------------------------------------------------------------
3093  * composite_over_n_8888_8888_ca
3094  */
3095
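/*
 * Solid source with an a8r8g8b8 component-alpha mask over an a8r8g8b8
 * destination:
 *
 *     dest = src * mask + dest * (1 - mask * src.a)   per channel
 *
 * The wide loop skips groups of four pixels whose mask words are all
 * zero, detected with _mm_cmpeq_epi32 / _mm_movemask_epi8.
 */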
3096 static void
3097 sse2_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
3098                                     pixman_op_t              op,
3099                                     pixman_image_t *         src_image,
3100                                     pixman_image_t *         mask_image,
3101                                     pixman_image_t *         dst_image,
3102                                     int32_t                  src_x,
3103                                     int32_t                  src_y,
3104                                     int32_t                  mask_x,
3105                                     int32_t                  mask_y,
3106                                     int32_t                  dest_x,
3107                                     int32_t                  dest_y,
3108                                     int32_t                  width,
3109                                     int32_t                  height)
3110 {
3111     uint32_t src;
3112     uint32_t    *dst_line, d;
3113     uint32_t    *mask_line, m;
3114     uint32_t pack_cmp;
3115     int dst_stride, mask_stride;
3116
3117     __m128i xmm_src, xmm_alpha;
3118     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
3119     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
3120
3121     __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;
3122
3123     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
3124
3125     if (src == 0)
3126         return;
3127
3128     PIXMAN_IMAGE_GET_LINE (
3129         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3130     PIXMAN_IMAGE_GET_LINE (
3131         mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
3132
3133     xmm_src = _mm_unpacklo_epi8 (
3134         create_mask_2x32_128 (src, src), _mm_setzero_si128 ());
3135     xmm_alpha = expand_alpha_1x128 (xmm_src);
3136     mmx_src   = _mm_movepi64_pi64 (xmm_src);
3137     mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
3138
3139     while (height--)
3140     {
3141         int w = width;
3142         const uint32_t *pm = (uint32_t *)mask_line;
3143         uint32_t *pd = (uint32_t *)dst_line;
3144
3145         dst_line += dst_stride;
3146         mask_line += mask_stride;
3147
3148         /* call prefetch hint to optimize cache load */
3149         cache_prefetch ((__m128i*)pd);
3150         cache_prefetch ((__m128i*)pm);
3151
3152         while (w && (unsigned long)pd & 15)
3153         {
3154             m = *pm++;
3155
3156             if (m)
3157             {
3158                 d = *pd;
3159                 mmx_mask = unpack_32_1x64 (m);
3160                 mmx_dest = unpack_32_1x64 (d);
3161
3162                 *pd = pack_1x64_32 (in_over_1x64 (&mmx_src,
3163                                                   &mmx_alpha,
3164                                                   &mmx_mask,
3165                                                   &mmx_dest));
3166             }
3167
3168             pd++;
3169             w--;
3170         }
3171
3172         /* call prefetch hint to optimize cache load */
3173         cache_prefetch ((__m128i*)pd);
3174         cache_prefetch ((__m128i*)pm);
3175
3176         while (w >= 4)
3177         {
3178             /* fill cache line with next memory */
3179             cache_prefetch_next ((__m128i*)pd);
3180             cache_prefetch_next ((__m128i*)pm);
3181
3182             xmm_mask = load_128_unaligned ((__m128i*)pm);
3183
3184             pack_cmp =
3185                 _mm_movemask_epi8 (
3186                     _mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128 ()));
3187
3188             /* if all bits in mask are zero, pack_cmp is equal to 0xffff */
3189             if (pack_cmp != 0xffff)
3190             {
3191                 xmm_dst = load_128_aligned ((__m128i*)pd);
3192
3193                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
3194                 unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
3195
3196                 in_over_2x128 (&xmm_src, &xmm_src,
3197                                &xmm_alpha, &xmm_alpha,
3198                                &xmm_mask_lo, &xmm_mask_hi,
3199                                &xmm_dst_lo, &xmm_dst_hi);
3200
3201                 save_128_aligned (
3202                     (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
3203             }
3204
3205             pd += 4;
3206             pm += 4;
3207             w -= 4;
3208         }
3209
3210         while (w)
3211         {
3212             m = *pm++;
3213
3214             if (m)
3215             {
3216                 d = *pd;
3217                 mmx_mask = unpack_32_1x64 (m);
3218                 mmx_dest = unpack_32_1x64 (d);
3219
3220                 *pd = pack_1x64_32 (
3221                     in_over_1x64 (&mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest));
3222             }
3223
3224             pd++;
3225             w--;
3226         }
3227     }
3228
3229     _mm_empty ();
3230 }
3231
3232 /* ---------------------------------------------------------------------
3233  * composite_over_8888_n_8888
3234  */
3235
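/*
 * a8r8g8b8 source with a solid mask over an a8r8g8b8 destination:
 *
 *     dest = src * mask.a + dest * (1 - src.a * mask.a)
 *
 * The mask alpha is replicated once into xmm_mask before the loops.
 */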
3236 static void
3237 sse2_composite_over_8888_n_8888 (pixman_implementation_t *imp,
3238                                  pixman_op_t              op,
3239                                  pixman_image_t *         src_image,
3240                                  pixman_image_t *         mask_image,
3241                                  pixman_image_t *         dst_image,
3242                                  int32_t                  src_x,
3243                                  int32_t                  src_y,
3244                                  int32_t                  mask_x,
3245                                  int32_t                  mask_y,
3246                                  int32_t                  dest_x,
3247                                  int32_t                  dest_y,
3248                                  int32_t                  width,
3249                                  int32_t                  height)
3250 {
3251     uint32_t    *dst_line, *dst;
3252     uint32_t    *src_line, *src;
3253     uint32_t mask;
3254     int w;
3255     int dst_stride, src_stride;
3256
3257     __m128i xmm_mask;
3258     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
3259     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
3260     __m128i xmm_alpha_lo, xmm_alpha_hi;
3261
3262     PIXMAN_IMAGE_GET_LINE (
3263         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3264     PIXMAN_IMAGE_GET_LINE (
3265         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
3266
3267     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
3268
3269     xmm_mask = create_mask_16_128 (mask >> 24);
3270
3271     while (height--)
3272     {
3273         dst = dst_line;
3274         dst_line += dst_stride;
3275         src = src_line;
3276         src_line += src_stride;
3277         w = width;
3278
3279         /* call prefetch hint to optimize cache load */
3280         cache_prefetch ((__m128i*)dst);
3281         cache_prefetch ((__m128i*)src);
3282
3283         while (w && (unsigned long)dst & 15)
3284         {
3285             uint32_t s = *src++;
3286             uint32_t d = *dst;
3287
3288             __m64 ms = unpack_32_1x64 (s);
3289             __m64 alpha    = expand_alpha_1x64 (ms);
3290             __m64 mask     = _mm_movepi64_pi64 (xmm_mask);
3291             __m64 dest     = unpack_32_1x64 (d);
3292
3293             *dst++ = pack_1x64_32 (
3294                 in_over_1x64 (&ms, &alpha, &mask, &dest));
3295
3296             w--;
3297         }
3298
3299         /* call prefetch hint to optimize cache load */
3300         cache_prefetch ((__m128i*)dst);
3301         cache_prefetch ((__m128i*)src);
3302
3303         while (w >= 4)
3304         {
3305             /* fill cache line with next memory */
3306             cache_prefetch_next ((__m128i*)dst);
3307             cache_prefetch_next ((__m128i*)src);
3308
3309             xmm_src = load_128_unaligned ((__m128i*)src);
3310             xmm_dst = load_128_aligned ((__m128i*)dst);
3311
3312             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
3313             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
3314             expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
3315                                 &xmm_alpha_lo, &xmm_alpha_hi);
3316
3317             in_over_2x128 (&xmm_src_lo, &xmm_src_hi,
3318                            &xmm_alpha_lo, &xmm_alpha_hi,
3319                            &xmm_mask, &xmm_mask,
3320                            &xmm_dst_lo, &xmm_dst_hi);
3321
3322             save_128_aligned (
3323                 (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
3324
3325             dst += 4;
3326             src += 4;
3327             w -= 4;
3328         }
3329
3330         while (w)
3331         {
3332             uint32_t s = *src++;
3333             uint32_t d = *dst;
3334
3335             __m64 ms = unpack_32_1x64 (s);
3336             __m64 alpha = expand_alpha_1x64 (ms);
3337             __m64 mask  = _mm_movepi64_pi64 (xmm_mask);
3338             __m64 dest  = unpack_32_1x64 (d);
3339
3340             *dst++ = pack_1x64_32 (
3341                 in_over_1x64 (&ms, &alpha, &mask, &dest));
3342
3343             w--;
3344         }
3345     }
3346
3347     _mm_empty ();
3348 }
3349
3350 /* ---------------------------------------------------------------------
3351  * composite_over_x888_n_8888
3352  */
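/*
 * Like the previous fast path, but the x8r8g8b8 source carries no alpha:
 * it is forced to opaque (| 0xff000000 / mask_ff000000) and mask_00ff is
 * used as the source alpha, so per pixel
 *
 *     dest = src * mask.a + dest * (1 - mask.a)
 */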
3353 static void
3354 sse2_composite_over_x888_n_8888 (pixman_implementation_t *imp,
3355                                  pixman_op_t              op,
3356                                  pixman_image_t *         src_image,
3357                                  pixman_image_t *         mask_image,
3358                                  pixman_image_t *         dst_image,
3359                                  int32_t                  src_x,
3360                                  int32_t                  src_y,
3361                                  int32_t                  mask_x,
3362                                  int32_t                  mask_y,
3363                                  int32_t                  dest_x,
3364                                  int32_t                  dest_y,
3365                                  int32_t                  width,
3366                                  int32_t                  height)
3367 {
3368     uint32_t    *dst_line, *dst;
3369     uint32_t    *src_line, *src;
3370     uint32_t mask;
3371     int dst_stride, src_stride;
3372     int w;
3373
3374     __m128i xmm_mask, xmm_alpha;
3375     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
3376     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
3377
3378     PIXMAN_IMAGE_GET_LINE (
3379         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3380     PIXMAN_IMAGE_GET_LINE (
3381         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
3382
3383     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
3384
3385     xmm_mask = create_mask_16_128 (mask >> 24);
3386     xmm_alpha = mask_00ff;
3387
3388     while (height--)
3389     {
3390         dst = dst_line;
3391         dst_line += dst_stride;
3392         src = src_line;
3393         src_line += src_stride;
3394         w = width;
3395
3396         /* call prefetch hint to optimize cache load*/
3397         cache_prefetch ((__m128i*)dst);
3398         cache_prefetch ((__m128i*)src);
3399
3400         while (w && (unsigned long)dst & 15)
3401         {
3402             uint32_t s = (*src++) | 0xff000000;
3403             uint32_t d = *dst;
3404
3405             __m64 src   = unpack_32_1x64 (s);
3406             __m64 alpha = _mm_movepi64_pi64 (xmm_alpha);
3407             __m64 mask  = _mm_movepi64_pi64 (xmm_mask);
3408             __m64 dest  = unpack_32_1x64 (d);
3409
3410             *dst++ = pack_1x64_32 (
3411                 in_over_1x64 (&src, &alpha, &mask, &dest));
3412
3413             w--;
3414         }
3415
3416         /* call prefetch hint to optimize cache load*/
3417         cache_prefetch ((__m128i*)dst);
3418         cache_prefetch ((__m128i*)src);
3419
3420         while (w >= 4)
3421         {
3422             /* fill cache line with next memory */
3423             cache_prefetch_next ((__m128i*)dst);
3424             cache_prefetch_next ((__m128i*)src);
3425
3426             xmm_src = _mm_or_si128 (
3427                 load_128_unaligned ((__m128i*)src), mask_ff000000);
3428             xmm_dst = load_128_aligned ((__m128i*)dst);
3429
3430             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
3431             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
3432
3433             in_over_2x128 (&xmm_src_lo, &xmm_src_hi,
3434                            &xmm_alpha, &xmm_alpha,
3435                            &xmm_mask, &xmm_mask,
3436                            &xmm_dst_lo, &xmm_dst_hi);
3437
3438             save_128_aligned (
3439                 (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
3440
3441             dst += 4;
3442             src += 4;
3443             w -= 4;
3444
3445         }
3446
3447         while (w)
3448         {
3449             uint32_t s = (*src++) | 0xff000000;
3450             uint32_t d = *dst;
3451
3452             __m64 src  = unpack_32_1x64 (s);
3453             __m64 alpha = _mm_movepi64_pi64 (xmm_alpha);
3454             __m64 mask  = _mm_movepi64_pi64 (xmm_mask);
3455             __m64 dest  = unpack_32_1x64 (d);
3456
3457             *dst++ = pack_1x64_32 (
3458                 in_over_1x64 (&src, &alpha, &mask, &dest));
3459
3460             w--;
3461         }
3462     }
3463
3464     _mm_empty ();
3465 }
3466
3467 /* --------------------------------------------------------------------
3468  * composite_over_8888_8888
3469  */
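/*
 * Plain OVER of premultiplied a8r8g8b8 onto a8r8g8b8.  Each scanline
 * is handed to core_combine_over_u_sse2 (), which computes roughly
 *
 *     dst = src + (1 - src.alpha) * dst
 *
 * per pixel, using saturating 8-bit arithmetic.
 */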
3470 static void
3471 sse2_composite_over_8888_8888 (pixman_implementation_t *imp,
3472                                pixman_op_t              op,
3473                                pixman_image_t *         src_image,
3474                                pixman_image_t *         mask_image,
3475                                pixman_image_t *         dst_image,
3476                                int32_t                  src_x,
3477                                int32_t                  src_y,
3478                                int32_t                  mask_x,
3479                                int32_t                  mask_y,
3480                                int32_t                  dest_x,
3481                                int32_t                  dest_y,
3482                                int32_t                  width,
3483                                int32_t                  height)
3484 {
3485     int dst_stride, src_stride;
3486     uint32_t    *dst_line, *dst;
3487     uint32_t    *src_line, *src;
3488
3489     PIXMAN_IMAGE_GET_LINE (
3490         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3491     PIXMAN_IMAGE_GET_LINE (
3492         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
3493
3494     dst = dst_line;
3495     src = src_line;
3496
3497     while (height--)
3498     {
3499         core_combine_over_u_sse2 (dst, src, NULL, width);
3500
3501         dst += dst_stride;
3502         src += src_stride;
3503     }
3504     _mm_empty ();
3505 }
3506
3507 /* ------------------------------------------------------------------
3508  * composite_over_8888_0565
3509  */
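/*
 * OVER of premultiplied a8r8g8b8 onto r5g6b5.  The helper below
 * expands one 565 destination pixel to 8888, applies the usual
 * dst = src + (1 - src.alpha) * dst, and packs the result back to
 * 565.  The vector loop does the same for 8 pixels per iteration.
 */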
3510 static force_inline uint16_t
3511 composite_over_8888_0565pixel (uint32_t src, uint16_t dst)
3512 {
3513     __m64 ms;
3514
3515     ms = unpack_32_1x64 (src);
3516     return pack_565_32_16 (
3517         pack_1x64_32 (
3518             over_1x64 (
3519                 ms, expand_alpha_1x64 (ms), expand565_16_1x64 (dst))));
3520 }
3521
3522 static void
3523 sse2_composite_over_8888_0565 (pixman_implementation_t *imp,
3524                                pixman_op_t              op,
3525                                pixman_image_t *         src_image,
3526                                pixman_image_t *         mask_image,
3527                                pixman_image_t *         dst_image,
3528                                int32_t                  src_x,
3529                                int32_t                  src_y,
3530                                int32_t                  mask_x,
3531                                int32_t                  mask_y,
3532                                int32_t                  dest_x,
3533                                int32_t                  dest_y,
3534                                int32_t                  width,
3535                                int32_t                  height)
3536 {
3537     uint16_t    *dst_line, *dst, d;
3538     uint32_t    *src_line, *src, s;
3539     int dst_stride, src_stride;
3540     uint16_t w;
3541
3542     __m128i xmm_alpha_lo, xmm_alpha_hi;
3543     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
3544     __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
3545
3546     PIXMAN_IMAGE_GET_LINE (
3547         dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
3548     PIXMAN_IMAGE_GET_LINE (
3549         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
3550
3551 #if 0
3552     /* FIXME
3553      *
3554      * This code was copied from the MMX implementation, keeping the FIXME.
3555      * If it's a problem there, it is probably a problem here.
3556      */
3557     assert (src_image->drawable == mask_image->drawable);
3558 #endif
3559
3560     while (height--)
3561     {
3562         dst = dst_line;
3563         src = src_line;
3564
3565         /* call prefetch hint to optimize cache load*/
3566         cache_prefetch ((__m128i*)src);
3567         cache_prefetch ((__m128i*)dst);
3568
3569         dst_line += dst_stride;
3570         src_line += src_stride;
3571         w = width;
3572
3573         /* Align dst on a 16-byte boundary */
3574         while (w &&
3575                ((unsigned long)dst & 15))
3576         {
3577             s = *src++;
3578             d = *dst;
3579
3580             *dst++ = composite_over_8888_0565pixel (s, d);
3581             w--;
3582         }
3583
3584         /* call prefetch hint to optimize cache load*/
3585         cache_prefetch ((__m128i*)src);
3586         cache_prefetch ((__m128i*)dst);
3587
3588         /* It's an 8-pixel loop */
3589         while (w >= 8)
3590         {
3591             /* fill cache line with next memory */
3592             cache_prefetch_next ((__m128i*)src);
3593             cache_prefetch_next ((__m128i*)dst);
3594
3595             /* Load the source unaligned because its address
3596              * alignment is not guaranteed.
3597              */
3598             xmm_src = load_128_unaligned ((__m128i*) src);
3599             xmm_dst = load_128_aligned ((__m128i*) dst);
3600
3601             /* Unpacking */
3602             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
3603             unpack_565_128_4x128 (xmm_dst,
3604                                   &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
3605             expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
3606                                 &xmm_alpha_lo, &xmm_alpha_hi);
3607
3608             /* Load the next 4 pixels from memory ahead of time
3609              * to optimize the memory read.
3610              */
3611             xmm_src = load_128_unaligned ((__m128i*) (src + 4));
3612
3613             over_2x128 (&xmm_src_lo, &xmm_src_hi,
3614                         &xmm_alpha_lo, &xmm_alpha_hi,
3615                         &xmm_dst0, &xmm_dst1);
3616
3617             /* Unpacking */
3618             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
3619             expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
3620                                 &xmm_alpha_lo, &xmm_alpha_hi);
3621
3622             over_2x128 (&xmm_src_lo, &xmm_src_hi,
3623                         &xmm_alpha_lo, &xmm_alpha_hi,
3624                         &xmm_dst2, &xmm_dst3);
3625
3626             save_128_aligned (
3627                 (__m128i*)dst, pack_565_4x128_128 (
3628                     &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
3629
3630             w -= 8;
3631             dst += 8;
3632             src += 8;
3633         }
3634
3635         while (w--)
3636         {
3637             s = *src++;
3638             d = *dst;
3639
3640             *dst++ = composite_over_8888_0565pixel (s, d);
3641         }
3642     }
3643
3644     _mm_empty ();
3645 }
3646
3647 /* -----------------------------------------------------------------
3648  * composite_over_n_8_8888
3649  */
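/*
 * Solid source, a8 mask, a8r8g8b8 destination.  Per pixel this is
 * roughly
 *
 *     dst = (src IN m) OVER dst  =  src * m + (1 - srca * m) * dst
 *
 * The 4-pixel loop takes two shortcuts: a group of four zero mask
 * bytes is skipped entirely, and a fully opaque source with a fully
 * set mask group is stored directly (xmm_def).
 */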
3650
3651 static void
3652 sse2_composite_over_n_8_8888 (pixman_implementation_t *imp,
3653                               pixman_op_t              op,
3654                               pixman_image_t *         src_image,
3655                               pixman_image_t *         mask_image,
3656                               pixman_image_t *         dst_image,
3657                               int32_t                  src_x,
3658                               int32_t                  src_y,
3659                               int32_t                  mask_x,
3660                               int32_t                  mask_y,
3661                               int32_t                  dest_x,
3662                               int32_t                  dest_y,
3663                               int32_t                  width,
3664                               int32_t                  height)
3665 {
3666     uint32_t src, srca;
3667     uint32_t *dst_line, *dst;
3668     uint8_t *mask_line, *mask;
3669     int dst_stride, mask_stride;
3670     uint16_t w;
3671     uint32_t m, d;
3672
3673     __m128i xmm_src, xmm_alpha, xmm_def;
3674     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
3675     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
3676
3677     __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;
3678
3679     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
3680
3681     srca = src >> 24;
3682     if (src == 0)
3683         return;
3684
3685     PIXMAN_IMAGE_GET_LINE (
3686         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3687     PIXMAN_IMAGE_GET_LINE (
3688         mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
3689
3690     xmm_def = create_mask_2x32_128 (src, src);
3691     xmm_src = expand_pixel_32_1x128 (src);
3692     xmm_alpha = expand_alpha_1x128 (xmm_src);
3693     mmx_src   = _mm_movepi64_pi64 (xmm_src);
3694     mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
3695
3696     while (height--)
3697     {
3698         dst = dst_line;
3699         dst_line += dst_stride;
3700         mask = mask_line;
3701         mask_line += mask_stride;
3702         w = width;
3703
3704         /* call prefetch hint to optimize cache load*/
3705         cache_prefetch ((__m128i*)mask);
3706         cache_prefetch ((__m128i*)dst);
3707
3708         while (w && (unsigned long)dst & 15)
3709         {
3710             uint8_t m = *mask++;
3711
3712             if (m)
3713             {
3714                 d = *dst;
3715                 mmx_mask = expand_pixel_8_1x64 (m);
3716                 mmx_dest = unpack_32_1x64 (d);
3717
3718                 *dst = pack_1x64_32 (in_over_1x64 (&mmx_src,
3719                                                    &mmx_alpha,
3720                                                    &mmx_mask,
3721                                                    &mmx_dest));
3722             }
3723
3724             w--;
3725             dst++;
3726         }
3727
3728         /* call prefetch hint to optimize cache load*/
3729         cache_prefetch ((__m128i*)mask);
3730         cache_prefetch ((__m128i*)dst);
3731
3732         while (w >= 4)
3733         {
3734             /* fill cache line with next memory */
3735             cache_prefetch_next ((__m128i*)mask);
3736             cache_prefetch_next ((__m128i*)dst);
3737
3738             m = *((uint32_t*)mask);
3739
3740             if (srca == 0xff && m == 0xffffffff)
3741             {
3742                 save_128_aligned ((__m128i*)dst, xmm_def);
3743             }
3744             else if (m)
3745             {
3746                 xmm_dst = load_128_aligned ((__m128i*) dst);
3747                 xmm_mask = unpack_32_1x128 (m);
3748                 xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ());
3749
3750                 /* Unpacking */
3751                 unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
3752                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
3753
3754                 expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi,
3755                                         &xmm_mask_lo, &xmm_mask_hi);
3756
3757                 in_over_2x128 (&xmm_src, &xmm_src,
3758                                &xmm_alpha, &xmm_alpha,
3759                                &xmm_mask_lo, &xmm_mask_hi,
3760                                &xmm_dst_lo, &xmm_dst_hi);
3761
3762                 save_128_aligned (
3763                     (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
3764             }
3765
3766             w -= 4;
3767             dst += 4;
3768             mask += 4;
3769         }
3770
3771         while (w)
3772         {
3773             uint8_t m = *mask++;
3774
3775             if (m)
3776             {
3777                 d = *dst;
3778                 mmx_mask = expand_pixel_8_1x64 (m);
3779                 mmx_dest = unpack_32_1x64 (d);
3780
3781                 *dst = pack_1x64_32 (in_over_1x64 (&mmx_src,
3782                                                    &mmx_alpha,
3783                                                    &mmx_mask,
3784                                                    &mmx_dest));
3785             }
3786
3787             w--;
3788             dst++;
3789         }
3790     }
3791
3792     _mm_empty ();
3793 }
3794
3795 /* ----------------------------------------------------------------
3796  * pixman_fill_sse2
3797  */
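/*
 * Solid fill.  Only 16 bpp and 32 bpp are handled; for 16 bpp the
 * fill value in 'data' must already contain the pixel replicated
 * into both halves, otherwise FALSE is returned.  The value is
 * broadcast into an __m128i and written out as an alignment head
 * (16/32-bit stores), 128/64/32/16-byte aligned blocks, and a small
 * scalar tail.
 */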
3798
3799 pixman_bool_t
3800 pixman_fill_sse2 (uint32_t *bits,
3801                   int       stride,
3802                   int       bpp,
3803                   int       x,
3804                   int       y,
3805                   int       width,
3806                   int       height,
3807                   uint32_t  data)
3808 {
3809     uint32_t byte_width;
3810     uint8_t         *byte_line;
3811
3812     __m128i xmm_def;
3813
3814     if (bpp == 16 && (data >> 16 != (data & 0xffff)))
3815         return FALSE;
3816
3817     if (bpp != 16 && bpp != 32)
3818         return FALSE;
3819
3820     if (bpp == 16)
3821     {
3822         stride = stride * (int) sizeof (uint32_t) / 2;
3823         byte_line = (uint8_t *)(((uint16_t *)bits) + stride * y + x);
3824         byte_width = 2 * width;
3825         stride *= 2;
3826     }
3827     else
3828     {
3829         stride = stride * (int) sizeof (uint32_t) / 4;
3830         byte_line = (uint8_t *)(((uint32_t *)bits) + stride * y + x);
3831         byte_width = 4 * width;
3832         stride *= 4;
3833     }
3834
3835     cache_prefetch ((__m128i*)byte_line);
3836     xmm_def = create_mask_2x32_128 (data, data);
3837
3838     while (height--)
3839     {
3840         int w;
3841         uint8_t *d = byte_line;
3842         byte_line += stride;
3843         w = byte_width;
3844
3845
3846         cache_prefetch_next ((__m128i*)d);
3847
3848         while (w >= 2 && ((unsigned long)d & 3))
3849         {
3850             *(uint16_t *)d = data;
3851             w -= 2;
3852             d += 2;
3853         }
3854
3855         while (w >= 4 && ((unsigned long)d & 15))
3856         {
3857             *(uint32_t *)d = data;
3858
3859             w -= 4;
3860             d += 4;
3861         }
3862
3863         cache_prefetch_next ((__m128i*)d);
3864
3865         while (w >= 128)
3866         {
3867             cache_prefetch (((__m128i*)d) + 12);
3868
3869             save_128_aligned ((__m128i*)(d),     xmm_def);
3870             save_128_aligned ((__m128i*)(d + 16),  xmm_def);
3871             save_128_aligned ((__m128i*)(d + 32),  xmm_def);
3872             save_128_aligned ((__m128i*)(d + 48),  xmm_def);
3873             save_128_aligned ((__m128i*)(d + 64),  xmm_def);
3874             save_128_aligned ((__m128i*)(d + 80),  xmm_def);
3875             save_128_aligned ((__m128i*)(d + 96),  xmm_def);
3876             save_128_aligned ((__m128i*)(d + 112), xmm_def);
3877
3878             d += 128;
3879             w -= 128;
3880         }
3881
3882         if (w >= 64)
3883         {
3884             cache_prefetch (((__m128i*)d) + 8);
3885
3886             save_128_aligned ((__m128i*)(d),     xmm_def);
3887             save_128_aligned ((__m128i*)(d + 16),  xmm_def);
3888             save_128_aligned ((__m128i*)(d + 32),  xmm_def);
3889             save_128_aligned ((__m128i*)(d + 48),  xmm_def);
3890
3891             d += 64;
3892             w -= 64;
3893         }
3894
3895         cache_prefetch_next ((__m128i*)d);
3896
3897         if (w >= 32)
3898         {
3899             save_128_aligned ((__m128i*)(d),     xmm_def);
3900             save_128_aligned ((__m128i*)(d + 16),  xmm_def);
3901
3902             d += 32;
3903             w -= 32;
3904         }
3905
3906         if (w >= 16)
3907         {
3908             save_128_aligned ((__m128i*)(d),     xmm_def);
3909
3910             d += 16;
3911             w -= 16;
3912         }
3913
3914         cache_prefetch_next ((__m128i*)d);
3915
3916         while (w >= 4)
3917         {
3918             *(uint32_t *)d = data;
3919
3920             w -= 4;
3921             d += 4;
3922         }
3923
3924         if (w >= 2)
3925         {
3926             *(uint16_t *)d = data;
3927             w -= 2;
3928             d += 2;
3929         }
3930     }
3931
3932     _mm_empty ();
3933     return TRUE;
3934 }
3935
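/* ---------------------------------------------------------------------
 * composite_src_n_8_8888
 *
 * Solid source, a8 mask, SRC operator: per pixel the destination
 * becomes src * m, and 0 where the mask byte is 0.  A completely
 * transparent source degenerates into pixman_fill_sse2 with 0.
 */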
3936 static void
3937 sse2_composite_src_n_8_8888 (pixman_implementation_t *imp,
3938                              pixman_op_t              op,
3939                              pixman_image_t *         src_image,
3940                              pixman_image_t *         mask_image,
3941                              pixman_image_t *         dst_image,
3942                              int32_t                  src_x,
3943                              int32_t                  src_y,
3944                              int32_t                  mask_x,
3945                              int32_t                  mask_y,
3946                              int32_t                  dest_x,
3947                              int32_t                  dest_y,
3948                              int32_t                  width,
3949                              int32_t                  height)
3950 {
3951     uint32_t src, srca;
3952     uint32_t    *dst_line, *dst;
3953     uint8_t     *mask_line, *mask;
3954     int dst_stride, mask_stride;
3955     uint16_t w;
3956     uint32_t m;
3957
3958     __m128i xmm_src, xmm_def;
3959     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
3960
3961     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
3962
3963     srca = src >> 24;
3964     if (src == 0)
3965     {
3966         pixman_fill_sse2 (dst_image->bits.bits, dst_image->bits.rowstride,
3967                           PIXMAN_FORMAT_BPP (dst_image->bits.format),
3968                           dest_x, dest_y, width, height, 0);
3969         return;
3970     }
3971
3972     PIXMAN_IMAGE_GET_LINE (
3973         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3974     PIXMAN_IMAGE_GET_LINE (
3975         mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
3976
3977     xmm_def = create_mask_2x32_128 (src, src);
3978     xmm_src = expand_pixel_32_1x128 (src);
3979
3980     while (height--)
3981     {
3982         dst = dst_line;
3983         dst_line += dst_stride;
3984         mask = mask_line;
3985         mask_line += mask_stride;
3986         w = width;
3987
3988         /* call prefetch hint to optimize cache load*/
3989         cache_prefetch ((__m128i*)mask);
3990         cache_prefetch ((__m128i*)dst);
3991
3992         while (w && (unsigned long)dst & 15)
3993         {
3994             uint8_t m = *mask++;
3995
3996             if (m)
3997             {
3998                 *dst = pack_1x64_32 (
3999                     pix_multiply_1x64 (
4000                         _mm_movepi64_pi64 (xmm_src), expand_pixel_8_1x64 (m)));
4001             }
4002             else
4003             {
4004                 *dst = 0;
4005             }
4006
4007             w--;
4008             dst++;
4009         }
4010
4011         /* call prefetch hint to optimize cache load*/
4012         cache_prefetch ((__m128i*)mask);
4013         cache_prefetch ((__m128i*)dst);
4014
4015         while (w >= 4)
4016         {
4017             /* fill cache line with next memory */
4018             cache_prefetch_next ((__m128i*)mask);
4019             cache_prefetch_next ((__m128i*)dst);
4020
4021             m = *((uint32_t*)mask);
4022
4023             if (srca == 0xff && m == 0xffffffff)
4024             {
4025                 save_128_aligned ((__m128i*)dst, xmm_def);
4026             }
4027             else if (m)
4028             {
4029                 xmm_mask = unpack_32_1x128 (m);
4030                 xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ());
4031
4032                 /* Unpacking */
4033                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4034
4035                 expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi,
4036                                         &xmm_mask_lo, &xmm_mask_hi);
4037
4038                 pix_multiply_2x128 (&xmm_src, &xmm_src,
4039                                     &xmm_mask_lo, &xmm_mask_hi,
4040                                     &xmm_mask_lo, &xmm_mask_hi);
4041
4042                 save_128_aligned (
4043                     (__m128i*)dst, pack_2x128_128 (xmm_mask_lo, xmm_mask_hi));
4044             }
4045             else
4046             {
4047                 save_128_aligned ((__m128i*)dst, _mm_setzero_si128 ());
4048             }
4049
4050             w -= 4;
4051             dst += 4;
4052             mask += 4;
4053         }
4054
4055         while (w)
4056         {
4057             uint8_t m = *mask++;
4058
4059             if (m)
4060             {
4061                 *dst = pack_1x64_32 (
4062                     pix_multiply_1x64 (
4063                         _mm_movepi64_pi64 (xmm_src), expand_pixel_8_1x64 (m)));
4064             }
4065             else
4066             {
4067                 *dst = 0;
4068             }
4069
4070             w--;
4071             dst++;
4072         }
4073     }
4074
4075     _mm_empty ();
4076 }
4077
4078 /*-----------------------------------------------------------------------
4079  * composite_over_n_8_0565
4080  */
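/*
 * Solid source, a8 mask, r5g6b5 destination.  Each destination pixel
 * is expanded from 565 to 8888, combined with roughly
 * dst = src * m + (1 - srca * m) * dst, and packed back to 565.
 * The 8-pixel loop skips any group of four mask bytes that is zero.
 */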
4081
4082 static void
4083 sse2_composite_over_n_8_0565 (pixman_implementation_t *imp,
4084                               pixman_op_t              op,
4085                               pixman_image_t *         src_image,
4086                               pixman_image_t *         mask_image,
4087                               pixman_image_t *         dst_image,
4088                               int32_t                  src_x,
4089                               int32_t                  src_y,
4090                               int32_t                  mask_x,
4091                               int32_t                  mask_y,
4092                               int32_t                  dest_x,
4093                               int32_t                  dest_y,
4094                               int32_t                  width,
4095                               int32_t                  height)
4096 {
4097     uint32_t src, srca;
4098     uint16_t    *dst_line, *dst, d;
4099     uint8_t     *mask_line, *mask;
4100     int dst_stride, mask_stride;
4101     uint16_t w;
4102     uint32_t m;
4103     __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;
4104
4105     __m128i xmm_src, xmm_alpha;
4106     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
4107     __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
4108
4109     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
4110
4111     srca = src >> 24;
4112     if (src == 0)
4113         return;
4114
4115     PIXMAN_IMAGE_GET_LINE (
4116         dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
4117     PIXMAN_IMAGE_GET_LINE (
4118         mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
4119
4120     xmm_src = expand_pixel_32_1x128 (src);
4121     xmm_alpha = expand_alpha_1x128 (xmm_src);
4122     mmx_src = _mm_movepi64_pi64 (xmm_src);
4123     mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
4124
4125     while (height--)
4126     {
4127         dst = dst_line;
4128         dst_line += dst_stride;
4129         mask = mask_line;
4130         mask_line += mask_stride;
4131         w = width;
4132
4133         /* call prefetch hint to optimize cache load*/
4134         cache_prefetch ((__m128i*)mask);
4135         cache_prefetch ((__m128i*)dst);
4136
4137         while (w && (unsigned long)dst & 15)
4138         {
4139             m = *mask++;
4140
4141             if (m)
4142             {
4143                 d = *dst;
4144                 mmx_mask = expand_alpha_rev_1x64 (unpack_32_1x64 (m));
4145                 mmx_dest = expand565_16_1x64 (d);
4146
4147                 *dst = pack_565_32_16 (
4148                     pack_1x64_32 (
4149                         in_over_1x64 (
4150                             &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest)));
4151             }
4152
4153             w--;
4154             dst++;
4155         }
4156
4157         /* call prefetch hint to optimize cache load*/
4158         cache_prefetch ((__m128i*)mask);
4159         cache_prefetch ((__m128i*)dst);
4160
4161         while (w >= 8)
4162         {
4163             /* fill cache line with next memory */
4164             cache_prefetch_next ((__m128i*)mask);
4165             cache_prefetch_next ((__m128i*)dst);
4166
4167             xmm_dst = load_128_aligned ((__m128i*) dst);
4168             unpack_565_128_4x128 (xmm_dst,
4169                                   &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
4170
4171             m = *((uint32_t*)mask);
4172             mask += 4;
4173
4174             if (m)
4175             {
4176                 xmm_mask = unpack_32_1x128 (m);
4177                 xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ());
4178
4179                 /* Unpacking */
4180                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4181
4182                 expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi,
4183                                         &xmm_mask_lo, &xmm_mask_hi);
4184
4185                 in_over_2x128 (&xmm_src, &xmm_src,
4186                                &xmm_alpha, &xmm_alpha,
4187                                &xmm_mask_lo, &xmm_mask_hi,
4188                                &xmm_dst0, &xmm_dst1);
4189             }
4190
4191             m = *((uint32_t*)mask);
4192             mask += 4;
4193
4194             if (m)
4195             {
4196                 xmm_mask = unpack_32_1x128 (m);
4197                 xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ());
4198
4199                 /* Unpacking */
4200                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4201
4202                 expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi,
4203                                         &xmm_mask_lo, &xmm_mask_hi);
4204                 in_over_2x128 (&xmm_src, &xmm_src,
4205                                &xmm_alpha, &xmm_alpha,
4206                                &xmm_mask_lo, &xmm_mask_hi,
4207                                &xmm_dst2, &xmm_dst3);
4208             }
4209
4210             save_128_aligned (
4211                 (__m128i*)dst, pack_565_4x128_128 (
4212                     &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
4213
4214             w -= 8;
4215             dst += 8;
4216         }
4217
4218         while (w)
4219         {
4220             m = *mask++;
4221
4222             if (m)
4223             {
4224                 d = *dst;
4225                 mmx_mask = expand_alpha_rev_1x64 (unpack_32_1x64 (m));
4226                 mmx_dest = expand565_16_1x64 (d);
4227
4228                 *dst = pack_565_32_16 (
4229                     pack_1x64_32 (
4230                         in_over_1x64 (
4231                             &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest)));
4232             }
4233
4234             w--;
4235             dst++;
4236         }
4237     }
4238
4239     _mm_empty ();
4240 }
4241
4242 /* -----------------------------------------------------------------------
4243  * composite_over_pixbuf_0565
4244  */
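/*
 * The "pixbuf" source format is non-premultiplied, with R and B
 * swapped relative to the destination.  over_rev_non_pre_* therefore
 * swaps the color channels, premultiplies by the source alpha, and
 * then applies OVER onto the 565 destination.  Two per-group
 * shortcuts: a fully opaque group only needs the channel swap
 * (invert_colors_2x128), and an all-zero group leaves the
 * destination untouched.
 */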
4245
4246 static void
4247 sse2_composite_over_pixbuf_0565 (pixman_implementation_t *imp,
4248                                  pixman_op_t              op,
4249                                  pixman_image_t *         src_image,
4250                                  pixman_image_t *         mask_image,
4251                                  pixman_image_t *         dst_image,
4252                                  int32_t                  src_x,
4253                                  int32_t                  src_y,
4254                                  int32_t                  mask_x,
4255                                  int32_t                  mask_y,
4256                                  int32_t                  dest_x,
4257                                  int32_t                  dest_y,
4258                                  int32_t                  width,
4259                                  int32_t                  height)
4260 {
4261     uint16_t    *dst_line, *dst, d;
4262     uint32_t    *src_line, *src, s;
4263     int dst_stride, src_stride;
4264     uint16_t w;
4265     uint32_t opaque, zero;
4266
4267     __m64 ms;
4268     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
4269     __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
4270
4271     PIXMAN_IMAGE_GET_LINE (
4272         dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
4273     PIXMAN_IMAGE_GET_LINE (
4274         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
4275
4276 #if 0
4277     /* FIXME
4278      *
4279      * This code was copied from the MMX implementation, keeping the FIXME.
4280      * If it's a problem there, it is probably a problem here.
4281      */
4282     assert (src_image->drawable == mask_image->drawable);
4283 #endif
4284
4285     while (height--)
4286     {
4287         dst = dst_line;
4288         dst_line += dst_stride;
4289         src = src_line;
4290         src_line += src_stride;
4291         w = width;
4292
4293         /* call prefetch hint to optimize cache load*/
4294         cache_prefetch ((__m128i*)src);
4295         cache_prefetch ((__m128i*)dst);
4296
4297         while (w && (unsigned long)dst & 15)
4298         {
4299             s = *src++;
4300             d = *dst;
4301
4302             ms = unpack_32_1x64 (s);
4303
4304             *dst++ = pack_565_32_16 (
4305                 pack_1x64_32 (
4306                     over_rev_non_pre_1x64 (ms, expand565_16_1x64 (d))));
4307             w--;
4308         }
4309
4310         /* call prefetch hint to optimize cache load*/
4311         cache_prefetch ((__m128i*)src);
4312         cache_prefetch ((__m128i*)dst);
4313
4314         while (w >= 8)
4315         {
4316             /* fill cache line with next memory */
4317             cache_prefetch_next ((__m128i*)src);
4318             cache_prefetch_next ((__m128i*)dst);
4319
4320             /* First round */
4321             xmm_src = load_128_unaligned ((__m128i*)src);
4322             xmm_dst = load_128_aligned  ((__m128i*)dst);
4323
4324             opaque = is_opaque (xmm_src);
4325             zero = is_zero (xmm_src);
4326
4327             unpack_565_128_4x128 (xmm_dst,
4328                                   &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
4329             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
4330
4331             /* preload next round*/
4332             xmm_src = load_128_unaligned ((__m128i*)(src + 4));
4333
4334             if (opaque)
4335             {
4336                 invert_colors_2x128 (xmm_src_lo, xmm_src_hi,
4337                                      &xmm_dst0, &xmm_dst1);
4338             }
4339             else if (!zero)
4340             {
4341                 over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi,
4342                                         &xmm_dst0, &xmm_dst1);
4343             }
4344
4345             /* Second round */
4346             opaque = is_opaque (xmm_src);
4347             zero = is_zero (xmm_src);
4348
4349             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
4350
4351             if (opaque)
4352             {
4353                 invert_colors_2x128 (xmm_src_lo, xmm_src_hi,
4354                                      &xmm_dst2, &xmm_dst3);
4355             }
4356             else if (!zero)
4357             {
4358                 over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi,
4359                                         &xmm_dst2, &xmm_dst3);
4360             }
4361
4362             save_128_aligned (
4363                 (__m128i*)dst, pack_565_4x128_128 (
4364                     &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
4365
4366             w -= 8;
4367             src += 8;
4368             dst += 8;
4369         }
4370
4371         while (w)
4372         {
4373             s = *src++;
4374             d = *dst;
4375
4376             ms = unpack_32_1x64 (s);
4377
4378             *dst++ = pack_565_32_16 (
4379                 pack_1x64_32 (
4380                     over_rev_non_pre_1x64 (ms, expand565_16_1x64 (d))));
4381             w--;
4382         }
4383     }
4384
4385     _mm_empty ();
4386 }
4387
4388 /* -------------------------------------------------------------------------
4389  * composite_over_pixbuf_8888
4390  */
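/*
 * Same non-premultiplied, channel-swapped "pixbuf" source as above,
 * but with an a8r8g8b8 destination, so no 565 packing is needed.
 * Opaque groups are converted with invert_colors_2x128 and stored
 * directly; all-zero groups are skipped.
 */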
4391
4392 static void
4393 sse2_composite_over_pixbuf_8888 (pixman_implementation_t *imp,
4394                                  pixman_op_t              op,
4395                                  pixman_image_t *         src_image,
4396                                  pixman_image_t *         mask_image,
4397                                  pixman_image_t *         dst_image,
4398                                  int32_t                  src_x,
4399                                  int32_t                  src_y,
4400                                  int32_t                  mask_x,
4401                                  int32_t                  mask_y,
4402                                  int32_t                  dest_x,
4403                                  int32_t                  dest_y,
4404                                  int32_t                  width,
4405                                  int32_t                  height)
4406 {
4407     uint32_t    *dst_line, *dst, d;
4408     uint32_t    *src_line, *src, s;
4409     int dst_stride, src_stride;
4410     uint16_t w;
4411     uint32_t opaque, zero;
4412
4413     __m128i xmm_src_lo, xmm_src_hi;
4414     __m128i xmm_dst_lo, xmm_dst_hi;
4415
4416     PIXMAN_IMAGE_GET_LINE (
4417         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
4418     PIXMAN_IMAGE_GET_LINE (
4419         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
4420
4421 #if 0
4422     /* FIXME
4423      *
4424      * This code was copied from the MMX implementation, keeping the FIXME.
4425      * If it's a problem there, it is probably a problem here.
4426      */
4427     assert (src_image->drawable == mask_image->drawable);
4428 #endif
4429
4430     while (height--)
4431     {
4432         dst = dst_line;
4433         dst_line += dst_stride;
4434         src = src_line;
4435         src_line += src_stride;
4436         w = width;
4437
4438         /* call prefetch hint to optimize cache load*/
4439         cache_prefetch ((__m128i*)src);
4440         cache_prefetch ((__m128i*)dst);
4441
4442         while (w && (unsigned long)dst & 15)
4443         {
4444             s = *src++;
4445             d = *dst;
4446
4447             *dst++ = pack_1x64_32 (
4448                 over_rev_non_pre_1x64 (
4449                     unpack_32_1x64 (s), unpack_32_1x64 (d)));
4450
4451             w--;
4452         }
4453
4454         /* call prefetch hint to optimize cache load*/
4455         cache_prefetch ((__m128i*)src);
4456         cache_prefetch ((__m128i*)dst);
4457
4458         while (w >= 4)
4459         {
4460             /* fill cache line with next memory */
4461             cache_prefetch_next ((__m128i*)src);
4462             cache_prefetch_next ((__m128i*)dst);
4463
4464             xmm_src_hi = load_128_unaligned ((__m128i*)src);
4465
4466             opaque = is_opaque (xmm_src_hi);
4467             zero = is_zero (xmm_src_hi);
4468
4469             unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
4470
4471             if (opaque)
4472             {
4473                 invert_colors_2x128 (xmm_src_lo, xmm_src_hi,
4474                                      &xmm_dst_lo, &xmm_dst_hi);
4475
4476                 save_128_aligned (
4477                     (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
4478             }
4479             else if (!zero)
4480             {
4481                 xmm_dst_hi = load_128_aligned  ((__m128i*)dst);
4482
4483                 unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
4484
4485                 over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi,
4486                                         &xmm_dst_lo, &xmm_dst_hi);
4487
4488                 save_128_aligned (
4489                     (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
4490             }
4491
4492             w -= 4;
4493             dst += 4;
4494             src += 4;
4495         }
4496
4497         while (w)
4498         {
4499             s = *src++;
4500             d = *dst;
4501
4502             *dst++ = pack_1x64_32 (
4503                 over_rev_non_pre_1x64 (
4504                     unpack_32_1x64 (s), unpack_32_1x64 (d)));
4505
4506             w--;
4507         }
4508     }
4509
4510     _mm_empty ();
4511 }
4512
4513 /* -----------------------------------------------------------------------
4514  * composite_over_n_8888_0565_ca
4515  */
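/*
 * Solid source with a component-alpha (a8r8g8b8) mask over a 565
 * destination: each mask channel scales the matching source channel
 * before the OVER.  _mm_cmpeq_epi32/_mm_movemask_epi8 are used to
 * detect groups of four all-zero mask pixels so they can be skipped.
 */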
4516
4517 static void
4518 sse2_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
4519                                     pixman_op_t              op,
4520                                     pixman_image_t *         src_image,
4521                                     pixman_image_t *         mask_image,
4522                                     pixman_image_t *         dst_image,
4523                                     int32_t                  src_x,
4524                                     int32_t                  src_y,
4525                                     int32_t                  mask_x,
4526                                     int32_t                  mask_y,
4527                                     int32_t                  dest_x,
4528                                     int32_t                  dest_y,
4529                                     int32_t                  width,
4530                                     int32_t                  height)
4531 {
4532     uint32_t src;
4533     uint16_t    *dst_line, *dst, d;
4534     uint32_t    *mask_line, *mask, m;
4535     int dst_stride, mask_stride;
4536     int w;
4537     uint32_t pack_cmp;
4538
4539     __m128i xmm_src, xmm_alpha;
4540     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
4541     __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
4542
4543     __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;
4544
4545     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
4546
4547     if (src == 0)
4548         return;
4549
4550     PIXMAN_IMAGE_GET_LINE (
4551         dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
4552     PIXMAN_IMAGE_GET_LINE (
4553         mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
4554
4555     xmm_src = expand_pixel_32_1x128 (src);
4556     xmm_alpha = expand_alpha_1x128 (xmm_src);
4557     mmx_src = _mm_movepi64_pi64 (xmm_src);
4558     mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
4559
4560     while (height--)
4561     {
4562         w = width;
4563         mask = mask_line;
4564         dst = dst_line;
4565         mask_line += mask_stride;
4566         dst_line += dst_stride;
4567
4568         /* call prefetch hint to optimize cache load*/
4569         cache_prefetch ((__m128i*)mask);
4570         cache_prefetch ((__m128i*)dst);
4571
4572         while (w && ((unsigned long)dst & 15))
4573         {
4574             m = *(uint32_t *) mask;
4575
4576             if (m)
4577             {
4578                 d = *dst;
4579                 mmx_mask = unpack_32_1x64 (m);
4580                 mmx_dest = expand565_16_1x64 (d);
4581
4582                 *dst = pack_565_32_16 (
4583                     pack_1x64_32 (
4584                         in_over_1x64 (
4585                             &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest)));
4586             }
4587
4588             w--;
4589             dst++;
4590             mask++;
4591         }
4592
4593         /* call prefetch hint to optimize cache load*/
4594         cache_prefetch ((__m128i*)mask);
4595         cache_prefetch ((__m128i*)dst);
4596
4597         while (w >= 8)
4598         {
4599             /* fill cache line with next memory */
4600             cache_prefetch_next ((__m128i*)mask);
4601             cache_prefetch_next ((__m128i*)dst);
4602
4603             /* First round */
4604             xmm_mask = load_128_unaligned ((__m128i*)mask);
4605             xmm_dst = load_128_aligned ((__m128i*)dst);
4606
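            /* pack_cmp gets a bit set for every byte of a mask pixel
             * that compared equal to zero; 0xffff therefore means all
             * four mask pixels are zero and the in_over step for this
             * group can be skipped.
             */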
4607             pack_cmp = _mm_movemask_epi8 (
4608                 _mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128 ()));
4609
4610             unpack_565_128_4x128 (xmm_dst,
4611                                   &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
4612             unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4613
4614             /* preload next round */
4615             xmm_mask = load_128_unaligned ((__m128i*)(mask + 4));
4616
4617             /* composite only if at least one of the four mask pixels is non-zero */
4618             if (pack_cmp != 0xffff)
4619             {
4620                 in_over_2x128 (&xmm_src, &xmm_src,
4621                                &xmm_alpha, &xmm_alpha,
4622                                &xmm_mask_lo, &xmm_mask_hi,
4623                                &xmm_dst0, &xmm_dst1);
4624             }
4625
4626             /* Second round */
4627             pack_cmp = _mm_movemask_epi8 (
4628                 _mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128 ()));
4629
4630             unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4631
4632             if (pack_cmp != 0xffff)
4633             {
4634                 in_over_2x128 (&xmm_src, &xmm_src,
4635                                &xmm_alpha, &xmm_alpha,
4636                                &xmm_mask_lo, &xmm_mask_hi,
4637                                &xmm_dst2, &xmm_dst3);
4638             }
4639
4640             save_128_aligned (
4641                 (__m128i*)dst, pack_565_4x128_128 (
4642                     &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
4643
4644             w -= 8;
4645             dst += 8;
4646             mask += 8;
4647         }
4648
4649         while (w)
4650         {
4651             m = *(uint32_t *) mask;
4652
4653             if (m)
4654             {
4655                 d = *dst;
4656                 mmx_mask = unpack_32_1x64 (m);
4657                 mmx_dest = expand565_16_1x64 (d);
4658
4659                 *dst = pack_565_32_16 (
4660                     pack_1x64_32 (
4661                         in_over_1x64 (
4662                             &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest)));
4663             }
4664
4665             w--;
4666             dst++;
4667             mask++;
4668         }
4669     }
4670
4671     _mm_empty ();
4672 }
4673
4674 /* -----------------------------------------------------------------------
4675  * composite_in_n_8_8
4676  */
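/*
 * IN with a solid source and an a8 mask on an a8 destination.
 * Everything is alpha here, so per pixel the result is roughly
 *
 *     dst = srca * m * dst
 *
 * with each value treated as a fraction of 255.  The vector loop
 * handles 16 mask/destination bytes per iteration.
 */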
4677
4678 static void
4679 sse2_composite_in_n_8_8 (pixman_implementation_t *imp,
4680                          pixman_op_t              op,
4681                          pixman_image_t *         src_image,
4682                          pixman_image_t *         mask_image,
4683                          pixman_image_t *         dst_image,
4684                          int32_t                  src_x,
4685                          int32_t                  src_y,
4686                          int32_t                  mask_x,
4687                          int32_t                  mask_y,
4688                          int32_t                  dest_x,
4689                          int32_t                  dest_y,
4690                          int32_t                  width,
4691                          int32_t                  height)
4692 {
4693     uint8_t     *dst_line, *dst;
4694     uint8_t     *mask_line, *mask;
4695     int dst_stride, mask_stride;
4696     uint16_t w, d, m;
4697     uint32_t src;
4698     uint8_t sa;
4699
4700     __m128i xmm_alpha;
4701     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
4702     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
4703
4704     PIXMAN_IMAGE_GET_LINE (
4705         dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
4706     PIXMAN_IMAGE_GET_LINE (
4707         mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
4708
4709     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
4710
4711     sa = src >> 24;
4712     if (sa == 0)
4713         return;
4714
4715     xmm_alpha = expand_alpha_1x128 (expand_pixel_32_1x128 (src));
4716
4717     while (height--)
4718     {
4719         dst = dst_line;
4720         dst_line += dst_stride;
4721         mask = mask_line;
4722         mask_line += mask_stride;
4723         w = width;
4724
4725         /* call prefetch hint to optimize cache load*/
4726         cache_prefetch ((__m128i*)mask);
4727         cache_prefetch ((__m128i*)dst);
4728
4729         while (w && ((unsigned long)dst & 15))
4730         {
4731             m = (uint32_t) *mask++;
4732             d = (uint32_t) *dst;
4733
4734             *dst++ = (uint8_t) pack_1x64_32 (
4735                 pix_multiply_1x64 (
4736                     pix_multiply_1x64 (_mm_movepi64_pi64 (xmm_alpha),
4737                                        unpack_32_1x64 (m)),
4738                     unpack_32_1x64 (d)));
4739             w--;
4740         }
4741
4742         /* call prefetch hint to optimize cache load*/
4743         cache_prefetch ((__m128i*)mask);
4744         cache_prefetch ((__m128i*)dst);
4745
4746         while (w >= 16)
4747         {
4748             /* fill cache line with next memory */
4749             cache_prefetch_next ((__m128i*)mask);
4750             cache_prefetch_next ((__m128i*)dst);
4751
4752             xmm_mask = load_128_unaligned ((__m128i*)mask);
4753             xmm_dst = load_128_aligned ((__m128i*)dst);
4754
4755             unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4756             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
4757
4758             pix_multiply_2x128 (&xmm_alpha, &xmm_alpha,
4759                                 &xmm_mask_lo, &xmm_mask_hi,
4760                                 &xmm_mask_lo, &xmm_mask_hi);
4761
4762             pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
4763                                 &xmm_dst_lo, &xmm_dst_hi,
4764                                 &xmm_dst_lo, &xmm_dst_hi);
4765
4766             save_128_aligned (
4767                 (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
4768
4769             mask += 16;
4770             dst += 16;
4771             w -= 16;
4772         }
4773
4774         while (w)
4775         {
4776             m = (uint32_t) *mask++;
4777             d = (uint32_t) *dst;
4778
4779             *dst++ = (uint8_t) pack_1x64_32 (
4780                 pix_multiply_1x64 (
4781                     pix_multiply_1x64 (
4782                         _mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)),
4783                     unpack_32_1x64 (d)));
4784             w--;
4785         }
4786     }
4787
4788     _mm_empty ();
4789 }
4790
4791 /* ---------------------------------------------------------------------------
4792  * composite_in_8_8
4793  */
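/*
 * IN of an a8 source onto an a8 destination: per pixel,
 * dst = src * dst (as fractions of 255), 16 bytes per SSE2 iteration.
 */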
4794
4795 static void
4796 sse2_composite_in_8_8 (pixman_implementation_t *imp,
4797                        pixman_op_t              op,
4798                        pixman_image_t *         src_image,
4799                        pixman_image_t *         mask_image,
4800                        pixman_image_t *         dst_image,
4801                        int32_t                  src_x,
4802                        int32_t                  src_y,
4803                        int32_t                  mask_x,
4804                        int32_t                  mask_y,
4805                        int32_t                  dest_x,
4806                        int32_t                  dest_y,
4807                        int32_t                  width,
4808                        int32_t                  height)
4809 {
4810     uint8_t     *dst_line, *dst;
4811     uint8_t     *src_line, *src;
4812     int src_stride, dst_stride;
4813     uint16_t w;
4814     uint32_t s, d;
4815
4816     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
4817     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
4818
4819     PIXMAN_IMAGE_GET_LINE (
4820         dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
4821     PIXMAN_IMAGE_GET_LINE (
4822         src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
4823
4824     while (height--)
4825     {
4826         dst = dst_line;
4827         dst_line += dst_stride;
4828         src = src_line;
4829         src_line += src_stride;
4830         w = width;
4831
4832         /* call prefetch hint to optimize cache load*/
4833         cache_prefetch ((__m128i*)src);
4834         cache_prefetch ((__m128i*)dst);
4835
4836         while (w && ((unsigned long)dst & 15))
4837         {
4838             s = (uint32_t) *src++;
4839             d = (uint32_t) *dst;
4840
4841             *dst++ = (uint8_t) pack_1x64_32 (
4842                 pix_multiply_1x64 (
4843                     unpack_32_1x64 (s), unpack_32_1x64 (d)));
4844             w--;
4845         }
4846
4847         /* call prefetch hint to optimize cache load*/
4848         cache_prefetch ((__m128i*)src);
4849         cache_prefetch ((__m128i*)dst);
4850
4851         while (w >= 16)
4852         {
4853             /* fill cache line with next memory */
4854             cache_prefetch_next ((__m128i*)src);
4855             cache_prefetch_next ((__m128i*)dst);
4856
4857             xmm_src = load_128_unaligned ((__m128i*)src);
4858             xmm_dst = load_128_aligned ((__m128i*)dst);
4859
4860             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
4861             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
4862
4863             pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
4864                                 &xmm_dst_lo, &xmm_dst_hi,
4865                                 &xmm_dst_lo, &xmm_dst_hi);
4866
4867             save_128_aligned (
4868                 (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
4869
4870             src += 16;
4871             dst += 16;
4872             w -= 16;
4873         }
4874
4875         while (w)
4876         {
4877             s = (uint32_t) *src++;
4878             d = (uint32_t) *dst;
4879
4880             *dst++ = (uint8_t) pack_1x64_32 (
4881                 pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (d)));
4882             w--;
4883         }
4884     }
4885
4886     _mm_empty ();
4887 }
4888
4889 /* -------------------------------------------------------------------------
4890  * composite_add_8888_8_8
4891  */
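/*
 * ADD with a solid source and an a8 mask onto an a8 destination:
 * per pixel, dst = saturate (dst + srca * m).  Only the alpha byte
 * of the solid source is used.
 */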
4892
4893 static void
4894 sse2_composite_add_8888_8_8 (pixman_implementation_t *imp,
4895                              pixman_op_t              op,
4896                              pixman_image_t *         src_image,
4897                              pixman_image_t *         mask_image,
4898                              pixman_image_t *         dst_image,
4899                              int32_t                  src_x,
4900                              int32_t                  src_y,
4901                              int32_t                  mask_x,
4902                              int32_t                  mask_y,
4903                              int32_t                  dest_x,
4904                              int32_t                  dest_y,
4905                              int32_t                  width,
4906                              int32_t                  height)
4907 {
4908     uint8_t     *dst_line, *dst;
4909     uint8_t     *mask_line, *mask;
4910     int dst_stride, mask_stride;
4911     uint16_t w;
4912     uint32_t src;
4913     uint8_t sa;
4914     uint32_t m, d;
4915
4916     __m128i xmm_alpha;
4917     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
4918     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
4919
4920     PIXMAN_IMAGE_GET_LINE (
4921         dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
4922     PIXMAN_IMAGE_GET_LINE (
4923         mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
4924
4925     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
4926
4927     sa = src >> 24;
4928     if (sa == 0)
4929         return;
4930
4931     xmm_alpha = expand_alpha_1x128 (expand_pixel_32_1x128 (src));
4932
4933     while (height--)
4934     {
4935         dst = dst_line;
4936         dst_line += dst_stride;
4937         mask = mask_line;
4938         mask_line += mask_stride;
4939         w = width;
4940
4941         /* call prefetch hint to optimize cache load*/
4942         cache_prefetch ((__m128i*)mask);
4943         cache_prefetch ((__m128i*)dst);
4944
4945         while (w && ((unsigned long)dst & 15))
4946         {
4947             m = (uint32_t) *mask++;
4948             d = (uint32_t) *dst;
4949
4950             *dst++ = (uint8_t) pack_1x64_32 (
4951                 _mm_adds_pu16 (
4952                     pix_multiply_1x64 (
4953                         _mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)),
4954                     unpack_32_1x64 (d)));
4955             w--;
4956         }
4957
4958         /* call prefetch hint to optimize cache load*/
4959         cache_prefetch ((__m128i*)mask);
4960         cache_prefetch ((__m128i*)dst);
4961
4962         while (w >= 16)
4963         {
4964             /* fill cache line with next memory */
4965             cache_prefetch_next ((__m128i*)mask);
4966             cache_prefetch_next ((__m128i*)dst);
4967
4968             xmm_mask = load_128_unaligned ((__m128i*)mask);
4969             xmm_dst = load_128_aligned ((__m128i*)dst);
4970
4971             unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4972             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
4973
4974             pix_multiply_2x128 (&xmm_alpha, &xmm_alpha,
4975                                 &xmm_mask_lo, &xmm_mask_hi,
4976                                 &xmm_mask_lo, &xmm_mask_hi);
4977
4978             xmm_dst_lo = _mm_adds_epu16 (xmm_mask_lo, xmm_dst_lo);
4979             xmm_dst_hi = _mm_adds_epu16 (xmm_mask_hi, xmm_dst_hi);
4980
4981             save_128_aligned (
4982                 (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
4983
4984             mask += 16;
4985             dst += 16;
4986             w -= 16;
4987         }
4988
4989         while (w)
4990         {
4991             m = (uint32_t) *mask++;
4992             d = (uint32_t) *dst;
4993
4994             *dst++ = (uint8_t) pack_1x64_32 (
4995                 _mm_adds_pu16 (
4996                     pix_multiply_1x64 (
4997                         _mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)),
4998                     unpack_32_1x64 (d)));
4999
5000             w--;
5001         }
5002     }
5003
5004     _mm_empty ();
5005 }
5006
5007 /* ----------------------------------------------------------------------
5008  * composite_add_8000_8000
5009  */
5010
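/*
 * Saturating add of two a8 images.  After aligning dst to a 4-byte
 * boundary, groups of four 8-bit pixels are reinterpreted as 32-bit words
 * and handed to core_combine_add_u_sse2 (); the remaining head and tail
 * pixels use the scalar saturating add below.
 */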
5011 static void
5012 sse2_composite_add_8000_8000 (pixman_implementation_t *imp,
5013                               pixman_op_t              op,
5014                               pixman_image_t *         src_image,
5015                               pixman_image_t *         mask_image,
5016                               pixman_image_t *         dst_image,
5017                               int32_t                  src_x,
5018                               int32_t                  src_y,
5019                               int32_t                  mask_x,
5020                               int32_t                  mask_y,
5021                               int32_t                  dest_x,
5022                               int32_t                  dest_y,
5023                               int32_t                  width,
5024                               int32_t                  height)
5025 {
5026     uint8_t     *dst_line, *dst;
5027     uint8_t     *src_line, *src;
5028     int dst_stride, src_stride;
5029     uint16_t w;
5030     uint16_t t;
5031
5032     PIXMAN_IMAGE_GET_LINE (
5033         src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
5034     PIXMAN_IMAGE_GET_LINE (
5035         dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
5036
5037     while (height--)
5038     {
5039         dst = dst_line;
5040         src = src_line;
5041
5042         /* Call the prefetch hint to optimize cache loads */
5043         cache_prefetch ((__m128i*)src);
5044         cache_prefetch ((__m128i*)dst);
5045
5046         dst_line += dst_stride;
5047         src_line += src_stride;
5048         w = width;
5049
5050         /* Small head: align dst to a 4-byte boundary, one pixel at a time */
5051         while (w && (unsigned long)dst & 3)
5052         {
5053             t = (*dst) + (*src++);
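            /* t holds the 9-bit sum; if it overflowed 8 bits, (t >> 8) is 1,
             * so (0 - (t >> 8)) becomes all ones and the OR saturates to 0xff */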
5054             *dst++ = t | (0 - (t >> 8));
5055             w--;
5056         }
5057
5058         core_combine_add_u_sse2 ((uint32_t*)dst, (uint32_t*)src, NULL, w >> 2);
5059
5060         /* Small tail: step past the pixels handled above and finish the rest */
5061         dst += w & 0xfffc;
5062         src += w & 0xfffc;
5063
5064         w &= 3;
5065
5066         while (w)
5067         {
5068             t = (*dst) + (*src++);
5069             *dst++ = t | (0 - (t >> 8));
5070             w--;
5071         }
5072     }
5073
5074     _mm_empty ();
5075 }
5076
5077 /* ---------------------------------------------------------------------
5078  * composite_add_8888_8888
5079  */
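/*
 * Saturating add of two a8r8g8b8 images; each scanline is handed directly
 * to core_combine_add_u_sse2 ().
 */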
5080 static void
5081 sse2_composite_add_8888_8888 (pixman_implementation_t *imp,
5082                               pixman_op_t              op,
5083                               pixman_image_t *         src_image,
5084                               pixman_image_t *         mask_image,
5085                               pixman_image_t *         dst_image,
5086                               int32_t                  src_x,
5087                               int32_t                  src_y,
5088                               int32_t                  mask_x,
5089                               int32_t                  mask_y,
5090                               int32_t                  dest_x,
5091                               int32_t                  dest_y,
5092                               int32_t                  width,
5093                               int32_t                  height)
5094 {
5095     uint32_t    *dst_line, *dst;
5096     uint32_t    *src_line, *src;
5097     int dst_stride, src_stride;
5098
5099     PIXMAN_IMAGE_GET_LINE (
5100         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
5101     PIXMAN_IMAGE_GET_LINE (
5102         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
5103
5104     while (height--)
5105     {
5106         dst = dst_line;
5107         dst_line += dst_stride;
5108         src = src_line;
5109         src_line += src_stride;
5110
5111         core_combine_add_u_sse2 (dst, src, NULL, width);
5112     }
5113
5114     _mm_empty ();
5115 }
5116
5117 /* -------------------------------------------------------------------------
5118  * sse2_composite_copy_area
5119  */
5120
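/*
 * Raw blit between two bit buffers of equal depth (16 or 32 bpp only).
 * Each row is copied by aligning the destination in 2- and 4-byte steps,
 * streaming 64-byte blocks (unaligned loads, aligned stores), and then
 * finishing with progressively smaller copies.
 */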
5121 static pixman_bool_t
5122 pixman_blt_sse2 (uint32_t *src_bits,
5123                  uint32_t *dst_bits,
5124                  int       src_stride,
5125                  int       dst_stride,
5126                  int       src_bpp,
5127                  int       dst_bpp,
5128                  int       src_x,
5129                  int       src_y,
5130                  int       dst_x,
5131                  int       dst_y,
5132                  int       width,
5133                  int       height)
5134 {
5135     uint8_t *   src_bytes;
5136     uint8_t *   dst_bytes;
5137     int byte_width;
5138
5139     if (src_bpp != dst_bpp)
5140         return FALSE;
5141
5142     if (src_bpp == 16)
5143     {
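        /* The strides are passed in uint32_t units; convert them to
         * uint16_t pixel units here and to byte units further down */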
5144         src_stride = src_stride * (int) sizeof (uint32_t) / 2;
5145         dst_stride = dst_stride * (int) sizeof (uint32_t) / 2;
5146         src_bytes = (uint8_t *)(((uint16_t *)src_bits) + src_stride * (src_y) + (src_x));
5147         dst_bytes = (uint8_t *)(((uint16_t *)dst_bits) + dst_stride * (dst_y) + (dst_x));
5148         byte_width = 2 * width;
5149         src_stride *= 2;
5150         dst_stride *= 2;
5151     }
5152     else if (src_bpp == 32)
5153     {
5154         src_stride = src_stride * (int) sizeof (uint32_t) / 4;
5155         dst_stride = dst_stride * (int) sizeof (uint32_t) / 4;
5156         src_bytes = (uint8_t *)(((uint32_t *)src_bits) + src_stride * (src_y) + (src_x));
5157         dst_bytes = (uint8_t *)(((uint32_t *)dst_bits) + dst_stride * (dst_y) + (dst_x));
5158         byte_width = 4 * width;
5159         src_stride *= 4;
5160         dst_stride *= 4;
5161     }
5162     else
5163     {
5164         return FALSE;
5165     }
5166
5167     cache_prefetch ((__m128i*)src_bytes);
5168     cache_prefetch ((__m128i*)dst_bytes);
5169
5170     while (height--)
5171     {
5172         int w;
5173         uint8_t *s = src_bytes;
5174         uint8_t *d = dst_bytes;
5175         src_bytes += src_stride;
5176         dst_bytes += dst_stride;
5177         w = byte_width;
5178
5179         cache_prefetch_next ((__m128i*)s);
5180         cache_prefetch_next ((__m128i*)d);
5181
5182         while (w >= 2 && ((unsigned long)d & 3))
5183         {
5184             *(uint16_t *)d = *(uint16_t *)s;
5185             w -= 2;
5186             s += 2;
5187             d += 2;
5188         }
5189
5190         while (w >= 4 && ((unsigned long)d & 15))
5191         {
5192             *(uint32_t *)d = *(uint32_t *)s;
5193
5194             w -= 4;
5195             s += 4;
5196             d += 4;
5197         }
5198
5199         cache_prefetch_next ((__m128i*)s);
5200         cache_prefetch_next ((__m128i*)d);
5201
5202         while (w >= 64)
5203         {
5204             __m128i xmm0, xmm1, xmm2, xmm3;
5205
5206             /* 128 bytes ahead */
5207             cache_prefetch (((__m128i*)s) + 8);
5208             cache_prefetch (((__m128i*)d) + 8);
5209
5210             xmm0 = load_128_unaligned ((__m128i*)(s));
5211             xmm1 = load_128_unaligned ((__m128i*)(s + 16));
5212             xmm2 = load_128_unaligned ((__m128i*)(s + 32));
5213             xmm3 = load_128_unaligned ((__m128i*)(s + 48));
5214
5215             save_128_aligned ((__m128i*)(d),    xmm0);
5216             save_128_aligned ((__m128i*)(d + 16), xmm1);
5217             save_128_aligned ((__m128i*)(d + 32), xmm2);
5218             save_128_aligned ((__m128i*)(d + 48), xmm3);
5219
5220             s += 64;
5221             d += 64;
5222             w -= 64;
5223         }
5224
5225         cache_prefetch_next ((__m128i*)s);
5226         cache_prefetch_next ((__m128i*)d);
5227
5228         while (w >= 16)
5229         {
5230             save_128_aligned ((__m128i*)d, load_128_unaligned ((__m128i*)s));
5231
5232             w -= 16;
5233             d += 16;
5234             s += 16;
5235         }
5236
5237         cache_prefetch_next ((__m128i*)s);
5238         cache_prefetch_next ((__m128i*)d);
5239
5240         while (w >= 4)
5241         {
5242             *(uint32_t *)d = *(uint32_t *)s;
5243
5244             w -= 4;
5245             s += 4;
5246             d += 4;
5247         }
5248
5249         if (w >= 2)
5250         {
5251             *(uint16_t *)d = *(uint16_t *)s;
5252             w -= 2;
5253             s += 2;
5254             d += 2;
5255         }
5256     }
5257
5258     _mm_empty ();
5259
5260     return TRUE;
5261 }
5262
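/*
 * Thin wrapper that feeds the image bits, strides and formats of the
 * composite request into pixman_blt_sse2 ().
 */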
5263 static void
5264 sse2_composite_copy_area (pixman_implementation_t *imp,
5265                           pixman_op_t              op,
5266                           pixman_image_t *         src_image,
5267                           pixman_image_t *         mask_image,
5268                           pixman_image_t *         dst_image,
5269                           int32_t                  src_x,
5270                           int32_t                  src_y,
5271                           int32_t                  mask_x,
5272                           int32_t                  mask_y,
5273                           int32_t                  dest_x,
5274                           int32_t                  dest_y,
5275                           int32_t                  width,
5276                           int32_t                  height)
5277 {
5278     pixman_blt_sse2 (src_image->bits.bits,
5279                      dst_image->bits.bits,
5280                      src_image->bits.rowstride,
5281                      dst_image->bits.rowstride,
5282                      PIXMAN_FORMAT_BPP (src_image->bits.format),
5283                      PIXMAN_FORMAT_BPP (dst_image->bits.format),
5284                      src_x, src_y, dest_x, dest_y, width, height);
5285 }
5286
5287 #if 0
5288 /* This code is buggy in the MMX version, and the bug has been carried over to this SSE2 version */
5289 void
5290 sse2_composite_over_x888_8_8888 (pixman_implementation_t *imp,
5291                                  pixman_op_t              op,
5292                                  pixman_image_t *         src_image,
5293                                  pixman_image_t *         mask_image,
5294                                  pixman_image_t *         dst_image,
5295                                  int32_t                  src_x,
5296                                  int32_t                  src_y,
5297                                  int32_t                  mask_x,
5298                                  int32_t                  mask_y,
5299                                  int32_t                  dest_x,
5300                                  int32_t                  dest_y,
5301                                  int32_t                  width,
5302                                  int32_t                  height)
5303 {
5304     uint32_t    *src, *src_line, s;
5305     uint32_t    *dst, *dst_line, d;
5306     uint8_t         *mask, *mask_line;
5307     uint32_t m;
5308     int src_stride, mask_stride, dst_stride;
5309     uint16_t w;
5310
5311     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
5312     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
5313     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
5314
5315     PIXMAN_IMAGE_GET_LINE (
5316         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
5317     PIXMAN_IMAGE_GET_LINE (
5318         mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
5319     PIXMAN_IMAGE_GET_LINE (
5320         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
5321
5322     while (height--)
5323     {
5324         src = src_line;
5325         src_line += src_stride;
5326         dst = dst_line;
5327         dst_line += dst_stride;
5328         mask = mask_line;
5329         mask_line += mask_stride;
5330
5331         w = width;
5332
5333         /* Call the prefetch hint to optimize cache loads */
5334         cache_prefetch ((__m128i*)src);
5335         cache_prefetch ((__m128i*)dst);
5336         cache_prefetch ((__m128i*)mask);
5337
5338         while (w && (unsigned long)dst & 15)
5339         {
5340             s = 0xff000000 | *src++;
5341             m = (uint32_t) *mask++;
5342             d = *dst;
5343
5344             __m64 ms = unpack_32_1x64 (s);
5345
5346             if (m != 0xff)
5347             {
5348                 ms = in_over_1x64 (ms,
5349                                    mask_x00ff,
5350                                    expand_alpha_rev_1x64 (unpack_32_1x64 (m)),
5351                                    unpack_32_1x64 (d));
5352             }
5353
5354             *dst++ = pack_1x64_32 (ms);
5355             w--;
5356         }
5357
5358         /* Call the prefetch hint to optimize cache loads */
5359         cache_prefetch ((__m128i*)src);
5360         cache_prefetch ((__m128i*)dst);
5361         cache_prefetch ((__m128i*)mask);
5362
5363         while (w >= 4)
5364         {
5365             /* Prefetch the next cache lines */
5366             cache_prefetch_next ((__m128i*)src);
5367             cache_prefetch_next ((__m128i*)dst);
5368             cache_prefetch_next ((__m128i*)mask);
5369
5370             m = *(uint32_t*) mask;
5371             xmm_src = _mm_or_si128 (load_128_unaligned ((__m128i*)src), mask_ff000000);
5372
5373             if (m == 0xffffffff)
5374             {
5375                 save_128_aligned ((__m128i*)dst, xmm_src);
5376             }
5377             else
5378             {
5379                 xmm_dst = load_128_aligned ((__m128i*)dst);
5380
5381                 xmm_mask = _mm_unpacklo_epi16 (
5382                     unpack_32_1x128 (m), _mm_setzero_si128 ());
5383
5384                 unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
5385                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
5386                 unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
5387
5388                 expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi,
5389                                         &xmm_mask_lo, &xmm_mask_hi);
5390
5391                 in_over_2x128 (xmm_src_lo, xmm_src_hi,
5392                                mask_00ff, mask_00ff,
5393                                xmm_mask_lo, xmm_mask_hi,
5394                                &xmm_dst_lo, &xmm_dst_hi);
5395
5396                 save_128_aligned (
5397                     (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
5398             }
5399
5400             src += 4;
5401             dst += 4;
5402             mask += 4;
5403             w -= 4;
5404         }
5405
5406         while (w)
5407         {
5408             m = (uint32_t) *mask++;
5409
5410             if (m)
5411             {
5412                 s = 0xff000000 | *src;
5413
5414                 if (m == 0xff)
5415                 {
5416                     *dst = s;
5417                 }
5418                 else
5419                 {
5420                     d = *dst;
5421
5422                     *dst = pack_1x64_32 (
5423                         in_over_1x64 (
5424                             unpack_32_1x64 (s),
5425                             mask_x00ff,
5426                             expand_alpha_rev_1x64 (unpack_32_1x64 (m)),
5427                             unpack_32_1x64 (d)));
5428                 }
5429
5430             }
5431
5432             src++;
5433             dst++;
5434             w--;
5435         }
5436     }
5437
5438     _mm_empty ();
5439 }
5440
5441 #endif
5442
5443 static const pixman_fast_path_t sse2_fast_paths[] =
5444 {
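    /* { operation, src format, mask format, dest format, fast path, flags } */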
5445     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   sse2_composite_over_n_8_0565,       0 },
5446     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   sse2_composite_over_n_8_0565,       0 },
5447     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_composite_over_n_8888,         0 },
5448     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_over_n_8888,         0 },
5449     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_r5g6b5,   sse2_composite_over_n_0565,         0 },
5450     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_composite_over_8888_8888,      0 },
5451     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_over_8888_8888,      0 },
5452     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, sse2_composite_over_8888_8888,      0 },
5453     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_composite_over_8888_8888,      0 },
5454     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   sse2_composite_over_8888_0565,      0 },
5455     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   sse2_composite_over_8888_0565,      0 },
5456     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_n_8_8888,       0 },
5457     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_over_n_8_8888,       0 },
5458     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_composite_over_n_8_8888,       0 },
5459     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_over_n_8_8888,       0 },
5460 #if 0
5461     /* FIXME: This code is buggy in the MMX version, and the bug has been carried over to this SSE2 version */
5462     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_over_x888_8_8888,    0 },
5463     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_x888_8_8888,    0 },
5464     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_over_x888_8_8888,    0 },
5465     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_x888_8_8888,    0 },
5466 #endif
5467     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_x888_n_8888,    NEED_SOLID_MASK },
5468     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_over_x888_n_8888,    NEED_SOLID_MASK },
5469     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_composite_over_x888_n_8888,    NEED_SOLID_MASK },
5470     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_over_x888_n_8888,    NEED_SOLID_MASK },
5471     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_8888_n_8888,    NEED_SOLID_MASK },
5472     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_over_8888_n_8888,    NEED_SOLID_MASK },
5473     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_composite_over_8888_n_8888,    NEED_SOLID_MASK },
5474     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_over_8888_n_8888,    NEED_SOLID_MASK },
5475     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
5476     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
5477     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
5478     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
5479     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   sse2_composite_over_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
5480     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   sse2_composite_over_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
5481     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5482     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_a8r8g8b8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5483     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5484     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_x8r8g8b8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5485     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_a8b8g8r8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5486     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5487     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_x8b8g8r8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5488     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5489     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   sse2_composite_over_pixbuf_0565,    NEED_PIXBUF },
5490     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_r5g6b5,   sse2_composite_over_pixbuf_0565,    NEED_PIXBUF },
5491     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_b5g6r5,   sse2_composite_over_pixbuf_0565,    NEED_PIXBUF },
5492     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   sse2_composite_over_pixbuf_0565,    NEED_PIXBUF },
5493     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_copy_area,           0 },
5494     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_composite_copy_area,           0 },
5495
5496     { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_null,     PIXMAN_a8,       sse2_composite_add_8000_8000,       0 },
5497     { PIXMAN_OP_ADD,  PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_composite_add_8888_8888,       0 },
5498     { PIXMAN_OP_ADD,  PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, sse2_composite_add_8888_8888,       0 },
5499     { PIXMAN_OP_ADD,  PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8,       sse2_composite_add_8888_8_8,        0 },
5500
5501     { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_src_n_8_8888,        0 },
5502     { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_src_n_8_8888,        0 },
5503     { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_composite_src_n_8_8888,        0 },
5504     { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_src_n_8_8888,        0 },
5505     { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_composite_copy_area,           0 },
5506     { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,     PIXMAN_a8b8g8r8, sse2_composite_copy_area,           0 },
5507     { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_copy_area,           0 },
5508     { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_composite_copy_area,           0 },
5509     { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_copy_area,           0 },
5510     { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8,  PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_composite_copy_area,           0 },
5511     { PIXMAN_OP_SRC, PIXMAN_r5g6b5,    PIXMAN_null,     PIXMAN_r5g6b5,   sse2_composite_copy_area,           0 },
5512     { PIXMAN_OP_SRC, PIXMAN_b5g6r5,    PIXMAN_null,     PIXMAN_b5g6r5,   sse2_composite_copy_area,           0 },
5513
5514     { PIXMAN_OP_IN,  PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       sse2_composite_in_8_8,              0 },
5515     { PIXMAN_OP_IN,  PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8,       sse2_composite_in_n_8_8,            0 },
5516
5517     { PIXMAN_OP_NONE },
5518 };
5519
5520 /*
5521  * Work around GCC bug causing crashes in Mozilla with SSE2
5522  *
5523  * When using -msse, gcc generates movdqa instructions assuming that
5524  * the stack is 16 byte aligned. Unfortunately some applications, such
5525  * as Mozilla and Mono, end up aligning the stack to 4 bytes, which
5526  * causes the movdqa instructions to fail.
5527  *
5528  * The __force_align_arg_pointer__ attribute makes gcc generate a prologue that
5529  * realigns the stack pointer to 16 bytes.
5530  *
5531  * On x86-64 this is not necessary because the standard ABI already
5532  * calls for a 16 byte aligned stack.
5533  *
5534  * See https://bugs.freedesktop.org/show_bug.cgi?id=15693
5535  */
5536 #if defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__)
5537 __attribute__((__force_align_arg_pointer__))
5538 #endif
5539 static void
5540 sse2_composite (pixman_implementation_t *imp,
5541                 pixman_op_t              op,
5542                 pixman_image_t *         src,
5543                 pixman_image_t *         mask,
5544                 pixman_image_t *         dest,
5545                 int32_t                  src_x,
5546                 int32_t                  src_y,
5547                 int32_t                  mask_x,
5548                 int32_t                  mask_y,
5549                 int32_t                  dest_x,
5550                 int32_t                  dest_y,
5551                 int32_t                  width,
5552                 int32_t                  height)
5553 {
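    /* Try the SSE2 fast path table first; if no entry matches, pass the
     * request on to the delegate implementation */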
5554     if (_pixman_run_fast_path (sse2_fast_paths, imp,
5555                                op, src, mask, dest,
5556                                src_x, src_y,
5557                                mask_x, mask_y,
5558                                dest_x, dest_y,
5559                                width, height))
5560     {
5561         return;
5562     }
5563
5564     _pixman_implementation_composite (imp->delegate, op,
5565                                       src, mask, dest,
5566                                       src_x, src_y,
5567                                       mask_x, mask_y,
5568                                       dest_x, dest_y,
5569                                       width, height);
5570 }
5571
5572 #if defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__)
5573 __attribute__((__force_align_arg_pointer__))
5574 #endif
5575 static pixman_bool_t
5576 sse2_blt (pixman_implementation_t *imp,
5577           uint32_t *               src_bits,
5578           uint32_t *               dst_bits,
5579           int                      src_stride,
5580           int                      dst_stride,
5581           int                      src_bpp,
5582           int                      dst_bpp,
5583           int                      src_x,
5584           int                      src_y,
5585           int                      dst_x,
5586           int                      dst_y,
5587           int                      width,
5588           int                      height)
5589 {
5590     if (!pixman_blt_sse2 (
5591             src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
5592             src_x, src_y, dst_x, dst_y, width, height))
5593
5594     {
5595         return _pixman_implementation_blt (
5596             imp->delegate,
5597             src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
5598             src_x, src_y, dst_x, dst_y, width, height);
5599     }
5600
5601     return TRUE;
5602 }
5603
5604 #if defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__)
5605 __attribute__((__force_align_arg_pointer__))
5606 #endif
5607 static pixman_bool_t
5608 sse2_fill (pixman_implementation_t *imp,
5609            uint32_t *               bits,
5610            int                      stride,
5611            int                      bpp,
5612            int                      x,
5613            int                      y,
5614            int                      width,
5615            int                      height,
5616            uint32_t xor)
5617 {
5618     if (!pixman_fill_sse2 (bits, stride, bpp, x, y, width, height, xor))
5619     {
5620         return _pixman_implementation_fill (
5621             imp->delegate, bits, stride, bpp, x, y, width, height, xor);
5622     }
5623
5624     return TRUE;
5625 }
5626
5627 #if defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__)
5628 __attribute__((__force_align_arg_pointer__))
5629 #endif
5630 pixman_implementation_t *
5631 _pixman_implementation_create_sse2 (void)
5632 {
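    /* Create the implementation with MMX as the delegate; operations that
     * have no SSE2 fast path or combiner fall through to it */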
5633     pixman_implementation_t *mmx = _pixman_implementation_create_mmx ();
5634     pixman_implementation_t *imp = _pixman_implementation_create (mmx);
5635
5636     /* SSE2 constants */
5637     mask_565_r  = create_mask_2x32_128 (0x00f80000, 0x00f80000);
5638     mask_565_g1 = create_mask_2x32_128 (0x00070000, 0x00070000);
5639     mask_565_g2 = create_mask_2x32_128 (0x000000e0, 0x000000e0);
5640     mask_565_b  = create_mask_2x32_128 (0x0000001f, 0x0000001f);
5641     mask_red   = create_mask_2x32_128 (0x00f80000, 0x00f80000);
5642     mask_green = create_mask_2x32_128 (0x0000fc00, 0x0000fc00);
5643     mask_blue  = create_mask_2x32_128 (0x000000f8, 0x000000f8);
5644     mask_565_fix_rb = create_mask_2x32_128 (0x00e000e0, 0x00e000e0);
5645     mask_565_fix_g = create_mask_2x32_128 (0x0000c000, 0x0000c000);
5646     mask_0080 = create_mask_16_128 (0x0080);
5647     mask_00ff = create_mask_16_128 (0x00ff);
5648     mask_0101 = create_mask_16_128 (0x0101);
5649     mask_ffff = create_mask_16_128 (0xffff);
5650     mask_ff000000 = create_mask_2x32_128 (0xff000000, 0xff000000);
5651     mask_alpha = create_mask_2x32_128 (0x00ff0000, 0x00000000);
5652
5653     /* MMX constants */
5654     mask_x565_rgb = create_mask_2x32_64 (0x000001f0, 0x003f001f);
5655     mask_x565_unpack = create_mask_2x32_64 (0x00000084, 0x04100840);
5656
5657     mask_x0080 = create_mask_16_64 (0x0080);
5658     mask_x00ff = create_mask_16_64 (0x00ff);
5659     mask_x0101 = create_mask_16_64 (0x0101);
5660     mask_x_alpha = create_mask_2x32_64 (0x00ff0000, 0x00000000);
5661
5662     _mm_empty ();
5663
5664     /* Set up function pointers */
5665
5666     /* SSE code patch for fbcompose.c */
5667     imp->combine_32[PIXMAN_OP_OVER] = sse2_combine_over_u;
5668     imp->combine_32[PIXMAN_OP_OVER_REVERSE] = sse2_combine_over_reverse_u;
5669     imp->combine_32[PIXMAN_OP_IN] = sse2_combine_in_u;
5670     imp->combine_32[PIXMAN_OP_IN_REVERSE] = sse2_combine_in_reverse_u;
5671     imp->combine_32[PIXMAN_OP_OUT] = sse2_combine_out_u;
5672     imp->combine_32[PIXMAN_OP_OUT_REVERSE] = sse2_combine_out_reverse_u;
5673     imp->combine_32[PIXMAN_OP_ATOP] = sse2_combine_atop_u;
5674     imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = sse2_combine_atop_reverse_u;
5675     imp->combine_32[PIXMAN_OP_XOR] = sse2_combine_xor_u;
5676     imp->combine_32[PIXMAN_OP_ADD] = sse2_combine_add_u;
5677
5678     imp->combine_32[PIXMAN_OP_SATURATE] = sse2_combine_saturate_u;
5679
5680     imp->combine_32_ca[PIXMAN_OP_SRC] = sse2_combine_src_ca;
5681     imp->combine_32_ca[PIXMAN_OP_OVER] = sse2_combine_over_ca;
5682     imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = sse2_combine_over_reverse_ca;
5683     imp->combine_32_ca[PIXMAN_OP_IN] = sse2_combine_in_ca;
5684     imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = sse2_combine_in_reverse_ca;
5685     imp->combine_32_ca[PIXMAN_OP_OUT] = sse2_combine_out_ca;
5686     imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = sse2_combine_out_reverse_ca;
5687     imp->combine_32_ca[PIXMAN_OP_ATOP] = sse2_combine_atop_ca;
5688     imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = sse2_combine_atop_reverse_ca;
5689     imp->combine_32_ca[PIXMAN_OP_XOR] = sse2_combine_xor_ca;
5690     imp->combine_32_ca[PIXMAN_OP_ADD] = sse2_combine_add_ca;
5691
5692     imp->composite = sse2_composite;
5693     imp->blt = sse2_blt;
5694     imp->fill = sse2_fill;
5695
5696     return imp;
5697 }
5698
5699 #endif /* USE_SSE2 */