1 /*
2  * Copyright © 2008 Rodrigo Kumpera
3  * Copyright © 2008 André Tupinambá
4  *
5  * Permission to use, copy, modify, distribute, and sell this software and its
6  * documentation for any purpose is hereby granted without fee, provided that
7  * the above copyright notice appear in all copies and that both that
8  * copyright notice and this permission notice appear in supporting
9  * documentation, and that the name of Red Hat not be used in advertising or
10  * publicity pertaining to distribution of the software without specific,
11  * written prior permission.  Red Hat makes no representations about the
12  * suitability of this software for any purpose.  It is provided "as is"
13  * without express or implied warranty.
14  *
15  * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
16  * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
17  * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
18  * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
19  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
20  * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
21  * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
22  * SOFTWARE.
23  *
24  * Author:  Rodrigo Kumpera (kumpera@gmail.com)
25  *          André Tupinambá (andrelrt@gmail.com)
26  *
27  * Based on work by Owen Taylor and Søren Sandmann
28  */
29 #ifdef HAVE_CONFIG_H
30 #include <config.h>
31 #endif
32
33 #include <mmintrin.h>
34 #include <xmmintrin.h> /* for _mm_shuffle_pi16 and _MM_SHUFFLE */
35 #include <emmintrin.h> /* for SSE2 intrinsics */
36 #include "pixman-private.h"
37 #include "pixman-combine32.h"
38
39 #ifdef USE_SSE2
40
41 /* --------------------------------------------------------------------
42  * Locals
43  */
44
45 static __m64 mask_x0080;
46 static __m64 mask_x00ff;
47 static __m64 mask_x0101;
48 static __m64 mask_x_alpha;
49
50 static __m64 mask_x565_rgb;
51 static __m64 mask_x565_unpack;
52
53 static __m128i mask_0080;
54 static __m128i mask_00ff;
55 static __m128i mask_0101;
56 static __m128i mask_ffff;
57 static __m128i mask_ff000000;
58 static __m128i mask_alpha;
59
60 static __m128i mask_565_r;
61 static __m128i mask_565_g1, mask_565_g2;
62 static __m128i mask_565_b;
63 static __m128i mask_red;
64 static __m128i mask_green;
65 static __m128i mask_blue;
66
67 static __m128i mask_565_fix_rb;
68 static __m128i mask_565_fix_g;
69
70 /* ----------------------------------------------------------------------
71  * SSE2 Inlines
72  */
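
/*
 * A note on the data layout used throughout this file (added
 * commentary): a packed a8r8g8b8 pixel 0xAARRGGBB is "unpacked" by
 * interleaving its bytes with zero, giving 16 bits per channel.  For a
 * single pixel the low four 16-bit lanes become
 *
 *     lane 0 = 0x00BB, lane 1 = 0x00GG, lane 2 = 0x00RR, lane 3 = 0x00AA
 *
 * so each pixel's alpha sits in lane 3 (hence the
 * _MM_SHUFFLE (3, 3, 3, 3) patterns in the expand_alpha_* helpers), and
 * all per-channel arithmetic below is done on these widened values.
 */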
73 static force_inline __m128i
74 unpack_32_1x128 (uint32_t data)
75 {
76     return _mm_unpacklo_epi8 (_mm_cvtsi32_si128 (data), _mm_setzero_si128 ());
77 }
78
79 static force_inline void
80 unpack_128_2x128 (__m128i data, __m128i* data_lo, __m128i* data_hi)
81 {
82     *data_lo = _mm_unpacklo_epi8 (data, _mm_setzero_si128 ());
83     *data_hi = _mm_unpackhi_epi8 (data, _mm_setzero_si128 ());
84 }
85
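/* unpack_565_to_8888 (below) widens four r5g6b5 pixels, already
 * zero-extended to one pixel per 32-bit lane, into x8r8g8b8.  The shifts
 * move each field to the top of its 8-bit channel and the masked
 * shift/or steps replicate the field's high bits into the low bits, so
 * that e.g. 5-bit 0x1f becomes 8-bit 0xff.  A scalar sketch of the same
 * per-pixel computation (for illustration only; it assumes mask_red,
 * mask_green, mask_blue and the two fix-up masks select the usual
 * x8r8g8b8 fields, as set up elsewhere in this file):
 *
 *     uint32_t r  = (p << 8) & 0x00f80000;
 *     uint32_t g  = (p << 5) & 0x0000fc00;
 *     uint32_t b  = (p << 3) & 0x000000f8;
 *     uint32_t rb = r | b;
 *     rb |= (rb & 0x00e000e0) >> 5;    // replicate top 3 bits of r and b
 *     g  |= (g  & 0x0000c000) >> 6;    // replicate top 2 bits of g
 *     return rb | g;
 */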
86 static force_inline __m128i
87 unpack_565_to_8888 (__m128i lo)
88 {
89     __m128i r, g, b, rb, t;
90
91     r = _mm_and_si128 (_mm_slli_epi32 (lo, 8), mask_red);
92     g = _mm_and_si128 (_mm_slli_epi32 (lo, 5), mask_green);
93     b = _mm_and_si128 (_mm_slli_epi32 (lo, 3), mask_blue);
94
95     rb = _mm_or_si128 (r, b);
96     t  = _mm_and_si128 (rb, mask_565_fix_rb);
97     t  = _mm_srli_epi32 (t, 5);
98     rb = _mm_or_si128 (rb, t);
99
100     t  = _mm_and_si128 (g, mask_565_fix_g);
101     t  = _mm_srli_epi32 (t, 6);
102     g  = _mm_or_si128 (g, t);
103
104     return _mm_or_si128 (rb, g);
105 }
106
107 static force_inline void
108 unpack_565_128_4x128 (__m128i  data,
109                       __m128i* data0,
110                       __m128i* data1,
111                       __m128i* data2,
112                       __m128i* data3)
113 {
114     __m128i lo, hi;
115
116     lo = _mm_unpacklo_epi16 (data, _mm_setzero_si128 ());
117     hi = _mm_unpackhi_epi16 (data, _mm_setzero_si128 ());
118
119     lo = unpack_565_to_8888 (lo);
120     hi = unpack_565_to_8888 (hi);
121
122     unpack_128_2x128 (lo, data0, data1);
123     unpack_128_2x128 (hi, data2, data3);
124 }
125
126 static force_inline uint16_t
127 pack_565_32_16 (uint32_t pixel)
128 {
129     return (uint16_t) (((pixel >> 8) & 0xf800) |
130                        ((pixel >> 5) & 0x07e0) |
131                        ((pixel >> 3) & 0x001f));
132 }
133
134 static force_inline __m128i
135 pack_2x128_128 (__m128i lo, __m128i hi)
136 {
137     return _mm_packus_epi16 (lo, hi);
138 }
139
140 static force_inline __m128i
141 pack_565_2x128_128 (__m128i lo, __m128i hi)
142 {
143     __m128i data;
144     __m128i r, g1, g2, b;
145
146     data = pack_2x128_128 (lo, hi);
147
148     r  = _mm_and_si128 (data, mask_565_r);
149     g1 = _mm_and_si128 (_mm_slli_epi32 (data, 3), mask_565_g1);
150     g2 = _mm_and_si128 (_mm_srli_epi32 (data, 5), mask_565_g2);
151     b  = _mm_and_si128 (_mm_srli_epi32 (data, 3), mask_565_b);
152
153     return _mm_or_si128 (_mm_or_si128 (_mm_or_si128 (r, g1), g2), b);
154 }
155
156 static force_inline __m128i
157 pack_565_4x128_128 (__m128i* xmm0, __m128i* xmm1, __m128i* xmm2, __m128i* xmm3)
158 {
159     return _mm_packus_epi16 (pack_565_2x128_128 (*xmm0, *xmm1),
160                              pack_565_2x128_128 (*xmm2, *xmm3));
161 }
162
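/* The three predicates below test four pixels at a time.
 * _mm_movemask_epi8 yields one bit per byte; for packed a8r8g8b8 data
 * the alpha bytes are bytes 3, 7, 11 and 15, so the constant 0x8888
 * selects exactly the four alpha bytes.  is_opaque checks that every
 * alpha is 0xff, is_zero that the whole vector is zero, and
 * is_transparent that every alpha is zero.
 */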
163 static force_inline int
164 is_opaque (__m128i x)
165 {
166     __m128i ffs = _mm_cmpeq_epi8 (x, x);
167
168     return (_mm_movemask_epi8 (_mm_cmpeq_epi8 (x, ffs)) & 0x8888) == 0x8888;
169 }
170
171 static force_inline int
172 is_zero (__m128i x)
173 {
174     return _mm_movemask_epi8 (
175         _mm_cmpeq_epi8 (x, _mm_setzero_si128 ())) == 0xffff;
176 }
177
178 static force_inline int
179 is_transparent (__m128i x)
180 {
181     return (_mm_movemask_epi8 (
182                 _mm_cmpeq_epi8 (x, _mm_setzero_si128 ())) & 0x8888) == 0x8888;
183 }
184
185 static force_inline __m128i
186 expand_pixel_32_1x128 (uint32_t data)
187 {
188     return _mm_shuffle_epi32 (unpack_32_1x128 (data), _MM_SHUFFLE (1, 0, 1, 0));
189 }
190
191 static force_inline __m128i
192 expand_alpha_1x128 (__m128i data)
193 {
194     return _mm_shufflehi_epi16 (_mm_shufflelo_epi16 (data,
195                                                      _MM_SHUFFLE (3, 3, 3, 3)),
196                                 _MM_SHUFFLE (3, 3, 3, 3));
197 }
198
199 static force_inline void
200 expand_alpha_2x128 (__m128i  data_lo,
201                     __m128i  data_hi,
202                     __m128i* alpha_lo,
203                     __m128i* alpha_hi)
204 {
205     __m128i lo, hi;
206
207     lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE (3, 3, 3, 3));
208     hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE (3, 3, 3, 3));
209
210     *alpha_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE (3, 3, 3, 3));
211     *alpha_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE (3, 3, 3, 3));
212 }
213
214 static force_inline void
215 expand_alpha_rev_2x128 (__m128i  data_lo,
216                         __m128i  data_hi,
217                         __m128i* alpha_lo,
218                         __m128i* alpha_hi)
219 {
220     __m128i lo, hi;
221
222     lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE (0, 0, 0, 0));
223     hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE (0, 0, 0, 0));
224     *alpha_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE (0, 0, 0, 0));
225     *alpha_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE (0, 0, 0, 0));
226 }
227
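/* pix_multiply_2x128 (below) multiplies two sets of unpacked channels
 * and divides by 255 with rounding.  Per 16-bit lane it computes the
 * scalar equivalent of (illustrative sketch, not part of the build):
 *
 *     uint32_t t = x * a + 0x0080;
 *     result = (t + (t >> 8)) >> 8;    // == (x * a) / 255, rounded
 *
 * The SSE2 code gets the second step from a single instruction:
 * _mm_mulhi_epu16 by 0x0101 returns (t * 257) >> 16, which is the same
 * rounded division by 255.  mask_0080 and mask_0101 are assumed to hold
 * 0x0080 and 0x0101 in every lane, as their names suggest (they are set
 * up elsewhere in this file).
 */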
228 static force_inline void
229 pix_multiply_2x128 (__m128i* data_lo,
230                     __m128i* data_hi,
231                     __m128i* alpha_lo,
232                     __m128i* alpha_hi,
233                     __m128i* ret_lo,
234                     __m128i* ret_hi)
235 {
236     __m128i lo, hi;
237
238     lo = _mm_mullo_epi16 (*data_lo, *alpha_lo);
239     hi = _mm_mullo_epi16 (*data_hi, *alpha_hi);
240     lo = _mm_adds_epu16 (lo, mask_0080);
241     hi = _mm_adds_epu16 (hi, mask_0080);
242     *ret_lo = _mm_mulhi_epu16 (lo, mask_0101);
243     *ret_hi = _mm_mulhi_epu16 (hi, mask_0101);
244 }
245
246 static force_inline void
247 pix_add_multiply_2x128 (__m128i* src_lo,
248                         __m128i* src_hi,
249                         __m128i* alpha_dst_lo,
250                         __m128i* alpha_dst_hi,
251                         __m128i* dst_lo,
252                         __m128i* dst_hi,
253                         __m128i* alpha_src_lo,
254                         __m128i* alpha_src_hi,
255                         __m128i* ret_lo,
256                         __m128i* ret_hi)
257 {
258     __m128i lo, hi;
259     __m128i mul_lo, mul_hi;
260
261     lo = _mm_mullo_epi16 (*src_lo, *alpha_dst_lo);
262     hi = _mm_mullo_epi16 (*src_hi, *alpha_dst_hi);
263     mul_lo = _mm_mullo_epi16 (*dst_lo, *alpha_src_lo);
264     mul_hi = _mm_mullo_epi16 (*dst_hi, *alpha_src_hi);
265     lo = _mm_adds_epu16 (lo, mask_0080);
266     hi = _mm_adds_epu16 (hi, mask_0080);
267     lo = _mm_adds_epu16 (lo, mul_lo);
268     hi = _mm_adds_epu16 (hi, mul_hi);
269     *ret_lo = _mm_mulhi_epu16 (lo, mask_0101);
270     *ret_hi = _mm_mulhi_epu16 (hi, mask_0101);
271 }
272
273 static force_inline void
274 negate_2x128 (__m128i  data_lo,
275               __m128i  data_hi,
276               __m128i* neg_lo,
277               __m128i* neg_hi)
278 {
279     *neg_lo = _mm_xor_si128 (data_lo, mask_00ff);
280     *neg_hi = _mm_xor_si128 (data_hi, mask_00ff);
281 }
282
283 static force_inline void
284 invert_colors_2x128 (__m128i  data_lo,
285                      __m128i  data_hi,
286                      __m128i* inv_lo,
287                      __m128i* inv_hi)
288 {
289     __m128i lo, hi;
290
291     lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE (3, 0, 1, 2));
292     hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE (3, 0, 1, 2));
293     *inv_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE (3, 0, 1, 2));
294     *inv_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE (3, 0, 1, 2));
295 }
296
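/* over_2x128 (below) is the Porter-Duff OVER operator on unpacked,
 * premultiplied data:
 *
 *     dst = src + (1 - alpha (src)) * dst        (per channel)
 *
 * negate_2x128 supplies 255 - alpha, pix_multiply_2x128 performs the
 * rounded multiply/divide by 255, and the saturating byte add combines
 * the two terms.
 */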
297 static force_inline void
298 over_2x128 (__m128i* src_lo,
299             __m128i* src_hi,
300             __m128i* alpha_lo,
301             __m128i* alpha_hi,
302             __m128i* dst_lo,
303             __m128i* dst_hi)
304 {
305     __m128i t1, t2;
306
307     negate_2x128 (*alpha_lo, *alpha_hi, &t1, &t2);
308
309     pix_multiply_2x128 (dst_lo, dst_hi, &t1, &t2, dst_lo, dst_hi);
310
311     *dst_lo = _mm_adds_epu8 (*src_lo, *dst_lo);
312     *dst_hi = _mm_adds_epu8 (*src_hi, *dst_hi);
313 }
314
315 static force_inline void
316 over_rev_non_pre_2x128 (__m128i  src_lo,
317                         __m128i  src_hi,
318                         __m128i* dst_lo,
319                         __m128i* dst_hi)
320 {
321     __m128i lo, hi;
322     __m128i alpha_lo, alpha_hi;
323
324     expand_alpha_2x128 (src_lo, src_hi, &alpha_lo, &alpha_hi);
325
326     lo = _mm_or_si128 (alpha_lo, mask_alpha);
327     hi = _mm_or_si128 (alpha_hi, mask_alpha);
328
329     invert_colors_2x128 (src_lo, src_hi, &src_lo, &src_hi);
330
331     pix_multiply_2x128 (&src_lo, &src_hi, &lo, &hi, &lo, &hi);
332
333     over_2x128 (&lo, &hi, &alpha_lo, &alpha_hi, dst_lo, dst_hi);
334 }
335
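/* in_over_2x128: (src IN mask) OVER dst.  Both the source and its
 * alpha are first multiplied by the mask, and the result is then
 * composited over the destination with over_2x128.
 */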
336 static force_inline void
337 in_over_2x128 (__m128i* src_lo,
338                __m128i* src_hi,
339                __m128i* alpha_lo,
340                __m128i* alpha_hi,
341                __m128i* mask_lo,
342                __m128i* mask_hi,
343                __m128i* dst_lo,
344                __m128i* dst_hi)
345 {
346     __m128i s_lo, s_hi;
347     __m128i a_lo, a_hi;
348
349     pix_multiply_2x128 (src_lo,   src_hi, mask_lo, mask_hi, &s_lo, &s_hi);
350     pix_multiply_2x128 (alpha_lo, alpha_hi, mask_lo, mask_hi, &a_lo, &a_hi);
351
352     over_2x128 (&s_lo, &s_hi, &a_lo, &a_hi, dst_lo, dst_hi);
353 }
354
355 static force_inline void
356 cache_prefetch (__m128i* addr)
357 {
358     _mm_prefetch (addr, _MM_HINT_T0);
359 }
360
361 static force_inline void
362 cache_prefetch_next (__m128i* addr)
363 {
364     _mm_prefetch (addr + 4, _MM_HINT_T0); /* 4 * 16 = 64 bytes ahead */
365 }
366
367 /* load 4 pixels from a 16-byte-aligned address */
368 static force_inline __m128i
369 load_128_aligned (__m128i* src)
370 {
371     return _mm_load_si128 (src);
372 }
373
374 /* load 4 pixels from an unaligned address */
375 static force_inline __m128i
376 load_128_unaligned (const __m128i* src)
377 {
378     return _mm_loadu_si128 (src);
379 }
380
381 /* save 4 pixels to a 16-byte-aligned address using a non-temporal
382  * (write-combining) store that bypasses the cache
383  */
384 static force_inline void
385 save_128_write_combining (__m128i* dst,
386                           __m128i  data)
387 {
388     _mm_stream_si128 (dst, data);
389 }
390
391 /* save 4 pixels to a 16-byte-aligned address */
392 static force_inline void
393 save_128_aligned (__m128i* dst,
394                   __m128i  data)
395 {
396     _mm_store_si128 (dst, data);
397 }
398
399 /* save 4 pixels to an unaligned address */
400 static force_inline void
401 save_128_unaligned (__m128i* dst,
402                     __m128i  data)
403 {
404     _mm_storeu_si128 (dst, data);
405 }
406
407 /* ------------------------------------------------------------------
408  * MMX inlines
409  */
410
411 static force_inline __m64
412 unpack_32_1x64 (uint32_t data)
413 {
414     return _mm_unpacklo_pi8 (_mm_cvtsi32_si64 (data), _mm_setzero_si64 ());
415 }
416
417 static force_inline __m64
418 expand_alpha_1x64 (__m64 data)
419 {
420     return _mm_shuffle_pi16 (data, _MM_SHUFFLE (3, 3, 3, 3));
421 }
422
423 static force_inline __m64
424 expand_alpha_rev_1x64 (__m64 data)
425 {
426     return _mm_shuffle_pi16 (data, _MM_SHUFFLE (0, 0, 0, 0));
427 }
428
429 static force_inline __m64
430 expand_pixel_8_1x64 (uint8_t data)
431 {
432     return _mm_shuffle_pi16 (
433         unpack_32_1x64 ((uint32_t)data), _MM_SHUFFLE (0, 0, 0, 0));
434 }
435
436 static force_inline __m64
437 pix_multiply_1x64 (__m64 data,
438                    __m64 alpha)
439 {
440     return _mm_mulhi_pu16 (_mm_adds_pu16 (_mm_mullo_pi16 (data, alpha),
441                                           mask_x0080),
442                            mask_x0101);
443 }
444
445 static force_inline __m64
446 pix_add_multiply_1x64 (__m64* src,
447                        __m64* alpha_dst,
448                        __m64* dst,
449                        __m64* alpha_src)
450 {
451     return _mm_mulhi_pu16 (
452         _mm_adds_pu16 (_mm_adds_pu16 (_mm_mullo_pi16 (*src, *alpha_dst),
453                                       mask_x0080),
454                        _mm_mullo_pi16 (*dst, *alpha_src)),
455         mask_x0101);
456 }
457
458 static force_inline __m64
459 negate_1x64 (__m64 data)
460 {
461     return _mm_xor_si64 (data, mask_x00ff);
462 }
463
464 static force_inline __m64
465 invert_colors_1x64 (__m64 data)
466 {
467     return _mm_shuffle_pi16 (data, _MM_SHUFFLE (3, 0, 1, 2));
468 }
469
470 static force_inline __m64
471 over_1x64 (__m64 src, __m64 alpha, __m64 dst)
472 {
473     return _mm_adds_pu8 (src, pix_multiply_1x64 (dst, negate_1x64 (alpha)));
474 }
475
476 static force_inline __m64
477 in_over_1x64 (__m64* src, __m64* alpha, __m64* mask, __m64* dst)
478 {
479     return over_1x64 (pix_multiply_1x64 (*src, *mask),
480                       pix_multiply_1x64 (*alpha, *mask),
481                       *dst);
482 }
483
484 static force_inline __m64
485 over_rev_non_pre_1x64 (__m64 src, __m64 dst)
486 {
487     __m64 alpha = expand_alpha_1x64 (src);
488
489     return over_1x64 (pix_multiply_1x64 (invert_colors_1x64 (src),
490                                          _mm_or_si64 (alpha, mask_x_alpha)),
491                       alpha,
492                       dst);
493 }
494
495 static force_inline uint32_t
496 pack_1x64_32 (__m64 data)
497 {
498     return _mm_cvtsi64_si32 (_mm_packs_pu16 (data, _mm_setzero_si64 ()));
499 }
500
501 /* Expand a 16-bit r5g6b5 pixel (held in the low word of an MMX
502  * register) into the unpacked form
503  *
504  *    00RR00GG00BB
505  *
506  * m = (m << (32 - 3)) | (m << (16 - 5)) | m;
507  * m = m & 0x01f0003f001f;
508  * m = m * 0x008404100840;
509  * m = m >> 8;
510  *
511  * Note the trick here - the top (red) word is shifted by an extra
512  * nibble (36 - 11 rather than 32 - 3) so that it does not bump into
513  * the middle (green) word.
514  */
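
/* Worked example of the steps above (added for illustration), assuming
 * mask_x565_rgb == 0x01f0003f001f and mask_x565_unpack == 0x008404100840
 * as the constants quoted in the comment imply:
 *
 *     pixel = 0xffff                       (white in r5g6b5)
 *     after the shifts and ors:  0x000001ffffffffff
 *     after the and:             0x000001f0003f001f
 *     after the multiply:        0x0000ffc0fff0ffc0
 *     after the >> 8 per word:   0x000000ff00ff00ff  (00RR00GG00BB, all 0xff)
 */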
515 static force_inline __m64
516 expand565_16_1x64 (uint16_t pixel)
517 {
518     __m64 p;
519     __m64 t1, t2;
520
521     p = _mm_cvtsi32_si64 ((uint32_t) pixel);
522
523     t1 = _mm_slli_si64 (p, 36 - 11);
524     t2 = _mm_slli_si64 (p, 16 - 5);
525
526     p = _mm_or_si64 (t1, p);
527     p = _mm_or_si64 (t2, p);
528     p = _mm_and_si64 (p, mask_x565_rgb);
529     p = _mm_mullo_pi16 (p, mask_x565_unpack);
530
531     return _mm_srli_pi16 (p, 8);
532 }
533
534 /* ----------------------------------------------------------------------------
535  * Compose Core transformations
536  */
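/* The *_pixel_* helpers below operate on a single pixel using the
 * __m64 (MMX-sized) inlines; they handle the unaligned head and the
 * tail of each loop.  core_combine_over_u_pixel_sse2 also short-circuits
 * the two trivial OVER cases: a fully opaque source simply replaces the
 * destination, and an all-zero source leaves it untouched.
 */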
537 static force_inline uint32_t
538 core_combine_over_u_pixel_sse2 (uint32_t src, uint32_t dst)
539 {
540     uint8_t a;
541     __m64 ms;
542
543     a = src >> 24;
544
545     if (a == 0xff)
546     {
547         return src;
548     }
549     else if (src)
550     {
551         ms = unpack_32_1x64 (src);
552         return pack_1x64_32 (
553             over_1x64 (ms, expand_alpha_1x64 (ms), unpack_32_1x64 (dst)));
554     }
555
556     return dst;
557 }
558
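/* combine1 and combine4 fetch one / four source pixels and, when a mask
 * is present (pm != NULL), multiply them by the mask's alpha channel
 * (unified masking: only the mask alpha is used, not its color
 * channels).  combine4 additionally returns zero early when all four
 * mask alphas are zero.
 */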
559 static force_inline uint32_t
560 combine1 (const uint32_t *ps, const uint32_t *pm)
561 {
562     uint32_t s = *ps;
563
564     if (pm)
565     {
566         __m64 ms, mm;
567
568         mm = unpack_32_1x64 (*pm);
569         mm = expand_alpha_1x64 (mm);
570
571         ms = unpack_32_1x64 (s);
572         ms = pix_multiply_1x64 (ms, mm);
573
574         s = pack_1x64_32 (ms);
575     }
576
577     return s;
578 }
579
580 static force_inline __m128i
581 combine4 (const __m128i *ps, const __m128i *pm)
582 {
583     __m128i xmm_src_lo, xmm_src_hi;
584     __m128i xmm_msk_lo, xmm_msk_hi;
585     __m128i s;
586
587     if (pm)
588     {
589         xmm_msk_lo = load_128_unaligned (pm);
590
591         if (is_transparent (xmm_msk_lo))
592             return _mm_setzero_si128 ();
593     }
594
595     s = load_128_unaligned (ps);
596
597     if (pm)
598     {
599         unpack_128_2x128 (s, &xmm_src_lo, &xmm_src_hi);
600         unpack_128_2x128 (xmm_msk_lo, &xmm_msk_lo, &xmm_msk_hi);
601
602         expand_alpha_2x128 (xmm_msk_lo, xmm_msk_hi, &xmm_msk_lo, &xmm_msk_hi);
603
604         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
605                             &xmm_msk_lo, &xmm_msk_hi,
606                             &xmm_src_lo, &xmm_src_hi);
607
608         s = pack_2x128_128 (xmm_src_lo, xmm_src_hi);
609     }
610
611     return s;
612 }
613
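/* All of the scanline combiners below follow the same overall shape
 * (illustrative sketch of the control flow, added commentary):
 *
 *     prefetch src, dst and mask;
 *     while (w && dst is not 16-byte aligned)     scalar head
 *         combine one pixel with the __m64 helpers;
 *     while (w >= 4)                              SIMD body
 *         prefetch the next cache line;
 *         load 4 src (and mask) pixels unaligned and 4 dst pixels
 *         aligned, unpack to 16 bits per channel, combine, then pack
 *         and store aligned;
 *     while (w)                                   scalar tail
 *         combine the remaining pixels one at a time;
 */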
614 static force_inline void
615 core_combine_over_u_sse2 (uint32_t*       pd,
616                           const uint32_t* ps,
617                           const uint32_t* pm,
618                           int             w)
619 {
620     uint32_t s, d;
621
622     __m128i xmm_dst_lo, xmm_dst_hi;
623     __m128i xmm_src_lo, xmm_src_hi;
624     __m128i xmm_alpha_lo, xmm_alpha_hi;
625
626     /* call prefetch hint to optimize cache load*/
627     cache_prefetch ((__m128i*)ps);
628     cache_prefetch ((__m128i*)pd);
629     cache_prefetch ((__m128i*)pm);
630
631     /* Align dst on a 16-byte boundary */
632     while (w &&
633            ((unsigned long)pd & 15))
634     {
635         d = *pd;
636         s = combine1 (ps, pm);
637
638         *pd++ = core_combine_over_u_pixel_sse2 (s, d);
639         ps++;
640         if (pm)
641             pm++;
642         w--;
643     }
644
645     /* call prefetch hint to optimize cache load*/
646     cache_prefetch ((__m128i*)ps);
647     cache_prefetch ((__m128i*)pd);
648     cache_prefetch ((__m128i*)pm);
649
650     while (w >= 4)
651     {
652         /* fill cache line with next memory */
653         cache_prefetch_next ((__m128i*)ps);
654         cache_prefetch_next ((__m128i*)pd);
655         cache_prefetch_next ((__m128i*)pm);
656
657         /* Load unaligned: the source and mask pointers are not
658          * guaranteed to be 16-byte aligned.
659          */
660         xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
661
662         if (is_opaque (xmm_src_hi))
663         {
664             save_128_aligned ((__m128i*)pd, xmm_src_hi);
665         }
666         else if (!is_zero (xmm_src_hi))
667         {
668             xmm_dst_hi = load_128_aligned ((__m128i*) pd);
669
670             unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
671             unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
672
673             expand_alpha_2x128 (
674                 xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi);
675
676             over_2x128 (&xmm_src_lo, &xmm_src_hi,
677                         &xmm_alpha_lo, &xmm_alpha_hi,
678                         &xmm_dst_lo, &xmm_dst_hi);
679
680             /* rebuild the 4 pixels and save */
681             save_128_aligned ((__m128i*)pd,
682                               pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
683         }
684
685         w -= 4;
686         ps += 4;
687         pd += 4;
688         if (pm)
689             pm += 4;
690     }
691
692     while (w)
693     {
694         d = *pd;
695         s = combine1 (ps, pm);
696
697         *pd++ = core_combine_over_u_pixel_sse2 (s, d);
698         ps++;
699         if (pm)
700             pm++;
701
702         w--;
703     }
704 }
705
706 static force_inline void
707 core_combine_over_reverse_u_sse2 (uint32_t*       pd,
708                                   const uint32_t* ps,
709                                   const uint32_t* pm,
710                                   int             w)
711 {
712     uint32_t s, d;
713
714     __m128i xmm_dst_lo, xmm_dst_hi;
715     __m128i xmm_src_lo, xmm_src_hi;
716     __m128i xmm_alpha_lo, xmm_alpha_hi;
717
718     /* call prefetch hint to optimize cache load*/
719     cache_prefetch ((__m128i*)ps);
720     cache_prefetch ((__m128i*)pd);
721     cache_prefetch ((__m128i*)pm);
722
723     /* Align dst on a 16-byte boundary */
724     while (w &&
725            ((unsigned long)pd & 15))
726     {
727         d = *pd;
728         s = combine1 (ps, pm);
729
730         *pd++ = core_combine_over_u_pixel_sse2 (d, s);
731         w--;
732         ps++;
733         if (pm)
734             pm++;
735     }
736
737     /* call prefetch hint to optimize cache load*/
738     cache_prefetch ((__m128i*)ps);
739     cache_prefetch ((__m128i*)pd);
740     cache_prefetch ((__m128i*)pm);
741
742     while (w >= 4)
743     {
744         /* fill cache line with next memory */
745         cache_prefetch_next ((__m128i*)ps);
746         cache_prefetch_next ((__m128i*)pd);
747         cache_prefetch_next ((__m128i*)pm);
748
749         /* Load unaligned: the source and mask pointers are not
750          * guaranteed to be 16-byte aligned.
751          */
752         xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
753         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
754
755         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
756         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
757
758         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
759                             &xmm_alpha_lo, &xmm_alpha_hi);
760
761         over_2x128 (&xmm_dst_lo, &xmm_dst_hi,
762                     &xmm_alpha_lo, &xmm_alpha_hi,
763                     &xmm_src_lo, &xmm_src_hi);
764
765         /* rebuild the 4 pixels and save */
766         save_128_aligned ((__m128i*)pd,
767                           pack_2x128_128 (xmm_src_lo, xmm_src_hi));
768
769         w -= 4;
770         ps += 4;
771         pd += 4;
772
773         if (pm)
774             pm += 4;
775     }
776
777     while (w)
778     {
779         d = *pd;
780         s = combine1 (ps, pm);
781
782         *pd++ = core_combine_over_u_pixel_sse2 (d, s);
783         ps++;
784         w--;
785         if (pm)
786             pm++;
787     }
788 }
789
790 static force_inline uint32_t
791 core_combine_in_u_pixelsse2 (uint32_t src, uint32_t dst)
792 {
793     uint32_t maska = src >> 24;
794
795     if (maska == 0)
796     {
797         return 0;
798     }
799     else if (maska != 0xff)
800     {
801         return pack_1x64_32 (
802             pix_multiply_1x64 (unpack_32_1x64 (dst),
803                                expand_alpha_1x64 (unpack_32_1x64 (src))));
804     }
805
806     return dst;
807 }
808
809 static force_inline void
810 core_combine_in_u_sse2 (uint32_t*       pd,
811                         const uint32_t* ps,
812                         const uint32_t* pm,
813                         int             w)
814 {
815     uint32_t s, d;
816
817     __m128i xmm_src_lo, xmm_src_hi;
818     __m128i xmm_dst_lo, xmm_dst_hi;
819
820     /* call prefetch hint to optimize cache load*/
821     cache_prefetch ((__m128i*)ps);
822     cache_prefetch ((__m128i*)pd);
823     cache_prefetch ((__m128i*)pm);
824
825     while (w && ((unsigned long) pd & 15))
826     {
827         s = combine1 (ps, pm);
828         d = *pd;
829
830         *pd++ = core_combine_in_u_pixelsse2 (d, s);
831         w--;
832         ps++;
833         if (pm)
834             pm++;
835     }
836
837     /* call prefetch hint to optimize cache load*/
838     cache_prefetch ((__m128i*)ps);
839     cache_prefetch ((__m128i*)pd);
840     cache_prefetch ((__m128i*)pm);
841
842     while (w >= 4)
843     {
844         /* fill cache line with next memory */
845         cache_prefetch_next ((__m128i*)ps);
846         cache_prefetch_next ((__m128i*)pd);
847         cache_prefetch_next ((__m128i*)pm);
848
849         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
850         xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*) pm);
851
852         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
853         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
854
855         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
856         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
857                             &xmm_dst_lo, &xmm_dst_hi,
858                             &xmm_dst_lo, &xmm_dst_hi);
859
860         save_128_aligned ((__m128i*)pd,
861                           pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
862
863         ps += 4;
864         pd += 4;
865         w -= 4;
866         if (pm)
867             pm += 4;
868     }
869
870     while (w)
871     {
872         s = combine1 (ps, pm);
873         d = *pd;
874
875         *pd++ = core_combine_in_u_pixelsse2 (d, s);
876         w--;
877         ps++;
878         if (pm)
879             pm++;
880     }
881 }
882
883 static force_inline void
884 core_combine_reverse_in_u_sse2 (uint32_t*       pd,
885                                 const uint32_t* ps,
886                                 const uint32_t *pm,
887                                 int             w)
888 {
889     uint32_t s, d;
890
891     __m128i xmm_src_lo, xmm_src_hi;
892     __m128i xmm_dst_lo, xmm_dst_hi;
893
894     /* call prefetch hint to optimize cache load*/
895     cache_prefetch ((__m128i*)ps);
896     cache_prefetch ((__m128i*)pd);
897     cache_prefetch ((__m128i*)pm);
898
899     while (w && ((unsigned long) pd & 15))
900     {
901         s = combine1 (ps, pm);
902         d = *pd;
903
904         *pd++ = core_combine_in_u_pixelsse2 (s, d);
905         ps++;
906         w--;
907         if (pm)
908             pm++;
909     }
910
911     /* call prefetch hint to optimize cache load*/
912     cache_prefetch ((__m128i*)ps);
913     cache_prefetch ((__m128i*)pd);
914     cache_prefetch ((__m128i*)pm);
915
916     while (w >= 4)
917     {
918         /* fill cache line with next memory */
919         cache_prefetch_next ((__m128i*)ps);
920         cache_prefetch_next ((__m128i*)pd);
921         cache_prefetch_next ((__m128i*)pm);
922
923         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
924         xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*)pm);
925
926         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
927         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
928
929         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
930         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
931                             &xmm_src_lo, &xmm_src_hi,
932                             &xmm_dst_lo, &xmm_dst_hi);
933
934         save_128_aligned (
935             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
936
937         ps += 4;
938         pd += 4;
939         w -= 4;
940         if (pm)
941             pm += 4;
942     }
943
944     while (w)
945     {
946         s = combine1 (ps, pm);
947         d = *pd;
948
949         *pd++ = core_combine_in_u_pixelsse2 (s, d);
950         w--;
951         ps++;
952         if (pm)
953             pm++;
954     }
955 }
956
957 static force_inline void
958 core_combine_reverse_out_u_sse2 (uint32_t*       pd,
959                                  const uint32_t* ps,
960                                  const uint32_t* pm,
961                                  int             w)
962 {
963     /* call prefetch hint to optimize cache load*/
964     cache_prefetch ((__m128i*)ps);
965     cache_prefetch ((__m128i*)pd);
966     cache_prefetch ((__m128i*)pm);
967
968     while (w && ((unsigned long) pd & 15))
969     {
970         uint32_t s = combine1 (ps, pm);
971         uint32_t d = *pd;
972
973         *pd++ = pack_1x64_32 (
974             pix_multiply_1x64 (
975                 unpack_32_1x64 (d), negate_1x64 (
976                     expand_alpha_1x64 (unpack_32_1x64 (s)))));
977         
978         if (pm)
979             pm++;
980         ps++;
981         w--;
982     }
983
984     /* call prefetch hint to optimize cache load*/
985     cache_prefetch ((__m128i*)ps);
986     cache_prefetch ((__m128i*)pd);
987     cache_prefetch ((__m128i*)pm);
988
989     while (w >= 4)
990     {
991         __m128i xmm_src_lo, xmm_src_hi;
992         __m128i xmm_dst_lo, xmm_dst_hi;
993
994         /* fill cache line with next memory */
995         cache_prefetch_next ((__m128i*)ps);
996         cache_prefetch_next ((__m128i*)pd);
997         cache_prefetch_next ((__m128i*)pm);
998
999         xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
1000         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
1001
1002         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1003         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1004
1005         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1006         negate_2x128       (xmm_src_lo, xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1007
1008         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
1009                             &xmm_src_lo, &xmm_src_hi,
1010                             &xmm_dst_lo, &xmm_dst_hi);
1011
1012         save_128_aligned (
1013             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1014
1015         ps += 4;
1016         pd += 4;
1017         if (pm)
1018             pm += 4;
1019
1020         w -= 4;
1021     }
1022
1023     while (w)
1024     {
1025         uint32_t s = combine1 (ps, pm);
1026         uint32_t d = *pd;
1027
1028         *pd++ = pack_1x64_32 (
1029             pix_multiply_1x64 (
1030                 unpack_32_1x64 (d), negate_1x64 (
1031                     expand_alpha_1x64 (unpack_32_1x64 (s)))));
1032         ps++;
1033         if (pm)
1034             pm++;
1035         w--;
1036     }
1037 }
1038
1039 static force_inline void
1040 core_combine_out_u_sse2 (uint32_t*       pd,
1041                          const uint32_t* ps,
1042                          const uint32_t* pm,
1043                          int             w)
1044 {
1045     /* call prefetch hint to optimize cache load*/
1046     cache_prefetch ((__m128i*)ps);
1047     cache_prefetch ((__m128i*)pd);
1048     cache_prefetch ((__m128i*)pm);
1049
1050     while (w && ((unsigned long) pd & 15))
1051     {
1052         uint32_t s = combine1 (ps, pm);
1053         uint32_t d = *pd;
1054
1055         *pd++ = pack_1x64_32 (
1056             pix_multiply_1x64 (
1057                 unpack_32_1x64 (s), negate_1x64 (
1058                     expand_alpha_1x64 (unpack_32_1x64 (d)))));
1059         w--;
1060         ps++;
1061         if (pm)
1062             pm++;
1063     }
1064
1065     /* call prefetch hint to optimize cache load*/
1066     cache_prefetch ((__m128i*)ps);
1067     cache_prefetch ((__m128i*)pd);
1068     cache_prefetch ((__m128i*)pm);
1069
1070     while (w >= 4)
1071     {
1072         __m128i xmm_src_lo, xmm_src_hi;
1073         __m128i xmm_dst_lo, xmm_dst_hi;
1074
1075         /* fill cache line with next memory */
1076         cache_prefetch_next ((__m128i*)ps);
1077         cache_prefetch_next ((__m128i*)pd);
1078         cache_prefetch_next ((__m128i*)pm);
1079
1080         xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*)pm);
1081         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
1082
1083         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1084         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1085
1086         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1087         negate_2x128       (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1088
1089         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
1090                             &xmm_dst_lo, &xmm_dst_hi,
1091                             &xmm_dst_lo, &xmm_dst_hi);
1092
1093         save_128_aligned (
1094             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1095
1096         ps += 4;
1097         pd += 4;
1098         w -= 4;
1099         if (pm)
1100             pm += 4;
1101     }
1102
1103     while (w)
1104     {
1105         uint32_t s = combine1 (ps, pm);
1106         uint32_t d = *pd;
1107
1108         *pd++ = pack_1x64_32 (
1109             pix_multiply_1x64 (
1110                 unpack_32_1x64 (s), negate_1x64 (
1111                     expand_alpha_1x64 (unpack_32_1x64 (d)))));
1112         w--;
1113         ps++;
1114         if (pm)
1115             pm++;
1116     }
1117 }
1118
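/* ATOP: dst = src * alpha (dst) + dst * (1 - alpha (src)).  The
 * add-multiply below is therefore fed the destination alpha and the
 * negated source alpha.
 */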
1119 static force_inline uint32_t
1120 core_combine_atop_u_pixel_sse2 (uint32_t src,
1121                                 uint32_t dst)
1122 {
1123     __m64 s = unpack_32_1x64 (src);
1124     __m64 d = unpack_32_1x64 (dst);
1125
1126     __m64 sa = negate_1x64 (expand_alpha_1x64 (s));
1127     __m64 da = expand_alpha_1x64 (d);
1128
1129     return pack_1x64_32 (pix_add_multiply_1x64 (&s, &da, &d, &sa));
1130 }
1131
1132 static force_inline void
1133 core_combine_atop_u_sse2 (uint32_t*       pd,
1134                           const uint32_t* ps,
1135                           const uint32_t* pm,
1136                           int             w)
1137 {
1138     uint32_t s, d;
1139
1140     __m128i xmm_src_lo, xmm_src_hi;
1141     __m128i xmm_dst_lo, xmm_dst_hi;
1142     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
1143     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
1144
1145     /* call prefetch hint to optimize cache load*/
1146     cache_prefetch ((__m128i*)ps);
1147     cache_prefetch ((__m128i*)pd);
1148     cache_prefetch ((__m128i*)pm);
1149
1150     while (w && ((unsigned long) pd & 15))
1151     {
1152         s = combine1 (ps, pm);
1153         d = *pd;
1154
1155         *pd++ = core_combine_atop_u_pixel_sse2 (s, d);
1156         w--;
1157         ps++;
1158         if (pm)
1159             pm++;
1160     }
1161
1162     /* call prefetch hint to optimize cache load*/
1163     cache_prefetch ((__m128i*)ps);
1164     cache_prefetch ((__m128i*)pd);
1165     cache_prefetch ((__m128i*)pm);
1166
1167     while (w >= 4)
1168     {
1169         /* fill cache line with next memory */
1170         cache_prefetch_next ((__m128i*)ps);
1171         cache_prefetch_next ((__m128i*)pd);
1172         cache_prefetch_next ((__m128i*)pm);
1173
1174         xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
1175         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
1176
1177         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1178         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1179
1180         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
1181                             &xmm_alpha_src_lo, &xmm_alpha_src_hi);
1182         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
1183                             &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
1184
1185         negate_2x128 (xmm_alpha_src_lo, xmm_alpha_src_hi,
1186                       &xmm_alpha_src_lo, &xmm_alpha_src_hi);
1187
1188         pix_add_multiply_2x128 (
1189             &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
1190             &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi,
1191             &xmm_dst_lo, &xmm_dst_hi);
1192
1193         save_128_aligned (
1194             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1195
1196         ps += 4;
1197         pd += 4;
1198         w -= 4;
1199         if (pm)
1200             pm += 4;
1201     }
1202
1203     while (w)
1204     {
1205         s = combine1 (ps, pm);
1206         d = *pd;
1207
1208         *pd++ = core_combine_atop_u_pixel_sse2 (s, d);
1209         w--;
1210         ps++;
1211         if (pm)
1212             pm++;
1213     }
1214 }
1215
1216 static force_inline uint32_t
1217 core_combine_reverse_atop_u_pixel_sse2 (uint32_t src,
1218                                         uint32_t dst)
1219 {
1220     __m64 s = unpack_32_1x64 (src);
1221     __m64 d = unpack_32_1x64 (dst);
1222
1223     __m64 sa = expand_alpha_1x64 (s);
1224     __m64 da = negate_1x64 (expand_alpha_1x64 (d));
1225
1226     return pack_1x64_32 (pix_add_multiply_1x64 (&s, &da, &d, &sa));
1227 }
1228
1229 static force_inline void
1230 core_combine_reverse_atop_u_sse2 (uint32_t*       pd,
1231                                   const uint32_t* ps,
1232                                   const uint32_t* pm,
1233                                   int             w)
1234 {
1235     uint32_t s, d;
1236
1237     __m128i xmm_src_lo, xmm_src_hi;
1238     __m128i xmm_dst_lo, xmm_dst_hi;
1239     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
1240     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
1241
1242     /* call prefetch hint to optimize cache load*/
1243     cache_prefetch ((__m128i*)ps);
1244     cache_prefetch ((__m128i*)pd);
1245     cache_prefetch ((__m128i*)pm);
1246
1247     while (w && ((unsigned long) pd & 15))
1248     {
1249         s = combine1 (ps, pm);
1250         d = *pd;
1251
1252         *pd++ = core_combine_reverse_atop_u_pixel_sse2 (s, d);
1253         ps++;
1254         w--;
1255         if (pm)
1256             pm++;
1257     }
1258
1259     /* call prefetch hint to optimize cache load*/
1260     cache_prefetch ((__m128i*)ps);
1261     cache_prefetch ((__m128i*)pd);
1262     cache_prefetch ((__m128i*)pm);
1263
1264     while (w >= 4)
1265     {
1266         /* fill cache line with next memory */
1267         cache_prefetch_next ((__m128i*)ps);
1268         cache_prefetch_next ((__m128i*)pd);
1269         cache_prefetch_next ((__m128i*)pm);
1270
1271         xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
1272         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
1273
1274         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1275         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1276
1277         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
1278                             &xmm_alpha_src_lo, &xmm_alpha_src_hi);
1279         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
1280                             &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
1281
1282         negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi,
1283                       &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
1284
1285         pix_add_multiply_2x128 (
1286             &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
1287             &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi,
1288             &xmm_dst_lo, &xmm_dst_hi);
1289
1290         save_128_aligned (
1291             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1292
1293         ps += 4;
1294         pd += 4;
1295         w -= 4;
1296         if (pm)
1297             pm += 4;
1298     }
1299
1300     while (w)
1301     {
1302         s = combine1 (ps, pm);
1303         d = *pd;
1304
1305         *pd++ = core_combine_reverse_atop_u_pixel_sse2 (s, d);
1306         ps++;
1307         w--;
1308         if (pm)
1309             pm++;
1310     }
1311 }
1312
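/* XOR: dst = src * (1 - alpha (dst)) + dst * (1 - alpha (src)); both
 * alphas are negated before the add-multiply.
 */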
1313 static force_inline uint32_t
1314 core_combine_xor_u_pixel_sse2 (uint32_t src,
1315                                uint32_t dst)
1316 {
1317     __m64 s = unpack_32_1x64 (src);
1318     __m64 d = unpack_32_1x64 (dst);
1319
1320     __m64 neg_d = negate_1x64 (expand_alpha_1x64 (d));
1321     __m64 neg_s = negate_1x64 (expand_alpha_1x64 (s));
1322
1323     return pack_1x64_32 (pix_add_multiply_1x64 (&s, &neg_d, &d, &neg_s));
1324 }
1325
1326 static force_inline void
1327 core_combine_xor_u_sse2 (uint32_t*       dst,
1328                          const uint32_t* src,
1329                          const uint32_t *mask,
1330                          int             width)
1331 {
1332     int w = width;
1333     uint32_t s, d;
1334     uint32_t* pd = dst;
1335     const uint32_t* ps = src;
1336     const uint32_t* pm = mask;
1337
1338     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
1339     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
1340     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
1341     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
1342
1343     /* call prefetch hint to optimize cache load*/
1344     cache_prefetch ((__m128i*)ps);
1345     cache_prefetch ((__m128i*)pd);
1346     cache_prefetch ((__m128i*)pm);
1347
1348     while (w && ((unsigned long) pd & 15))
1349     {
1350         s = combine1 (ps, pm);
1351         d = *pd;
1352
1353         *pd++ = core_combine_xor_u_pixel_sse2 (s, d);
1354         w--;
1355         ps++;
1356         if (pm)
1357             pm++;
1358     }
1359
1360     /* call prefetch hint to optimize cache load*/
1361     cache_prefetch ((__m128i*)ps);
1362     cache_prefetch ((__m128i*)pd);
1363     cache_prefetch ((__m128i*)pm);
1364
1365     while (w >= 4)
1366     {
1367         /* fill cache line with next memory */
1368         cache_prefetch_next ((__m128i*)ps);
1369         cache_prefetch_next ((__m128i*)pd);
1370         cache_prefetch_next ((__m128i*)pm);
1371
1372         xmm_src = combine4 ((__m128i*) ps, (__m128i*) pm);
1373         xmm_dst = load_128_aligned ((__m128i*) pd);
1374
1375         unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
1376         unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
1377
1378         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
1379                             &xmm_alpha_src_lo, &xmm_alpha_src_hi);
1380         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
1381                             &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
1382
1383         negate_2x128 (xmm_alpha_src_lo, xmm_alpha_src_hi,
1384                       &xmm_alpha_src_lo, &xmm_alpha_src_hi);
1385         negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi,
1386                       &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
1387
1388         pix_add_multiply_2x128 (
1389             &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
1390             &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi,
1391             &xmm_dst_lo, &xmm_dst_hi);
1392
1393         save_128_aligned (
1394             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1395
1396         ps += 4;
1397         pd += 4;
1398         w -= 4;
1399         if (pm)
1400             pm += 4;
1401     }
1402
1403     while (w)
1404     {
1405         s = combine1 (ps, pm);
1406         d = *pd;
1407
1408         *pd++ = core_combine_xor_u_pixel_sse2 (s, d);
1409         w--;
1410         ps++;
1411         if (pm)
1412             pm++;
1413     }
1414 }
1415
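/* ADD: dst = clamp (src + dst) per channel, so the SIMD body reduces to
 * one saturating byte add (_mm_adds_epu8) per group of four pixels.
 */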
1416 static force_inline void
1417 core_combine_add_u_sse2 (uint32_t*       dst,
1418                          const uint32_t* src,
1419                          const uint32_t* mask,
1420                          int             width)
1421 {
1422     int w = width;
1423     uint32_t s, d;
1424     uint32_t* pd = dst;
1425     const uint32_t* ps = src;
1426     const uint32_t* pm = mask;
1427
1428     /* call prefetch hint to optimize cache load*/
1429     cache_prefetch ((__m128i*)ps);
1430     cache_prefetch ((__m128i*)pd);
1431     cache_prefetch ((__m128i*)pm);
1432
1433     while (w && (unsigned long)pd & 15)
1434     {
1435         s = combine1 (ps, pm);
1436         d = *pd;
1437
1438         ps++;
1439         if (pm)
1440             pm++;
1441         *pd++ = _mm_cvtsi64_si32 (
1442             _mm_adds_pu8 (_mm_cvtsi32_si64 (s), _mm_cvtsi32_si64 (d)));
1443         w--;
1444     }
1445
1446     /* call prefetch hint to optimize cache load*/
1447     cache_prefetch ((__m128i*)ps);
1448     cache_prefetch ((__m128i*)pd);
1449     cache_prefetch ((__m128i*)pm);
1450
1451     while (w >= 4)
1452     {
1453         __m128i s;
1454
1455         /* fill cache line with next memory */
1456         cache_prefetch_next ((__m128i*)ps);
1457         cache_prefetch_next ((__m128i*)pd);
1458         cache_prefetch_next ((__m128i*)pm);
1459
1460         s = combine4 ((__m128i*)ps, (__m128i*)pm);
1461
1462         save_128_aligned (
1463             (__m128i*)pd, _mm_adds_epu8 (s, load_128_aligned  ((__m128i*)pd)));
1464
1465         pd += 4;
1466         ps += 4;
1467         if (pm)
1468             pm += 4;
1469         w -= 4;
1470     }
1471
1472     while (w--)
1473     {
1474         s = combine1 (ps, pm);
1475         d = *pd;
1476
1477         ps++;
1478         *pd++ = _mm_cvtsi64_si32 (
1479             _mm_adds_pu8 (_mm_cvtsi32_si64 (s), _mm_cvtsi32_si64 (d)));
1480         if (pm)
1481             pm++;
1482     }
1483 }
1484
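/* SATURATE: when the source alpha exceeds the space left in the
 * destination (~alpha (dst)), the source is first scaled by
 * DIV_UN8 (~alpha (dst), alpha (src)) so that the following add cannot
 * overflow; otherwise the operator degenerates into a plain ADD.  The
 * vector loop below uses a movemask compare to take the cheap ADD path
 * for groups of four pixels whenever every source alpha fits.
 */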
1485 static force_inline uint32_t
1486 core_combine_saturate_u_pixel_sse2 (uint32_t src,
1487                                     uint32_t dst)
1488 {
1489     __m64 ms = unpack_32_1x64 (src);
1490     __m64 md = unpack_32_1x64 (dst);
1491     uint32_t sa = src >> 24;
1492     uint32_t da = ~dst >> 24;
1493
1494     if (sa > da)
1495     {
1496         ms = pix_multiply_1x64 (
1497             ms, expand_alpha_1x64 (unpack_32_1x64 (DIV_UN8 (da, sa) << 24)));
1498     }
1499
1500     return pack_1x64_32 (_mm_adds_pu16 (md, ms));
1501 }
1502
1503 static force_inline void
1504 core_combine_saturate_u_sse2 (uint32_t *      pd,
1505                               const uint32_t *ps,
1506                               const uint32_t *pm,
1507                               int             w)
1508 {
1509     uint32_t s, d;
1510
1511     uint32_t pack_cmp;
1512     __m128i xmm_src, xmm_dst;
1513
1514     /* call prefetch hint to optimize cache load*/
1515     cache_prefetch ((__m128i*)ps);
1516     cache_prefetch ((__m128i*)pd);
1517     cache_prefetch ((__m128i*)pm);
1518
1519     while (w && (unsigned long)pd & 15)
1520     {
1521         s = combine1 (ps, pm);
1522         d = *pd;
1523
1524         *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1525         w--;
1526         ps++;
1527         if (pm)
1528             pm++;
1529     }
1530
1531     /* call prefetch hint to optimize cache load*/
1532     cache_prefetch ((__m128i*)ps);
1533     cache_prefetch ((__m128i*)pd);
1534     cache_prefetch ((__m128i*)pm);
1535
1536     while (w >= 4)
1537     {
1538         /* fill cache line with next memory */
1539         cache_prefetch_next ((__m128i*)ps);
1540         cache_prefetch_next ((__m128i*)pd);
1541         cache_prefetch_next ((__m128i*)pm);
1542
1543         xmm_dst = load_128_aligned  ((__m128i*)pd);
1544         xmm_src = combine4 ((__m128i*)ps, (__m128i*)pm);
1545
1546         pack_cmp = _mm_movemask_epi8 (
1547             _mm_cmpgt_epi32 (
1548                 _mm_srli_epi32 (xmm_src, 24),
1549                 _mm_srli_epi32 (_mm_xor_si128 (xmm_dst, mask_ff000000), 24)));
1550
1551         /* if some src alpha is greater than the respective ~dst alpha */
1552         if (pack_cmp)
1553         {
1554             s = combine1 (ps++, pm);
1555             d = *pd;
1556             *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1557             if (pm)
1558                 pm++;
1559
1560             s = combine1 (ps++, pm);
1561             d = *pd;
1562             *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1563             if (pm)
1564                 pm++;
1565
1566             s = combine1 (ps++, pm);
1567             d = *pd;
1568             *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1569             if (pm)
1570                 pm++;
1571
1572             s = combine1 (ps++, pm);
1573             d = *pd;
1574             *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1575             if (pm)
1576                 pm++;
1577         }
1578         else
1579         {
1580             save_128_aligned ((__m128i*)pd, _mm_adds_epu8 (xmm_dst, xmm_src));
1581
1582             pd += 4;
1583             ps += 4;
1584             if (pm)
1585                 pm += 4;
1586         }
1587
1588         w -= 4;
1589     }
1590
1591     while (w--)
1592     {
1593         s = combine1 (ps, pm);
1594         d = *pd;
1595
1596         *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1597         ps++;
1598         if (pm)
1599             pm++;
1600     }
1601 }
1602
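/* The *_ca variants implement component-alpha (per-channel) masking:
 * the mask supplies a separate 8-bit factor for each of A, R, G and B,
 * so the source (and, where needed, the source alpha) is multiplied by
 * the whole mask pixel rather than by its alpha alone.  The mask
 * pointer is never NULL in these functions.
 */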
1603 static force_inline void
1604 core_combine_src_ca_sse2 (uint32_t*       pd,
1605                           const uint32_t* ps,
1606                           const uint32_t *pm,
1607                           int             w)
1608 {
1609     uint32_t s, m;
1610
1611     __m128i xmm_src_lo, xmm_src_hi;
1612     __m128i xmm_mask_lo, xmm_mask_hi;
1613     __m128i xmm_dst_lo, xmm_dst_hi;
1614
1615     /* call prefetch hint to optimize cache load*/
1616     cache_prefetch ((__m128i*)ps);
1617     cache_prefetch ((__m128i*)pd);
1618     cache_prefetch ((__m128i*)pm);
1619
1620     while (w && (unsigned long)pd & 15)
1621     {
1622         s = *ps++;
1623         m = *pm++;
1624         *pd++ = pack_1x64_32 (
1625             pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)));
1626         w--;
1627     }
1628
1629     /* call prefetch hint to optimize cache load*/
1630     cache_prefetch ((__m128i*)ps);
1631     cache_prefetch ((__m128i*)pd);
1632     cache_prefetch ((__m128i*)pm);
1633
1634     while (w >= 4)
1635     {
1636         /* fill cache line with next memory */
1637         cache_prefetch_next ((__m128i*)ps);
1638         cache_prefetch_next ((__m128i*)pd);
1639         cache_prefetch_next ((__m128i*)pm);
1640
1641         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1642         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1643
1644         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1645         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1646
1647         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
1648                             &xmm_mask_lo, &xmm_mask_hi,
1649                             &xmm_dst_lo, &xmm_dst_hi);
1650
1651         save_128_aligned (
1652             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1653
1654         ps += 4;
1655         pd += 4;
1656         pm += 4;
1657         w -= 4;
1658     }
1659
1660     while (w)
1661     {
1662         s = *ps++;
1663         m = *pm++;
1664         *pd++ = pack_1x64_32 (
1665             pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)));
1666         w--;
1667     }
1668 }
1669
1670 static force_inline uint32_t
1671 core_combine_over_ca_pixel_sse2 (uint32_t src,
1672                                  uint32_t mask,
1673                                  uint32_t dst)
1674 {
1675     __m64 s = unpack_32_1x64 (src);
1676     __m64 alpha = expand_alpha_1x64 (s);
1677     __m64 unpk_mask = unpack_32_1x64 (mask);
1678     __m64 unpk_dst  = unpack_32_1x64 (dst);
1679
1680     return pack_1x64_32 (in_over_1x64 (&s, &alpha, &unpk_mask, &unpk_dst));
1681 }
1682
1683 static force_inline void
1684 core_combine_over_ca_sse2 (uint32_t*       pd,
1685                            const uint32_t* ps,
1686                            const uint32_t *pm,
1687                            int             w)
1688 {
1689     uint32_t s, m, d;
1690
1691     __m128i xmm_alpha_lo, xmm_alpha_hi;
1692     __m128i xmm_src_lo, xmm_src_hi;
1693     __m128i xmm_dst_lo, xmm_dst_hi;
1694     __m128i xmm_mask_lo, xmm_mask_hi;
1695
1696     /* call prefetch hint to optimize cache load*/
1697     cache_prefetch ((__m128i*)ps);
1698     cache_prefetch ((__m128i*)pd);
1699     cache_prefetch ((__m128i*)pm);
1700
1701     while (w && (unsigned long)pd & 15)
1702     {
1703         s = *ps++;
1704         m = *pm++;
1705         d = *pd;
1706
1707         *pd++ = core_combine_over_ca_pixel_sse2 (s, m, d);
1708         w--;
1709     }
1710
1711     /* call prefetch hint to optimize cache load*/
1712     cache_prefetch ((__m128i*)ps);
1713     cache_prefetch ((__m128i*)pd);
1714     cache_prefetch ((__m128i*)pm);
1715
1716     while (w >= 4)
1717     {
1718         /* fill cache line with next memory */
1719         cache_prefetch_next ((__m128i*)ps);
1720         cache_prefetch_next ((__m128i*)pd);
1721         cache_prefetch_next ((__m128i*)pm);
1722
1723         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
1724         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1725         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1726
1727         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1728         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1729         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1730
1731         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
1732                             &xmm_alpha_lo, &xmm_alpha_hi);
1733
1734         in_over_2x128 (&xmm_src_lo, &xmm_src_hi,
1735                        &xmm_alpha_lo, &xmm_alpha_hi,
1736                        &xmm_mask_lo, &xmm_mask_hi,
1737                        &xmm_dst_lo, &xmm_dst_hi);
1738
1739         save_128_aligned (
1740             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1741
1742         ps += 4;
1743         pd += 4;
1744         pm += 4;
1745         w -= 4;
1746     }
1747
1748     while (w)
1749     {
1750         s = *ps++;
1751         m = *pm++;
1752         d = *pd;
1753
1754         *pd++ = core_combine_over_ca_pixel_sse2 (s, m, d);
1755         w--;
1756     }
1757 }
1758
1759 static force_inline uint32_t
1760 core_combine_over_reverse_ca_pixel_sse2 (uint32_t src,
1761                                          uint32_t mask,
1762                                          uint32_t dst)
1763 {
1764     __m64 d = unpack_32_1x64 (dst);
1765
1766     return pack_1x64_32 (
1767         over_1x64 (d, expand_alpha_1x64 (d),
1768                    pix_multiply_1x64 (unpack_32_1x64 (src),
1769                                       unpack_32_1x64 (mask))));
1770 }
1771
1772 static force_inline void
1773 core_combine_over_reverse_ca_sse2 (uint32_t*       pd,
1774                                    const uint32_t* ps,
1775                                    const uint32_t *pm,
1776                                    int             w)
1777 {
1778     uint32_t s, m, d;
1779
1780     __m128i xmm_alpha_lo, xmm_alpha_hi;
1781     __m128i xmm_src_lo, xmm_src_hi;
1782     __m128i xmm_dst_lo, xmm_dst_hi;
1783     __m128i xmm_mask_lo, xmm_mask_hi;
1784
1785     /* call prefetch hint to optimize cache load*/
1786     cache_prefetch ((__m128i*)ps);
1787     cache_prefetch ((__m128i*)pd);
1788     cache_prefetch ((__m128i*)pm);
1789
1790     while (w && (unsigned long)pd & 15)
1791     {
1792         s = *ps++;
1793         m = *pm++;
1794         d = *pd;
1795
1796         *pd++ = core_combine_over_reverse_ca_pixel_sse2 (s, m, d);
1797         w--;
1798     }
1799
1800     /* call prefetch hint to optimize cache load*/
1801     cache_prefetch ((__m128i*)ps);
1802     cache_prefetch ((__m128i*)pd);
1803     cache_prefetch ((__m128i*)pm);
1804
1805     while (w >= 4)
1806     {
1807         /* fill cache line with next memory */
1808         cache_prefetch_next ((__m128i*)ps);
1809         cache_prefetch_next ((__m128i*)pd);
1810         cache_prefetch_next ((__m128i*)pm);
1811
1812         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
1813         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1814         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1815
1816         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1817         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1818         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1819
1820         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
1821                             &xmm_alpha_lo, &xmm_alpha_hi);
1822         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
1823                             &xmm_mask_lo, &xmm_mask_hi,
1824                             &xmm_mask_lo, &xmm_mask_hi);
1825
1826         over_2x128 (&xmm_dst_lo, &xmm_dst_hi,
1827                     &xmm_alpha_lo, &xmm_alpha_hi,
1828                     &xmm_mask_lo, &xmm_mask_hi);
1829
1830         save_128_aligned (
1831             (__m128i*)pd, pack_2x128_128 (xmm_mask_lo, xmm_mask_hi));
1832
1833         ps += 4;
1834         pd += 4;
1835         pm += 4;
1836         w -= 4;
1837     }
1838
1839     while (w)
1840     {
1841         s = *ps++;
1842         m = *pm++;
1843         d = *pd;
1844
1845         *pd++ = core_combine_over_reverse_ca_pixel_sse2 (s, m, d);
1846         w--;
1847     }
1848 }
1849
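     /* Component-alpha IN: result = (src * mask) * dst.alpha, per channel. */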
1850 static force_inline void
1851 core_combine_in_ca_sse2 (uint32_t *      pd,
1852                          const uint32_t *ps,
1853                          const uint32_t *pm,
1854                          int             w)
1855 {
1856     uint32_t s, m, d;
1857
1858     __m128i xmm_alpha_lo, xmm_alpha_hi;
1859     __m128i xmm_src_lo, xmm_src_hi;
1860     __m128i xmm_dst_lo, xmm_dst_hi;
1861     __m128i xmm_mask_lo, xmm_mask_hi;
1862
1863     /* call prefetch hint to optimize cache load*/
1864     cache_prefetch ((__m128i*)ps);
1865     cache_prefetch ((__m128i*)pd);
1866     cache_prefetch ((__m128i*)pm);
1867
1868     while (w && (unsigned long)pd & 15)
1869     {
1870         s = *ps++;
1871         m = *pm++;
1872         d = *pd;
1873
1874         *pd++ = pack_1x64_32 (
1875             pix_multiply_1x64 (
1876                 pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)),
1877                 expand_alpha_1x64 (unpack_32_1x64 (d))));
1878
1879         w--;
1880     }
1881
1882     /* call prefetch hint to optimize cache load*/
1883     cache_prefetch ((__m128i*)ps);
1884     cache_prefetch ((__m128i*)pd);
1885     cache_prefetch ((__m128i*)pm);
1886
1887     while (w >= 4)
1888     {
1889         /* fill cache line with next memory */
1890         cache_prefetch_next ((__m128i*)ps);
1891         cache_prefetch_next ((__m128i*)pd);
1892         cache_prefetch_next ((__m128i*)pm);
1893
1894         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
1895         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1896         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1897
1898         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1899         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1900         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1901
1902         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
1903                             &xmm_alpha_lo, &xmm_alpha_hi);
1904
1905         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
1906                             &xmm_mask_lo, &xmm_mask_hi,
1907                             &xmm_dst_lo, &xmm_dst_hi);
1908
1909         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
1910                             &xmm_alpha_lo, &xmm_alpha_hi,
1911                             &xmm_dst_lo, &xmm_dst_hi);
1912
1913         save_128_aligned (
1914             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1915
1916         ps += 4;
1917         pd += 4;
1918         pm += 4;
1919         w -= 4;
1920     }
1921
1922     while (w)
1923     {
1924         s = *ps++;
1925         m = *pm++;
1926         d = *pd;
1927
1928         *pd++ = pack_1x64_32 (
1929             pix_multiply_1x64 (
1930                 pix_multiply_1x64 (
1931                     unpack_32_1x64 (s), unpack_32_1x64 (m)),
1932                 expand_alpha_1x64 (unpack_32_1x64 (d))));
1933
1934         w--;
1935     }
1936 }
1937
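     /* Component-alpha IN_REVERSE: result = dst * (mask * src.alpha), per channel. */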
1938 static force_inline void
1939 core_combine_in_reverse_ca_sse2 (uint32_t *      pd,
1940                                  const uint32_t *ps,
1941                                  const uint32_t *pm,
1942                                  int             w)
1943 {
1944     uint32_t s, m, d;
1945
1946     __m128i xmm_alpha_lo, xmm_alpha_hi;
1947     __m128i xmm_src_lo, xmm_src_hi;
1948     __m128i xmm_dst_lo, xmm_dst_hi;
1949     __m128i xmm_mask_lo, xmm_mask_hi;
1950
1951     /* call prefetch hint to optimize cache load*/
1952     cache_prefetch ((__m128i*)ps);
1953     cache_prefetch ((__m128i*)pd);
1954     cache_prefetch ((__m128i*)pm);
1955
1956     while (w && (unsigned long)pd & 15)
1957     {
1958         s = *ps++;
1959         m = *pm++;
1960         d = *pd;
1961
1962         *pd++ = pack_1x64_32 (
1963             pix_multiply_1x64 (
1964                 unpack_32_1x64 (d),
1965                 pix_multiply_1x64 (unpack_32_1x64 (m),
1966                                    expand_alpha_1x64 (unpack_32_1x64 (s)))));
1967         w--;
1968     }
1969
1970     /* call prefetch hint to optimize cache load*/
1971     cache_prefetch ((__m128i*)ps);
1972     cache_prefetch ((__m128i*)pd);
1973     cache_prefetch ((__m128i*)pm);
1974
1975     while (w >= 4)
1976     {
1977         /* fill cache line with next memory */
1978         cache_prefetch_next ((__m128i*)ps);
1979         cache_prefetch_next ((__m128i*)pd);
1980         cache_prefetch_next ((__m128i*)pm);
1981
1982         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
1983         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1984         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1985
1986         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1987         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1988         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1989
1990         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
1991                             &xmm_alpha_lo, &xmm_alpha_hi);
1992         pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
1993                             &xmm_alpha_lo, &xmm_alpha_hi,
1994                             &xmm_alpha_lo, &xmm_alpha_hi);
1995
1996         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
1997                             &xmm_alpha_lo, &xmm_alpha_hi,
1998                             &xmm_dst_lo, &xmm_dst_hi);
1999
2000         save_128_aligned (
2001             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2002
2003         ps += 4;
2004         pd += 4;
2005         pm += 4;
2006         w -= 4;
2007     }
2008
2009     while (w)
2010     {
2011         s = *ps++;
2012         m = *pm++;
2013         d = *pd;
2014
2015         *pd++ = pack_1x64_32 (
2016             pix_multiply_1x64 (
2017                 unpack_32_1x64 (d),
2018                 pix_multiply_1x64 (unpack_32_1x64 (m),
2019                                    expand_alpha_1x64 (unpack_32_1x64 (s)))));
2020         w--;
2021     }
2022 }
2023
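     /* Component-alpha OUT: result = (src * mask) * (1 - dst.alpha), per channel. */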
2024 static force_inline void
2025 core_combine_out_ca_sse2 (uint32_t *      pd,
2026                           const uint32_t *ps,
2027                           const uint32_t *pm,
2028                           int             w)
2029 {
2030     uint32_t s, m, d;
2031
2032     __m128i xmm_alpha_lo, xmm_alpha_hi;
2033     __m128i xmm_src_lo, xmm_src_hi;
2034     __m128i xmm_dst_lo, xmm_dst_hi;
2035     __m128i xmm_mask_lo, xmm_mask_hi;
2036
2037     /* call prefetch hint to optimize cache load*/
2038     cache_prefetch ((__m128i*)ps);
2039     cache_prefetch ((__m128i*)pd);
2040     cache_prefetch ((__m128i*)pm);
2041
2042     while (w && (unsigned long)pd & 15)
2043     {
2044         s = *ps++;
2045         m = *pm++;
2046         d = *pd;
2047
2048         *pd++ = pack_1x64_32 (
2049             pix_multiply_1x64 (
2050                 pix_multiply_1x64 (
2051                     unpack_32_1x64 (s), unpack_32_1x64 (m)),
2052                 negate_1x64 (expand_alpha_1x64 (unpack_32_1x64 (d)))));
2053         w--;
2054     }
2055
2056     /* call prefetch hint to optimize cache load*/
2057     cache_prefetch ((__m128i*)ps);
2058     cache_prefetch ((__m128i*)pd);
2059     cache_prefetch ((__m128i*)pm);
2060
2061     while (w >= 4)
2062     {
2063         /* fill cache line with next memory */
2064         cache_prefetch_next ((__m128i*)ps);
2065         cache_prefetch_next ((__m128i*)pd);
2066         cache_prefetch_next ((__m128i*)pm);
2067
2068         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
2069         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
2070         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
2071
2072         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
2073         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
2074         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2075
2076         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
2077                             &xmm_alpha_lo, &xmm_alpha_hi);
2078         negate_2x128 (xmm_alpha_lo, xmm_alpha_hi,
2079                       &xmm_alpha_lo, &xmm_alpha_hi);
2080
2081         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
2082                             &xmm_mask_lo, &xmm_mask_hi,
2083                             &xmm_dst_lo, &xmm_dst_hi);
2084         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
2085                             &xmm_alpha_lo, &xmm_alpha_hi,
2086                             &xmm_dst_lo, &xmm_dst_hi);
2087
2088         save_128_aligned (
2089             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2090
2091         ps += 4;
2092         pd += 4;
2093         pm += 4;
2094         w -= 4;
2095     }
2096
2097     while (w)
2098     {
2099         s = *ps++;
2100         m = *pm++;
2101         d = *pd;
2102
2103         *pd++ = pack_1x64_32 (
2104             pix_multiply_1x64 (
2105                 pix_multiply_1x64 (
2106                     unpack_32_1x64 (s), unpack_32_1x64 (m)),
2107                 negate_1x64 (expand_alpha_1x64 (unpack_32_1x64 (d)))));
2108
2109         w--;
2110     }
2111 }
2112
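     /* Component-alpha OUT_REVERSE: result = dst * (1 - mask * src.alpha), per channel. */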
2113 static force_inline void
2114 core_combine_out_reverse_ca_sse2 (uint32_t *      pd,
2115                                   const uint32_t *ps,
2116                                   const uint32_t *pm,
2117                                   int             w)
2118 {
2119     uint32_t s, m, d;
2120
2121     __m128i xmm_alpha_lo, xmm_alpha_hi;
2122     __m128i xmm_src_lo, xmm_src_hi;
2123     __m128i xmm_dst_lo, xmm_dst_hi;
2124     __m128i xmm_mask_lo, xmm_mask_hi;
2125
2126     /* call prefetch hint to optimize cache load*/
2127     cache_prefetch ((__m128i*)ps);
2128     cache_prefetch ((__m128i*)pd);
2129     cache_prefetch ((__m128i*)pm);
2130
2131     while (w && (unsigned long)pd & 15)
2132     {
2133         s = *ps++;
2134         m = *pm++;
2135         d = *pd;
2136
2137         *pd++ = pack_1x64_32 (
2138             pix_multiply_1x64 (
2139                 unpack_32_1x64 (d),
2140                 negate_1x64 (pix_multiply_1x64 (
2141                                  unpack_32_1x64 (m),
2142                                  expand_alpha_1x64 (unpack_32_1x64 (s))))));
2143         w--;
2144     }
2145
2146     /* call prefetch hint to optimize cache load*/
2147     cache_prefetch ((__m128i*)ps);
2148     cache_prefetch ((__m128i*)pd);
2149     cache_prefetch ((__m128i*)pm);
2150
2151     while (w >= 4)
2152     {
2153         /* fill cache line with next memory */
2154         cache_prefetch_next ((__m128i*)ps);
2155         cache_prefetch_next ((__m128i*)pd);
2156         cache_prefetch_next ((__m128i*)pm);
2157
2158         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
2159         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
2160         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
2161
2162         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
2163         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
2164         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2165
2166         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
2167                             &xmm_alpha_lo, &xmm_alpha_hi);
2168
2169         pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
2170                             &xmm_alpha_lo, &xmm_alpha_hi,
2171                             &xmm_mask_lo, &xmm_mask_hi);
2172
2173         negate_2x128 (xmm_mask_lo, xmm_mask_hi,
2174                       &xmm_mask_lo, &xmm_mask_hi);
2175
2176         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
2177                             &xmm_mask_lo, &xmm_mask_hi,
2178                             &xmm_dst_lo, &xmm_dst_hi);
2179
2180         save_128_aligned (
2181             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2182
2183         ps += 4;
2184         pd += 4;
2185         pm += 4;
2186         w -= 4;
2187     }
2188
2189     while (w)
2190     {
2191         s = *ps++;
2192         m = *pm++;
2193         d = *pd;
2194
2195         *pd++ = pack_1x64_32 (
2196             pix_multiply_1x64 (
2197                 unpack_32_1x64 (d),
2198                 negate_1x64 (pix_multiply_1x64 (
2199                                  unpack_32_1x64 (m),
2200                                  expand_alpha_1x64 (unpack_32_1x64 (s))))));
2201         w--;
2202     }
2203 }
2204
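     /* Component-alpha ATOP for one pixel:
      * result = (src * mask) * dst.alpha + dst * (1 - mask * src.alpha).
      */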
2205 static force_inline uint32_t
2206 core_combine_atop_ca_pixel_sse2 (uint32_t src,
2207                                  uint32_t mask,
2208                                  uint32_t dst)
2209 {
2210     __m64 m = unpack_32_1x64 (mask);
2211     __m64 s = unpack_32_1x64 (src);
2212     __m64 d = unpack_32_1x64 (dst);
2213     __m64 sa = expand_alpha_1x64 (s);
2214     __m64 da = expand_alpha_1x64 (d);
2215
2216     s = pix_multiply_1x64 (s, m);
2217     m = negate_1x64 (pix_multiply_1x64 (m, sa));
2218
2219     return pack_1x64_32 (pix_add_multiply_1x64 (&d, &m, &s, &da));
2220 }
2221
2222 static force_inline void
2223 core_combine_atop_ca_sse2 (uint32_t *      pd,
2224                            const uint32_t *ps,
2225                            const uint32_t *pm,
2226                            int             w)
2227 {
2228     uint32_t s, m, d;
2229
2230     __m128i xmm_src_lo, xmm_src_hi;
2231     __m128i xmm_dst_lo, xmm_dst_hi;
2232     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
2233     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
2234     __m128i xmm_mask_lo, xmm_mask_hi;
2235
2236     /* call prefetch hint to optimize cache load*/
2237     cache_prefetch ((__m128i*)ps);
2238     cache_prefetch ((__m128i*)pd);
2239     cache_prefetch ((__m128i*)pm);
2240
2241     while (w && (unsigned long)pd & 15)
2242     {
2243         s = *ps++;
2244         m = *pm++;
2245         d = *pd;
2246
2247         *pd++ = core_combine_atop_ca_pixel_sse2 (s, m, d);
2248         w--;
2249     }
2250
2251     /* call prefetch hint to optimize cache load*/
2252     cache_prefetch ((__m128i*)ps);
2253     cache_prefetch ((__m128i*)pd);
2254     cache_prefetch ((__m128i*)pm);
2255
2256     while (w >= 4)
2257     {
2258         /* fill cache line with next memory */
2259         cache_prefetch_next ((__m128i*)ps);
2260         cache_prefetch_next ((__m128i*)pd);
2261         cache_prefetch_next ((__m128i*)pm);
2262
2263         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
2264         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
2265         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
2266
2267         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
2268         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
2269         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2270
2271         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
2272                             &xmm_alpha_src_lo, &xmm_alpha_src_hi);
2273         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
2274                             &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
2275
2276         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
2277                             &xmm_mask_lo, &xmm_mask_hi,
2278                             &xmm_src_lo, &xmm_src_hi);
2279         pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
2280                             &xmm_alpha_src_lo, &xmm_alpha_src_hi,
2281                             &xmm_mask_lo, &xmm_mask_hi);
2282
2283         negate_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2284
2285         pix_add_multiply_2x128 (
2286             &xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi,
2287             &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
2288             &xmm_dst_lo, &xmm_dst_hi);
2289
2290         save_128_aligned (
2291             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2292
2293         ps += 4;
2294         pd += 4;
2295         pm += 4;
2296         w -= 4;
2297     }
2298
2299     while (w)
2300     {
2301         s = *ps++;
2302         m = *pm++;
2303         d = *pd;
2304
2305         *pd++ = core_combine_atop_ca_pixel_sse2 (s, m, d);
2306         w--;
2307     }
2308 }
2309
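     /* Component-alpha ATOP_REVERSE for one pixel:
      * result = (src * mask) * (1 - dst.alpha) + dst * (mask * src.alpha).
      */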
2310 static force_inline uint32_t
2311 core_combine_reverse_atop_ca_pixel_sse2 (uint32_t src,
2312                                          uint32_t mask,
2313                                          uint32_t dst)
2314 {
2315     __m64 m = unpack_32_1x64 (mask);
2316     __m64 s = unpack_32_1x64 (src);
2317     __m64 d = unpack_32_1x64 (dst);
2318
2319     __m64 da = negate_1x64 (expand_alpha_1x64 (d));
2320     __m64 sa = expand_alpha_1x64 (s);
2321
2322     s = pix_multiply_1x64 (s, m);
2323     m = pix_multiply_1x64 (m, sa);
2324
2325     return pack_1x64_32 (pix_add_multiply_1x64 (&d, &m, &s, &da));
2326 }
2327
2328 static force_inline void
2329 core_combine_reverse_atop_ca_sse2 (uint32_t *      pd,
2330                                    const uint32_t *ps,
2331                                    const uint32_t *pm,
2332                                    int             w)
2333 {
2334     uint32_t s, m, d;
2335
2336     __m128i xmm_src_lo, xmm_src_hi;
2337     __m128i xmm_dst_lo, xmm_dst_hi;
2338     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
2339     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
2340     __m128i xmm_mask_lo, xmm_mask_hi;
2341
2342     /* call prefetch hint to optimize cache load*/
2343     cache_prefetch ((__m128i*)ps);
2344     cache_prefetch ((__m128i*)pd);
2345     cache_prefetch ((__m128i*)pm);
2346
2347     while (w && (unsigned long)pd & 15)
2348     {
2349         s = *ps++;
2350         m = *pm++;
2351         d = *pd;
2352
2353         *pd++ = core_combine_reverse_atop_ca_pixel_sse2 (s, m, d);
2354         w--;
2355     }
2356
2357     /* call prefetch hint to optimize cache load*/
2358     cache_prefetch ((__m128i*)ps);
2359     cache_prefetch ((__m128i*)pd);
2360     cache_prefetch ((__m128i*)pm);
2361
2362     while (w >= 4)
2363     {
2364         /* fill cache line with next memory */
2365         cache_prefetch_next ((__m128i*)ps);
2366         cache_prefetch_next ((__m128i*)pd);
2367         cache_prefetch_next ((__m128i*)pm);
2368
2369         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
2370         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
2371         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
2372
2373         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
2374         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
2375         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2376
2377         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
2378                             &xmm_alpha_src_lo, &xmm_alpha_src_hi);
2379         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
2380                             &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
2381
2382         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
2383                             &xmm_mask_lo, &xmm_mask_hi,
2384                             &xmm_src_lo, &xmm_src_hi);
2385         pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
2386                             &xmm_alpha_src_lo, &xmm_alpha_src_hi,
2387                             &xmm_mask_lo, &xmm_mask_hi);
2388
2389         negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi,
2390                       &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
2391
2392         pix_add_multiply_2x128 (
2393             &xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi,
2394             &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
2395             &xmm_dst_lo, &xmm_dst_hi);
2396
2397         save_128_aligned (
2398             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2399
2400         ps += 4;
2401         pd += 4;
2402         pm += 4;
2403         w -= 4;
2404     }
2405
2406     while (w)
2407     {
2408         s = *ps++;
2409         m = *pm++;
2410         d = *pd;
2411
2412         *pd++ = core_combine_reverse_atop_ca_pixel_sse2 (s, m, d);
2413         w--;
2414     }
2415 }
2416
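     /* Component-alpha XOR for one pixel:
      * result = (src * mask) * (1 - dst.alpha) + dst * (1 - mask * src.alpha).
      */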
2417 static force_inline uint32_t
2418 core_combine_xor_ca_pixel_sse2 (uint32_t src,
2419                                 uint32_t mask,
2420                                 uint32_t dst)
2421 {
2422     __m64 a = unpack_32_1x64 (mask);
2423     __m64 s = unpack_32_1x64 (src);
2424     __m64 d = unpack_32_1x64 (dst);
2425
2426     __m64 alpha_dst = negate_1x64 (pix_multiply_1x64 (
2427                                        a, expand_alpha_1x64 (s)));
2428     __m64 dest      = pix_multiply_1x64 (s, a);
2429     __m64 alpha_src = negate_1x64 (expand_alpha_1x64 (d));
2430
2431     return pack_1x64_32 (pix_add_multiply_1x64 (&d,
2432                                                 &alpha_dst,
2433                                                 &dest,
2434                                                 &alpha_src));
2435 }
2436
2437 static force_inline void
2438 core_combine_xor_ca_sse2 (uint32_t *      pd,
2439                           const uint32_t *ps,
2440                           const uint32_t *pm,
2441                           int             w)
2442 {
2443     uint32_t s, m, d;
2444
2445     __m128i xmm_src_lo, xmm_src_hi;
2446     __m128i xmm_dst_lo, xmm_dst_hi;
2447     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
2448     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
2449     __m128i xmm_mask_lo, xmm_mask_hi;
2450
2451     /* call prefetch hint to optimize cache load*/
2452     cache_prefetch ((__m128i*)ps);
2453     cache_prefetch ((__m128i*)pd);
2454     cache_prefetch ((__m128i*)pm);
2455
2456     while (w && (unsigned long)pd & 15)
2457     {
2458         s = *ps++;
2459         m = *pm++;
2460         d = *pd;
2461
2462         *pd++ = core_combine_xor_ca_pixel_sse2 (s, m, d);
2463         w--;
2464     }
2465
2466     /* call prefetch hint to optimize cache load*/
2467     cache_prefetch ((__m128i*)ps);
2468     cache_prefetch ((__m128i*)pd);
2469     cache_prefetch ((__m128i*)pm);
2470
2471     while (w >= 4)
2472     {
2473         /* fill cache line with next memory */
2474         cache_prefetch_next ((__m128i*)ps);
2475         cache_prefetch_next ((__m128i*)pd);
2476         cache_prefetch_next ((__m128i*)pm);
2477
2478         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
2479         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
2480         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
2481
2482         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
2483         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
2484         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2485
2486         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
2487                             &xmm_alpha_src_lo, &xmm_alpha_src_hi);
2488         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
2489                             &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
2490
2491         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
2492                             &xmm_mask_lo, &xmm_mask_hi,
2493                             &xmm_src_lo, &xmm_src_hi);
2494         pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
2495                             &xmm_alpha_src_lo, &xmm_alpha_src_hi,
2496                             &xmm_mask_lo, &xmm_mask_hi);
2497
2498         negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi,
2499                       &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
2500         negate_2x128 (xmm_mask_lo, xmm_mask_hi,
2501                       &xmm_mask_lo, &xmm_mask_hi);
2502
2503         pix_add_multiply_2x128 (
2504             &xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi,
2505             &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
2506             &xmm_dst_lo, &xmm_dst_hi);
2507
2508         save_128_aligned (
2509             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2510
2511         ps += 4;
2512         pd += 4;
2513         pm += 4;
2514         w -= 4;
2515     }
2516
2517     while (w)
2518     {
2519         s = *ps++;
2520         m = *pm++;
2521         d = *pd;
2522
2523         *pd++ = core_combine_xor_ca_pixel_sse2 (s, m, d);
2524         w--;
2525     }
2526 }
2527
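     /* Component-alpha ADD: result = saturate (src * mask + dst), per channel. */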
2528 static force_inline void
2529 core_combine_add_ca_sse2 (uint32_t *      pd,
2530                           const uint32_t *ps,
2531                           const uint32_t *pm,
2532                           int             w)
2533 {
2534     uint32_t s, m, d;
2535
2536     __m128i xmm_src_lo, xmm_src_hi;
2537     __m128i xmm_dst_lo, xmm_dst_hi;
2538     __m128i xmm_mask_lo, xmm_mask_hi;
2539
2540     /* call prefetch hint to optimize cache load*/
2541     cache_prefetch ((__m128i*)ps);
2542     cache_prefetch ((__m128i*)pd);
2543     cache_prefetch ((__m128i*)pm);
2544
2545     while (w && (unsigned long)pd & 15)
2546     {
2547         s = *ps++;
2548         m = *pm++;
2549         d = *pd;
2550
2551         *pd++ = pack_1x64_32 (
2552             _mm_adds_pu8 (pix_multiply_1x64 (unpack_32_1x64 (s),
2553                                              unpack_32_1x64 (m)),
2554                           unpack_32_1x64 (d)));
2555         w--;
2556     }
2557
2558     /* call prefetch hint to optimize cache load*/
2559     cache_prefetch ((__m128i*)ps);
2560     cache_prefetch ((__m128i*)pd);
2561     cache_prefetch ((__m128i*)pm);
2562
2563     while (w >= 4)
2564     {
2565         /* fill cache line with next memory */
2566         cache_prefetch_next ((__m128i*)ps);
2567         cache_prefetch_next ((__m128i*)pd);
2568         cache_prefetch_next ((__m128i*)pm);
2569
2570         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
2571         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
2572         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
2573
2574         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
2575         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2576         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
2577
2578         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
2579                             &xmm_mask_lo, &xmm_mask_hi,
2580                             &xmm_src_lo, &xmm_src_hi);
2581
2582         save_128_aligned (
2583             (__m128i*)pd, pack_2x128_128 (
2584                 _mm_adds_epu8 (xmm_src_lo, xmm_dst_lo),
2585                 _mm_adds_epu8 (xmm_src_hi, xmm_dst_hi)));
2586
2587         ps += 4;
2588         pd += 4;
2589         pm += 4;
2590         w -= 4;
2591     }
2592
2593     while (w)
2594     {
2595         s = *ps++;
2596         m = *pm++;
2597         d = *pd;
2598
2599         *pd++ = pack_1x64_32 (
2600             _mm_adds_pu8 (pix_multiply_1x64 (unpack_32_1x64 (s),
2601                                              unpack_32_1x64 (m)),
2602                           unpack_32_1x64 (d)));
2603         w--;
2604     }
2605 }
2606
2607 /* ---------------------------------------------------
2608  * fb_compose_setup_SSE2
2609  */
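     /* Helpers that replicate a 16-bit value or a pair of 32-bit values
      * across an MMX (__m64) or SSE2 (__m128i) register, used to build
      * constant masks.
      */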
2610 static force_inline __m64
2611 create_mask_16_64 (uint16_t mask)
2612 {
2613     return _mm_set1_pi16 (mask);
2614 }
2615
2616 static force_inline __m128i
2617 create_mask_16_128 (uint16_t mask)
2618 {
2619     return _mm_set1_epi16 (mask);
2620 }
2621
2622 static force_inline __m64
2623 create_mask_2x32_64 (uint32_t mask0,
2624                      uint32_t mask1)
2625 {
2626     return _mm_set_pi32 (mask0, mask1);
2627 }
2628
2629 static force_inline __m128i
2630 create_mask_2x32_128 (uint32_t mask0,
2631                       uint32_t mask1)
2632 {
2633     return _mm_set_epi32 (mask0, mask1, mask0, mask1);
2634 }
2635
2636 /* SSE2 code patch for fbcompose.c */
2637
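     /* Thin wrappers exposing the core combiners above through the combiner
      * entry points of pixman_implementation_t.  Each one ends with
      * _mm_empty () because the single-pixel paths use MMX (__m64) registers.
      */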
2638 static void
2639 sse2_combine_over_u (pixman_implementation_t *imp,
2640                      pixman_op_t              op,
2641                      uint32_t *               dst,
2642                      const uint32_t *         src,
2643                      const uint32_t *         mask,
2644                      int                      width)
2645 {
2646     core_combine_over_u_sse2 (dst, src, mask, width);
2647     _mm_empty ();
2648 }
2649
2650 static void
2651 sse2_combine_over_reverse_u (pixman_implementation_t *imp,
2652                              pixman_op_t              op,
2653                              uint32_t *               dst,
2654                              const uint32_t *         src,
2655                              const uint32_t *         mask,
2656                              int                      width)
2657 {
2658     core_combine_over_reverse_u_sse2 (dst, src, mask, width);
2659     _mm_empty ();
2660 }
2661
2662 static void
2663 sse2_combine_in_u (pixman_implementation_t *imp,
2664                    pixman_op_t              op,
2665                    uint32_t *               dst,
2666                    const uint32_t *         src,
2667                    const uint32_t *         mask,
2668                    int                      width)
2669 {
2670     core_combine_in_u_sse2 (dst, src, mask, width);
2671     _mm_empty ();
2672 }
2673
2674 static void
2675 sse2_combine_in_reverse_u (pixman_implementation_t *imp,
2676                            pixman_op_t              op,
2677                            uint32_t *               dst,
2678                            const uint32_t *         src,
2679                            const uint32_t *         mask,
2680                            int                      width)
2681 {
2682     core_combine_reverse_in_u_sse2 (dst, src, mask, width);
2683     _mm_empty ();
2684 }
2685
2686 static void
2687 sse2_combine_out_u (pixman_implementation_t *imp,
2688                     pixman_op_t              op,
2689                     uint32_t *               dst,
2690                     const uint32_t *         src,
2691                     const uint32_t *         mask,
2692                     int                      width)
2693 {
2694     core_combine_out_u_sse2 (dst, src, mask, width);
2695     _mm_empty ();
2696 }
2697
2698 static void
2699 sse2_combine_out_reverse_u (pixman_implementation_t *imp,
2700                             pixman_op_t              op,
2701                             uint32_t *               dst,
2702                             const uint32_t *         src,
2703                             const uint32_t *         mask,
2704                             int                      width)
2705 {
2706     core_combine_reverse_out_u_sse2 (dst, src, mask, width);
2707     _mm_empty ();
2708 }
2709
2710 static void
2711 sse2_combine_atop_u (pixman_implementation_t *imp,
2712                      pixman_op_t              op,
2713                      uint32_t *               dst,
2714                      const uint32_t *         src,
2715                      const uint32_t *         mask,
2716                      int                      width)
2717 {
2718     core_combine_atop_u_sse2 (dst, src, mask, width);
2719     _mm_empty ();
2720 }
2721
2722 static void
2723 sse2_combine_atop_reverse_u (pixman_implementation_t *imp,
2724                              pixman_op_t              op,
2725                              uint32_t *               dst,
2726                              const uint32_t *         src,
2727                              const uint32_t *         mask,
2728                              int                      width)
2729 {
2730     core_combine_reverse_atop_u_sse2 (dst, src, mask, width);
2731     _mm_empty ();
2732 }
2733
2734 static void
2735 sse2_combine_xor_u (pixman_implementation_t *imp,
2736                     pixman_op_t              op,
2737                     uint32_t *               dst,
2738                     const uint32_t *         src,
2739                     const uint32_t *         mask,
2740                     int                      width)
2741 {
2742     core_combine_xor_u_sse2 (dst, src, mask, width);
2743     _mm_empty ();
2744 }
2745
2746 static void
2747 sse2_combine_add_u (pixman_implementation_t *imp,
2748                     pixman_op_t              op,
2749                     uint32_t *               dst,
2750                     const uint32_t *         src,
2751                     const uint32_t *         mask,
2752                     int                      width)
2753 {
2754     core_combine_add_u_sse2 (dst, src, mask, width);
2755     _mm_empty ();
2756 }
2757
2758 static void
2759 sse2_combine_saturate_u (pixman_implementation_t *imp,
2760                          pixman_op_t              op,
2761                          uint32_t *               dst,
2762                          const uint32_t *         src,
2763                          const uint32_t *         mask,
2764                          int                      width)
2765 {
2766     core_combine_saturate_u_sse2 (dst, src, mask, width);
2767     _mm_empty ();
2768 }
2769
2770 static void
2771 sse2_combine_src_ca (pixman_implementation_t *imp,
2772                      pixman_op_t              op,
2773                      uint32_t *               dst,
2774                      const uint32_t *         src,
2775                      const uint32_t *         mask,
2776                      int                      width)
2777 {
2778     core_combine_src_ca_sse2 (dst, src, mask, width);
2779     _mm_empty ();
2780 }
2781
2782 static void
2783 sse2_combine_over_ca (pixman_implementation_t *imp,
2784                       pixman_op_t              op,
2785                       uint32_t *               dst,
2786                       const uint32_t *         src,
2787                       const uint32_t *         mask,
2788                       int                      width)
2789 {
2790     core_combine_over_ca_sse2 (dst, src, mask, width);
2791     _mm_empty ();
2792 }
2793
2794 static void
2795 sse2_combine_over_reverse_ca (pixman_implementation_t *imp,
2796                               pixman_op_t              op,
2797                               uint32_t *               dst,
2798                               const uint32_t *         src,
2799                               const uint32_t *         mask,
2800                               int                      width)
2801 {
2802     core_combine_over_reverse_ca_sse2 (dst, src, mask, width);
2803     _mm_empty ();
2804 }
2805
2806 static void
2807 sse2_combine_in_ca (pixman_implementation_t *imp,
2808                     pixman_op_t              op,
2809                     uint32_t *               dst,
2810                     const uint32_t *         src,
2811                     const uint32_t *         mask,
2812                     int                      width)
2813 {
2814     core_combine_in_ca_sse2 (dst, src, mask, width);
2815     _mm_empty ();
2816 }
2817
2818 static void
2819 sse2_combine_in_reverse_ca (pixman_implementation_t *imp,
2820                             pixman_op_t              op,
2821                             uint32_t *               dst,
2822                             const uint32_t *         src,
2823                             const uint32_t *         mask,
2824                             int                      width)
2825 {
2826     core_combine_in_reverse_ca_sse2 (dst, src, mask, width);
2827     _mm_empty ();
2828 }
2829
2830 static void
2831 sse2_combine_out_ca (pixman_implementation_t *imp,
2832                      pixman_op_t              op,
2833                      uint32_t *               dst,
2834                      const uint32_t *         src,
2835                      const uint32_t *         mask,
2836                      int                      width)
2837 {
2838     core_combine_out_ca_sse2 (dst, src, mask, width);
2839     _mm_empty ();
2840 }
2841
2842 static void
2843 sse2_combine_out_reverse_ca (pixman_implementation_t *imp,
2844                              pixman_op_t              op,
2845                              uint32_t *               dst,
2846                              const uint32_t *         src,
2847                              const uint32_t *         mask,
2848                              int                      width)
2849 {
2850     core_combine_out_reverse_ca_sse2 (dst, src, mask, width);
2851     _mm_empty ();
2852 }
2853
2854 static void
2855 sse2_combine_atop_ca (pixman_implementation_t *imp,
2856                       pixman_op_t              op,
2857                       uint32_t *               dst,
2858                       const uint32_t *         src,
2859                       const uint32_t *         mask,
2860                       int                      width)
2861 {
2862     core_combine_atop_ca_sse2 (dst, src, mask, width);
2863     _mm_empty ();
2864 }
2865
2866 static void
2867 sse2_combine_atop_reverse_ca (pixman_implementation_t *imp,
2868                               pixman_op_t              op,
2869                               uint32_t *               dst,
2870                               const uint32_t *         src,
2871                               const uint32_t *         mask,
2872                               int                      width)
2873 {
2874     core_combine_reverse_atop_ca_sse2 (dst, src, mask, width);
2875     _mm_empty ();
2876 }
2877
2878 static void
2879 sse2_combine_xor_ca (pixman_implementation_t *imp,
2880                      pixman_op_t              op,
2881                      uint32_t *               dst,
2882                      const uint32_t *         src,
2883                      const uint32_t *         mask,
2884                      int                      width)
2885 {
2886     core_combine_xor_ca_sse2 (dst, src, mask, width);
2887     _mm_empty ();
2888 }
2889
2890 static void
2891 sse2_combine_add_ca (pixman_implementation_t *imp,
2892                      pixman_op_t              op,
2893                      uint32_t *               dst,
2894                      const uint32_t *         src,
2895                      const uint32_t *         mask,
2896                      int                      width)
2897 {
2898     core_combine_add_ca_sse2 (dst, src, mask, width);
2899     _mm_empty ();
2900 }
2901
2902 /* -------------------------------------------------------------------
2903  * composite_over_n_8888
2904  */
2905
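     /* Solid source OVER a 32-bit (8888) destination; the color is expanded
      * once and blended four destination pixels at a time.
      */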
2906 static void
2907 sse2_composite_over_n_8888 (pixman_implementation_t *imp,
2908                             pixman_op_t              op,
2909                             pixman_image_t *         src_image,
2910                             pixman_image_t *         mask_image,
2911                             pixman_image_t *         dst_image,
2912                             int32_t                  src_x,
2913                             int32_t                  src_y,
2914                             int32_t                  mask_x,
2915                             int32_t                  mask_y,
2916                             int32_t                  dest_x,
2917                             int32_t                  dest_y,
2918                             int32_t                  width,
2919                             int32_t                  height)
2920 {
2921     uint32_t src;
2922     uint32_t    *dst_line, *dst, d;
2923     uint16_t w;
2924     int dst_stride;
2925     __m128i xmm_src, xmm_alpha;
2926     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
2927
2928     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
2929
2930     if (src == 0)
2931         return;
2932
2933     PIXMAN_IMAGE_GET_LINE (
2934         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
2935
2936     xmm_src = expand_pixel_32_1x128 (src);
2937     xmm_alpha = expand_alpha_1x128 (xmm_src);
2938
2939     while (height--)
2940     {
2941         dst = dst_line;
2942
2943         /* call prefetch hint to optimize cache load*/
2944         cache_prefetch ((__m128i*)dst);
2945
2946         dst_line += dst_stride;
2947         w = width;
2948
2949         while (w && (unsigned long)dst & 15)
2950         {
2951             d = *dst;
2952             *dst++ = pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
2953                                               _mm_movepi64_pi64 (xmm_alpha),
2954                                               unpack_32_1x64 (d)));
2955             w--;
2956         }
2957
2958         cache_prefetch ((__m128i*)dst);
2959
2960         while (w >= 4)
2961         {
2962             /* fill cache line with next memory */
2963             cache_prefetch_next ((__m128i*)dst);
2964
2965             xmm_dst = load_128_aligned ((__m128i*)dst);
2966
2967             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
2968
2969             over_2x128 (&xmm_src, &xmm_src,
2970                         &xmm_alpha, &xmm_alpha,
2971                         &xmm_dst_lo, &xmm_dst_hi);
2972
2973             /* rebuild the 4 pixel data and save */
2974             save_128_aligned (
2975                 (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2976
2977             w -= 4;
2978             dst += 4;
2979         }
2980
2981         while (w)
2982         {
2983             d = *dst;
2984             *dst++ = pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
2985                                               _mm_movepi64_pi64 (xmm_alpha),
2986                                               unpack_32_1x64 (d)));
2987             w--;
2988         }
2989
2990     }
2991     _mm_empty ();
2992 }
2993
2994 /* ---------------------------------------------------------------------
2995  * composite_over_n_0565
2996  */
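     /* Solid source OVER a 16-bit 565 destination: pixels are expanded to
      * 8888, blended, and packed back to 565, eight at a time in the SSE2
      * loop.
      */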
2997 static void
2998 sse2_composite_over_n_0565 (pixman_implementation_t *imp,
2999                             pixman_op_t              op,
3000                             pixman_image_t *         src_image,
3001                             pixman_image_t *         mask_image,
3002                             pixman_image_t *         dst_image,
3003                             int32_t                  src_x,
3004                             int32_t                  src_y,
3005                             int32_t                  mask_x,
3006                             int32_t                  mask_y,
3007                             int32_t                  dest_x,
3008                             int32_t                  dest_y,
3009                             int32_t                  width,
3010                             int32_t                  height)
3011 {
3012     uint32_t src;
3013     uint16_t    *dst_line, *dst, d;
3014     uint16_t w;
3015     int dst_stride;
3016     __m128i xmm_src, xmm_alpha;
3017     __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
3018
3019     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
3020
3021     if (src == 0)
3022         return;
3023
3024     PIXMAN_IMAGE_GET_LINE (
3025         dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
3026
3027     xmm_src = expand_pixel_32_1x128 (src);
3028     xmm_alpha = expand_alpha_1x128 (xmm_src);
3029
3030     while (height--)
3031     {
3032         dst = dst_line;
3033
3034         /* call prefetch hint to optimize cache load*/
3035         cache_prefetch ((__m128i*)dst);
3036
3037         dst_line += dst_stride;
3038         w = width;
3039
3040         while (w && (unsigned long)dst & 15)
3041         {
3042             d = *dst;
3043
3044             *dst++ = pack_565_32_16 (
3045                 pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
3046                                          _mm_movepi64_pi64 (xmm_alpha),
3047                                          expand565_16_1x64 (d))));
3048             w--;
3049         }
3050
3051         /* call prefetch hint to optimize cache load*/
3052         cache_prefetch ((__m128i*)dst);
3053
3054         while (w >= 8)
3055         {
3056             /* fill cache line with next memory */
3057             cache_prefetch_next ((__m128i*)dst);
3058
3059             xmm_dst = load_128_aligned ((__m128i*)dst);
3060
3061             unpack_565_128_4x128 (xmm_dst,
3062                                   &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
3063
3064             over_2x128 (&xmm_src, &xmm_src,
3065                         &xmm_alpha, &xmm_alpha,
3066                         &xmm_dst0, &xmm_dst1);
3067             over_2x128 (&xmm_src, &xmm_src,
3068                         &xmm_alpha, &xmm_alpha,
3069                         &xmm_dst2, &xmm_dst3);
3070
3071             xmm_dst = pack_565_4x128_128 (
3072                 &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
3073
3074             save_128_aligned ((__m128i*)dst, xmm_dst);
3075
3076             dst += 8;
3077             w -= 8;
3078         }
3079
3080         while (w--)
3081         {
3082             d = *dst;
3083             *dst++ = pack_565_32_16 (
3084                 pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
3085                                          _mm_movepi64_pi64 (xmm_alpha),
3086                                          expand565_16_1x64 (d))));
3087         }
3088     }
3089
3090     _mm_empty ();
3091 }
3092
3093 /* ---------------------------------------------------------------------------
3094  * composite_over_n_8888_8888_ca
3095  */
3096
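     /* Solid source OVER a 32-bit destination through a 32-bit
      * component-alpha mask; groups of four mask pixels that are all zero
      * are skipped entirely.
      */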
3097 static void
3098 sse2_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
3099                                     pixman_op_t              op,
3100                                     pixman_image_t *         src_image,
3101                                     pixman_image_t *         mask_image,
3102                                     pixman_image_t *         dst_image,
3103                                     int32_t                  src_x,
3104                                     int32_t                  src_y,
3105                                     int32_t                  mask_x,
3106                                     int32_t                  mask_y,
3107                                     int32_t                  dest_x,
3108                                     int32_t                  dest_y,
3109                                     int32_t                  width,
3110                                     int32_t                  height)
3111 {
3112     uint32_t src;
3113     uint32_t    *dst_line, d;
3114     uint32_t    *mask_line, m;
3115     uint32_t pack_cmp;
3116     int dst_stride, mask_stride;
3117
3118     __m128i xmm_src, xmm_alpha;
3119     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
3120     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
3121
3122     __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;
3123
3124     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
3125
3126     if (src == 0)
3127         return;
3128
3129     PIXMAN_IMAGE_GET_LINE (
3130         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3131     PIXMAN_IMAGE_GET_LINE (
3132         mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
3133
3134     xmm_src = _mm_unpacklo_epi8 (
3135         create_mask_2x32_128 (src, src), _mm_setzero_si128 ());
3136     xmm_alpha = expand_alpha_1x128 (xmm_src);
3137     mmx_src   = _mm_movepi64_pi64 (xmm_src);
3138     mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
3139
3140     while (height--)
3141     {
3142         int w = width;
3143         const uint32_t *pm = (uint32_t *)mask_line;
3144         uint32_t *pd = (uint32_t *)dst_line;
3145
3146         dst_line += dst_stride;
3147         mask_line += mask_stride;
3148
3149         /* call prefetch hint to optimize cache load*/
3150         cache_prefetch ((__m128i*)pd);
3151         cache_prefetch ((__m128i*)pm);
3152
3153         while (w && (unsigned long)pd & 15)
3154         {
3155             m = *pm++;
3156
3157             if (m)
3158             {
3159                 d = *pd;
3160                 mmx_mask = unpack_32_1x64 (m);
3161                 mmx_dest = unpack_32_1x64 (d);
3162
3163                 *pd = pack_1x64_32 (in_over_1x64 (&mmx_src,
3164                                                   &mmx_alpha,
3165                                                   &mmx_mask,
3166                                                   &mmx_dest));
3167             }
3168
3169             pd++;
3170             w--;
3171         }
3172
3173         /* call prefetch hint to optimize cache load*/
3174         cache_prefetch ((__m128i*)pd);
3175         cache_prefetch ((__m128i*)pm);
3176
3177         while (w >= 4)
3178         {
3179             /* fill cache line with next memory */
3180             cache_prefetch_next ((__m128i*)pd);
3181             cache_prefetch_next ((__m128i*)pm);
3182
3183             xmm_mask = load_128_unaligned ((__m128i*)pm);
3184
3185             pack_cmp =
3186                 _mm_movemask_epi8 (
3187                     _mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128 ()));
3188
3189             /* if all bits in mask are zero, pack_cmp is equal to 0xffff */
3190             if (pack_cmp != 0xffff)
3191             {
3192                 xmm_dst = load_128_aligned ((__m128i*)pd);
3193
3194                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
3195                 unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
3196
3197                 in_over_2x128 (&xmm_src, &xmm_src,
3198                                &xmm_alpha, &xmm_alpha,
3199                                &xmm_mask_lo, &xmm_mask_hi,
3200                                &xmm_dst_lo, &xmm_dst_hi);
3201
3202                 save_128_aligned (
3203                     (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
3204             }
3205
3206             pd += 4;
3207             pm += 4;
3208             w -= 4;
3209         }
3210
3211         while (w)
3212         {
3213             m = *pm++;
3214
3215             if (m)
3216             {
3217                 d = *pd;
3218                 mmx_mask = unpack_32_1x64 (m);
3219                 mmx_dest = unpack_32_1x64 (d);
3220
3221                 *pd = pack_1x64_32 (
3222                     in_over_1x64 (&mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest));
3223             }
3224
3225             pd++;
3226             w--;
3227         }
3228     }
3229
3230     _mm_empty ();
3231 }
3232
3233 /* --------------------------------------------------------------------
3234  * composite_over_8888_n_8888
3235  */
3236
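     /* 32-bit source OVER a 32-bit destination, scaled by the alpha of a
      * solid mask (mask >> 24 replicated across the register).
      */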
3237 static void
3238 sse2_composite_over_8888_n_8888 (pixman_implementation_t *imp,
3239                                  pixman_op_t              op,
3240                                  pixman_image_t *         src_image,
3241                                  pixman_image_t *         mask_image,
3242                                  pixman_image_t *         dst_image,
3243                                  int32_t                  src_x,
3244                                  int32_t                  src_y,
3245                                  int32_t                  mask_x,
3246                                  int32_t                  mask_y,
3247                                  int32_t                  dest_x,
3248                                  int32_t                  dest_y,
3249                                  int32_t                  width,
3250                                  int32_t                  height)
3251 {
3252     uint32_t    *dst_line, *dst;
3253     uint32_t    *src_line, *src;
3254     uint32_t mask;
3255     uint16_t w;
3256     int dst_stride, src_stride;
3257
3258     __m128i xmm_mask;
3259     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
3260     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
3261     __m128i xmm_alpha_lo, xmm_alpha_hi;
3262
3263     PIXMAN_IMAGE_GET_LINE (
3264         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3265     PIXMAN_IMAGE_GET_LINE (
3266         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
3267
3268     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
3269
3270     xmm_mask = create_mask_16_128 (mask >> 24);
3271
3272     while (height--)
3273     {
3274         dst = dst_line;
3275         dst_line += dst_stride;
3276         src = src_line;
3277         src_line += src_stride;
3278         w = width;
3279
3280         /* call prefetch hint to optimize cache load*/
3281         cache_prefetch ((__m128i*)dst);
3282         cache_prefetch ((__m128i*)src);
3283
3284         while (w && (unsigned long)dst & 15)
3285         {
3286             uint32_t s = *src++;
3287             uint32_t d = *dst;
3288
3289             __m64 ms = unpack_32_1x64 (s);
3290             __m64 alpha    = expand_alpha_1x64 (ms);
3291             __m64 mask     = _mm_movepi64_pi64 (xmm_mask);
3292             __m64 dest     = unpack_32_1x64 (d);
3293
3294             *dst++ = pack_1x64_32 (
3295                 in_over_1x64 (&ms, &alpha, &mask, &dest));
3296
3297             w--;
3298         }
3299
3300         /* call prefetch hint to optimize cache load*/
3301         cache_prefetch ((__m128i*)dst);
3302         cache_prefetch ((__m128i*)src);
3303
3304         while (w >= 4)
3305         {
3306             /* fill cache line with next memory */
3307             cache_prefetch_next ((__m128i*)dst);
3308             cache_prefetch_next ((__m128i*)src);
3309
3310             xmm_src = load_128_unaligned ((__m128i*)src);
3311             xmm_dst = load_128_aligned ((__m128i*)dst);
3312
3313             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
3314             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
3315             expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
3316                                 &xmm_alpha_lo, &xmm_alpha_hi);
3317
3318             in_over_2x128 (&xmm_src_lo, &xmm_src_hi,
3319                            &xmm_alpha_lo, &xmm_alpha_hi,
3320                            &xmm_mask, &xmm_mask,
3321                            &xmm_dst_lo, &xmm_dst_hi);
3322
3323             save_128_aligned (
3324                 (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
3325
3326             dst += 4;
3327             src += 4;
3328             w -= 4;
3329         }
3330
3331         while (w)
3332         {
3333             uint32_t s = *src++;
3334             uint32_t d = *dst;
3335
3336             __m64 ms = unpack_32_1x64 (s);
3337             __m64 alpha = expand_alpha_1x64 (ms);
3338             __m64 mask  = _mm_movepi64_pi64 (xmm_mask);
3339             __m64 dest  = unpack_32_1x64 (d);
3340
3341             *dst++ = pack_1x64_32 (
3342                 in_over_1x64 (&ms, &alpha, &mask, &dest));
3343
3344             w--;
3345         }
3346     }
3347
3348     _mm_empty ();
3349 }
3350
3351 /* ---------------------------------------------------------------------
3352  * composite_over_x888_n_8888
3353  */
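     /* Like composite_over_8888_n_8888, but the source has no alpha
      * channel, so 0xff000000 is OR'ed into each source pixel before
      * blending.
      */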
3354 static void
3355 sse2_composite_over_x888_n_8888 (pixman_implementation_t *imp,
3356                                  pixman_op_t              op,
3357                                  pixman_image_t *         src_image,
3358                                  pixman_image_t *         mask_image,
3359                                  pixman_image_t *         dst_image,
3360                                  int32_t                  src_x,
3361                                  int32_t                  src_y,
3362                                  int32_t                  mask_x,
3363                                  int32_t                  mask_y,
3364                                  int32_t                  dest_x,
3365                                  int32_t                  dest_y,
3366                                  int32_t                  width,
3367                                  int32_t                  height)
3368 {
3369     uint32_t    *dst_line, *dst;
3370     uint32_t    *src_line, *src;
3371     uint32_t mask;
3372     int dst_stride, src_stride;
3373     uint16_t w;
3374
3375     __m128i xmm_mask, xmm_alpha;
3376     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
3377     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
3378
3379     PIXMAN_IMAGE_GET_LINE (
3380         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3381     PIXMAN_IMAGE_GET_LINE (
3382         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
3383
3384     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
3385
3386     xmm_mask = create_mask_16_128 (mask >> 24);
3387     xmm_alpha = mask_00ff;
3388
3389     while (height--)
3390     {
3391         dst = dst_line;
3392         dst_line += dst_stride;
3393         src = src_line;
3394         src_line += src_stride;
3395         w = width;
3396
3397         /* call prefetch hint to optimize cache load*/
3398         cache_prefetch ((__m128i*)dst);
3399         cache_prefetch ((__m128i*)src);
3400
3401         while (w && (unsigned long)dst & 15)
3402         {
3403             uint32_t s = (*src++) | 0xff000000;
3404             uint32_t d = *dst;
3405
3406             __m64 src   = unpack_32_1x64 (s);
3407             __m64 alpha = _mm_movepi64_pi64 (xmm_alpha);
3408             __m64 mask  = _mm_movepi64_pi64 (xmm_mask);
3409             __m64 dest  = unpack_32_1x64 (d);
3410
3411             *dst++ = pack_1x64_32 (
3412                 in_over_1x64 (&src, &alpha, &mask, &dest));
3413
3414             w--;
3415         }
3416
3417         /* call prefetch hint to optimize cache load*/
3418         cache_prefetch ((__m128i*)dst);
3419         cache_prefetch ((__m128i*)src);
3420
3421         while (w >= 4)
3422         {
3423             /* fill cache line with next memory */
3424             cache_prefetch_next ((__m128i*)dst);
3425             cache_prefetch_next ((__m128i*)src);
3426
3427             xmm_src = _mm_or_si128 (
3428                 load_128_unaligned ((__m128i*)src), mask_ff000000);
3429             xmm_dst = load_128_aligned ((__m128i*)dst);
3430
3431             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
3432             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
3433
3434             in_over_2x128 (&xmm_src_lo, &xmm_src_hi,
3435                            &xmm_alpha, &xmm_alpha,
3436                            &xmm_mask, &xmm_mask,
3437                            &xmm_dst_lo, &xmm_dst_hi);
3438
3439             save_128_aligned (
3440                 (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
3441
3442             dst += 4;
3443             src += 4;
3444             w -= 4;
3445
3446         }
3447
3448         while (w)
3449         {
3450             uint32_t s = (*src++) | 0xff000000;
3451             uint32_t d = *dst;
3452
3453             __m64 src  = unpack_32_1x64 (s);
3454             __m64 alpha = _mm_movepi64_pi64 (xmm_alpha);
3455             __m64 mask  = _mm_movepi64_pi64 (xmm_mask);
3456             __m64 dest  = unpack_32_1x64 (d);
3457
3458             *dst++ = pack_1x64_32 (
3459                 in_over_1x64 (&src, &alpha, &mask, &dest));
3460
3461             w--;
3462         }
3463     }
3464
3465     _mm_empty ();
3466 }
3467
3468 /* --------------------------------------------------------------------
3469  * composite_over_8888_8888
3470  */
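/* Each scanline is handed to core_combine_over_u_sse2 () with a NULL mask,
 * i.e. the plain OVER operator per pixel:
 *
 *     dst = src + dst * (1 - src.alpha)
 */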
3471 static void
3472 sse2_composite_over_8888_8888 (pixman_implementation_t *imp,
3473                                pixman_op_t              op,
3474                                pixman_image_t *         src_image,
3475                                pixman_image_t *         mask_image,
3476                                pixman_image_t *         dst_image,
3477                                int32_t                  src_x,
3478                                int32_t                  src_y,
3479                                int32_t                  mask_x,
3480                                int32_t                  mask_y,
3481                                int32_t                  dest_x,
3482                                int32_t                  dest_y,
3483                                int32_t                  width,
3484                                int32_t                  height)
3485 {
3486     int dst_stride, src_stride;
3487     uint32_t    *dst_line, *dst;
3488     uint32_t    *src_line, *src;
3489
3490     PIXMAN_IMAGE_GET_LINE (
3491         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3492     PIXMAN_IMAGE_GET_LINE (
3493         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
3494
3495     dst = dst_line;
3496     src = src_line;
3497
3498     while (height--)
3499     {
3500         core_combine_over_u_sse2 (dst, src, NULL, width);
3501
3502         dst += dst_stride;
3503         src += src_stride;
3504     }
3505     _mm_empty ();
3506 }
3507
3508 /* ------------------------------------------------------------------
3509  * composite_over_8888_0565
3510  */
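/* The r5g6b5 destination is widened to 8888 before blending and packed
 * back afterwards.  composite_over_8888_0565pixel () below does this for a
 * single pixel; the vector loop processes 8 pixels per iteration by
 * unpacking the 128-bit destination into four 2-pixel halves.
 */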
3511 static force_inline uint16_t
3512 composite_over_8888_0565pixel (uint32_t src, uint16_t dst)
3513 {
3514     __m64 ms;
3515
3516     ms = unpack_32_1x64 (src);
3517     return pack_565_32_16 (
3518         pack_1x64_32 (
3519             over_1x64 (
3520                 ms, expand_alpha_1x64 (ms), expand565_16_1x64 (dst))));
3521 }
3522
3523 static void
3524 sse2_composite_over_8888_0565 (pixman_implementation_t *imp,
3525                                pixman_op_t              op,
3526                                pixman_image_t *         src_image,
3527                                pixman_image_t *         mask_image,
3528                                pixman_image_t *         dst_image,
3529                                int32_t                  src_x,
3530                                int32_t                  src_y,
3531                                int32_t                  mask_x,
3532                                int32_t                  mask_y,
3533                                int32_t                  dest_x,
3534                                int32_t                  dest_y,
3535                                int32_t                  width,
3536                                int32_t                  height)
3537 {
3538     uint16_t    *dst_line, *dst, d;
3539     uint32_t    *src_line, *src, s;
3540     int dst_stride, src_stride;
3541     uint16_t w;
3542
3543     __m128i xmm_alpha_lo, xmm_alpha_hi;
3544     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
3545     __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
3546
3547     PIXMAN_IMAGE_GET_LINE (
3548         dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
3549     PIXMAN_IMAGE_GET_LINE (
3550         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
3551
3552 #if 0
3553     /* FIXME
3554      *
3555      * This FIXME was copied along with the code from the MMX version.
3556      * If it's a problem there, it's probably a problem here too.
3557      */
3558     assert (src_image->drawable == mask_image->drawable);
3559 #endif
3560
3561     while (height--)
3562     {
3563         dst = dst_line;
3564         src = src_line;
3565
3566         /* call prefetch hint to optimize cache load*/
3567         cache_prefetch ((__m128i*)src);
3568         cache_prefetch ((__m128i*)dst);
3569
3570         dst_line += dst_stride;
3571         src_line += src_stride;
3572         w = width;
3573
3574         /* Align dst on a 16-byte boundary */
3575         while (w &&
3576                ((unsigned long)dst & 15))
3577         {
3578             s = *src++;
3579             d = *dst;
3580
3581             *dst++ = composite_over_8888_0565pixel (s, d);
3582             w--;
3583         }
3584
3585         /* call prefetch hint to optimize cache load*/
3586         cache_prefetch ((__m128i*)src);
3587         cache_prefetch ((__m128i*)dst);
3588
3589         /* It's an 8-pixel loop */
3590         while (w >= 8)
3591         {
3592             /* fill cache line with next memory */
3593             cache_prefetch_next ((__m128i*)src);
3594             cache_prefetch_next ((__m128i*)dst);
3595
3596             /* I'm loading unaligned because I'm not sure
3597              * about the address alignment.
3598              */
3599             xmm_src = load_128_unaligned ((__m128i*) src);
3600             xmm_dst = load_128_aligned ((__m128i*) dst);
3601
3602             /* Unpacking */
3603             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
3604             unpack_565_128_4x128 (xmm_dst,
3605                                   &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
3606             expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
3607                                 &xmm_alpha_lo, &xmm_alpha_hi);
3608
3609             /* I'm loading the next 4 pixels from memory
3610              * ahead of time to optimize the memory read.
3611              */
3612             xmm_src = load_128_unaligned ((__m128i*) (src + 4));
3613
3614             over_2x128 (&xmm_src_lo, &xmm_src_hi,
3615                         &xmm_alpha_lo, &xmm_alpha_hi,
3616                         &xmm_dst0, &xmm_dst1);
3617
3618             /* Unpacking */
3619             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
3620             expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
3621                                 &xmm_alpha_lo, &xmm_alpha_hi);
3622
3623             over_2x128 (&xmm_src_lo, &xmm_src_hi,
3624                         &xmm_alpha_lo, &xmm_alpha_hi,
3625                         &xmm_dst2, &xmm_dst3);
3626
3627             save_128_aligned (
3628                 (__m128i*)dst, pack_565_4x128_128 (
3629                     &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
3630
3631             w -= 8;
3632             dst += 8;
3633             src += 8;
3634         }
3635
3636         while (w--)
3637         {
3638             s = *src++;
3639             d = *dst;
3640
3641             *dst++ = composite_over_8888_0565pixel (s, d);
3642         }
3643     }
3644
3645     _mm_empty ();
3646 }
3647
3648 /* -----------------------------------------------------------------
3649  * composite_over_n_8_8888
3650  */
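/* Solid source with an a8 mask: each mask byte is expanded to all four
 * channels and the pixel is combined as dst = (src IN m) OVER dst.  When
 * the source is opaque and four consecutive mask bytes are all 0xff, the
 * precomputed solid value xmm_def is stored directly; all-zero mask words
 * leave the destination untouched.
 */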
3651
3652 static void
3653 sse2_composite_over_n_8_8888 (pixman_implementation_t *imp,
3654                               pixman_op_t              op,
3655                               pixman_image_t *         src_image,
3656                               pixman_image_t *         mask_image,
3657                               pixman_image_t *         dst_image,
3658                               int32_t                  src_x,
3659                               int32_t                  src_y,
3660                               int32_t                  mask_x,
3661                               int32_t                  mask_y,
3662                               int32_t                  dest_x,
3663                               int32_t                  dest_y,
3664                               int32_t                  width,
3665                               int32_t                  height)
3666 {
3667     uint32_t src, srca;
3668     uint32_t *dst_line, *dst;
3669     uint8_t *mask_line, *mask;
3670     int dst_stride, mask_stride;
3671     uint16_t w;
3672     uint32_t m, d;
3673
3674     __m128i xmm_src, xmm_alpha, xmm_def;
3675     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
3676     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
3677
3678     __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;
3679
3680     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
3681
3682     srca = src >> 24;
3683     if (src == 0)
3684         return;
3685
3686     PIXMAN_IMAGE_GET_LINE (
3687         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3688     PIXMAN_IMAGE_GET_LINE (
3689         mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
3690
3691     xmm_def = create_mask_2x32_128 (src, src);
3692     xmm_src = expand_pixel_32_1x128 (src);
3693     xmm_alpha = expand_alpha_1x128 (xmm_src);
3694     mmx_src   = _mm_movepi64_pi64 (xmm_src);
3695     mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
3696
3697     while (height--)
3698     {
3699         dst = dst_line;
3700         dst_line += dst_stride;
3701         mask = mask_line;
3702         mask_line += mask_stride;
3703         w = width;
3704
3705         /* call prefetch hint to optimize cache load*/
3706         cache_prefetch ((__m128i*)mask);
3707         cache_prefetch ((__m128i*)dst);
3708
3709         while (w && (unsigned long)dst & 15)
3710         {
3711             uint8_t m = *mask++;
3712
3713             if (m)
3714             {
3715                 d = *dst;
3716                 mmx_mask = expand_pixel_8_1x64 (m);
3717                 mmx_dest = unpack_32_1x64 (d);
3718
3719                 *dst = pack_1x64_32 (in_over_1x64 (&mmx_src,
3720                                                    &mmx_alpha,
3721                                                    &mmx_mask,
3722                                                    &mmx_dest));
3723             }
3724
3725             w--;
3726             dst++;
3727         }
3728
3729         /* call prefetch hint to optimize cache load*/
3730         cache_prefetch ((__m128i*)mask);
3731         cache_prefetch ((__m128i*)dst);
3732
3733         while (w >= 4)
3734         {
3735             /* fill cache line with next memory */
3736             cache_prefetch_next ((__m128i*)mask);
3737             cache_prefetch_next ((__m128i*)dst);
3738
3739             m = *((uint32_t*)mask);
3740
3741             if (srca == 0xff && m == 0xffffffff)
3742             {
3743                 save_128_aligned ((__m128i*)dst, xmm_def);
3744             }
3745             else if (m)
3746             {
3747                 xmm_dst = load_128_aligned ((__m128i*) dst);
3748                 xmm_mask = unpack_32_1x128 (m);
3749                 xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ());
3750
3751                 /* Unpacking */
3752                 unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
3753                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
3754
3755                 expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi,
3756                                         &xmm_mask_lo, &xmm_mask_hi);
3757
3758                 in_over_2x128 (&xmm_src, &xmm_src,
3759                                &xmm_alpha, &xmm_alpha,
3760                                &xmm_mask_lo, &xmm_mask_hi,
3761                                &xmm_dst_lo, &xmm_dst_hi);
3762
3763                 save_128_aligned (
3764                     (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
3765             }
3766
3767             w -= 4;
3768             dst += 4;
3769             mask += 4;
3770         }
3771
3772         while (w)
3773         {
3774             uint8_t m = *mask++;
3775
3776             if (m)
3777             {
3778                 d = *dst;
3779                 mmx_mask = expand_pixel_8_1x64 (m);
3780                 mmx_dest = unpack_32_1x64 (d);
3781
3782                 *dst = pack_1x64_32 (in_over_1x64 (&mmx_src,
3783                                                    &mmx_alpha,
3784                                                    &mmx_mask,
3785                                                    &mmx_dest));
3786             }
3787
3788             w--;
3789             dst++;
3790         }
3791     }
3792
3793     _mm_empty ();
3794 }
3795
3796 /* ----------------------------------------------------------------
3797  * pixman_fill_sse2 / composite_src_n_8_8888
3798  */
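/* pixman_fill_sse2 () fills a rectangle with a constant 16- or 32-bit
 * value.  For 16bpp the two halves of 'data' must match, since the same
 * 32-bit word is stored either way.  Each row is written with small scalar
 * stores until 16-byte alignment is reached, then in 128-, 64-, 32- and
 * 16-byte aligned SSE2 blocks, with a scalar tail for whatever is left.
 */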
3799
3800 pixman_bool_t
3801 pixman_fill_sse2 (uint32_t *bits,
3802                   int       stride,
3803                   int       bpp,
3804                   int       x,
3805                   int       y,
3806                   int       width,
3807                   int       height,
3808                   uint32_t  data)
3809 {
3810     uint32_t byte_width;
3811     uint8_t         *byte_line;
3812
3813     __m128i xmm_def;
3814
3815     if (bpp == 16 && (data >> 16 != (data & 0xffff)))
3816         return FALSE;
3817
3818     if (bpp != 16 && bpp != 32)
3819         return FALSE;
3820
3821     if (bpp == 16)
3822     {
3823         stride = stride * (int) sizeof (uint32_t) / 2;
3824         byte_line = (uint8_t *)(((uint16_t *)bits) + stride * y + x);
3825         byte_width = 2 * width;
3826         stride *= 2;
3827     }
3828     else
3829     {
3830         stride = stride * (int) sizeof (uint32_t) / 4;
3831         byte_line = (uint8_t *)(((uint32_t *)bits) + stride * y + x);
3832         byte_width = 4 * width;
3833         stride *= 4;
3834     }
3835
3836     cache_prefetch ((__m128i*)byte_line);
3837     xmm_def = create_mask_2x32_128 (data, data);
3838
3839     while (height--)
3840     {
3841         int w;
3842         uint8_t *d = byte_line;
3843         byte_line += stride;
3844         w = byte_width;
3845
3846
3847         cache_prefetch_next ((__m128i*)d);
3848
3849         while (w >= 2 && ((unsigned long)d & 3))
3850         {
3851             *(uint16_t *)d = data;
3852             w -= 2;
3853             d += 2;
3854         }
3855
3856         while (w >= 4 && ((unsigned long)d & 15))
3857         {
3858             *(uint32_t *)d = data;
3859
3860             w -= 4;
3861             d += 4;
3862         }
3863
3864         cache_prefetch_next ((__m128i*)d);
3865
3866         while (w >= 128)
3867         {
3868             cache_prefetch (((__m128i*)d) + 12);
3869
3870             save_128_aligned ((__m128i*)(d),     xmm_def);
3871             save_128_aligned ((__m128i*)(d + 16),  xmm_def);
3872             save_128_aligned ((__m128i*)(d + 32),  xmm_def);
3873             save_128_aligned ((__m128i*)(d + 48),  xmm_def);
3874             save_128_aligned ((__m128i*)(d + 64),  xmm_def);
3875             save_128_aligned ((__m128i*)(d + 80),  xmm_def);
3876             save_128_aligned ((__m128i*)(d + 96),  xmm_def);
3877             save_128_aligned ((__m128i*)(d + 112), xmm_def);
3878
3879             d += 128;
3880             w -= 128;
3881         }
3882
3883         if (w >= 64)
3884         {
3885             cache_prefetch (((__m128i*)d) + 8);
3886
3887             save_128_aligned ((__m128i*)(d),     xmm_def);
3888             save_128_aligned ((__m128i*)(d + 16),  xmm_def);
3889             save_128_aligned ((__m128i*)(d + 32),  xmm_def);
3890             save_128_aligned ((__m128i*)(d + 48),  xmm_def);
3891
3892             d += 64;
3893             w -= 64;
3894         }
3895
3896         cache_prefetch_next ((__m128i*)d);
3897
3898         if (w >= 32)
3899         {
3900             save_128_aligned ((__m128i*)(d),     xmm_def);
3901             save_128_aligned ((__m128i*)(d + 16),  xmm_def);
3902
3903             d += 32;
3904             w -= 32;
3905         }
3906
3907         if (w >= 16)
3908         {
3909             save_128_aligned ((__m128i*)(d),     xmm_def);
3910
3911             d += 16;
3912             w -= 16;
3913         }
3914
3915         cache_prefetch_next ((__m128i*)d);
3916
3917         while (w >= 4)
3918         {
3919             *(uint32_t *)d = data;
3920
3921             w -= 4;
3922             d += 4;
3923         }
3924
3925         if (w >= 2)
3926         {
3927             *(uint16_t *)d = data;
3928             w -= 2;
3929             d += 2;
3930         }
3931     }
3932
3933     _mm_empty ();
3934     return TRUE;
3935 }
3936
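/* A minimal usage sketch (added for illustration, not part of the library):
 * how pixman_fill_sse2 () above might be called directly on a 32bpp buffer.
 * The buffer size, stride, rectangle and color below are made-up example
 * values, and 'example_fill_rect' is a hypothetical helper; real users
 * normally reach this code through pixman's public fill entry points.
 */
#if 0
static void
example_fill_rect (void)
{
    static uint32_t bits[64 * 64];  /* 64x64 a8r8g8b8 pixels */
    int stride = 64;                /* row stride, in uint32_t units */

    /* Fill a 20x10 rectangle at (4, 8) with opaque red (0xffff0000).
     * The function returns FALSE and does nothing for unsupported bpp.
     */
    pixman_fill_sse2 (bits, stride, 32, 4, 8, 20, 10, 0xffff0000);
}
#endif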
3937 static void
3938 sse2_composite_src_n_8_8888 (pixman_implementation_t *imp,
3939                              pixman_op_t              op,
3940                              pixman_image_t *         src_image,
3941                              pixman_image_t *         mask_image,
3942                              pixman_image_t *         dst_image,
3943                              int32_t                  src_x,
3944                              int32_t                  src_y,
3945                              int32_t                  mask_x,
3946                              int32_t                  mask_y,
3947                              int32_t                  dest_x,
3948                              int32_t                  dest_y,
3949                              int32_t                  width,
3950                              int32_t                  height)
3951 {
3952     uint32_t src, srca;
3953     uint32_t    *dst_line, *dst;
3954     uint8_t     *mask_line, *mask;
3955     int dst_stride, mask_stride;
3956     uint16_t w;
3957     uint32_t m;
3958
3959     __m128i xmm_src, xmm_def;
3960     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
3961
3962     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
3963
3964     srca = src >> 24;
3965     if (src == 0)
3966     {
3967         pixman_fill_sse2 (dst_image->bits.bits, dst_image->bits.rowstride,
3968                           PIXMAN_FORMAT_BPP (dst_image->bits.format),
3969                           dest_x, dest_y, width, height, 0);
3970         return;
3971     }
3972
3973     PIXMAN_IMAGE_GET_LINE (
3974         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3975     PIXMAN_IMAGE_GET_LINE (
3976         mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
3977
3978     xmm_def = create_mask_2x32_128 (src, src);
3979     xmm_src = expand_pixel_32_1x128 (src);
3980
3981     while (height--)
3982     {
3983         dst = dst_line;
3984         dst_line += dst_stride;
3985         mask = mask_line;
3986         mask_line += mask_stride;
3987         w = width;
3988
3989         /* call prefetch hint to optimize cache load*/
3990         cache_prefetch ((__m128i*)mask);
3991         cache_prefetch ((__m128i*)dst);
3992
3993         while (w && (unsigned long)dst & 15)
3994         {
3995             uint8_t m = *mask++;
3996
3997             if (m)
3998             {
3999                 *dst = pack_1x64_32 (
4000                     pix_multiply_1x64 (
4001                         _mm_movepi64_pi64 (xmm_src), expand_pixel_8_1x64 (m)));
4002             }
4003             else
4004             {
4005                 *dst = 0;
4006             }
4007
4008             w--;
4009             dst++;
4010         }
4011
4012         /* call prefetch hint to optimize cache load*/
4013         cache_prefetch ((__m128i*)mask);
4014         cache_prefetch ((__m128i*)dst);
4015
4016         while (w >= 4)
4017         {
4018             /* fill cache line with next memory */
4019             cache_prefetch_next ((__m128i*)mask);
4020             cache_prefetch_next ((__m128i*)dst);
4021
4022             m = *((uint32_t*)mask);
4023
4024             if (srca == 0xff && m == 0xffffffff)
4025             {
4026                 save_128_aligned ((__m128i*)dst, xmm_def);
4027             }
4028             else if (m)
4029             {
4030                 xmm_mask = unpack_32_1x128 (m);
4031                 xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ());
4032
4033                 /* Unpacking */
4034                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4035
4036                 expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi,
4037                                         &xmm_mask_lo, &xmm_mask_hi);
4038
4039                 pix_multiply_2x128 (&xmm_src, &xmm_src,
4040                                     &xmm_mask_lo, &xmm_mask_hi,
4041                                     &xmm_mask_lo, &xmm_mask_hi);
4042
4043                 save_128_aligned (
4044                     (__m128i*)dst, pack_2x128_128 (xmm_mask_lo, xmm_mask_hi));
4045             }
4046             else
4047             {
4048                 save_128_aligned ((__m128i*)dst, _mm_setzero_si128 ());
4049             }
4050
4051             w -= 4;
4052             dst += 4;
4053             mask += 4;
4054         }
4055
4056         while (w)
4057         {
4058             uint8_t m = *mask++;
4059
4060             if (m)
4061             {
4062                 *dst = pack_1x64_32 (
4063                     pix_multiply_1x64 (
4064                         _mm_movepi64_pi64 (xmm_src), expand_pixel_8_1x64 (m)));
4065             }
4066             else
4067             {
4068                 *dst = 0;
4069             }
4070
4071             w--;
4072             dst++;
4073         }
4074     }
4075
4076     _mm_empty ();
4077 }
4078
4079 /*-----------------------------------------------------------------------
4080  * composite_over_n_8_0565
4081  */
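/* Solid source, a8 mask, r5g6b5 destination.  The 8-pixel loop unpacks the
 * 565 destination into four 2-pixel halves, reads the mask four bytes at a
 * time, expands each byte across the channels and applies
 * dst = (src IN m) OVER dst before packing back to 565.  All-zero mask
 * words leave the corresponding destination pixels untouched.
 */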
4082
4083 static void
4084 sse2_composite_over_n_8_0565 (pixman_implementation_t *imp,
4085                               pixman_op_t              op,
4086                               pixman_image_t *         src_image,
4087                               pixman_image_t *         mask_image,
4088                               pixman_image_t *         dst_image,
4089                               int32_t                  src_x,
4090                               int32_t                  src_y,
4091                               int32_t                  mask_x,
4092                               int32_t                  mask_y,
4093                               int32_t                  dest_x,
4094                               int32_t                  dest_y,
4095                               int32_t                  width,
4096                               int32_t                  height)
4097 {
4098     uint32_t src, srca;
4099     uint16_t    *dst_line, *dst, d;
4100     uint8_t     *mask_line, *mask;
4101     int dst_stride, mask_stride;
4102     uint16_t w;
4103     uint32_t m;
4104     __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;
4105
4106     __m128i xmm_src, xmm_alpha;
4107     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
4108     __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
4109
4110     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
4111
4112     srca = src >> 24;
4113     if (src == 0)
4114         return;
4115
4116     PIXMAN_IMAGE_GET_LINE (
4117         dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
4118     PIXMAN_IMAGE_GET_LINE (
4119         mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
4120
4121     xmm_src = expand_pixel_32_1x128 (src);
4122     xmm_alpha = expand_alpha_1x128 (xmm_src);
4123     mmx_src = _mm_movepi64_pi64 (xmm_src);
4124     mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
4125
4126     while (height--)
4127     {
4128         dst = dst_line;
4129         dst_line += dst_stride;
4130         mask = mask_line;
4131         mask_line += mask_stride;
4132         w = width;
4133
4134         /* call prefetch hint to optimize cache load*/
4135         cache_prefetch ((__m128i*)mask);
4136         cache_prefetch ((__m128i*)dst);
4137
4138         while (w && (unsigned long)dst & 15)
4139         {
4140             m = *mask++;
4141
4142             if (m)
4143             {
4144                 d = *dst;
4145                 mmx_mask = expand_alpha_rev_1x64 (unpack_32_1x64 (m));
4146                 mmx_dest = expand565_16_1x64 (d);
4147
4148                 *dst = pack_565_32_16 (
4149                     pack_1x64_32 (
4150                         in_over_1x64 (
4151                             &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest)));
4152             }
4153
4154             w--;
4155             dst++;
4156         }
4157
4158         /* call prefetch hint to optimize cache load*/
4159         cache_prefetch ((__m128i*)mask);
4160         cache_prefetch ((__m128i*)dst);
4161
4162         while (w >= 8)
4163         {
4164             /* fill cache line with next memory */
4165             cache_prefetch_next ((__m128i*)mask);
4166             cache_prefetch_next ((__m128i*)dst);
4167
4168             xmm_dst = load_128_aligned ((__m128i*) dst);
4169             unpack_565_128_4x128 (xmm_dst,
4170                                   &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
4171
4172             m = *((uint32_t*)mask);
4173             mask += 4;
4174
4175             if (m)
4176             {
4177                 xmm_mask = unpack_32_1x128 (m);
4178                 xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ());
4179
4180                 /* Unpacking */
4181                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4182
4183                 expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi,
4184                                         &xmm_mask_lo, &xmm_mask_hi);
4185
4186                 in_over_2x128 (&xmm_src, &xmm_src,
4187                                &xmm_alpha, &xmm_alpha,
4188                                &xmm_mask_lo, &xmm_mask_hi,
4189                                &xmm_dst0, &xmm_dst1);
4190             }
4191
4192             m = *((uint32_t*)mask);
4193             mask += 4;
4194
4195             if (m)
4196             {
4197                 xmm_mask = unpack_32_1x128 (m);
4198                 xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ());
4199
4200                 /* Unpacking */
4201                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4202
4203                 expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi,
4204                                         &xmm_mask_lo, &xmm_mask_hi);
4205                 in_over_2x128 (&xmm_src, &xmm_src,
4206                                &xmm_alpha, &xmm_alpha,
4207                                &xmm_mask_lo, &xmm_mask_hi,
4208                                &xmm_dst2, &xmm_dst3);
4209             }
4210
4211             save_128_aligned (
4212                 (__m128i*)dst, pack_565_4x128_128 (
4213                     &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
4214
4215             w -= 8;
4216             dst += 8;
4217         }
4218
4219         while (w)
4220         {
4221             m = *mask++;
4222
4223             if (m)
4224             {
4225                 d = *dst;
4226                 mmx_mask = expand_alpha_rev_1x64 (unpack_32_1x64 (m));
4227                 mmx_dest = expand565_16_1x64 (d);
4228
4229                 *dst = pack_565_32_16 (
4230                     pack_1x64_32 (
4231                         in_over_1x64 (
4232                             &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest)));
4233             }
4234
4235             w--;
4236             dst++;
4237         }
4238     }
4239
4240     _mm_empty ();
4241 }
4242
4243 /* -----------------------------------------------------------------------
4244  * composite_over_pixbuf_0565
4245  */
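/* "pixbuf" sources hold non-premultiplied a8b8g8r8 data.  over_rev_non_pre
 * swaps the channel order and premultiplies by alpha before applying OVER
 * onto the 565 destination.  For each group of 4 source pixels the loop
 * checks is_opaque ()/is_zero (), so fully opaque runs reduce to a color
 * conversion and fully transparent runs are skipped.
 */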
4246
4247 static void
4248 sse2_composite_over_pixbuf_0565 (pixman_implementation_t *imp,
4249                                  pixman_op_t              op,
4250                                  pixman_image_t *         src_image,
4251                                  pixman_image_t *         mask_image,
4252                                  pixman_image_t *         dst_image,
4253                                  int32_t                  src_x,
4254                                  int32_t                  src_y,
4255                                  int32_t                  mask_x,
4256                                  int32_t                  mask_y,
4257                                  int32_t                  dest_x,
4258                                  int32_t                  dest_y,
4259                                  int32_t                  width,
4260                                  int32_t                  height)
4261 {
4262     uint16_t    *dst_line, *dst, d;
4263     uint32_t    *src_line, *src, s;
4264     int dst_stride, src_stride;
4265     uint16_t w;
4266     uint32_t opaque, zero;
4267
4268     __m64 ms;
4269     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
4270     __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
4271
4272     PIXMAN_IMAGE_GET_LINE (
4273         dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
4274     PIXMAN_IMAGE_GET_LINE (
4275         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
4276
4277 #if 0
4278     /* FIXME
4279      *
4280      * This FIXME was copied along with the code from the MMX version.
4281      * If it's a problem there, it's probably a problem here too.
4282      */
4283     assert (src_image->drawable == mask_image->drawable);
4284 #endif
4285
4286     while (height--)
4287     {
4288         dst = dst_line;
4289         dst_line += dst_stride;
4290         src = src_line;
4291         src_line += src_stride;
4292         w = width;
4293
4294         /* call prefetch hint to optimize cache load*/
4295         cache_prefetch ((__m128i*)src);
4296         cache_prefetch ((__m128i*)dst);
4297
4298         while (w && (unsigned long)dst & 15)
4299         {
4300             s = *src++;
4301             d = *dst;
4302
4303             ms = unpack_32_1x64 (s);
4304
4305             *dst++ = pack_565_32_16 (
4306                 pack_1x64_32 (
4307                     over_rev_non_pre_1x64 (ms, expand565_16_1x64 (d))));
4308             w--;
4309         }
4310
4311         /* call prefetch hint to optimize cache load*/
4312         cache_prefetch ((__m128i*)src);
4313         cache_prefetch ((__m128i*)dst);
4314
4315         while (w >= 8)
4316         {
4317             /* fill cache line with next memory */
4318             cache_prefetch_next ((__m128i*)src);
4319             cache_prefetch_next ((__m128i*)dst);
4320
4321             /* First round */
4322             xmm_src = load_128_unaligned ((__m128i*)src);
4323             xmm_dst = load_128_aligned  ((__m128i*)dst);
4324
4325             opaque = is_opaque (xmm_src);
4326             zero = is_zero (xmm_src);
4327
4328             unpack_565_128_4x128 (xmm_dst,
4329                                   &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
4330             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
4331
4332             /* preload next round*/
4333             xmm_src = load_128_unaligned ((__m128i*)(src + 4));
4334
4335             if (opaque)
4336             {
4337                 invert_colors_2x128 (xmm_src_lo, xmm_src_hi,
4338                                      &xmm_dst0, &xmm_dst1);
4339             }
4340             else if (!zero)
4341             {
4342                 over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi,
4343                                         &xmm_dst0, &xmm_dst1);
4344             }
4345
4346             /* Second round */
4347             opaque = is_opaque (xmm_src);
4348             zero = is_zero (xmm_src);
4349
4350             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
4351
4352             if (opaque)
4353             {
4354                 invert_colors_2x128 (xmm_src_lo, xmm_src_hi,
4355                                      &xmm_dst2, &xmm_dst3);
4356             }
4357             else if (!zero)
4358             {
4359                 over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi,
4360                                         &xmm_dst2, &xmm_dst3);
4361             }
4362
4363             save_128_aligned (
4364                 (__m128i*)dst, pack_565_4x128_128 (
4365                     &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
4366
4367             w -= 8;
4368             src += 8;
4369             dst += 8;
4370         }
4371
4372         while (w)
4373         {
4374             s = *src++;
4375             d = *dst;
4376
4377             ms = unpack_32_1x64 (s);
4378
4379             *dst++ = pack_565_32_16 (
4380                 pack_1x64_32 (
4381                     over_rev_non_pre_1x64 (ms, expand565_16_1x64 (d))));
4382             w--;
4383         }
4384     }
4385
4386     _mm_empty ();
4387 }
4388
4389 /* -------------------------------------------------------------------------
4390  * composite_over_pixbuf_8888
4391  */
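/* Same as the 0565 variant above, but with an a8r8g8b8 destination, so no
 * 565 unpacking or repacking is needed.
 */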
4392
4393 static void
4394 sse2_composite_over_pixbuf_8888 (pixman_implementation_t *imp,
4395                                  pixman_op_t              op,
4396                                  pixman_image_t *         src_image,
4397                                  pixman_image_t *         mask_image,
4398                                  pixman_image_t *         dst_image,
4399                                  int32_t                  src_x,
4400                                  int32_t                  src_y,
4401                                  int32_t                  mask_x,
4402                                  int32_t                  mask_y,
4403                                  int32_t                  dest_x,
4404                                  int32_t                  dest_y,
4405                                  int32_t                  width,
4406                                  int32_t                  height)
4407 {
4408     uint32_t    *dst_line, *dst, d;
4409     uint32_t    *src_line, *src, s;
4410     int dst_stride, src_stride;
4411     uint16_t w;
4412     uint32_t opaque, zero;
4413
4414     __m128i xmm_src_lo, xmm_src_hi;
4415     __m128i xmm_dst_lo, xmm_dst_hi;
4416
4417     PIXMAN_IMAGE_GET_LINE (
4418         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
4419     PIXMAN_IMAGE_GET_LINE (
4420         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
4421
4422 #if 0
4423     /* FIXME
4424      *
4425      * This FIXME was copied along with the code from the MMX version.
4426      * If it's a problem there, it's probably a problem here too.
4427      */
4428     assert (src_image->drawable == mask_image->drawable);
4429 #endif
4430
4431     while (height--)
4432     {
4433         dst = dst_line;
4434         dst_line += dst_stride;
4435         src = src_line;
4436         src_line += src_stride;
4437         w = width;
4438
4439         /* call prefetch hint to optimize cache load*/
4440         cache_prefetch ((__m128i*)src);
4441         cache_prefetch ((__m128i*)dst);
4442
4443         while (w && (unsigned long)dst & 15)
4444         {
4445             s = *src++;
4446             d = *dst;
4447
4448             *dst++ = pack_1x64_32 (
4449                 over_rev_non_pre_1x64 (
4450                     unpack_32_1x64 (s), unpack_32_1x64 (d)));
4451
4452             w--;
4453         }
4454
4455         /* call prefetch hint to optimize cache load*/
4456         cache_prefetch ((__m128i*)src);
4457         cache_prefetch ((__m128i*)dst);
4458
4459         while (w >= 4)
4460         {
4461             /* fill cache line with next memory */
4462             cache_prefetch_next ((__m128i*)src);
4463             cache_prefetch_next ((__m128i*)dst);
4464
4465             xmm_src_hi = load_128_unaligned ((__m128i*)src);
4466
4467             opaque = is_opaque (xmm_src_hi);
4468             zero = is_zero (xmm_src_hi);
4469
4470             unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
4471
4472             if (opaque)
4473             {
4474                 invert_colors_2x128 (xmm_src_lo, xmm_src_hi,
4475                                      &xmm_dst_lo, &xmm_dst_hi);
4476
4477                 save_128_aligned (
4478                     (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
4479             }
4480             else if (!zero)
4481             {
4482                 xmm_dst_hi = load_128_aligned  ((__m128i*)dst);
4483
4484                 unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
4485
4486                 over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi,
4487                                         &xmm_dst_lo, &xmm_dst_hi);
4488
4489                 save_128_aligned (
4490                     (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
4491             }
4492
4493             w -= 4;
4494             dst += 4;
4495             src += 4;
4496         }
4497
4498         while (w)
4499         {
4500             s = *src++;
4501             d = *dst;
4502
4503             *dst++ = pack_1x64_32 (
4504                 over_rev_non_pre_1x64 (
4505                     unpack_32_1x64 (s), unpack_32_1x64 (d)));
4506
4507             w--;
4508         }
4509     }
4510
4511     _mm_empty ();
4512 }
4513
4514 /* -----------------------------------------------------------------------
4515  * composite_over_n_8888_0565_ca
4516  */
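/* Component-alpha version: the mask is a full a8r8g8b8 image carrying a
 * separate coverage value per channel, so in_over multiplies the solid
 * source with the mask channel-wise.  pack_cmp (a movemask over a compare
 * against zero) lets the loop skip groups of 4 mask pixels that are all
 * zero.
 */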
4517
4518 static void
4519 sse2_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
4520                                     pixman_op_t              op,
4521                                     pixman_image_t *         src_image,
4522                                     pixman_image_t *         mask_image,
4523                                     pixman_image_t *         dst_image,
4524                                     int32_t                  src_x,
4525                                     int32_t                  src_y,
4526                                     int32_t                  mask_x,
4527                                     int32_t                  mask_y,
4528                                     int32_t                  dest_x,
4529                                     int32_t                  dest_y,
4530                                     int32_t                  width,
4531                                     int32_t                  height)
4532 {
4533     uint32_t src;
4534     uint16_t    *dst_line, *dst, d;
4535     uint32_t    *mask_line, *mask, m;
4536     int dst_stride, mask_stride;
4537     int w;
4538     uint32_t pack_cmp;
4539
4540     __m128i xmm_src, xmm_alpha;
4541     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
4542     __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
4543
4544     __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;
4545
4546     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
4547
4548     if (src == 0)
4549         return;
4550
4551     PIXMAN_IMAGE_GET_LINE (
4552         dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
4553     PIXMAN_IMAGE_GET_LINE (
4554         mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
4555
4556     xmm_src = expand_pixel_32_1x128 (src);
4557     xmm_alpha = expand_alpha_1x128 (xmm_src);
4558     mmx_src = _mm_movepi64_pi64 (xmm_src);
4559     mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
4560
4561     while (height--)
4562     {
4563         w = width;
4564         mask = mask_line;
4565         dst = dst_line;
4566         mask_line += mask_stride;
4567         dst_line += dst_stride;
4568
4569         /* call prefetch hint to optimize cache load*/
4570         cache_prefetch ((__m128i*)mask);
4571         cache_prefetch ((__m128i*)dst);
4572
4573         while (w && ((unsigned long)dst & 15))
4574         {
4575             m = *(uint32_t *) mask;
4576
4577             if (m)
4578             {
4579                 d = *dst;
4580                 mmx_mask = unpack_32_1x64 (m);
4581                 mmx_dest = expand565_16_1x64 (d);
4582
4583                 *dst = pack_565_32_16 (
4584                     pack_1x64_32 (
4585                         in_over_1x64 (
4586                             &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest)));
4587             }
4588
4589             w--;
4590             dst++;
4591             mask++;
4592         }
4593
4594         /* call prefetch hint to optimize cache load*/
4595         cache_prefetch ((__m128i*)mask);
4596         cache_prefetch ((__m128i*)dst);
4597
4598         while (w >= 8)
4599         {
4600             /* fill cache line with next memory */
4601             cache_prefetch_next ((__m128i*)mask);
4602             cache_prefetch_next ((__m128i*)dst);
4603
4604             /* First round */
4605             xmm_mask = load_128_unaligned ((__m128i*)mask);
4606             xmm_dst = load_128_aligned ((__m128i*)dst);
4607
4608             pack_cmp = _mm_movemask_epi8 (
4609                 _mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128 ()));
4610
4611             unpack_565_128_4x128 (xmm_dst,
4612                                   &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
4613             unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4614
4615             /* preload next round */
4616             xmm_mask = load_128_unaligned ((__m128i*)(mask + 4));
4617
4618             /* blend the first 4 pixels unless their mask is entirely zero */
4619             if (pack_cmp != 0xffff)
4620             {
4621                 in_over_2x128 (&xmm_src, &xmm_src,
4622                                &xmm_alpha, &xmm_alpha,
4623                                &xmm_mask_lo, &xmm_mask_hi,
4624                                &xmm_dst0, &xmm_dst1);
4625             }
4626
4627             /* Second round */
4628             pack_cmp = _mm_movemask_epi8 (
4629                 _mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128 ()));
4630
4631             unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4632
4633             if (pack_cmp != 0xffff)
4634             {
4635                 in_over_2x128 (&xmm_src, &xmm_src,
4636                                &xmm_alpha, &xmm_alpha,
4637                                &xmm_mask_lo, &xmm_mask_hi,
4638                                &xmm_dst2, &xmm_dst3);
4639             }
4640
4641             save_128_aligned (
4642                 (__m128i*)dst, pack_565_4x128_128 (
4643                     &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
4644
4645             w -= 8;
4646             dst += 8;
4647             mask += 8;
4648         }
4649
4650         while (w)
4651         {
4652             m = *(uint32_t *) mask;
4653
4654             if (m)
4655             {
4656                 d = *dst;
4657                 mmx_mask = unpack_32_1x64 (m);
4658                 mmx_dest = expand565_16_1x64 (d);
4659
4660                 *dst = pack_565_32_16 (
4661                     pack_1x64_32 (
4662                         in_over_1x64 (
4663                             &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest)));
4664             }
4665
4666             w--;
4667             dst++;
4668             mask++;
4669         }
4670     }
4671
4672     _mm_empty ();
4673 }
4674
4675 /* -----------------------------------------------------------------------
4676  * composite_in_n_8_8
4677  */
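/* IN with a solid source: every a8 destination byte becomes
 *
 *     dst = src.alpha * mask * dst    (values in [0, 255], rounded)
 *
 * with the vector loop handling 16 bytes per iteration.
 */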
4678
4679 static void
4680 sse2_composite_in_n_8_8 (pixman_implementation_t *imp,
4681                          pixman_op_t              op,
4682                          pixman_image_t *         src_image,
4683                          pixman_image_t *         mask_image,
4684                          pixman_image_t *         dst_image,
4685                          int32_t                  src_x,
4686                          int32_t                  src_y,
4687                          int32_t                  mask_x,
4688                          int32_t                  mask_y,
4689                          int32_t                  dest_x,
4690                          int32_t                  dest_y,
4691                          int32_t                  width,
4692                          int32_t                  height)
4693 {
4694     uint8_t     *dst_line, *dst;
4695     uint8_t     *mask_line, *mask;
4696     int dst_stride, mask_stride;
4697     uint16_t w, d, m;
4698     uint32_t src;
4699     uint8_t sa;
4700
4701     __m128i xmm_alpha;
4702     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
4703     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
4704
4705     PIXMAN_IMAGE_GET_LINE (
4706         dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
4707     PIXMAN_IMAGE_GET_LINE (
4708         mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
4709
4710     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
4711
4712     sa = src >> 24;
4713     if (sa == 0)
4714         return;
4715
4716     xmm_alpha = expand_alpha_1x128 (expand_pixel_32_1x128 (src));
4717
4718     while (height--)
4719     {
4720         dst = dst_line;
4721         dst_line += dst_stride;
4722         mask = mask_line;
4723         mask_line += mask_stride;
4724         w = width;
4725
4726         /* call prefetch hint to optimize cache load*/
4727         cache_prefetch ((__m128i*)mask);
4728         cache_prefetch ((__m128i*)dst);
4729
4730         while (w && ((unsigned long)dst & 15))
4731         {
4732             m = (uint32_t) *mask++;
4733             d = (uint32_t) *dst;
4734
4735             *dst++ = (uint8_t) pack_1x64_32 (
4736                 pix_multiply_1x64 (
4737                     pix_multiply_1x64 (_mm_movepi64_pi64 (xmm_alpha),
4738                                        unpack_32_1x64 (m)),
4739                     unpack_32_1x64 (d)));
4740             w--;
4741         }
4742
4743         /* call prefetch hint to optimize cache load*/
4744         cache_prefetch ((__m128i*)mask);
4745         cache_prefetch ((__m128i*)dst);
4746
4747         while (w >= 16)
4748         {
4749             /* fill cache line with next memory */
4750             cache_prefetch_next ((__m128i*)mask);
4751             cache_prefetch_next ((__m128i*)dst);
4752
4753             xmm_mask = load_128_unaligned ((__m128i*)mask);
4754             xmm_dst = load_128_aligned ((__m128i*)dst);
4755
4756             unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4757             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
4758
4759             pix_multiply_2x128 (&xmm_alpha, &xmm_alpha,
4760                                 &xmm_mask_lo, &xmm_mask_hi,
4761                                 &xmm_mask_lo, &xmm_mask_hi);
4762
4763             pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
4764                                 &xmm_dst_lo, &xmm_dst_hi,
4765                                 &xmm_dst_lo, &xmm_dst_hi);
4766
4767             save_128_aligned (
4768                 (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
4769
4770             mask += 16;
4771             dst += 16;
4772             w -= 16;
4773         }
4774
4775         while (w)
4776         {
4777             m = (uint32_t) *mask++;
4778             d = (uint32_t) *dst;
4779
4780             *dst++ = (uint8_t) pack_1x64_32 (
4781                 pix_multiply_1x64 (
4782                     pix_multiply_1x64 (
4783                         _mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)),
4784                     unpack_32_1x64 (d)));
4785             w--;
4786         }
4787     }
4788
4789     _mm_empty ();
4790 }
4791
4792 /* ---------------------------------------------------------------------------
4793  * composite_in_8_8
4794  */
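/* Plain IN between two a8 images: dst = src * dst, 16 bytes per iteration. */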
4795
4796 static void
4797 sse2_composite_in_8_8 (pixman_implementation_t *imp,
4798                        pixman_op_t              op,
4799                        pixman_image_t *         src_image,
4800                        pixman_image_t *         mask_image,
4801                        pixman_image_t *         dst_image,
4802                        int32_t                  src_x,
4803                        int32_t                  src_y,
4804                        int32_t                  mask_x,
4805                        int32_t                  mask_y,
4806                        int32_t                  dest_x,
4807                        int32_t                  dest_y,
4808                        int32_t                  width,
4809                        int32_t                  height)
4810 {
4811     uint8_t     *dst_line, *dst;
4812     uint8_t     *src_line, *src;
4813     int src_stride, dst_stride;
4814     uint16_t w;
4815     uint32_t s, d;
4816
4817     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
4818     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
4819
4820     PIXMAN_IMAGE_GET_LINE (
4821         dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
4822     PIXMAN_IMAGE_GET_LINE (
4823         src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
4824
4825     while (height--)
4826     {
4827         dst = dst_line;
4828         dst_line += dst_stride;
4829         src = src_line;
4830         src_line += src_stride;
4831         w = width;
4832
4833         /* call prefetch hint to optimize cache load*/
4834         cache_prefetch ((__m128i*)src);
4835         cache_prefetch ((__m128i*)dst);
4836
4837         while (w && ((unsigned long)dst & 15))
4838         {
4839             s = (uint32_t) *src++;
4840             d = (uint32_t) *dst;
4841
4842             *dst++ = (uint8_t) pack_1x64_32 (
4843                 pix_multiply_1x64 (
4844                     unpack_32_1x64 (s), unpack_32_1x64 (d)));
4845             w--;
4846         }
4847
4848         /* call prefetch hint to optimize cache load*/
4849         cache_prefetch ((__m128i*)src);
4850         cache_prefetch ((__m128i*)dst);
4851
4852         while (w >= 16)
4853         {
4854             /* fill cache line with next memory */
4855             cache_prefetch_next ((__m128i*)src);
4856             cache_prefetch_next ((__m128i*)dst);
4857
4858             xmm_src = load_128_unaligned ((__m128i*)src);
4859             xmm_dst = load_128_aligned ((__m128i*)dst);
4860
4861             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
4862             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
4863
4864             pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
4865                                 &xmm_dst_lo, &xmm_dst_hi,
4866                                 &xmm_dst_lo, &xmm_dst_hi);
4867
4868             save_128_aligned (
4869                 (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
4870
4871             src += 16;
4872             dst += 16;
4873             w -= 16;
4874         }
4875
4876         while (w)
4877         {
4878             s = (uint32_t) *src++;
4879             d = (uint32_t) *dst;
4880
4881             *dst++ = (uint8_t) pack_1x64_32 (
4882                 pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (d)));
4883             w--;
4884         }
4885     }
4886
4887     _mm_empty ();
4888 }
4889
4890 /* -------------------------------------------------------------------------
4891  * composite_add_8888_8_8
4892  */
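/* ADD with a solid source and an a8 mask onto an a8 destination:
 *
 *     dst = clamp (dst + src.alpha * mask)
 *
 * using saturating adds and packs so the result never wraps past 0xff.
 */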
4893
4894 static void
4895 sse2_composite_add_8888_8_8 (pixman_implementation_t *imp,
4896                              pixman_op_t              op,
4897                              pixman_image_t *         src_image,
4898                              pixman_image_t *         mask_image,
4899                              pixman_image_t *         dst_image,
4900                              int32_t                  src_x,
4901                              int32_t                  src_y,
4902                              int32_t                  mask_x,
4903                              int32_t                  mask_y,
4904                              int32_t                  dest_x,
4905                              int32_t                  dest_y,
4906                              int32_t                  width,
4907                              int32_t                  height)
4908 {
4909     uint8_t     *dst_line, *dst;
4910     uint8_t     *mask_line, *mask;
4911     int dst_stride, mask_stride;
4912     uint16_t w;
4913     uint32_t src;
4914     uint8_t sa;
4915     uint32_t m, d;
4916
4917     __m128i xmm_alpha;
4918     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
4919     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
4920
4921     PIXMAN_IMAGE_GET_LINE (
4922         dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
4923     PIXMAN_IMAGE_GET_LINE (
4924         mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
4925
4926     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
4927
4928     sa = src >> 24;
4929     if (sa == 0)
4930         return;
4931
4932     xmm_alpha = expand_alpha_1x128 (expand_pixel_32_1x128 (src));
4933
4934     while (height--)
4935     {
4936         dst = dst_line;
4937         dst_line += dst_stride;
4938         mask = mask_line;
4939         mask_line += mask_stride;
4940         w = width;
4941
4942         /* call prefetch hint to optimize cache load*/
4943         cache_prefetch ((__m128i*)mask);
4944         cache_prefetch ((__m128i*)dst);
4945
4946         while (w && ((unsigned long)dst & 15))
4947         {
4948             m = (uint32_t) *mask++;
4949             d = (uint32_t) *dst;
4950
4951             *dst++ = (uint8_t) pack_1x64_32 (
4952                 _mm_adds_pu16 (
4953                     pix_multiply_1x64 (
4954                         _mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)),
4955                     unpack_32_1x64 (d)));
4956             w--;
4957         }
4958
4959         /* call prefetch hint to optimize cache load*/
4960         cache_prefetch ((__m128i*)mask);
4961         cache_prefetch ((__m128i*)dst);
4962
4963         while (w >= 16)
4964         {
4965             /* fill cache line with next memory */
4966             cache_prefetch_next ((__m128i*)mask);
4967             cache_prefetch_next ((__m128i*)dst);
4968
4969             xmm_mask = load_128_unaligned ((__m128i*)mask);
4970             xmm_dst = load_128_aligned ((__m128i*)dst);
4971
4972             unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4973             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
4974
4975             pix_multiply_2x128 (&xmm_alpha, &xmm_alpha,
4976                                 &xmm_mask_lo, &xmm_mask_hi,
4977                                 &xmm_mask_lo, &xmm_mask_hi);
4978
4979             xmm_dst_lo = _mm_adds_epu16 (xmm_mask_lo, xmm_dst_lo);
4980             xmm_dst_hi = _mm_adds_epu16 (xmm_mask_hi, xmm_dst_hi);
4981
4982             save_128_aligned (
4983                 (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
4984
4985             mask += 16;
4986             dst += 16;
4987             w -= 16;
4988         }
4989
4990         while (w)
4991         {
4992             m = (uint32_t) *mask++;
4993             d = (uint32_t) *dst;
4994
4995             *dst++ = (uint8_t) pack_1x64_32 (
4996                 _mm_adds_pu16 (
4997                     pix_multiply_1x64 (
4998                         _mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)),
4999                     unpack_32_1x64 (d)));
5000
5001             w--;
5002         }
5003     }
5004
5005     _mm_empty ();
5006 }
5007
5008 /* ----------------------------------------------------------------------
5009  * composite_add_8000_8000
5010  */
5011
5012 static void
5013 sse2_composite_add_8000_8000 (pixman_implementation_t *imp,
5014                               pixman_op_t              op,
5015                               pixman_image_t *         src_image,
5016                               pixman_image_t *         mask_image,
5017                               pixman_image_t *         dst_image,
5018                               int32_t                  src_x,
5019                               int32_t                  src_y,
5020                               int32_t                  mask_x,
5021                               int32_t                  mask_y,
5022                               int32_t                  dest_x,
5023                               int32_t                  dest_y,
5024                               int32_t                  width,
5025                               int32_t                  height)
5026 {
5027     uint8_t     *dst_line, *dst;
5028     uint8_t     *src_line, *src;
5029     int dst_stride, src_stride;
5030     uint16_t w;
5031     uint16_t t;
5032
5033     PIXMAN_IMAGE_GET_LINE (
5034         src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
5035     PIXMAN_IMAGE_GET_LINE (
5036         dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
5037
5038     while (height--)
5039     {
5040         dst = dst_line;
5041         src = src_line;
5042
5043         /* Issue a prefetch hint to warm up the cache. */
5044         cache_prefetch ((__m128i*)src);
5045         cache_prefetch ((__m128i*)dst);
5046
5047         dst_line += dst_stride;
5048         src_line += src_stride;
5049         w = width;
5050
5051         /* Small head */
5052         while (w && (unsigned long)dst & 3)
5053         {
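                 /* Saturating 8-bit add: when the 16-bit sum exceeds 255,
                  * (t >> 8) is 1 and (0 - 1) is all ones, so the OR clamps
                  * the stored byte to 0xff.
                  */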
5054             t = (*dst) + (*src++);
5055             *dst++ = t | (0 - (t >> 8));
5056             w--;
5057         }
5058
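             /* Middle: hand the 4-byte-aligned span to the SSE2 ADD combiner,
              * w >> 2 uint32_t words (four a8 pixels each) at a time.
              */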
5059         core_combine_add_u_sse2 ((uint32_t*)dst, (uint32_t*)src, NULL, w >> 2);
5060
5061         /* Small tail */
5062         dst += w & 0xfffc;
5063         src += w & 0xfffc;
5064
5065         w &= 3;
5066
5067         while (w)
5068         {
5069             t = (*dst) + (*src++);
5070             *dst++ = t | (0 - (t >> 8));
5071             w--;
5072         }
5073     }
5074
5075     _mm_empty ();
5076 }
5077
5078 /* ---------------------------------------------------------------------
5079  * composite_add_8888_8888
5080  */
5081 static void
5082 sse2_composite_add_8888_8888 (pixman_implementation_t *imp,
5083                               pixman_op_t              op,
5084                               pixman_image_t *         src_image,
5085                               pixman_image_t *         mask_image,
5086                               pixman_image_t *         dst_image,
5087                               int32_t                  src_x,
5088                               int32_t                  src_y,
5089                               int32_t                  mask_x,
5090                               int32_t                  mask_y,
5091                               int32_t                  dest_x,
5092                               int32_t                  dest_y,
5093                               int32_t                  width,
5094                               int32_t                  height)
5095 {
5096     uint32_t    *dst_line, *dst;
5097     uint32_t    *src_line, *src;
5098     int dst_stride, src_stride;
5099
5100     PIXMAN_IMAGE_GET_LINE (
5101         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
5102     PIXMAN_IMAGE_GET_LINE (
5103         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
5104
5105     while (height--)
5106     {
5107         dst = dst_line;
5108         dst_line += dst_stride;
5109         src = src_line;
5110         src_line += src_stride;
5111
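             /* Each scanline is added in one pass by the SSE2 ADD combiner
              * (per-channel unsigned saturating add).
              */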
5112         core_combine_add_u_sse2 (dst, src, NULL, width);
5113     }
5114
5115     _mm_empty ();
5116 }
5117
5118 /* -------------------------------------------------------------------------
5119  * sse2_composite_copy_area
5120  */
5121
5122 static pixman_bool_t
5123 pixman_blt_sse2 (uint32_t *src_bits,
5124                  uint32_t *dst_bits,
5125                  int       src_stride,
5126                  int       dst_stride,
5127                  int       src_bpp,
5128                  int       dst_bpp,
5129                  int       src_x,
5130                  int       src_y,
5131                  int       dst_x,
5132                  int       dst_y,
5133                  int       width,
5134                  int       height)
5135 {
5136     uint8_t *   src_bytes;
5137     uint8_t *   dst_bytes;
5138     int byte_width;
5139
5140     if (src_bpp != dst_bpp)
5141         return FALSE;
5142
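         /* Strides are passed in uint32_t units: convert them to pixel-sized
          * units for the given bpp to locate the first pixel, then to bytes
          * for the copy loops below.
          */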
5143     if (src_bpp == 16)
5144     {
5145         src_stride = src_stride * (int) sizeof (uint32_t) / 2;
5146         dst_stride = dst_stride * (int) sizeof (uint32_t) / 2;
5147         src_bytes = (uint8_t *)(((uint16_t *)src_bits) + src_stride * (src_y) + (src_x));
5148         dst_bytes = (uint8_t *)(((uint16_t *)dst_bits) + dst_stride * (dst_y) + (dst_x));
5149         byte_width = 2 * width;
5150         src_stride *= 2;
5151         dst_stride *= 2;
5152     }
5153     else if (src_bpp == 32)
5154     {
5155         src_stride = src_stride * (int) sizeof (uint32_t) / 4;
5156         dst_stride = dst_stride * (int) sizeof (uint32_t) / 4;
5157         src_bytes = (uint8_t *)(((uint32_t *)src_bits) + src_stride * (src_y) + (src_x));
5158         dst_bytes = (uint8_t *)(((uint32_t *)dst_bits) + dst_stride * (dst_y) + (dst_x));
5159         byte_width = 4 * width;
5160         src_stride *= 4;
5161         dst_stride *= 4;
5162     }
5163     else
5164     {
5165         return FALSE;
5166     }
5167
5168     cache_prefetch ((__m128i*)src_bytes);
5169     cache_prefetch ((__m128i*)dst_bytes);
5170
5171     while (height--)
5172     {
5173         int w;
5174         uint8_t *s = src_bytes;
5175         uint8_t *d = dst_bytes;
5176         src_bytes += src_stride;
5177         dst_bytes += dst_stride;
5178         w = byte_width;
5179
5180         cache_prefetch_next ((__m128i*)s);
5181         cache_prefetch_next ((__m128i*)d);
5182
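             /* Copy in progressively larger chunks: 2- and 4-byte heads until
              * the destination is 16-byte aligned, then 64- and 16-byte SSE2
              * blocks, and finally 4- and 2-byte tails.
              */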
5183         while (w >= 2 && ((unsigned long)d & 3))
5184         {
5185             *(uint16_t *)d = *(uint16_t *)s;
5186             w -= 2;
5187             s += 2;
5188             d += 2;
5189         }
5190
5191         while (w >= 4 && ((unsigned long)d & 15))
5192         {
5193             *(uint32_t *)d = *(uint32_t *)s;
5194
5195             w -= 4;
5196             s += 4;
5197             d += 4;
5198         }
5199
5200         cache_prefetch_next ((__m128i*)s);
5201         cache_prefetch_next ((__m128i*)d);
5202
5203         while (w >= 64)
5204         {
5205             __m128i xmm0, xmm1, xmm2, xmm3;
5206
5207             /* 128 bytes ahead */
5208             cache_prefetch (((__m128i*)s) + 8);
5209             cache_prefetch (((__m128i*)d) + 8);
5210
5211             xmm0 = load_128_unaligned ((__m128i*)(s));
5212             xmm1 = load_128_unaligned ((__m128i*)(s + 16));
5213             xmm2 = load_128_unaligned ((__m128i*)(s + 32));
5214             xmm3 = load_128_unaligned ((__m128i*)(s + 48));
5215
5216             save_128_aligned ((__m128i*)(d),    xmm0);
5217             save_128_aligned ((__m128i*)(d + 16), xmm1);
5218             save_128_aligned ((__m128i*)(d + 32), xmm2);
5219             save_128_aligned ((__m128i*)(d + 48), xmm3);
5220
5221             s += 64;
5222             d += 64;
5223             w -= 64;
5224         }
5225
5226         cache_prefetch_next ((__m128i*)s);
5227         cache_prefetch_next ((__m128i*)d);
5228
5229         while (w >= 16)
5230         {
5231             save_128_aligned ((__m128i*)d, load_128_unaligned ((__m128i*)s));
5232
5233             w -= 16;
5234             d += 16;
5235             s += 16;
5236         }
5237
5238         cache_prefetch_next ((__m128i*)s);
5239         cache_prefetch_next ((__m128i*)d);
5240
5241         while (w >= 4)
5242         {
5243             *(uint32_t *)d = *(uint32_t *)s;
5244
5245             w -= 4;
5246             s += 4;
5247             d += 4;
5248         }
5249
5250         if (w >= 2)
5251         {
5252             *(uint16_t *)d = *(uint16_t *)s;
5253             w -= 2;
5254             s += 2;
5255             d += 2;
5256         }
5257     }
5258
5259     _mm_empty ();
5260
5261     return TRUE;
5262 }
5263
5264 static void
5265 sse2_composite_copy_area (pixman_implementation_t *imp,
5266                           pixman_op_t              op,
5267                           pixman_image_t *         src_image,
5268                           pixman_image_t *         mask_image,
5269                           pixman_image_t *         dst_image,
5270                           int32_t                  src_x,
5271                           int32_t                  src_y,
5272                           int32_t                  mask_x,
5273                           int32_t                  mask_y,
5274                           int32_t                  dest_x,
5275                           int32_t                  dest_y,
5276                           int32_t                  width,
5277                           int32_t                  height)
5278 {
5279     pixman_blt_sse2 (src_image->bits.bits,
5280                      dst_image->bits.bits,
5281                      src_image->bits.rowstride,
5282                      dst_image->bits.rowstride,
5283                      PIXMAN_FORMAT_BPP (src_image->bits.format),
5284                      PIXMAN_FORMAT_BPP (dst_image->bits.format),
5285                      src_x, src_y, dest_x, dest_y, width, height);
5286 }
5287
5288 #if 0
5289 /* This code is buggy in the MMX version, and the bug was carried over to the SSE2 version */
5290 void
5291 sse2_composite_over_x888_8_8888 (pixman_implementation_t *imp,
5292                                  pixman_op_t              op,
5293                                  pixman_image_t *         src_image,
5294                                  pixman_image_t *         mask_image,
5295                                  pixman_image_t *         dst_image,
5296                                  int32_t                  src_x,
5297                                  int32_t                  src_y,
5298                                  int32_t                  mask_x,
5299                                  int32_t                  mask_y,
5300                                  int32_t                  dest_x,
5301                                  int32_t                  dest_y,
5302                                  int32_t                  width,
5303                                  int32_t                  height)
5304 {
5305     uint32_t    *src, *src_line, s;
5306     uint32_t    *dst, *dst_line, d;
5307     uint8_t     *mask, *mask_line;
5308     uint32_t m;
5309     int src_stride, mask_stride, dst_stride;
5310     uint16_t w;
5311
5312     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
5313     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
5314     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
5315
5316     PIXMAN_IMAGE_GET_LINE (
5317         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
5318     PIXMAN_IMAGE_GET_LINE (
5319         mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
5320     PIXMAN_IMAGE_GET_LINE (
5321         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
5322
5323     while (height--)
5324     {
5325         src = src_line;
5326         src_line += src_stride;
5327         dst = dst_line;
5328         dst_line += dst_stride;
5329         mask = mask_line;
5330         mask_line += mask_stride;
5331
5332         w = width;
5333
5334         /* Issue a prefetch hint to warm up the cache. */
5335         cache_prefetch ((__m128i*)src);
5336         cache_prefetch ((__m128i*)dst);
5337         cache_prefetch ((__m128i*)mask);
5338
5339         while (w && (unsigned long)dst & 15)
5340         {
5341             s = 0xff000000 | *src++;
5342             m = (uint32_t) *mask++;
5343             d = *dst;
5344
5345             __m64 ms = unpack_32_1x64 (s);
5346
5347             if (m != 0xff)
5348             {
5349                 ms = in_over_1x64 (ms,
5350                                    mask_x00ff,
5351                                    expand_alpha_rev_1x64 (unpack_32_1x64 (m)),
5352                                    unpack_32_1x64 (d));
5353             }
5354
5355             *dst++ = pack_1x64_32 (ms);
5356             w--;
5357         }
5358
5359         /* Issue a prefetch hint to warm up the cache. */
5360         cache_prefetch ((__m128i*)src);
5361         cache_prefetch ((__m128i*)dst);
5362         cache_prefetch ((__m128i*)mask);
5363
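             /* Four pixels at a time: when all four mask bytes are 0xff, the
              * opaque source (alpha forced to 0xff) is stored directly;
              * otherwise a full in_over blend against the destination is done.
              */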
5364         while (w >= 4)
5365         {
5366             /* Prefetch the next cache line. */
5367             cache_prefetch_next ((__m128i*)src);
5368             cache_prefetch_next ((__m128i*)dst);
5369             cache_prefetch_next ((__m128i*)mask);
5370
5371             m = *(uint32_t*) mask;
5372             xmm_src = _mm_or_si128 (load_128_unaligned ((__m128i*)src), mask_ff000000);
5373
5374             if (m == 0xffffffff)
5375             {
5376                 save_128_aligned ((__m128i*)dst, xmm_src);
5377             }
5378             else
5379             {
5380                 xmm_dst = load_128_aligned ((__m128i*)dst);
5381
5382                 xmm_mask = _mm_unpacklo_epi16 (
5383                     unpack_32_1x128 (m), _mm_setzero_si128 ());
5384
5385                 unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
5386                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
5387                 unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
5388
5389                 expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi,
5390                                         &xmm_mask_lo, &xmm_mask_hi);
5391
5392                 in_over_2x128 (xmm_src_lo, xmm_src_hi,
5393                                mask_00ff, mask_00ff,
5394                                xmm_mask_lo, xmm_mask_hi,
5395                                &xmm_dst_lo, &xmm_dst_hi);
5396
5397                 save_128_aligned (
5398                     (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
5399             }
5400
5401             src += 4;
5402             dst += 4;
5403             mask += 4;
5404             w -= 4;
5405         }
5406
5407         while (w)
5408         {
5409             m = (uint32_t) *mask++;
5410
5411             if (m)
5412             {
5413                 s = 0xff000000 | *src;
5414
5415                 if (m == 0xff)
5416                 {
5417                     *dst = s;
5418                 }
5419                 else
5420                 {
5421                     d = *dst;
5422
5423                     *dst = pack_1x64_32 (
5424                         in_over_1x64 (
5425                             unpack_32_1x64 (s),
5426                             mask_x00ff,
5427                             expand_alpha_rev_1x64 (unpack_32_1x64 (m)),
5428                             unpack_32_1x64 (d)));
5429                 }
5430
5431             }
5432
5433             src++;
5434             dst++;
5435             w--;
5436         }
5437     }
5438
5439     _mm_empty ();
5440 }
5441
5442 #endif
5443
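     /* Each entry lists the operator, the source, mask and destination
      * formats it matches, the composite routine to run, and a flags word
      * (NEED_SOLID_MASK, NEED_COMPONENT_ALPHA, NEED_PIXBUF).
      */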
5444 static const pixman_fast_path_t sse2_fast_paths[] =
5445 {
5446     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   sse2_composite_over_n_8_0565,       0 },
5447     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   sse2_composite_over_n_8_0565,       0 },
5448     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_composite_over_n_8888,         0 },
5449     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_over_n_8888,         0 },
5450     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_r5g6b5,   sse2_composite_over_n_0565,         0 },
5451     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_composite_over_8888_8888,      0 },
5452     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_over_8888_8888,      0 },
5453     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, sse2_composite_over_8888_8888,      0 },
5454     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_composite_over_8888_8888,      0 },
5455     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   sse2_composite_over_8888_0565,      0 },
5456     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   sse2_composite_over_8888_0565,      0 },
5457     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_n_8_8888,       0 },
5458     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_over_n_8_8888,       0 },
5459     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_composite_over_n_8_8888,       0 },
5460     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_over_n_8_8888,       0 },
5461 #if 0
5462     /* FIXME: this code is buggy in the MMX version, and the bug was carried over to the SSE2 version */
5463     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_over_x888_8_8888,    0 },
5464     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_x888_8_8888,    0 },
5465     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_over_x888_8_8888,    0 },
5466     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_x888_8_8888,    0 },
5467 #endif
5468     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_x888_n_8888,    NEED_SOLID_MASK },
5469     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_over_x888_n_8888,    NEED_SOLID_MASK },
5470     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_composite_over_x888_n_8888,    NEED_SOLID_MASK },
5471     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_over_x888_n_8888,    NEED_SOLID_MASK },
5472     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_8888_n_8888,    NEED_SOLID_MASK },
5473     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_over_8888_n_8888,    NEED_SOLID_MASK },
5474     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_composite_over_8888_n_8888,    NEED_SOLID_MASK },
5475     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_over_8888_n_8888,    NEED_SOLID_MASK },
5476     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
5477     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
5478     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
5479     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
5480     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   sse2_composite_over_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
5481     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   sse2_composite_over_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
5482     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5483     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_a8r8g8b8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5484     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5485     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_x8r8g8b8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5486     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_a8b8g8r8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5487     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5488     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_x8b8g8r8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5489     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5490     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   sse2_composite_over_pixbuf_0565,    NEED_PIXBUF },
5491     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_r5g6b5,   sse2_composite_over_pixbuf_0565,    NEED_PIXBUF },
5492     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_b5g6r5,   sse2_composite_over_pixbuf_0565,    NEED_PIXBUF },
5493     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   sse2_composite_over_pixbuf_0565,    NEED_PIXBUF },
5494     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_copy_area,           0 },
5495     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_composite_copy_area,           0 },
5496
5497     { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_null,     PIXMAN_a8,       sse2_composite_add_8000_8000,       0 },
5498     { PIXMAN_OP_ADD,  PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_composite_add_8888_8888,       0 },
5499     { PIXMAN_OP_ADD,  PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, sse2_composite_add_8888_8888,       0 },
5500     { PIXMAN_OP_ADD,  PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8,       sse2_composite_add_8888_8_8,        0 },
5501
5502     { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_src_n_8_8888,        0 },
5503     { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_src_n_8_8888,        0 },
5504     { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_composite_src_n_8_8888,        0 },
5505     { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_src_n_8_8888,        0 },
5506     { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_composite_copy_area,           0 },
5507     { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,     PIXMAN_a8b8g8r8, sse2_composite_copy_area,           0 },
5508     { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_copy_area,           0 },
5509     { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_composite_copy_area,           0 },
5510     { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_copy_area,           0 },
5511     { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8,  PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_composite_copy_area,           0 },
5512     { PIXMAN_OP_SRC, PIXMAN_r5g6b5,    PIXMAN_null,     PIXMAN_r5g6b5,   sse2_composite_copy_area,           0 },
5513     { PIXMAN_OP_SRC, PIXMAN_b5g6r5,    PIXMAN_null,     PIXMAN_b5g6r5,   sse2_composite_copy_area,           0 },
5514
5515     { PIXMAN_OP_IN,  PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       sse2_composite_in_8_8,              0 },
5516     { PIXMAN_OP_IN,  PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8,       sse2_composite_in_n_8_8,            0 },
5517
5518     { PIXMAN_OP_NONE },
5519 };
5520
5521 /*
5522  * Work around GCC bug causing crashes in Mozilla with SSE2
5523  *
5524  * When using -msse, gcc generates movdqa instructions assuming that
5525  * the stack is 16 byte aligned. Unfortunately some applications, such
5526  * as Mozilla and Mono, end up aligning the stack to 4 bytes, which
5527  * causes the movdqa instructions to fail.
5528  *
5529  * The __force_align_arg_pointer__ makes gcc generate a prologue that
5530  * realigns the stack pointer to 16 bytes.
5531  *
5532  * On x86-64 this is not necessary because the standard ABI already
5533  * calls for a 16 byte aligned stack.
5534  *
5535  * See https://bugs.freedesktop.org/show_bug.cgi?id=15693
5536  */
5537 #if defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__)
5538 __attribute__((__force_align_arg_pointer__))
5539 #endif
5540 static void
5541 sse2_composite (pixman_implementation_t *imp,
5542                 pixman_op_t              op,
5543                 pixman_image_t *         src,
5544                 pixman_image_t *         mask,
5545                 pixman_image_t *         dest,
5546                 int32_t                  src_x,
5547                 int32_t                  src_y,
5548                 int32_t                  mask_x,
5549                 int32_t                  mask_y,
5550                 int32_t                  dest_x,
5551                 int32_t                  dest_y,
5552                 int32_t                  width,
5553                 int32_t                  height)
5554 {
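         /* Try the table of SSE2 fast paths first; if none matches, fall
          * through to the delegate implementation.
          */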
5555     if (_pixman_run_fast_path (sse2_fast_paths, imp,
5556                                op, src, mask, dest,
5557                                src_x, src_y,
5558                                mask_x, mask_y,
5559                                dest_x, dest_y,
5560                                width, height))
5561     {
5562         return;
5563     }
5564
5565     _pixman_implementation_composite (imp->delegate, op,
5566                                       src, mask, dest,
5567                                       src_x, src_y,
5568                                       mask_x, mask_y,
5569                                       dest_x, dest_y,
5570                                       width, height);
5571 }
5572
5573 #if defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__)
5574 __attribute__((__force_align_arg_pointer__))
5575 #endif
5576 static pixman_bool_t
5577 sse2_blt (pixman_implementation_t *imp,
5578           uint32_t *               src_bits,
5579           uint32_t *               dst_bits,
5580           int                      src_stride,
5581           int                      dst_stride,
5582           int                      src_bpp,
5583           int                      dst_bpp,
5584           int                      src_x,
5585           int                      src_y,
5586           int                      dst_x,
5587           int                      dst_y,
5588           int                      width,
5589           int                      height)
5590 {
5591     if (!pixman_blt_sse2 (
5592             src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
5593             src_x, src_y, dst_x, dst_y, width, height))
5595     {
5596         return _pixman_implementation_blt (
5597             imp->delegate,
5598             src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
5599             src_x, src_y, dst_x, dst_y, width, height);
5600     }
5601
5602     return TRUE;
5603 }
5604
5605 #if defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__)
5606 __attribute__((__force_align_arg_pointer__))
5607 #endif
5608 static pixman_bool_t
5609 sse2_fill (pixman_implementation_t *imp,
5610            uint32_t *               bits,
5611            int                      stride,
5612            int                      bpp,
5613            int                      x,
5614            int                      y,
5615            int                      width,
5616            int                      height,
5617            uint32_t                 xor)
5618 {
5619     if (!pixman_fill_sse2 (bits, stride, bpp, x, y, width, height, xor))
5620     {
5621         return _pixman_implementation_fill (
5622             imp->delegate, bits, stride, bpp, x, y, width, height, xor);
5623     }
5624
5625     return TRUE;
5626 }
5627
5628 pixman_implementation_t *
5629 _pixman_implementation_create_sse2 (void)
5630 {
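         /* The MMX implementation is created first and installed as the
          * delegate, so operations without an SSE2 fast path fall back to
          * MMX (and from there, presumably, to the general implementation).
          */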
5631     pixman_implementation_t *mmx = _pixman_implementation_create_mmx ();
5632     pixman_implementation_t *imp = _pixman_implementation_create (mmx);
5633
5634     /* SSE2 constants */
5635     mask_565_r  = create_mask_2x32_128 (0x00f80000, 0x00f80000);
5636     mask_565_g1 = create_mask_2x32_128 (0x00070000, 0x00070000);
5637     mask_565_g2 = create_mask_2x32_128 (0x000000e0, 0x000000e0);
5638     mask_565_b  = create_mask_2x32_128 (0x0000001f, 0x0000001f);
5639     mask_red   = create_mask_2x32_128 (0x00f80000, 0x00f80000);
5640     mask_green = create_mask_2x32_128 (0x0000fc00, 0x0000fc00);
5641     mask_blue  = create_mask_2x32_128 (0x000000f8, 0x000000f8);
5642     mask_565_fix_rb = create_mask_2x32_128 (0x00e000e0, 0x00e000e0);
5643     mask_565_fix_g = create_mask_2x32_128  (0x0000c000, 0x0000c000);
5644     mask_0080 = create_mask_16_128 (0x0080);
5645     mask_00ff = create_mask_16_128 (0x00ff);
5646     mask_0101 = create_mask_16_128 (0x0101);
5647     mask_ffff = create_mask_16_128 (0xffff);
5648     mask_ff000000 = create_mask_2x32_128 (0xff000000, 0xff000000);
5649     mask_alpha = create_mask_2x32_128 (0x00ff0000, 0x00000000);
5650
5651     /* MMX constants */
5652     mask_x565_rgb = create_mask_2x32_64 (0x000001f0, 0x003f001f);
5653     mask_x565_unpack = create_mask_2x32_64 (0x00000084, 0x04100840);
5654
5655     mask_x0080 = create_mask_16_64 (0x0080);
5656     mask_x00ff = create_mask_16_64 (0x00ff);
5657     mask_x0101 = create_mask_16_64 (0x0101);
5658     mask_x_alpha = create_mask_2x32_64 (0x00ff0000, 0x00000000);
5659
5660     _mm_empty ();
5661
5662     /* Set up function pointers */
5663
5664     /* SSE code patch for fbcompose.c */
5665     imp->combine_32[PIXMAN_OP_OVER] = sse2_combine_over_u;
5666     imp->combine_32[PIXMAN_OP_OVER_REVERSE] = sse2_combine_over_reverse_u;
5667     imp->combine_32[PIXMAN_OP_IN] = sse2_combine_in_u;
5668     imp->combine_32[PIXMAN_OP_IN_REVERSE] = sse2_combine_in_reverse_u;
5669     imp->combine_32[PIXMAN_OP_OUT] = sse2_combine_out_u;
5670     imp->combine_32[PIXMAN_OP_OUT_REVERSE] = sse2_combine_out_reverse_u;
5671     imp->combine_32[PIXMAN_OP_ATOP] = sse2_combine_atop_u;
5672     imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = sse2_combine_atop_reverse_u;
5673     imp->combine_32[PIXMAN_OP_XOR] = sse2_combine_xor_u;
5674     imp->combine_32[PIXMAN_OP_ADD] = sse2_combine_add_u;
5675
5676     imp->combine_32[PIXMAN_OP_SATURATE] = sse2_combine_saturate_u;
5677
5678     imp->combine_32_ca[PIXMAN_OP_SRC] = sse2_combine_src_ca;
5679     imp->combine_32_ca[PIXMAN_OP_OVER] = sse2_combine_over_ca;
5680     imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = sse2_combine_over_reverse_ca;
5681     imp->combine_32_ca[PIXMAN_OP_IN] = sse2_combine_in_ca;
5682     imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = sse2_combine_in_reverse_ca;
5683     imp->combine_32_ca[PIXMAN_OP_OUT] = sse2_combine_out_ca;
5684     imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = sse2_combine_out_reverse_ca;
5685     imp->combine_32_ca[PIXMAN_OP_ATOP] = sse2_combine_atop_ca;
5686     imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = sse2_combine_atop_reverse_ca;
5687     imp->combine_32_ca[PIXMAN_OP_XOR] = sse2_combine_xor_ca;
5688     imp->combine_32_ca[PIXMAN_OP_ADD] = sse2_combine_add_ca;
5689
5690     imp->composite = sse2_composite;
5691     imp->blt = sse2_blt;
5692     imp->fill = sse2_fill;
5693
5694     return imp;
5695 }
5696
5697 #endif /* USE_SSE2 */