Add implementation of MMX __m64 functions for MSVC x64.
[profile/ivi/pixman.git] / pixman / pixman-sse2.c
1 /*
2  * Copyright © 2008 Rodrigo Kumpera
3  * Copyright © 2008 André Tupinambá
4  *
5  * Permission to use, copy, modify, distribute, and sell this software and its
6  * documentation for any purpose is hereby granted without fee, provided that
7  * the above copyright notice appear in all copies and that both that
8  * copyright notice and this permission notice appear in supporting
9  * documentation, and that the name of Red Hat not be used in advertising or
10  * publicity pertaining to distribution of the software without specific,
11  * written prior permission.  Red Hat makes no representations about the
12  * suitability of this software for any purpose.  It is provided "as is"
13  * without express or implied warranty.
14  *
15  * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
16  * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
17  * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
18  * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
19  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
20  * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
21  * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
22  * SOFTWARE.
23  *
24  * Author:  Rodrigo Kumpera (kumpera@gmail.com)
25  *          André Tupinambá (andrelrt@gmail.com)
26  *
27  * Based on work by Owen Taylor and Søren Sandmann
28  */
29 #ifdef HAVE_CONFIG_H
30 #include <config.h>
31 #endif
32
33 #include <mmintrin.h>
34 #include <xmmintrin.h> /* for _mm_shuffle_pi16 and _MM_SHUFFLE */
35 #include <emmintrin.h> /* for SSE2 intrinsics */
36 #include "pixman-private.h"
37 #include "pixman-combine32.h"
38
39 #if defined(_MSC_VER) && defined(_M_AMD64)
40 /* The 64-bit MSVC compiler doesn't support the MMX intrinsics, so
41  * the pixman-x64-mmx-emulation.h file contains
42  * implementations of those MMX intrinsics that
43  * are used by the SSE2 code in this file.
44  */
45 #   include "pixman-x64-mmx-emulation.h"
46 #endif
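
/* For reference only (not compiled): a minimal sketch of how one
 * MMX-style operation might be emulated on top of SSE2 when the real
 * __m64 intrinsics are unavailable.  The type and function names below
 * are hypothetical; the definitions pixman actually uses live in
 * pixman-x64-mmx-emulation.h and may be structured differently.
 */
#if 0
typedef __m128i m64_sketch_t;   /* 64-bit value carried in the low half */

static force_inline m64_sketch_t
sketch_mm_adds_pu8 (m64_sketch_t a, m64_sketch_t b)
{
    /* Saturating unsigned byte add; only the low 8 bytes are meaningful. */
    return _mm_adds_epu8 (a, b);
}
#endif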
47
48 #ifdef USE_SSE2
49
50 /* --------------------------------------------------------------------
51  * Locals
52  */
53
54 static __m64 mask_x0080;
55 static __m64 mask_x00ff;
56 static __m64 mask_x0101;
57 static __m64 mask_x_alpha;
58
59 static __m64 mask_x565_rgb;
60 static __m64 mask_x565_unpack;
61
62 static __m128i mask_0080;
63 static __m128i mask_00ff;
64 static __m128i mask_0101;
65 static __m128i mask_ffff;
66 static __m128i mask_ff000000;
67 static __m128i mask_alpha;
68
69 static __m128i mask_565_r;
70 static __m128i mask_565_g1, mask_565_g2;
71 static __m128i mask_565_b;
72 static __m128i mask_red;
73 static __m128i mask_green;
74 static __m128i mask_blue;
75
76 static __m128i mask_565_fix_rb;
77 static __m128i mask_565_fix_g;
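
/* These SIMD constants are not initialized at compile time; they are
 * filled in at run time when the SSE2 implementation is created.
 */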
78
79 /* ----------------------------------------------------------------------
80  * SSE2 Inlines
81  */
82 static force_inline __m128i
83 unpack_32_1x128 (uint32_t data)
84 {
85     return _mm_unpacklo_epi8 (_mm_cvtsi32_si128 (data), _mm_setzero_si128 ());
86 }
87
88 static force_inline void
89 unpack_128_2x128 (__m128i data, __m128i* data_lo, __m128i* data_hi)
90 {
91     *data_lo = _mm_unpacklo_epi8 (data, _mm_setzero_si128 ());
92     *data_hi = _mm_unpackhi_epi8 (data, _mm_setzero_si128 ());
93 }
94
95 static force_inline __m128i
96 unpack_565_to_8888 (__m128i lo)
97 {
98     __m128i r, g, b, rb, t;
99
100     r = _mm_and_si128 (_mm_slli_epi32 (lo, 8), mask_red);
101     g = _mm_and_si128 (_mm_slli_epi32 (lo, 5), mask_green);
102     b = _mm_and_si128 (_mm_slli_epi32 (lo, 3), mask_blue);
103
104     rb = _mm_or_si128 (r, b);
105     t  = _mm_and_si128 (rb, mask_565_fix_rb);
106     t  = _mm_srli_epi32 (t, 5);
107     rb = _mm_or_si128 (rb, t);
108
109     t  = _mm_and_si128 (g, mask_565_fix_g);
110     t  = _mm_srli_epi32 (t, 6);
111     g  = _mm_or_si128 (g, t);
112
113     return _mm_or_si128 (rb, g);
114 }
115
116 static force_inline void
117 unpack_565_128_4x128 (__m128i  data,
118                       __m128i* data0,
119                       __m128i* data1,
120                       __m128i* data2,
121                       __m128i* data3)
122 {
123     __m128i lo, hi;
124
125     lo = _mm_unpacklo_epi16 (data, _mm_setzero_si128 ());
126     hi = _mm_unpackhi_epi16 (data, _mm_setzero_si128 ());
127
128     lo = unpack_565_to_8888 (lo);
129     hi = unpack_565_to_8888 (hi);
130
131     unpack_128_2x128 (lo, data0, data1);
132     unpack_128_2x128 (hi, data2, data3);
133 }
134
135 static force_inline uint16_t
136 pack_565_32_16 (uint32_t pixel)
137 {
138     return (uint16_t) (((pixel >> 8) & 0xf800) |
139                        ((pixel >> 5) & 0x07e0) |
140                        ((pixel >> 3) & 0x001f));
141 }
142
143 static force_inline __m128i
144 pack_2x128_128 (__m128i lo, __m128i hi)
145 {
146     return _mm_packus_epi16 (lo, hi);
147 }
148
149 static force_inline __m128i
150 pack_565_2x128_128 (__m128i lo, __m128i hi)
151 {
152     __m128i data;
153     __m128i r, g1, g2, b;
154
155     data = pack_2x128_128 (lo, hi);
156
157     r  = _mm_and_si128 (data, mask_565_r);
158     g1 = _mm_and_si128 (_mm_slli_epi32 (data, 3), mask_565_g1);
159     g2 = _mm_and_si128 (_mm_srli_epi32 (data, 5), mask_565_g2);
160     b  = _mm_and_si128 (_mm_srli_epi32 (data, 3), mask_565_b);
161
162     return _mm_or_si128 (_mm_or_si128 (_mm_or_si128 (r, g1), g2), b);
163 }
164
165 static force_inline __m128i
166 pack_565_4x128_128 (__m128i* xmm0, __m128i* xmm1, __m128i* xmm2, __m128i* xmm3)
167 {
168     return _mm_packus_epi16 (pack_565_2x128_128 (*xmm0, *xmm1),
169                              pack_565_2x128_128 (*xmm2, *xmm3));
170 }
171
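/* is_opaque and is_transparent below look only at the alpha byte of
 * each of the four pixels: _mm_movemask_epi8 collects the top bit of
 * every byte, and the 0x8888 mask keeps the bits belonging to byte 3 of
 * each 32-bit pixel.  is_zero, by contrast, requires every byte to be
 * zero.
 */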
172 static force_inline int
173 is_opaque (__m128i x)
174 {
175     __m128i ffs = _mm_cmpeq_epi8 (x, x);
176
177     return (_mm_movemask_epi8 (_mm_cmpeq_epi8 (x, ffs)) & 0x8888) == 0x8888;
178 }
179
180 static force_inline int
181 is_zero (__m128i x)
182 {
183     return _mm_movemask_epi8 (
184         _mm_cmpeq_epi8 (x, _mm_setzero_si128 ())) == 0xffff;
185 }
186
187 static force_inline int
188 is_transparent (__m128i x)
189 {
190     return (_mm_movemask_epi8 (
191                 _mm_cmpeq_epi8 (x, _mm_setzero_si128 ())) & 0x8888) == 0x8888;
192 }
193
194 static force_inline __m128i
195 expand_pixel_32_1x128 (uint32_t data)
196 {
197     return _mm_shuffle_epi32 (unpack_32_1x128 (data), _MM_SHUFFLE (1, 0, 1, 0));
198 }
199
200 static force_inline __m128i
201 expand_alpha_1x128 (__m128i data)
202 {
203     return _mm_shufflehi_epi16 (_mm_shufflelo_epi16 (data,
204                                                      _MM_SHUFFLE (3, 3, 3, 3)),
205                                 _MM_SHUFFLE (3, 3, 3, 3));
206 }
207
208 static force_inline void
209 expand_alpha_2x128 (__m128i  data_lo,
210                     __m128i  data_hi,
211                     __m128i* alpha_lo,
212                     __m128i* alpha_hi)
213 {
214     __m128i lo, hi;
215
216     lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE (3, 3, 3, 3));
217     hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE (3, 3, 3, 3));
218
219     *alpha_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE (3, 3, 3, 3));
220     *alpha_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE (3, 3, 3, 3));
221 }
222
223 static force_inline void
224 expand_alpha_rev_2x128 (__m128i  data_lo,
225                         __m128i  data_hi,
226                         __m128i* alpha_lo,
227                         __m128i* alpha_hi)
228 {
229     __m128i lo, hi;
230
231     lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE (0, 0, 0, 0));
232     hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE (0, 0, 0, 0));
233     *alpha_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE (0, 0, 0, 0));
234     *alpha_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE (0, 0, 0, 0));
235 }
236
237 static force_inline void
238 pix_multiply_2x128 (__m128i* data_lo,
239                     __m128i* data_hi,
240                     __m128i* alpha_lo,
241                     __m128i* alpha_hi,
242                     __m128i* ret_lo,
243                     __m128i* ret_hi)
244 {
245     __m128i lo, hi;
246
247     lo = _mm_mullo_epi16 (*data_lo, *alpha_lo);
248     hi = _mm_mullo_epi16 (*data_hi, *alpha_hi);
249     lo = _mm_adds_epu16 (lo, mask_0080);
250     hi = _mm_adds_epu16 (hi, mask_0080);
251     *ret_lo = _mm_mulhi_epu16 (lo, mask_0101);
252     *ret_hi = _mm_mulhi_epu16 (hi, mask_0101);
253 }
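
/* For reference only (not compiled): the scalar equivalent of the
 * multiply above.  Adding mask_0080 (0x0080 per lane) and taking the
 * high half of a multiply by mask_0101 (0x0101 per lane) computes
 * ((x * a + 0x80) * 0x0101) >> 16, which is an exact, rounded division
 * of x * a by 255.
 */
#if 0
static uint8_t
mul_un8_sketch (uint8_t x, uint8_t a)
{
    uint16_t t = (uint16_t) (x * a) + 0x80;

    return (uint8_t) ((t + (t >> 8)) >> 8);
}
#endif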
254
255 static force_inline void
256 pix_add_multiply_2x128 (__m128i* src_lo,
257                         __m128i* src_hi,
258                         __m128i* alpha_dst_lo,
259                         __m128i* alpha_dst_hi,
260                         __m128i* dst_lo,
261                         __m128i* dst_hi,
262                         __m128i* alpha_src_lo,
263                         __m128i* alpha_src_hi,
264                         __m128i* ret_lo,
265                         __m128i* ret_hi)
266 {
267     __m128i lo, hi;
268     __m128i mul_lo, mul_hi;
269
270     lo = _mm_mullo_epi16 (*src_lo, *alpha_dst_lo);
271     hi = _mm_mullo_epi16 (*src_hi, *alpha_dst_hi);
272     mul_lo = _mm_mullo_epi16 (*dst_lo, *alpha_src_lo);
273     mul_hi = _mm_mullo_epi16 (*dst_hi, *alpha_src_hi);
274     lo = _mm_adds_epu16 (lo, mask_0080);
275     hi = _mm_adds_epu16 (hi, mask_0080);
276     lo = _mm_adds_epu16 (lo, mul_lo);
277     hi = _mm_adds_epu16 (hi, mul_hi);
278     *ret_lo = _mm_mulhi_epu16 (lo, mask_0101);
279     *ret_hi = _mm_mulhi_epu16 (hi, mask_0101);
280 }
281
282 static force_inline void
283 negate_2x128 (__m128i  data_lo,
284               __m128i  data_hi,
285               __m128i* neg_lo,
286               __m128i* neg_hi)
287 {
288     *neg_lo = _mm_xor_si128 (data_lo, mask_00ff);
289     *neg_hi = _mm_xor_si128 (data_hi, mask_00ff);
290 }
291
292 static force_inline void
293 invert_colors_2x128 (__m128i  data_lo,
294                      __m128i  data_hi,
295                      __m128i* inv_lo,
296                      __m128i* inv_hi)
297 {
298     __m128i lo, hi;
299
300     lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE (3, 0, 1, 2));
301     hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE (3, 0, 1, 2));
302     *inv_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE (3, 0, 1, 2));
303     *inv_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE (3, 0, 1, 2));
304 }
305
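/* The premultiplied OVER operator on two pairs of unpacked pixels:
 * per channel, dst = src + dst * (255 - alpha) / 255, with the
 * division by 255 performed by pix_multiply_2x128.
 */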
306 static force_inline void
307 over_2x128 (__m128i* src_lo,
308             __m128i* src_hi,
309             __m128i* alpha_lo,
310             __m128i* alpha_hi,
311             __m128i* dst_lo,
312             __m128i* dst_hi)
313 {
314     __m128i t1, t2;
315
316     negate_2x128 (*alpha_lo, *alpha_hi, &t1, &t2);
317
318     pix_multiply_2x128 (dst_lo, dst_hi, &t1, &t2, dst_lo, dst_hi);
319
320     *dst_lo = _mm_adds_epu8 (*src_lo, *dst_lo);
321     *dst_hi = _mm_adds_epu8 (*src_hi, *dst_hi);
322 }
323
324 static force_inline void
325 over_rev_non_pre_2x128 (__m128i  src_lo,
326                         __m128i  src_hi,
327                         __m128i* dst_lo,
328                         __m128i* dst_hi)
329 {
330     __m128i lo, hi;
331     __m128i alpha_lo, alpha_hi;
332
333     expand_alpha_2x128 (src_lo, src_hi, &alpha_lo, &alpha_hi);
334
335     lo = _mm_or_si128 (alpha_lo, mask_alpha);
336     hi = _mm_or_si128 (alpha_hi, mask_alpha);
337
338     invert_colors_2x128 (src_lo, src_hi, &src_lo, &src_hi);
339
340     pix_multiply_2x128 (&src_lo, &src_hi, &lo, &hi, &lo, &hi);
341
342     over_2x128 (&lo, &hi, &alpha_lo, &alpha_hi, dst_lo, dst_hi);
343 }
344
345 static force_inline void
346 in_over_2x128 (__m128i* src_lo,
347                __m128i* src_hi,
348                __m128i* alpha_lo,
349                __m128i* alpha_hi,
350                __m128i* mask_lo,
351                __m128i* mask_hi,
352                __m128i* dst_lo,
353                __m128i* dst_hi)
354 {
355     __m128i s_lo, s_hi;
356     __m128i a_lo, a_hi;
357
358     pix_multiply_2x128 (src_lo,   src_hi, mask_lo, mask_hi, &s_lo, &s_hi);
359     pix_multiply_2x128 (alpha_lo, alpha_hi, mask_lo, mask_hi, &a_lo, &a_hi);
360
361     over_2x128 (&s_lo, &s_hi, &a_lo, &a_hi, dst_lo, dst_hi);
362 }
363
364 static force_inline void
365 cache_prefetch (__m128i* addr)
366 {
367     _mm_prefetch (addr, _MM_HINT_T0);
368 }
369
370 static force_inline void
371 cache_prefetch_next (__m128i* addr)
372 {
373     _mm_prefetch (addr + 4, _MM_HINT_T0); /* 64 bytes ahead */
374 }
375
376 /* load 4 pixels from a 16-byte-aligned address */
377 static force_inline __m128i
378 load_128_aligned (__m128i* src)
379 {
380     return _mm_load_si128 (src);
381 }
382
383 /* load 4 pixels from an unaligned address */
384 static force_inline __m128i
385 load_128_unaligned (const __m128i* src)
386 {
387     return _mm_loadu_si128 (src);
388 }
389
390 /* save 4 pixels using write-combining (non-temporal) stores to a
391  * 16-byte-aligned address
392  */
393 static force_inline void
394 save_128_write_combining (__m128i* dst,
395                           __m128i  data)
396 {
397     _mm_stream_si128 (dst, data);
398 }
399
400 /* save 4 pixels to a 16-byte-aligned address */
401 static force_inline void
402 save_128_aligned (__m128i* dst,
403                   __m128i  data)
404 {
405     _mm_store_si128 (dst, data);
406 }
407
408 /* save 4 pixels to an unaligned address */
409 static force_inline void
410 save_128_unaligned (__m128i* dst,
411                     __m128i  data)
412 {
413     _mm_storeu_si128 (dst, data);
414 }
415
416 /* ------------------------------------------------------------------
417  * MMX inlines
418  */
419
420 static force_inline __m64
421 unpack_32_1x64 (uint32_t data)
422 {
423     return _mm_unpacklo_pi8 (_mm_cvtsi32_si64 (data), _mm_setzero_si64 ());
424 }
425
426 static force_inline __m64
427 expand_alpha_1x64 (__m64 data)
428 {
429     return _mm_shuffle_pi16 (data, _MM_SHUFFLE (3, 3, 3, 3));
430 }
431
432 static force_inline __m64
433 expand_alpha_rev_1x64 (__m64 data)
434 {
435     return _mm_shuffle_pi16 (data, _MM_SHUFFLE (0, 0, 0, 0));
436 }
437
438 static force_inline __m64
439 expand_pixel_8_1x64 (uint8_t data)
440 {
441     return _mm_shuffle_pi16 (
442         unpack_32_1x64 ((uint32_t)data), _MM_SHUFFLE (0, 0, 0, 0));
443 }
444
445 static force_inline __m64
446 pix_multiply_1x64 (__m64 data,
447                    __m64 alpha)
448 {
449     return _mm_mulhi_pu16 (_mm_adds_pu16 (_mm_mullo_pi16 (data, alpha),
450                                           mask_x0080),
451                            mask_x0101);
452 }
453
454 static force_inline __m64
455 pix_add_multiply_1x64 (__m64* src,
456                        __m64* alpha_dst,
457                        __m64* dst,
458                        __m64* alpha_src)
459 {
460     return _mm_mulhi_pu16 (
461         _mm_adds_pu16 (_mm_adds_pu16 (_mm_mullo_pi16 (*src, *alpha_dst),
462                                       mask_x0080),
463                        _mm_mullo_pi16 (*dst, *alpha_src)),
464         mask_x0101);
465 }
466
467 static force_inline __m64
468 negate_1x64 (__m64 data)
469 {
470     return _mm_xor_si64 (data, mask_x00ff);
471 }
472
473 static force_inline __m64
474 invert_colors_1x64 (__m64 data)
475 {
476     return _mm_shuffle_pi16 (data, _MM_SHUFFLE (3, 0, 1, 2));
477 }
478
479 static force_inline __m64
480 over_1x64 (__m64 src, __m64 alpha, __m64 dst)
481 {
482     return _mm_adds_pu8 (src, pix_multiply_1x64 (dst, negate_1x64 (alpha)));
483 }
484
485 static force_inline __m64
486 in_over_1x64 (__m64* src, __m64* alpha, __m64* mask, __m64* dst)
487 {
488     return over_1x64 (pix_multiply_1x64 (*src, *mask),
489                       pix_multiply_1x64 (*alpha, *mask),
490                       *dst);
491 }
492
493 static force_inline __m64
494 over_rev_non_pre_1x64 (__m64 src, __m64 dst)
495 {
496     __m64 alpha = expand_alpha_1x64 (src);
497
498     return over_1x64 (pix_multiply_1x64 (invert_colors_1x64 (src),
499                                          _mm_or_si64 (alpha, mask_x_alpha)),
500                       alpha,
501                       dst);
502 }
503
504 static force_inline uint32_t
505 pack_1x64_32 (__m64 data)
506 {
507     return _mm_cvtsi64_si32 (_mm_packs_pu16 (data, _mm_setzero_si64 ()));
508 }
509
510 /* Expand the 565 pixel in the low 16 bits of an MMX register into
511  *
512  *    00RR00GG00BB
513  *
514  * --- Expanding 565 in the low word ---
515  *
516  * m = (m << (32 - 3)) | (m << (16 - 5)) | m;
517  * m = m & (01f0003f001f);
518  * m = m * (008404100840);
519  * m = m >> 8;
520  *
521  * Note the trick here - the top word is shifted by another nibble to
522  * avoid it bumping into the middle word
523  */
524 static force_inline __m64
525 expand565_16_1x64 (uint16_t pixel)
526 {
527     __m64 p;
528     __m64 t1, t2;
529
530     p = _mm_cvtsi32_si64 ((uint32_t) pixel);
531
532     t1 = _mm_slli_si64 (p, 36 - 11);
533     t2 = _mm_slli_si64 (p, 16 - 5);
534
535     p = _mm_or_si64 (t1, p);
536     p = _mm_or_si64 (t2, p);
537     p = _mm_and_si64 (p, mask_x565_rgb);
538     p = _mm_mullo_pi16 (p, mask_x565_unpack);
539
540     return _mm_srli_pi16 (p, 8);
541 }
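
/* For reference only (not compiled): a plain scalar version of the same
 * r5g6b5 expansion.  The top bits of each field are replicated into the
 * low bits so that 0x1f and 0x3f expand to 0xff.  Note that the MMX
 * version above leaves each channel in its own 16-bit lane, whereas
 * this sketch packs the result into a 32-bit xRGB value.
 */
#if 0
static uint32_t
expand565_sketch (uint16_t p)
{
    uint32_t r = (p >> 11) & 0x1f;
    uint32_t g = (p >> 5) & 0x3f;
    uint32_t b = p & 0x1f;

    r = (r << 3) | (r >> 2);
    g = (g << 2) | (g >> 4);
    b = (b << 3) | (b >> 2);

    return (r << 16) | (g << 8) | b;
}
#endif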
542
543 /* ----------------------------------------------------------------------------
544  * Compose Core transformations
545  */
546 static force_inline uint32_t
547 core_combine_over_u_pixel_sse2 (uint32_t src, uint32_t dst)
548 {
549     uint8_t a;
550     __m64 ms;
551
552     a = src >> 24;
553
554     if (a == 0xff)
555     {
556         return src;
557     }
558     else if (src)
559     {
560         ms = unpack_32_1x64 (src);
561         return pack_1x64_32 (
562             over_1x64 (ms, expand_alpha_1x64 (ms), unpack_32_1x64 (dst)));
563     }
564
565     return dst;
566 }
567
568 static force_inline uint32_t
569 combine1 (const uint32_t *ps, const uint32_t *pm)
570 {
571     uint32_t s = *ps;
572
573     if (pm)
574     {
575         __m64 ms, mm;
576
577         mm = unpack_32_1x64 (*pm);
578         mm = expand_alpha_1x64 (mm);
579
580         ms = unpack_32_1x64 (s);
581         ms = pix_multiply_1x64 (ms, mm);
582
583         s = pack_1x64_32 (ms);
584     }
585
586     return s;
587 }
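
/* combine1 and combine4 (below) apply a unified-alpha mask to the
 * source: each source pixel is multiplied by the alpha of the
 * corresponding mask pixel.  With no mask the source is returned
 * unchanged; combine4 additionally returns zero early when all four
 * mask pixels have zero alpha.
 */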
588
589 static force_inline __m128i
590 combine4 (const __m128i *ps, const __m128i *pm)
591 {
592     __m128i xmm_src_lo, xmm_src_hi;
593     __m128i xmm_msk_lo, xmm_msk_hi;
594     __m128i s;
595
596     if (pm)
597     {
598         xmm_msk_lo = load_128_unaligned (pm);
599
600         if (is_transparent (xmm_msk_lo))
601             return _mm_setzero_si128 ();
602     }
603
604     s = load_128_unaligned (ps);
605
606     if (pm)
607     {
608         unpack_128_2x128 (s, &xmm_src_lo, &xmm_src_hi);
609         unpack_128_2x128 (xmm_msk_lo, &xmm_msk_lo, &xmm_msk_hi);
610
611         expand_alpha_2x128 (xmm_msk_lo, xmm_msk_hi, &xmm_msk_lo, &xmm_msk_hi);
612
613         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
614                             &xmm_msk_lo, &xmm_msk_hi,
615                             &xmm_src_lo, &xmm_src_hi);
616
617         s = pack_2x128_128 (xmm_src_lo, xmm_src_hi);
618     }
619
620     return s;
621 }
622
623 static force_inline void
624 core_combine_over_u_sse2 (uint32_t*       pd,
625                           const uint32_t* ps,
626                           const uint32_t* pm,
627                           int             w)
628 {
629     uint32_t s, d;
630
631     __m128i xmm_dst_lo, xmm_dst_hi;
632     __m128i xmm_src_lo, xmm_src_hi;
633     __m128i xmm_alpha_lo, xmm_alpha_hi;
634
635     /* call prefetch hint to optimize cache load*/
636     cache_prefetch ((__m128i*)ps);
637     cache_prefetch ((__m128i*)pd);
638     cache_prefetch ((__m128i*)pm);
639
640     /* Align dst on a 16-byte boundary */
641     while (w && ((unsigned long)pd & 15))
642     {
643         d = *pd;
644         s = combine1 (ps, pm);
645
646         *pd++ = core_combine_over_u_pixel_sse2 (s, d);
647         ps++;
648         if (pm)
649             pm++;
650         w--;
651     }
652
653     /* call prefetch hint to optimize cache load*/
654     cache_prefetch ((__m128i*)ps);
655     cache_prefetch ((__m128i*)pd);
656     cache_prefetch ((__m128i*)pm);
657
658     while (w >= 4)
659     {
660         /* fill cache line with next memory */
661         cache_prefetch_next ((__m128i*)ps);
662         cache_prefetch_next ((__m128i*)pd);
663         cache_prefetch_next ((__m128i*)pm);
664
665         /* I'm loading unaligned because I'm not sure about
666          * the address alignment.
667          */
668         xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
669
670         if (is_opaque (xmm_src_hi))
671         {
672             save_128_aligned ((__m128i*)pd, xmm_src_hi);
673         }
674         else if (!is_zero (xmm_src_hi))
675         {
676             xmm_dst_hi = load_128_aligned ((__m128i*) pd);
677
678             unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
679             unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
680
681             expand_alpha_2x128 (
682                 xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi);
683
684             over_2x128 (&xmm_src_lo, &xmm_src_hi,
685                         &xmm_alpha_lo, &xmm_alpha_hi,
686                         &xmm_dst_lo, &xmm_dst_hi);
687
688             /* rebuild the 4 pixel data and save */
689             save_128_aligned ((__m128i*)pd,
690                               pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
691         }
692
693         w -= 4;
694         ps += 4;
695         pd += 4;
696         if (pm)
697             pm += 4;
698     }
699
700     while (w)
701     {
702         d = *pd;
703         s = combine1 (ps, pm);
704
705         *pd++ = core_combine_over_u_pixel_sse2 (s, d);
706         ps++;
707         if (pm)
708             pm++;
709
710         w--;
711     }
712 }
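
/* The combiners in this file all follow the three-phase pattern used
 * above: a scalar head loop that runs until pd reaches a 16-byte
 * boundary, a main loop that handles four pixels per iteration with the
 * 128-bit helpers, and a scalar tail loop for the leftover pixels.
 */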
713
714 static force_inline void
715 core_combine_over_reverse_u_sse2 (uint32_t*       pd,
716                                   const uint32_t* ps,
717                                   const uint32_t* pm,
718                                   int             w)
719 {
720     uint32_t s, d;
721
722     __m128i xmm_dst_lo, xmm_dst_hi;
723     __m128i xmm_src_lo, xmm_src_hi;
724     __m128i xmm_alpha_lo, xmm_alpha_hi;
725
726     /* call prefetch hint to optimize cache load*/
727     cache_prefetch ((__m128i*)ps);
728     cache_prefetch ((__m128i*)pd);
729     cache_prefetch ((__m128i*)pm);
730
731     /* Align dst on a 16-byte boundary */
732     while (w &&
733            ((unsigned long)pd & 15))
734     {
735         d = *pd;
736         s = combine1 (ps, pm);
737
738         *pd++ = core_combine_over_u_pixel_sse2 (d, s);
739         w--;
740         ps++;
741         if (pm)
742             pm++;
743     }
744
745     /* call prefetch hint to optimize cache load*/
746     cache_prefetch ((__m128i*)ps);
747     cache_prefetch ((__m128i*)pd);
748     cache_prefetch ((__m128i*)pm);
749
750     while (w >= 4)
751     {
752         /* fill cache line with next memory */
753         cache_prefetch_next ((__m128i*)ps);
754         cache_prefetch_next ((__m128i*)pd);
755         cache_prefetch_next ((__m128i*)pm);
756
757         /* I'm loading unaligned because I'm not sure
758          * about the address alignment.
759          */
760         xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
761         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
762
763         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
764         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
765
766         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
767                             &xmm_alpha_lo, &xmm_alpha_hi);
768
769         over_2x128 (&xmm_dst_lo, &xmm_dst_hi,
770                     &xmm_alpha_lo, &xmm_alpha_hi,
771                     &xmm_src_lo, &xmm_src_hi);
772
773         /* rebuild the 4 pixel data and save */
774         save_128_aligned ((__m128i*)pd,
775                           pack_2x128_128 (xmm_src_lo, xmm_src_hi));
776
777         w -= 4;
778         ps += 4;
779         pd += 4;
780
781         if (pm)
782             pm += 4;
783     }
784
785     while (w)
786     {
787         d = *pd;
788         s = combine1 (ps, pm);
789
790         *pd++ = core_combine_over_u_pixel_sse2 (d, s);
791         ps++;
792         w--;
793         if (pm)
794             pm++;
795     }
796 }
797
798 static force_inline uint32_t
799 core_combine_in_u_pixelsse2 (uint32_t src, uint32_t dst)
800 {
801     uint32_t maska = src >> 24;
802
803     if (maska == 0)
804     {
805         return 0;
806     }
807     else if (maska != 0xff)
808     {
809         return pack_1x64_32 (
810             pix_multiply_1x64 (unpack_32_1x64 (dst),
811                                expand_alpha_1x64 (unpack_32_1x64 (src))));
812     }
813
814     return dst;
815 }
816
817 static force_inline void
818 core_combine_in_u_sse2 (uint32_t*       pd,
819                         const uint32_t* ps,
820                         const uint32_t* pm,
821                         int             w)
822 {
823     uint32_t s, d;
824
825     __m128i xmm_src_lo, xmm_src_hi;
826     __m128i xmm_dst_lo, xmm_dst_hi;
827
828     /* call prefetch hint to optimize cache load*/
829     cache_prefetch ((__m128i*)ps);
830     cache_prefetch ((__m128i*)pd);
831     cache_prefetch ((__m128i*)pm);
832
833     while (w && ((unsigned long) pd & 15))
834     {
835         s = combine1 (ps, pm);
836         d = *pd;
837
838         *pd++ = core_combine_in_u_pixelsse2 (d, s);
839         w--;
840         ps++;
841         if (pm)
842             pm++;
843     }
844
845     /* call prefetch hint to optimize cache load*/
846     cache_prefetch ((__m128i*)ps);
847     cache_prefetch ((__m128i*)pd);
848     cache_prefetch ((__m128i*)pm);
849
850     while (w >= 4)
851     {
852         /* fill cache line with next memory */
853         cache_prefetch_next ((__m128i*)ps);
854         cache_prefetch_next ((__m128i*)pd);
855         cache_prefetch_next ((__m128i*)pm);
856
857         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
858         xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*) pm);
859
860         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
861         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
862
863         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
864         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
865                             &xmm_dst_lo, &xmm_dst_hi,
866                             &xmm_dst_lo, &xmm_dst_hi);
867
868         save_128_aligned ((__m128i*)pd,
869                           pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
870
871         ps += 4;
872         pd += 4;
873         w -= 4;
874         if (pm)
875             pm += 4;
876     }
877
878     while (w)
879     {
880         s = combine1 (ps, pm);
881         d = *pd;
882
883         *pd++ = core_combine_in_u_pixelsse2 (d, s);
884         w--;
885         ps++;
886         if (pm)
887             pm++;
888     }
889 }
890
891 static force_inline void
892 core_combine_reverse_in_u_sse2 (uint32_t*       pd,
893                                 const uint32_t* ps,
894                                 const uint32_t *pm,
895                                 int             w)
896 {
897     uint32_t s, d;
898
899     __m128i xmm_src_lo, xmm_src_hi;
900     __m128i xmm_dst_lo, xmm_dst_hi;
901
902     /* call prefetch hint to optimize cache load*/
903     cache_prefetch ((__m128i*)ps);
904     cache_prefetch ((__m128i*)pd);
905     cache_prefetch ((__m128i*)pm);
906
907     while (w && ((unsigned long) pd & 15))
908     {
909         s = combine1 (ps, pm);
910         d = *pd;
911
912         *pd++ = core_combine_in_u_pixelsse2 (s, d);
913         ps++;
914         w--;
915         if (pm)
916             pm++;
917     }
918
919     /* call prefetch hint to optimize cache load*/
920     cache_prefetch ((__m128i*)ps);
921     cache_prefetch ((__m128i*)pd);
922     cache_prefetch ((__m128i*)pm);
923
924     while (w >= 4)
925     {
926         /* fill cache line with next memory */
927         cache_prefetch_next ((__m128i*)ps);
928         cache_prefetch_next ((__m128i*)pd);
929         cache_prefetch_next ((__m128i*)pm);
930
931         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
932         xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*)pm);
933
934         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
935         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
936
937         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
938         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
939                             &xmm_src_lo, &xmm_src_hi,
940                             &xmm_dst_lo, &xmm_dst_hi);
941
942         save_128_aligned (
943             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
944
945         ps += 4;
946         pd += 4;
947         w -= 4;
948         if (pm)
949             pm += 4;
950     }
951
952     while (w)
953     {
954         s = combine1 (ps, pm);
955         d = *pd;
956
957         *pd++ = core_combine_in_u_pixelsse2 (s, d);
958         w--;
959         ps++;
960         if (pm)
961             pm++;
962     }
963 }
964
965 static force_inline void
966 core_combine_reverse_out_u_sse2 (uint32_t*       pd,
967                                  const uint32_t* ps,
968                                  const uint32_t* pm,
969                                  int             w)
970 {
971     /* call prefetch hint to optimize cache load*/
972     cache_prefetch ((__m128i*)ps);
973     cache_prefetch ((__m128i*)pd);
974     cache_prefetch ((__m128i*)pm);
975
976     while (w && ((unsigned long) pd & 15))
977     {
978         uint32_t s = combine1 (ps, pm);
979         uint32_t d = *pd;
980
981         *pd++ = pack_1x64_32 (
982             pix_multiply_1x64 (
983                 unpack_32_1x64 (d), negate_1x64 (
984                     expand_alpha_1x64 (unpack_32_1x64 (s)))));
985
986         if (pm)
987             pm++;
988         ps++;
989         w--;
990     }
991
992     /* call prefetch hint to optimize cache load*/
993     cache_prefetch ((__m128i*)ps);
994     cache_prefetch ((__m128i*)pd);
995     cache_prefetch ((__m128i*)pm);
996
997     while (w >= 4)
998     {
999         __m128i xmm_src_lo, xmm_src_hi;
1000         __m128i xmm_dst_lo, xmm_dst_hi;
1001
1002         /* fill cache line with next memory */
1003         cache_prefetch_next ((__m128i*)ps);
1004         cache_prefetch_next ((__m128i*)pd);
1005         cache_prefetch_next ((__m128i*)pm);
1006
1007         xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
1008         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
1009
1010         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1011         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1012
1013         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1014         negate_2x128       (xmm_src_lo, xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1015
1016         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
1017                             &xmm_src_lo, &xmm_src_hi,
1018                             &xmm_dst_lo, &xmm_dst_hi);
1019
1020         save_128_aligned (
1021             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1022
1023         ps += 4;
1024         pd += 4;
1025         if (pm)
1026             pm += 4;
1027
1028         w -= 4;
1029     }
1030
1031     while (w)
1032     {
1033         uint32_t s = combine1 (ps, pm);
1034         uint32_t d = *pd;
1035
1036         *pd++ = pack_1x64_32 (
1037             pix_multiply_1x64 (
1038                 unpack_32_1x64 (d), negate_1x64 (
1039                     expand_alpha_1x64 (unpack_32_1x64 (s)))));
1040         ps++;
1041         if (pm)
1042             pm++;
1043         w--;
1044     }
1045 }
1046
1047 static force_inline void
1048 core_combine_out_u_sse2 (uint32_t*       pd,
1049                          const uint32_t* ps,
1050                          const uint32_t* pm,
1051                          int             w)
1052 {
1053     /* call prefetch hint to optimize cache load*/
1054     cache_prefetch ((__m128i*)ps);
1055     cache_prefetch ((__m128i*)pd);
1056     cache_prefetch ((__m128i*)pm);
1057
1058     while (w && ((unsigned long) pd & 15))
1059     {
1060         uint32_t s = combine1 (ps, pm);
1061         uint32_t d = *pd;
1062
1063         *pd++ = pack_1x64_32 (
1064             pix_multiply_1x64 (
1065                 unpack_32_1x64 (s), negate_1x64 (
1066                     expand_alpha_1x64 (unpack_32_1x64 (d)))));
1067         w--;
1068         ps++;
1069         if (pm)
1070             pm++;
1071     }
1072
1073     /* call prefetch hint to optimize cache load*/
1074     cache_prefetch ((__m128i*)ps);
1075     cache_prefetch ((__m128i*)pd);
1076     cache_prefetch ((__m128i*)pm);
1077
1078     while (w >= 4)
1079     {
1080         __m128i xmm_src_lo, xmm_src_hi;
1081         __m128i xmm_dst_lo, xmm_dst_hi;
1082
1083         /* fill cache line with next memory */
1084         cache_prefetch_next ((__m128i*)ps);
1085         cache_prefetch_next ((__m128i*)pd);
1086         cache_prefetch_next ((__m128i*)pm);
1087
1088         xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*)pm);
1089         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
1090
1091         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1092         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1093
1094         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1095         negate_2x128       (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1096
1097         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
1098                             &xmm_dst_lo, &xmm_dst_hi,
1099                             &xmm_dst_lo, &xmm_dst_hi);
1100
1101         save_128_aligned (
1102             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1103
1104         ps += 4;
1105         pd += 4;
1106         w -= 4;
1107         if (pm)
1108             pm += 4;
1109     }
1110
1111     while (w)
1112     {
1113         uint32_t s = combine1 (ps, pm);
1114         uint32_t d = *pd;
1115
1116         *pd++ = pack_1x64_32 (
1117             pix_multiply_1x64 (
1118                 unpack_32_1x64 (s), negate_1x64 (
1119                     expand_alpha_1x64 (unpack_32_1x64 (d)))));
1120         w--;
1121         ps++;
1122         if (pm)
1123             pm++;
1124     }
1125 }
1126
1127 static force_inline uint32_t
1128 core_combine_atop_u_pixel_sse2 (uint32_t src,
1129                                 uint32_t dst)
1130 {
1131     __m64 s = unpack_32_1x64 (src);
1132     __m64 d = unpack_32_1x64 (dst);
1133
1134     __m64 sa = negate_1x64 (expand_alpha_1x64 (s));
1135     __m64 da = expand_alpha_1x64 (d);
1136
1137     return pack_1x64_32 (pix_add_multiply_1x64 (&s, &da, &d, &sa));
1138 }
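
/* ATOP per channel:
 * result = (src * dst_alpha + dst * (255 - src_alpha)) / 255,
 * with the two products and the division done by pix_add_multiply_1x64.
 */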
1139
1140 static force_inline void
1141 core_combine_atop_u_sse2 (uint32_t*       pd,
1142                           const uint32_t* ps,
1143                           const uint32_t* pm,
1144                           int             w)
1145 {
1146     uint32_t s, d;
1147
1148     __m128i xmm_src_lo, xmm_src_hi;
1149     __m128i xmm_dst_lo, xmm_dst_hi;
1150     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
1151     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
1152
1153     /* call prefetch hint to optimize cache load*/
1154     cache_prefetch ((__m128i*)ps);
1155     cache_prefetch ((__m128i*)pd);
1156     cache_prefetch ((__m128i*)pm);
1157
1158     while (w && ((unsigned long) pd & 15))
1159     {
1160         s = combine1 (ps, pm);
1161         d = *pd;
1162
1163         *pd++ = core_combine_atop_u_pixel_sse2 (s, d);
1164         w--;
1165         ps++;
1166         if (pm)
1167             pm++;
1168     }
1169
1170     /* call prefetch hint to optimize cache load*/
1171     cache_prefetch ((__m128i*)ps);
1172     cache_prefetch ((__m128i*)pd);
1173     cache_prefetch ((__m128i*)pm);
1174
1175     while (w >= 4)
1176     {
1177         /* fill cache line with next memory */
1178         cache_prefetch_next ((__m128i*)ps);
1179         cache_prefetch_next ((__m128i*)pd);
1180         cache_prefetch_next ((__m128i*)pm);
1181
1182         xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
1183         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
1184
1185         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1186         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1187
1188         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
1189                             &xmm_alpha_src_lo, &xmm_alpha_src_hi);
1190         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
1191                             &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
1192
1193         negate_2x128 (xmm_alpha_src_lo, xmm_alpha_src_hi,
1194                       &xmm_alpha_src_lo, &xmm_alpha_src_hi);
1195
1196         pix_add_multiply_2x128 (
1197             &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
1198             &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi,
1199             &xmm_dst_lo, &xmm_dst_hi);
1200
1201         save_128_aligned (
1202             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1203
1204         ps += 4;
1205         pd += 4;
1206         w -= 4;
1207         if (pm)
1208             pm += 4;
1209     }
1210
1211     while (w)
1212     {
1213         s = combine1 (ps, pm);
1214         d = *pd;
1215
1216         *pd++ = core_combine_atop_u_pixel_sse2 (s, d);
1217         w--;
1218         ps++;
1219         if (pm)
1220             pm++;
1221     }
1222 }
1223
1224 static force_inline uint32_t
1225 core_combine_reverse_atop_u_pixel_sse2 (uint32_t src,
1226                                         uint32_t dst)
1227 {
1228     __m64 s = unpack_32_1x64 (src);
1229     __m64 d = unpack_32_1x64 (dst);
1230
1231     __m64 sa = expand_alpha_1x64 (s);
1232     __m64 da = negate_1x64 (expand_alpha_1x64 (d));
1233
1234     return pack_1x64_32 (pix_add_multiply_1x64 (&s, &da, &d, &sa));
1235 }
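
/* ATOP reverse per channel:
 * result = (src * (255 - dst_alpha) + dst * src_alpha) / 255.
 */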
1236
1237 static force_inline void
1238 core_combine_reverse_atop_u_sse2 (uint32_t*       pd,
1239                                   const uint32_t* ps,
1240                                   const uint32_t* pm,
1241                                   int             w)
1242 {
1243     uint32_t s, d;
1244
1245     __m128i xmm_src_lo, xmm_src_hi;
1246     __m128i xmm_dst_lo, xmm_dst_hi;
1247     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
1248     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
1249
1250     /* call prefetch hint to optimize cache load*/
1251     cache_prefetch ((__m128i*)ps);
1252     cache_prefetch ((__m128i*)pd);
1253     cache_prefetch ((__m128i*)pm);
1254
1255     while (w && ((unsigned long) pd & 15))
1256     {
1257         s = combine1 (ps, pm);
1258         d = *pd;
1259
1260         *pd++ = core_combine_reverse_atop_u_pixel_sse2 (s, d);
1261         ps++;
1262         w--;
1263         if (pm)
1264             pm++;
1265     }
1266
1267     /* call prefetch hint to optimize cache load*/
1268     cache_prefetch ((__m128i*)ps);
1269     cache_prefetch ((__m128i*)pd);
1270     cache_prefetch ((__m128i*)pm);
1271
1272     while (w >= 4)
1273     {
1274         /* fill cache line with next memory */
1275         cache_prefetch_next ((__m128i*)ps);
1276         cache_prefetch_next ((__m128i*)pd);
1277         cache_prefetch_next ((__m128i*)pm);
1278
1279         xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
1280         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
1281
1282         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1283         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1284
1285         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
1286                             &xmm_alpha_src_lo, &xmm_alpha_src_hi);
1287         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
1288                             &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
1289
1290         negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi,
1291                       &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
1292
1293         pix_add_multiply_2x128 (
1294             &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
1295             &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi,
1296             &xmm_dst_lo, &xmm_dst_hi);
1297
1298         save_128_aligned (
1299             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1300
1301         ps += 4;
1302         pd += 4;
1303         w -= 4;
1304         if (pm)
1305             pm += 4;
1306     }
1307
1308     while (w)
1309     {
1310         s = combine1 (ps, pm);
1311         d = *pd;
1312
1313         *pd++ = core_combine_reverse_atop_u_pixel_sse2 (s, d);
1314         ps++;
1315         w--;
1316         if (pm)
1317             pm++;
1318     }
1319 }
1320
1321 static force_inline uint32_t
1322 core_combine_xor_u_pixel_sse2 (uint32_t src,
1323                                uint32_t dst)
1324 {
1325     __m64 s = unpack_32_1x64 (src);
1326     __m64 d = unpack_32_1x64 (dst);
1327
1328     __m64 neg_d = negate_1x64 (expand_alpha_1x64 (d));
1329     __m64 neg_s = negate_1x64 (expand_alpha_1x64 (s));
1330
1331     return pack_1x64_32 (pix_add_multiply_1x64 (&s, &neg_d, &d, &neg_s));
1332 }
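
/* XOR per channel:
 * result = (src * (255 - dst_alpha) + dst * (255 - src_alpha)) / 255.
 */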
1333
1334 static force_inline void
1335 core_combine_xor_u_sse2 (uint32_t*       dst,
1336                          const uint32_t* src,
1337                          const uint32_t *mask,
1338                          int             width)
1339 {
1340     int w = width;
1341     uint32_t s, d;
1342     uint32_t* pd = dst;
1343     const uint32_t* ps = src;
1344     const uint32_t* pm = mask;
1345
1346     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
1347     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
1348     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
1349     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
1350
1351     /* call prefetch hint to optimize cache load*/
1352     cache_prefetch ((__m128i*)ps);
1353     cache_prefetch ((__m128i*)pd);
1354     cache_prefetch ((__m128i*)pm);
1355
1356     while (w && ((unsigned long) pd & 15))
1357     {
1358         s = combine1 (ps, pm);
1359         d = *pd;
1360
1361         *pd++ = core_combine_xor_u_pixel_sse2 (s, d);
1362         w--;
1363         ps++;
1364         if (pm)
1365             pm++;
1366     }
1367
1368     /* call prefetch hint to optimize cache load*/
1369     cache_prefetch ((__m128i*)ps);
1370     cache_prefetch ((__m128i*)pd);
1371     cache_prefetch ((__m128i*)pm);
1372
1373     while (w >= 4)
1374     {
1375         /* fill cache line with next memory */
1376         cache_prefetch_next ((__m128i*)ps);
1377         cache_prefetch_next ((__m128i*)pd);
1378         cache_prefetch_next ((__m128i*)pm);
1379
1380         xmm_src = combine4 ((__m128i*) ps, (__m128i*) pm);
1381         xmm_dst = load_128_aligned ((__m128i*) pd);
1382
1383         unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
1384         unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
1385
1386         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
1387                             &xmm_alpha_src_lo, &xmm_alpha_src_hi);
1388         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
1389                             &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
1390
1391         negate_2x128 (xmm_alpha_src_lo, xmm_alpha_src_hi,
1392                       &xmm_alpha_src_lo, &xmm_alpha_src_hi);
1393         negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi,
1394                       &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
1395
1396         pix_add_multiply_2x128 (
1397             &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
1398             &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi,
1399             &xmm_dst_lo, &xmm_dst_hi);
1400
1401         save_128_aligned (
1402             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1403
1404         ps += 4;
1405         pd += 4;
1406         w -= 4;
1407         if (pm)
1408             pm += 4;
1409     }
1410
1411     while (w)
1412     {
1413         s = combine1 (ps, pm);
1414         d = *pd;
1415
1416         *pd++ = core_combine_xor_u_pixel_sse2 (s, d);
1417         w--;
1418         ps++;
1419         if (pm)
1420             pm++;
1421     }
1422 }
1423
1424 static force_inline void
1425 core_combine_add_u_sse2 (uint32_t*       dst,
1426                          const uint32_t* src,
1427                          const uint32_t* mask,
1428                          int             width)
1429 {
1430     int w = width;
1431     uint32_t s, d;
1432     uint32_t* pd = dst;
1433     const uint32_t* ps = src;
1434     const uint32_t* pm = mask;
1435
1436     /* call prefetch hint to optimize cache load*/
1437     cache_prefetch ((__m128i*)ps);
1438     cache_prefetch ((__m128i*)pd);
1439     cache_prefetch ((__m128i*)pm);
1440
1441     while (w && (unsigned long)pd & 15)
1442     {
1443         s = combine1 (ps, pm);
1444         d = *pd;
1445
1446         ps++;
1447         if (pm)
1448             pm++;
1449         *pd++ = _mm_cvtsi64_si32 (
1450             _mm_adds_pu8 (_mm_cvtsi32_si64 (s), _mm_cvtsi32_si64 (d)));
1451         w--;
1452     }
1453
1454     /* call prefetch hint to optimize cache load*/
1455     cache_prefetch ((__m128i*)ps);
1456     cache_prefetch ((__m128i*)pd);
1457     cache_prefetch ((__m128i*)pm);
1458
1459     while (w >= 4)
1460     {
1461         __m128i s;
1462
1463         /* fill cache line with next memory */
1464         cache_prefetch_next ((__m128i*)ps);
1465         cache_prefetch_next ((__m128i*)pd);
1466         cache_prefetch_next ((__m128i*)pm);
1467
1468         s = combine4 ((__m128i*)ps, (__m128i*)pm);
1469
1470         save_128_aligned (
1471             (__m128i*)pd, _mm_adds_epu8 (s, load_128_aligned  ((__m128i*)pd)));
1472
1473         pd += 4;
1474         ps += 4;
1475         if (pm)
1476             pm += 4;
1477         w -= 4;
1478     }
1479
1480     while (w--)
1481     {
1482         s = combine1 (ps, pm);
1483         d = *pd;
1484
1485         ps++;
1486         *pd++ = _mm_cvtsi64_si32 (
1487             _mm_adds_pu8 (_mm_cvtsi32_si64 (s), _mm_cvtsi32_si64 (d)));
1488         if (pm)
1489             pm++;
1490     }
1491 }
1492
1493 static force_inline uint32_t
1494 core_combine_saturate_u_pixel_sse2 (uint32_t src,
1495                                     uint32_t dst)
1496 {
1497     __m64 ms = unpack_32_1x64 (src);
1498     __m64 md = unpack_32_1x64 (dst);
1499     uint32_t sa = src >> 24;
1500     uint32_t da = ~dst >> 24;
1501
1502     if (sa > da)
1503     {
1504         ms = pix_multiply_1x64 (
1505             ms, expand_alpha_1x64 (unpack_32_1x64 (DIV_UN8 (da, sa) << 24)));
1506     }
1507
1508     return pack_1x64_32 (_mm_adds_pu16 (md, ms));
1509 }
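
/* SATURATE: if the source alpha exceeds the remaining destination
 * coverage (255 - dst_alpha), the source is first scaled by
 * DIV_UN8 (da, sa), i.e. by da / sa, so the saturating add stays in
 * range; otherwise this reduces to a plain saturating ADD.
 */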
1510
1511 static force_inline void
1512 core_combine_saturate_u_sse2 (uint32_t *      pd,
1513                               const uint32_t *ps,
1514                               const uint32_t *pm,
1515                               int             w)
1516 {
1517     uint32_t s, d;
1518
1519     uint32_t pack_cmp;
1520     __m128i xmm_src, xmm_dst;
1521
1522     /* call prefetch hint to optimize cache load*/
1523     cache_prefetch ((__m128i*)ps);
1524     cache_prefetch ((__m128i*)pd);
1525     cache_prefetch ((__m128i*)pm);
1526
1527     while (w && (unsigned long)pd & 15)
1528     {
1529         s = combine1 (ps, pm);
1530         d = *pd;
1531
1532         *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1533         w--;
1534         ps++;
1535         if (pm)
1536             pm++;
1537     }
1538
1539     /* call prefetch hint to optimize cache load*/
1540     cache_prefetch ((__m128i*)ps);
1541     cache_prefetch ((__m128i*)pd);
1542     cache_prefetch ((__m128i*)pm);
1543
1544     while (w >= 4)
1545     {
1546         /* fill cache line with next memory */
1547         cache_prefetch_next ((__m128i*)ps);
1548         cache_prefetch_next ((__m128i*)pd);
1549         cache_prefetch_next ((__m128i*)pm);
1550
1551         xmm_dst = load_128_aligned  ((__m128i*)pd);
1552         xmm_src = combine4 ((__m128i*)ps, (__m128i*)pm);
1553
1554         pack_cmp = _mm_movemask_epi8 (
1555             _mm_cmpgt_epi32 (
1556                 _mm_srli_epi32 (xmm_src, 24),
1557                 _mm_srli_epi32 (_mm_xor_si128 (xmm_dst, mask_ff000000), 24)));
1558
1559         /* if any source alpha is greater than the corresponding ~dst alpha */
1560         if (pack_cmp)
1561         {
1562             s = combine1 (ps++, pm);
1563             d = *pd;
1564             *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1565             if (pm)
1566                 pm++;
1567
1568             s = combine1 (ps++, pm);
1569             d = *pd;
1570             *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1571             if (pm)
1572                 pm++;
1573
1574             s = combine1 (ps++, pm);
1575             d = *pd;
1576             *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1577             if (pm)
1578                 pm++;
1579
1580             s = combine1 (ps++, pm);
1581             d = *pd;
1582             *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1583             if (pm)
1584                 pm++;
1585         }
1586         else
1587         {
1588             save_128_aligned ((__m128i*)pd, _mm_adds_epu8 (xmm_dst, xmm_src));
1589
1590             pd += 4;
1591             ps += 4;
1592             if (pm)
1593                 pm += 4;
1594         }
1595
1596         w -= 4;
1597     }
1598
1599     while (w--)
1600     {
1601         s = combine1 (ps, pm);
1602         d = *pd;
1603
1604         *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1605         ps++;
1606         if (pm)
1607             pm++;
1608     }
1609 }
1610
1611 static force_inline void
1612 core_combine_src_ca_sse2 (uint32_t*       pd,
1613                           const uint32_t* ps,
1614                           const uint32_t *pm,
1615                           int             w)
1616 {
1617     uint32_t s, m;
1618
1619     __m128i xmm_src_lo, xmm_src_hi;
1620     __m128i xmm_mask_lo, xmm_mask_hi;
1621     __m128i xmm_dst_lo, xmm_dst_hi;
1622
1623     /* call prefetch hint to optimize cache load*/
1624     cache_prefetch ((__m128i*)ps);
1625     cache_prefetch ((__m128i*)pd);
1626     cache_prefetch ((__m128i*)pm);
1627
1628     while (w && (unsigned long)pd & 15)
1629     {
1630         s = *ps++;
1631         m = *pm++;
1632         *pd++ = pack_1x64_32 (
1633             pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)));
1634         w--;
1635     }
1636
1637     /* call prefetch hint to optimize cache load*/
1638     cache_prefetch ((__m128i*)ps);
1639     cache_prefetch ((__m128i*)pd);
1640     cache_prefetch ((__m128i*)pm);
1641
1642     while (w >= 4)
1643     {
1644         /* fill cache line with next memory */
1645         cache_prefetch_next ((__m128i*)ps);
1646         cache_prefetch_next ((__m128i*)pd);
1647         cache_prefetch_next ((__m128i*)pm);
1648
1649         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1650         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1651
1652         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1653         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1654
1655         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
1656                             &xmm_mask_lo, &xmm_mask_hi,
1657                             &xmm_dst_lo, &xmm_dst_hi);
1658
1659         save_128_aligned (
1660             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1661
1662         ps += 4;
1663         pd += 4;
1664         pm += 4;
1665         w -= 4;
1666     }
1667
1668     while (w)
1669     {
1670         s = *ps++;
1671         m = *pm++;
1672         *pd++ = pack_1x64_32 (
1673             pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)));
1674         w--;
1675     }
1676 }
1677
1678 static force_inline uint32_t
1679 core_combine_over_ca_pixel_sse2 (uint32_t src,
1680                                  uint32_t mask,
1681                                  uint32_t dst)
1682 {
1683     __m64 s = unpack_32_1x64 (src);
1684     __m64 expAlpha = expand_alpha_1x64 (s);
1685     __m64 unpk_mask = unpack_32_1x64 (mask);
1686     __m64 unpk_dst  = unpack_32_1x64 (dst);
1687
1688     return pack_1x64_32 (in_over_1x64 (&s, &expAlpha, &unpk_mask, &unpk_dst));
1689 }
1690
1691 static force_inline void
1692 core_combine_over_ca_sse2 (uint32_t*       pd,
1693                            const uint32_t* ps,
1694                            const uint32_t *pm,
1695                            int             w)
1696 {
1697     uint32_t s, m, d;
1698
1699     __m128i xmm_alpha_lo, xmm_alpha_hi;
1700     __m128i xmm_src_lo, xmm_src_hi;
1701     __m128i xmm_dst_lo, xmm_dst_hi;
1702     __m128i xmm_mask_lo, xmm_mask_hi;
1703
1704     /* call prefetch hint to optimize cache load*/
1705     cache_prefetch ((__m128i*)ps);
1706     cache_prefetch ((__m128i*)pd);
1707     cache_prefetch ((__m128i*)pm);
1708
1709     while (w && (unsigned long)pd & 15)
1710     {
1711         s = *ps++;
1712         m = *pm++;
1713         d = *pd;
1714
1715         *pd++ = core_combine_over_ca_pixel_sse2 (s, m, d);
1716         w--;
1717     }
1718
1719     /* call prefetch hint to optimize cache load*/
1720     cache_prefetch ((__m128i*)ps);
1721     cache_prefetch ((__m128i*)pd);
1722     cache_prefetch ((__m128i*)pm);
1723
1724     while (w >= 4)
1725     {
1726         /* fill cache line with next memory */
1727         cache_prefetch_next ((__m128i*)ps);
1728         cache_prefetch_next ((__m128i*)pd);
1729         cache_prefetch_next ((__m128i*)pm);
1730
1731         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
1732         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1733         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1734
1735         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1736         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1737         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1738
1739         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
1740                             &xmm_alpha_lo, &xmm_alpha_hi);
1741
1742         in_over_2x128 (&xmm_src_lo, &xmm_src_hi,
1743                        &xmm_alpha_lo, &xmm_alpha_hi,
1744                        &xmm_mask_lo, &xmm_mask_hi,
1745                        &xmm_dst_lo, &xmm_dst_hi);
1746
1747         save_128_aligned (
1748             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1749
1750         ps += 4;
1751         pd += 4;
1752         pm += 4;
1753         w -= 4;
1754     }
1755
1756     while (w)
1757     {
1758         s = *ps++;
1759         m = *pm++;
1760         d = *pd;
1761
1762         *pd++ = core_combine_over_ca_pixel_sse2 (s, m, d);
1763         w--;
1764     }
1765 }
1766
1767 static force_inline uint32_t
1768 core_combine_over_reverse_ca_pixel_sse2 (uint32_t src,
1769                                          uint32_t mask,
1770                                          uint32_t dst)
1771 {
1772     __m64 d = unpack_32_1x64 (dst);
1773
1774     return pack_1x64_32 (
1775         over_1x64 (d, expand_alpha_1x64 (d),
1776                    pix_multiply_1x64 (unpack_32_1x64 (src),
1777                                       unpack_32_1x64 (mask))));
1778 }
1779
1780 static force_inline void
1781 core_combine_over_reverse_ca_sse2 (uint32_t*       pd,
1782                                    const uint32_t* ps,
1783                                    const uint32_t *pm,
1784                                    int             w)
1785 {
1786     uint32_t s, m, d;
1787
1788     __m128i xmm_alpha_lo, xmm_alpha_hi;
1789     __m128i xmm_src_lo, xmm_src_hi;
1790     __m128i xmm_dst_lo, xmm_dst_hi;
1791     __m128i xmm_mask_lo, xmm_mask_hi;
1792
1793     /* call prefetch hint to optimize cache load*/
1794     cache_prefetch ((__m128i*)ps);
1795     cache_prefetch ((__m128i*)pd);
1796     cache_prefetch ((__m128i*)pm);
1797
1798     while (w && (unsigned long)pd & 15)
1799     {
1800         s = *ps++;
1801         m = *pm++;
1802         d = *pd;
1803
1804         *pd++ = core_combine_over_reverse_ca_pixel_sse2 (s, m, d);
1805         w--;
1806     }
1807
1808     /* call prefetch hint to optimize cache load*/
1809     cache_prefetch ((__m128i*)ps);
1810     cache_prefetch ((__m128i*)pd);
1811     cache_prefetch ((__m128i*)pm);
1812
1813     while (w >= 4)
1814     {
1815         /* fill cache line with next memory */
1816         cache_prefetch_next ((__m128i*)ps);
1817         cache_prefetch_next ((__m128i*)pd);
1818         cache_prefetch_next ((__m128i*)pm);
1819
1820         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
1821         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1822         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1823
1824         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1825         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1826         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1827
1828         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
1829                             &xmm_alpha_lo, &xmm_alpha_hi);
1830         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
1831                             &xmm_mask_lo, &xmm_mask_hi,
1832                             &xmm_mask_lo, &xmm_mask_hi);
1833
1834         over_2x128 (&xmm_dst_lo, &xmm_dst_hi,
1835                     &xmm_alpha_lo, &xmm_alpha_hi,
1836                     &xmm_mask_lo, &xmm_mask_hi);
1837
1838         save_128_aligned (
1839             (__m128i*)pd, pack_2x128_128 (xmm_mask_lo, xmm_mask_hi));
1840
1841         ps += 4;
1842         pd += 4;
1843         pm += 4;
1844         w -= 4;
1845     }
1846
1847     while (w)
1848     {
1849         s = *ps++;
1850         m = *pm++;
1851         d = *pd;
1852
1853         *pd++ = core_combine_over_reverse_ca_pixel_sse2 (s, m, d);
1854         w--;
1855     }
1856 }
1857
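/* Component-alpha IN:
 * result = (src * mask) * dst.alpha, computed per channel.
 */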
1858 static force_inline void
1859 core_combine_in_ca_sse2 (uint32_t *      pd,
1860                          const uint32_t *ps,
1861                          const uint32_t *pm,
1862                          int             w)
1863 {
1864     uint32_t s, m, d;
1865
1866     __m128i xmm_alpha_lo, xmm_alpha_hi;
1867     __m128i xmm_src_lo, xmm_src_hi;
1868     __m128i xmm_dst_lo, xmm_dst_hi;
1869     __m128i xmm_mask_lo, xmm_mask_hi;
1870
1871     /* call prefetch hint to optimize cache load*/
1872     cache_prefetch ((__m128i*)ps);
1873     cache_prefetch ((__m128i*)pd);
1874     cache_prefetch ((__m128i*)pm);
1875
1876     while (w && (unsigned long)pd & 15)
1877     {
1878         s = *ps++;
1879         m = *pm++;
1880         d = *pd;
1881
1882         *pd++ = pack_1x64_32 (
1883             pix_multiply_1x64 (
1884                 pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)),
1885                 expand_alpha_1x64 (unpack_32_1x64 (d))));
1886
1887         w--;
1888     }
1889
1890     /* call prefetch hint to optimize cache load*/
1891     cache_prefetch ((__m128i*)ps);
1892     cache_prefetch ((__m128i*)pd);
1893     cache_prefetch ((__m128i*)pm);
1894
1895     while (w >= 4)
1896     {
1897         /* fill cache line with next memory */
1898         cache_prefetch_next ((__m128i*)ps);
1899         cache_prefetch_next ((__m128i*)pd);
1900         cache_prefetch_next ((__m128i*)pm);
1901
1902         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
1903         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1904         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1905
1906         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1907         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1908         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1909
1910         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
1911                             &xmm_alpha_lo, &xmm_alpha_hi);
1912
1913         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
1914                             &xmm_mask_lo, &xmm_mask_hi,
1915                             &xmm_dst_lo, &xmm_dst_hi);
1916
1917         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
1918                             &xmm_alpha_lo, &xmm_alpha_hi,
1919                             &xmm_dst_lo, &xmm_dst_hi);
1920
1921         save_128_aligned (
1922             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1923
1924         ps += 4;
1925         pd += 4;
1926         pm += 4;
1927         w -= 4;
1928     }
1929
1930     while (w)
1931     {
1932         s = *ps++;
1933         m = *pm++;
1934         d = *pd;
1935
1936         *pd++ = pack_1x64_32 (
1937             pix_multiply_1x64 (
1938                 pix_multiply_1x64 (
1939                     unpack_32_1x64 (s), unpack_32_1x64 (m)),
1940                 expand_alpha_1x64 (unpack_32_1x64 (d))));
1941
1942         w--;
1943     }
1944 }
1945
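/* Component-alpha IN_REVERSE:
 * result = dst * (mask * src.alpha), computed per channel.
 */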
1946 static force_inline void
1947 core_combine_in_reverse_ca_sse2 (uint32_t *      pd,
1948                                  const uint32_t *ps,
1949                                  const uint32_t *pm,
1950                                  int             w)
1951 {
1952     uint32_t s, m, d;
1953
1954     __m128i xmm_alpha_lo, xmm_alpha_hi;
1955     __m128i xmm_src_lo, xmm_src_hi;
1956     __m128i xmm_dst_lo, xmm_dst_hi;
1957     __m128i xmm_mask_lo, xmm_mask_hi;
1958
1959     /* call prefetch hint to optimize cache load*/
1960     cache_prefetch ((__m128i*)ps);
1961     cache_prefetch ((__m128i*)pd);
1962     cache_prefetch ((__m128i*)pm);
1963
1964     while (w && (unsigned long)pd & 15)
1965     {
1966         s = *ps++;
1967         m = *pm++;
1968         d = *pd;
1969
1970         *pd++ = pack_1x64_32 (
1971             pix_multiply_1x64 (
1972                 unpack_32_1x64 (d),
1973                 pix_multiply_1x64 (unpack_32_1x64 (m),
1974                                    expand_alpha_1x64 (unpack_32_1x64 (s)))));
1975         w--;
1976     }
1977
1978     /* call prefetch hint to optimize cache load*/
1979     cache_prefetch ((__m128i*)ps);
1980     cache_prefetch ((__m128i*)pd);
1981     cache_prefetch ((__m128i*)pm);
1982
1983     while (w >= 4)
1984     {
1985         /* fill cache line with next memory */
1986         cache_prefetch_next ((__m128i*)ps);
1987         cache_prefetch_next ((__m128i*)pd);
1988         cache_prefetch_next ((__m128i*)pm);
1989
1990         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
1991         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1992         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1993
1994         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1995         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1996         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1997
1998         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
1999                             &xmm_alpha_lo, &xmm_alpha_hi);
2000         pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
2001                             &xmm_alpha_lo, &xmm_alpha_hi,
2002                             &xmm_alpha_lo, &xmm_alpha_hi);
2003
2004         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
2005                             &xmm_alpha_lo, &xmm_alpha_hi,
2006                             &xmm_dst_lo, &xmm_dst_hi);
2007
2008         save_128_aligned (
2009             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2010
2011         ps += 4;
2012         pd += 4;
2013         pm += 4;
2014         w -= 4;
2015     }
2016
2017     while (w)
2018     {
2019         s = *ps++;
2020         m = *pm++;
2021         d = *pd;
2022
2023         *pd++ = pack_1x64_32 (
2024             pix_multiply_1x64 (
2025                 unpack_32_1x64 (d),
2026                 pix_multiply_1x64 (unpack_32_1x64 (m),
2027                                    expand_alpha_1x64 (unpack_32_1x64 (s)))));
2028         w--;
2029     }
2030 }
2031
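/* Component-alpha OUT:
 * result = (src * mask) * (1 - dst.alpha), computed per channel.
 */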
2032 static force_inline void
2033 core_combine_out_ca_sse2 (uint32_t *      pd,
2034                           const uint32_t *ps,
2035                           const uint32_t *pm,
2036                           int             w)
2037 {
2038     uint32_t s, m, d;
2039
2040     __m128i xmm_alpha_lo, xmm_alpha_hi;
2041     __m128i xmm_src_lo, xmm_src_hi;
2042     __m128i xmm_dst_lo, xmm_dst_hi;
2043     __m128i xmm_mask_lo, xmm_mask_hi;
2044
2045     /* call prefetch hint to optimize cache load*/
2046     cache_prefetch ((__m128i*)ps);
2047     cache_prefetch ((__m128i*)pd);
2048     cache_prefetch ((__m128i*)pm);
2049
2050     while (w && (unsigned long)pd & 15)
2051     {
2052         s = *ps++;
2053         m = *pm++;
2054         d = *pd;
2055
2056         *pd++ = pack_1x64_32 (
2057             pix_multiply_1x64 (
2058                 pix_multiply_1x64 (
2059                     unpack_32_1x64 (s), unpack_32_1x64 (m)),
2060                 negate_1x64 (expand_alpha_1x64 (unpack_32_1x64 (d)))));
2061         w--;
2062     }
2063
2064     /* call prefetch hint to optimize cache load*/
2065     cache_prefetch ((__m128i*)ps);
2066     cache_prefetch ((__m128i*)pd);
2067     cache_prefetch ((__m128i*)pm);
2068
2069     while (w >= 4)
2070     {
2071         /* fill cache line with next memory */
2072         cache_prefetch_next ((__m128i*)ps);
2073         cache_prefetch_next ((__m128i*)pd);
2074         cache_prefetch_next ((__m128i*)pm);
2075
2076         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
2077         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
2078         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
2079
2080         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
2081         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
2082         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2083
2084         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
2085                             &xmm_alpha_lo, &xmm_alpha_hi);
2086         negate_2x128 (xmm_alpha_lo, xmm_alpha_hi,
2087                       &xmm_alpha_lo, &xmm_alpha_hi);
2088
2089         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
2090                             &xmm_mask_lo, &xmm_mask_hi,
2091                             &xmm_dst_lo, &xmm_dst_hi);
2092         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
2093                             &xmm_alpha_lo, &xmm_alpha_hi,
2094                             &xmm_dst_lo, &xmm_dst_hi);
2095
2096         save_128_aligned (
2097             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2098
2099         ps += 4;
2100         pd += 4;
2101         pm += 4;
2102         w -= 4;
2103     }
2104
2105     while (w)
2106     {
2107         s = *ps++;
2108         m = *pm++;
2109         d = *pd;
2110
2111         *pd++ = pack_1x64_32 (
2112             pix_multiply_1x64 (
2113                 pix_multiply_1x64 (
2114                     unpack_32_1x64 (s), unpack_32_1x64 (m)),
2115                 negate_1x64 (expand_alpha_1x64 (unpack_32_1x64 (d)))));
2116
2117         w--;
2118     }
2119 }
2120
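/* Component-alpha OUT_REVERSE:
 * result = dst * (1 - mask * src.alpha), computed per channel.
 */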
2121 static force_inline void
2122 core_combine_out_reverse_ca_sse2 (uint32_t *      pd,
2123                                   const uint32_t *ps,
2124                                   const uint32_t *pm,
2125                                   int             w)
2126 {
2127     uint32_t s, m, d;
2128
2129     __m128i xmm_alpha_lo, xmm_alpha_hi;
2130     __m128i xmm_src_lo, xmm_src_hi;
2131     __m128i xmm_dst_lo, xmm_dst_hi;
2132     __m128i xmm_mask_lo, xmm_mask_hi;
2133
2134     /* call prefetch hint to optimize cache load*/
2135     cache_prefetch ((__m128i*)ps);
2136     cache_prefetch ((__m128i*)pd);
2137     cache_prefetch ((__m128i*)pm);
2138
2139     while (w && (unsigned long)pd & 15)
2140     {
2141         s = *ps++;
2142         m = *pm++;
2143         d = *pd;
2144
2145         *pd++ = pack_1x64_32 (
2146             pix_multiply_1x64 (
2147                 unpack_32_1x64 (d),
2148                 negate_1x64 (pix_multiply_1x64 (
2149                                  unpack_32_1x64 (m),
2150                                  expand_alpha_1x64 (unpack_32_1x64 (s))))));
2151         w--;
2152     }
2153
2154     /* call prefetch hint to optimize cache load*/
2155     cache_prefetch ((__m128i*)ps);
2156     cache_prefetch ((__m128i*)pd);
2157     cache_prefetch ((__m128i*)pm);
2158
2159     while (w >= 4)
2160     {
2161         /* fill cache line with next memory */
2162         cache_prefetch_next ((__m128i*)ps);
2163         cache_prefetch_next ((__m128i*)pd);
2164         cache_prefetch_next ((__m128i*)pm);
2165
2166         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
2167         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
2168         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
2169
2170         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
2171         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
2172         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2173
2174         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
2175                             &xmm_alpha_lo, &xmm_alpha_hi);
2176
2177         pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
2178                             &xmm_alpha_lo, &xmm_alpha_hi,
2179                             &xmm_mask_lo, &xmm_mask_hi);
2180
2181         negate_2x128 (xmm_mask_lo, xmm_mask_hi,
2182                       &xmm_mask_lo, &xmm_mask_hi);
2183
2184         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi,
2185                             &xmm_mask_lo, &xmm_mask_hi,
2186                             &xmm_dst_lo, &xmm_dst_hi);
2187
2188         save_128_aligned (
2189             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2190
2191         ps += 4;
2192         pd += 4;
2193         pm += 4;
2194         w -= 4;
2195     }
2196
2197     while (w)
2198     {
2199         s = *ps++;
2200         m = *pm++;
2201         d = *pd;
2202
2203         *pd++ = pack_1x64_32 (
2204             pix_multiply_1x64 (
2205                 unpack_32_1x64 (d),
2206                 negate_1x64 (pix_multiply_1x64 (
2207                                  unpack_32_1x64 (m),
2208                                  expand_alpha_1x64 (unpack_32_1x64 (s))))));
2209         w--;
2210     }
2211 }
2212
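/* Per-pixel component-alpha ATOP:
 * result = (src * mask) * dst.alpha + dst * (1 - mask * src.alpha).
 */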
2213 static force_inline uint32_t
2214 core_combine_atop_ca_pixel_sse2 (uint32_t src,
2215                                  uint32_t mask,
2216                                  uint32_t dst)
2217 {
2218     __m64 m = unpack_32_1x64 (mask);
2219     __m64 s = unpack_32_1x64 (src);
2220     __m64 d = unpack_32_1x64 (dst);
2221     __m64 sa = expand_alpha_1x64 (s);
2222     __m64 da = expand_alpha_1x64 (d);
2223
2224     s = pix_multiply_1x64 (s, m);
2225     m = negate_1x64 (pix_multiply_1x64 (m, sa));
2226
2227     return pack_1x64_32 (pix_add_multiply_1x64 (&d, &m, &s, &da));
2228 }
2229
2230 static force_inline void
2231 core_combine_atop_ca_sse2 (uint32_t *      pd,
2232                            const uint32_t *ps,
2233                            const uint32_t *pm,
2234                            int             w)
2235 {
2236     uint32_t s, m, d;
2237
2238     __m128i xmm_src_lo, xmm_src_hi;
2239     __m128i xmm_dst_lo, xmm_dst_hi;
2240     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
2241     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
2242     __m128i xmm_mask_lo, xmm_mask_hi;
2243
2244     /* call prefetch hint to optimize cache load*/
2245     cache_prefetch ((__m128i*)ps);
2246     cache_prefetch ((__m128i*)pd);
2247     cache_prefetch ((__m128i*)pm);
2248
2249     while (w && (unsigned long)pd & 15)
2250     {
2251         s = *ps++;
2252         m = *pm++;
2253         d = *pd;
2254
2255         *pd++ = core_combine_atop_ca_pixel_sse2 (s, m, d);
2256         w--;
2257     }
2258
2259     /* call prefetch hint to optimize cache load*/
2260     cache_prefetch ((__m128i*)ps);
2261     cache_prefetch ((__m128i*)pd);
2262     cache_prefetch ((__m128i*)pm);
2263
2264     while (w >= 4)
2265     {
2266         /* fill cache line with next memory */
2267         cache_prefetch_next ((__m128i*)ps);
2268         cache_prefetch_next ((__m128i*)pd);
2269         cache_prefetch_next ((__m128i*)pm);
2270
2271         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
2272         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
2273         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
2274
2275         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
2276         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
2277         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2278
2279         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
2280                             &xmm_alpha_src_lo, &xmm_alpha_src_hi);
2281         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
2282                             &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
2283
2284         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
2285                             &xmm_mask_lo, &xmm_mask_hi,
2286                             &xmm_src_lo, &xmm_src_hi);
2287         pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
2288                             &xmm_alpha_src_lo, &xmm_alpha_src_hi,
2289                             &xmm_mask_lo, &xmm_mask_hi);
2290
2291         negate_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2292
2293         pix_add_multiply_2x128 (
2294             &xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi,
2295             &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
2296             &xmm_dst_lo, &xmm_dst_hi);
2297
2298         save_128_aligned (
2299             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2300
2301         ps += 4;
2302         pd += 4;
2303         pm += 4;
2304         w -= 4;
2305     }
2306
2307     while (w)
2308     {
2309         s = *ps++;
2310         m = *pm++;
2311         d = *pd;
2312
2313         *pd++ = core_combine_atop_ca_pixel_sse2 (s, m, d);
2314         w--;
2315     }
2316 }
2317
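/* Per-pixel component-alpha ATOP_REVERSE:
 * result = (src * mask) * (1 - dst.alpha) + dst * (mask * src.alpha).
 */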
2318 static force_inline uint32_t
2319 core_combine_reverse_atop_ca_pixel_sse2 (uint32_t src,
2320                                          uint32_t mask,
2321                                          uint32_t dst)
2322 {
2323     __m64 m = unpack_32_1x64 (mask);
2324     __m64 s = unpack_32_1x64 (src);
2325     __m64 d = unpack_32_1x64 (dst);
2326
2327     __m64 da = negate_1x64 (expand_alpha_1x64 (d));
2328     __m64 sa = expand_alpha_1x64 (s);
2329
2330     s = pix_multiply_1x64 (s, m);
2331     m = pix_multiply_1x64 (m, sa);
2332
2333     return pack_1x64_32 (pix_add_multiply_1x64 (&d, &m, &s, &da));
2334 }
2335
2336 static force_inline void
2337 core_combine_reverse_atop_ca_sse2 (uint32_t *      pd,
2338                                    const uint32_t *ps,
2339                                    const uint32_t *pm,
2340                                    int             w)
2341 {
2342     uint32_t s, m, d;
2343
2344     __m128i xmm_src_lo, xmm_src_hi;
2345     __m128i xmm_dst_lo, xmm_dst_hi;
2346     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
2347     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
2348     __m128i xmm_mask_lo, xmm_mask_hi;
2349
2350     /* call prefetch hint to optimize cache load*/
2351     cache_prefetch ((__m128i*)ps);
2352     cache_prefetch ((__m128i*)pd);
2353     cache_prefetch ((__m128i*)pm);
2354
2355     while (w && (unsigned long)pd & 15)
2356     {
2357         s = *ps++;
2358         m = *pm++;
2359         d = *pd;
2360
2361         *pd++ = core_combine_reverse_atop_ca_pixel_sse2 (s, m, d);
2362         w--;
2363     }
2364
2365     /* call prefetch hint to optimize cache load*/
2366     cache_prefetch ((__m128i*)ps);
2367     cache_prefetch ((__m128i*)pd);
2368     cache_prefetch ((__m128i*)pm);
2369
2370     while (w >= 4)
2371     {
2372         /* fill cache line with next memory */
2373         cache_prefetch_next ((__m128i*)ps);
2374         cache_prefetch_next ((__m128i*)pd);
2375         cache_prefetch_next ((__m128i*)pm);
2376
2377         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
2378         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
2379         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
2380
2381         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
2382         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
2383         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2384
2385         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
2386                             &xmm_alpha_src_lo, &xmm_alpha_src_hi);
2387         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
2388                             &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
2389
2390         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
2391                             &xmm_mask_lo, &xmm_mask_hi,
2392                             &xmm_src_lo, &xmm_src_hi);
2393         pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
2394                             &xmm_alpha_src_lo, &xmm_alpha_src_hi,
2395                             &xmm_mask_lo, &xmm_mask_hi);
2396
2397         negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi,
2398                       &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
2399
2400         pix_add_multiply_2x128 (
2401             &xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi,
2402             &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
2403             &xmm_dst_lo, &xmm_dst_hi);
2404
2405         save_128_aligned (
2406             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2407
2408         ps += 4;
2409         pd += 4;
2410         pm += 4;
2411         w -= 4;
2412     }
2413
2414     while (w)
2415     {
2416         s = *ps++;
2417         m = *pm++;
2418         d = *pd;
2419
2420         *pd++ = core_combine_reverse_atop_ca_pixel_sse2 (s, m, d);
2421         w--;
2422     }
2423 }
2424
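/* Per-pixel component-alpha XOR:
 * result = (src * mask) * (1 - dst.alpha) + dst * (1 - mask * src.alpha).
 */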
2425 static force_inline uint32_t
2426 core_combine_xor_ca_pixel_sse2 (uint32_t src,
2427                                 uint32_t mask,
2428                                 uint32_t dst)
2429 {
2430     __m64 a = unpack_32_1x64 (mask);
2431     __m64 s = unpack_32_1x64 (src);
2432     __m64 d = unpack_32_1x64 (dst);
2433
2434     __m64 alpha_dst = negate_1x64 (pix_multiply_1x64 (
2435                                        a, expand_alpha_1x64 (s)));
2436     __m64 dest      = pix_multiply_1x64 (s, a);
2437     __m64 alpha_src = negate_1x64 (expand_alpha_1x64 (d));
2438
2439     return pack_1x64_32 (pix_add_multiply_1x64 (&d,
2440                                                 &alpha_dst,
2441                                                 &dest,
2442                                                 &alpha_src));
2443 }
2444
2445 static force_inline void
2446 core_combine_xor_ca_sse2 (uint32_t *      pd,
2447                           const uint32_t *ps,
2448                           const uint32_t *pm,
2449                           int             w)
2450 {
2451     uint32_t s, m, d;
2452
2453     __m128i xmm_src_lo, xmm_src_hi;
2454     __m128i xmm_dst_lo, xmm_dst_hi;
2455     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
2456     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
2457     __m128i xmm_mask_lo, xmm_mask_hi;
2458
2459     /* call prefetch hint to optimize cache load*/
2460     cache_prefetch ((__m128i*)ps);
2461     cache_prefetch ((__m128i*)pd);
2462     cache_prefetch ((__m128i*)pm);
2463
2464     while (w && (unsigned long)pd & 15)
2465     {
2466         s = *ps++;
2467         m = *pm++;
2468         d = *pd;
2469
2470         *pd++ = core_combine_xor_ca_pixel_sse2 (s, m, d);
2471         w--;
2472     }
2473
2474     /* call prefetch hint to optimize cache load*/
2475     cache_prefetch ((__m128i*)ps);
2476     cache_prefetch ((__m128i*)pd);
2477     cache_prefetch ((__m128i*)pm);
2478
2479     while (w >= 4)
2480     {
2481         /* fill cache line with next memory */
2482         cache_prefetch_next ((__m128i*)ps);
2483         cache_prefetch_next ((__m128i*)pd);
2484         cache_prefetch_next ((__m128i*)pm);
2485
2486         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
2487         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
2488         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
2489
2490         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
2491         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
2492         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2493
2494         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
2495                             &xmm_alpha_src_lo, &xmm_alpha_src_hi);
2496         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi,
2497                             &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
2498
2499         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
2500                             &xmm_mask_lo, &xmm_mask_hi,
2501                             &xmm_src_lo, &xmm_src_hi);
2502         pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
2503                             &xmm_alpha_src_lo, &xmm_alpha_src_hi,
2504                             &xmm_mask_lo, &xmm_mask_hi);
2505
2506         negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi,
2507                       &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
2508         negate_2x128 (xmm_mask_lo, xmm_mask_hi,
2509                       &xmm_mask_lo, &xmm_mask_hi);
2510
2511         pix_add_multiply_2x128 (
2512             &xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi,
2513             &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
2514             &xmm_dst_lo, &xmm_dst_hi);
2515
2516         save_128_aligned (
2517             (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2518
2519         ps += 4;
2520         pd += 4;
2521         pm += 4;
2522         w -= 4;
2523     }
2524
2525     while (w)
2526     {
2527         s = *ps++;
2528         m = *pm++;
2529         d = *pd;
2530
2531         *pd++ = core_combine_xor_ca_pixel_sse2 (s, m, d);
2532         w--;
2533     }
2534 }
2535
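/* Component-alpha ADD:
 * result = clamp (src * mask + dst), using saturating byte addition.
 */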
2536 static force_inline void
2537 core_combine_add_ca_sse2 (uint32_t *      pd,
2538                           const uint32_t *ps,
2539                           const uint32_t *pm,
2540                           int             w)
2541 {
2542     uint32_t s, m, d;
2543
2544     __m128i xmm_src_lo, xmm_src_hi;
2545     __m128i xmm_dst_lo, xmm_dst_hi;
2546     __m128i xmm_mask_lo, xmm_mask_hi;
2547
2548     /* call prefetch hint to optimize cache load*/
2549     cache_prefetch ((__m128i*)ps);
2550     cache_prefetch ((__m128i*)pd);
2551     cache_prefetch ((__m128i*)pm);
2552
2553     while (w && (unsigned long)pd & 15)
2554     {
2555         s = *ps++;
2556         m = *pm++;
2557         d = *pd;
2558
2559         *pd++ = pack_1x64_32 (
2560             _mm_adds_pu8 (pix_multiply_1x64 (unpack_32_1x64 (s),
2561                                              unpack_32_1x64 (m)),
2562                           unpack_32_1x64 (d)));
2563         w--;
2564     }
2565
2566     /* call prefetch hint to optimize cache load*/
2567     cache_prefetch ((__m128i*)ps);
2568     cache_prefetch ((__m128i*)pd);
2569     cache_prefetch ((__m128i*)pm);
2570
2571     while (w >= 4)
2572     {
2573         /* fill cache line with next memory */
2574         cache_prefetch_next ((__m128i*)ps);
2575         cache_prefetch_next ((__m128i*)pd);
2576         cache_prefetch_next ((__m128i*)pm);
2577
2578         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
2579         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
2580         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
2581
2582         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
2583         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2584         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
2585
2586         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
2587                             &xmm_mask_lo, &xmm_mask_hi,
2588                             &xmm_src_lo, &xmm_src_hi);
2589
2590         save_128_aligned (
2591             (__m128i*)pd, pack_2x128_128 (
2592                 _mm_adds_epu8 (xmm_src_lo, xmm_dst_lo),
2593                 _mm_adds_epu8 (xmm_src_hi, xmm_dst_hi)));
2594
2595         ps += 4;
2596         pd += 4;
2597         pm += 4;
2598         w -= 4;
2599     }
2600
2601     while (w)
2602     {
2603         s = *ps++;
2604         m = *pm++;
2605         d = *pd;
2606
2607         *pd++ = pack_1x64_32 (
2608             _mm_adds_pu8 (pix_multiply_1x64 (unpack_32_1x64 (s),
2609                                              unpack_32_1x64 (m)),
2610                           unpack_32_1x64 (d)));
2611         w--;
2612     }
2613 }
2614
2615 /* ---------------------------------------------------
2616  * fb_compose_setup_SSE2
2617  */
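/* Helpers that build constant masks: create_mask_16_64/_128 replicate a
 * 16-bit value into every lane of an __m64/__m128i, while the 2x32
 * variants pack a pair of 32-bit values (repeated across both 64-bit
 * halves in the 128-bit case).
 */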
2618 static force_inline __m64
2619 create_mask_16_64 (uint16_t mask)
2620 {
2621     return _mm_set1_pi16 (mask);
2622 }
2623
2624 static force_inline __m128i
2625 create_mask_16_128 (uint16_t mask)
2626 {
2627     return _mm_set1_epi16 (mask);
2628 }
2629
2630 static force_inline __m64
2631 create_mask_2x32_64 (uint32_t mask0,
2632                      uint32_t mask1)
2633 {
2634     return _mm_set_pi32 (mask0, mask1);
2635 }
2636
2637 static force_inline __m128i
2638 create_mask_2x32_128 (uint32_t mask0,
2639                       uint32_t mask1)
2640 {
2641     return _mm_set_epi32 (mask0, mask1, mask0, mask1);
2642 }
2643
2644 /* SSE2 code patch for fbcompose.c */
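/* Thin wrappers that adapt the core_combine_*_sse2 helpers above to
 * pixman's combiner entry points.  Each one finishes with _mm_empty ()
 * because the per-pixel paths use MMX registers.
 */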
2645
2646 static void
2647 sse2_combine_over_u (pixman_implementation_t *imp,
2648                      pixman_op_t              op,
2649                      uint32_t *               dst,
2650                      const uint32_t *         src,
2651                      const uint32_t *         mask,
2652                      int                      width)
2653 {
2654     core_combine_over_u_sse2 (dst, src, mask, width);
2655     _mm_empty ();
2656 }
2657
2658 static void
2659 sse2_combine_over_reverse_u (pixman_implementation_t *imp,
2660                              pixman_op_t              op,
2661                              uint32_t *               dst,
2662                              const uint32_t *         src,
2663                              const uint32_t *         mask,
2664                              int                      width)
2665 {
2666     core_combine_over_reverse_u_sse2 (dst, src, mask, width);
2667     _mm_empty ();
2668 }
2669
2670 static void
2671 sse2_combine_in_u (pixman_implementation_t *imp,
2672                    pixman_op_t              op,
2673                    uint32_t *               dst,
2674                    const uint32_t *         src,
2675                    const uint32_t *         mask,
2676                    int                      width)
2677 {
2678     core_combine_in_u_sse2 (dst, src, mask, width);
2679     _mm_empty ();
2680 }
2681
2682 static void
2683 sse2_combine_in_reverse_u (pixman_implementation_t *imp,
2684                            pixman_op_t              op,
2685                            uint32_t *               dst,
2686                            const uint32_t *         src,
2687                            const uint32_t *         mask,
2688                            int                      width)
2689 {
2690     core_combine_reverse_in_u_sse2 (dst, src, mask, width);
2691     _mm_empty ();
2692 }
2693
2694 static void
2695 sse2_combine_out_u (pixman_implementation_t *imp,
2696                     pixman_op_t              op,
2697                     uint32_t *               dst,
2698                     const uint32_t *         src,
2699                     const uint32_t *         mask,
2700                     int                      width)
2701 {
2702     core_combine_out_u_sse2 (dst, src, mask, width);
2703     _mm_empty ();
2704 }
2705
2706 static void
2707 sse2_combine_out_reverse_u (pixman_implementation_t *imp,
2708                             pixman_op_t              op,
2709                             uint32_t *               dst,
2710                             const uint32_t *         src,
2711                             const uint32_t *         mask,
2712                             int                      width)
2713 {
2714     core_combine_reverse_out_u_sse2 (dst, src, mask, width);
2715     _mm_empty ();
2716 }
2717
2718 static void
2719 sse2_combine_atop_u (pixman_implementation_t *imp,
2720                      pixman_op_t              op,
2721                      uint32_t *               dst,
2722                      const uint32_t *         src,
2723                      const uint32_t *         mask,
2724                      int                      width)
2725 {
2726     core_combine_atop_u_sse2 (dst, src, mask, width);
2727     _mm_empty ();
2728 }
2729
2730 static void
2731 sse2_combine_atop_reverse_u (pixman_implementation_t *imp,
2732                              pixman_op_t              op,
2733                              uint32_t *               dst,
2734                              const uint32_t *         src,
2735                              const uint32_t *         mask,
2736                              int                      width)
2737 {
2738     core_combine_reverse_atop_u_sse2 (dst, src, mask, width);
2739     _mm_empty ();
2740 }
2741
2742 static void
2743 sse2_combine_xor_u (pixman_implementation_t *imp,
2744                     pixman_op_t              op,
2745                     uint32_t *               dst,
2746                     const uint32_t *         src,
2747                     const uint32_t *         mask,
2748                     int                      width)
2749 {
2750     core_combine_xor_u_sse2 (dst, src, mask, width);
2751     _mm_empty ();
2752 }
2753
2754 static void
2755 sse2_combine_add_u (pixman_implementation_t *imp,
2756                     pixman_op_t              op,
2757                     uint32_t *               dst,
2758                     const uint32_t *         src,
2759                     const uint32_t *         mask,
2760                     int                      width)
2761 {
2762     core_combine_add_u_sse2 (dst, src, mask, width);
2763     _mm_empty ();
2764 }
2765
2766 static void
2767 sse2_combine_saturate_u (pixman_implementation_t *imp,
2768                          pixman_op_t              op,
2769                          uint32_t *               dst,
2770                          const uint32_t *         src,
2771                          const uint32_t *         mask,
2772                          int                      width)
2773 {
2774     core_combine_saturate_u_sse2 (dst, src, mask, width);
2775     _mm_empty ();
2776 }
2777
2778 static void
2779 sse2_combine_src_ca (pixman_implementation_t *imp,
2780                      pixman_op_t              op,
2781                      uint32_t *               dst,
2782                      const uint32_t *         src,
2783                      const uint32_t *         mask,
2784                      int                      width)
2785 {
2786     core_combine_src_ca_sse2 (dst, src, mask, width);
2787     _mm_empty ();
2788 }
2789
2790 static void
2791 sse2_combine_over_ca (pixman_implementation_t *imp,
2792                       pixman_op_t              op,
2793                       uint32_t *               dst,
2794                       const uint32_t *         src,
2795                       const uint32_t *         mask,
2796                       int                      width)
2797 {
2798     core_combine_over_ca_sse2 (dst, src, mask, width);
2799     _mm_empty ();
2800 }
2801
2802 static void
2803 sse2_combine_over_reverse_ca (pixman_implementation_t *imp,
2804                               pixman_op_t              op,
2805                               uint32_t *               dst,
2806                               const uint32_t *         src,
2807                               const uint32_t *         mask,
2808                               int                      width)
2809 {
2810     core_combine_over_reverse_ca_sse2 (dst, src, mask, width);
2811     _mm_empty ();
2812 }
2813
2814 static void
2815 sse2_combine_in_ca (pixman_implementation_t *imp,
2816                     pixman_op_t              op,
2817                     uint32_t *               dst,
2818                     const uint32_t *         src,
2819                     const uint32_t *         mask,
2820                     int                      width)
2821 {
2822     core_combine_in_ca_sse2 (dst, src, mask, width);
2823     _mm_empty ();
2824 }
2825
2826 static void
2827 sse2_combine_in_reverse_ca (pixman_implementation_t *imp,
2828                             pixman_op_t              op,
2829                             uint32_t *               dst,
2830                             const uint32_t *         src,
2831                             const uint32_t *         mask,
2832                             int                      width)
2833 {
2834     core_combine_in_reverse_ca_sse2 (dst, src, mask, width);
2835     _mm_empty ();
2836 }
2837
2838 static void
2839 sse2_combine_out_ca (pixman_implementation_t *imp,
2840                      pixman_op_t              op,
2841                      uint32_t *               dst,
2842                      const uint32_t *         src,
2843                      const uint32_t *         mask,
2844                      int                      width)
2845 {
2846     core_combine_out_ca_sse2 (dst, src, mask, width);
2847     _mm_empty ();
2848 }
2849
2850 static void
2851 sse2_combine_out_reverse_ca (pixman_implementation_t *imp,
2852                              pixman_op_t              op,
2853                              uint32_t *               dst,
2854                              const uint32_t *         src,
2855                              const uint32_t *         mask,
2856                              int                      width)
2857 {
2858     core_combine_out_reverse_ca_sse2 (dst, src, mask, width);
2859     _mm_empty ();
2860 }
2861
2862 static void
2863 sse2_combine_atop_ca (pixman_implementation_t *imp,
2864                       pixman_op_t              op,
2865                       uint32_t *               dst,
2866                       const uint32_t *         src,
2867                       const uint32_t *         mask,
2868                       int                      width)
2869 {
2870     core_combine_atop_ca_sse2 (dst, src, mask, width);
2871     _mm_empty ();
2872 }
2873
2874 static void
2875 sse2_combine_atop_reverse_ca (pixman_implementation_t *imp,
2876                               pixman_op_t              op,
2877                               uint32_t *               dst,
2878                               const uint32_t *         src,
2879                               const uint32_t *         mask,
2880                               int                      width)
2881 {
2882     core_combine_reverse_atop_ca_sse2 (dst, src, mask, width);
2883     _mm_empty ();
2884 }
2885
2886 static void
2887 sse2_combine_xor_ca (pixman_implementation_t *imp,
2888                      pixman_op_t              op,
2889                      uint32_t *               dst,
2890                      const uint32_t *         src,
2891                      const uint32_t *         mask,
2892                      int                      width)
2893 {
2894     core_combine_xor_ca_sse2 (dst, src, mask, width);
2895     _mm_empty ();
2896 }
2897
2898 static void
2899 sse2_combine_add_ca (pixman_implementation_t *imp,
2900                      pixman_op_t              op,
2901                      uint32_t *               dst,
2902                      const uint32_t *         src,
2903                      const uint32_t *         mask,
2904                      int                      width)
2905 {
2906     core_combine_add_ca_sse2 (dst, src, mask, width);
2907     _mm_empty ();
2908 }
2909
2910 /* -------------------------------------------------------------------
2911  * composite_over_n_8888
2912  */
2913
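/* Fast path: a solid source composited with OVER onto an 8888
 * destination.  The source pixel and its expanded alpha are computed
 * once; unaligned leading/trailing pixels are handled one at a time
 * with MMX, the aligned middle four pixels at a time with SSE2.
 */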
2914 static void
2915 sse2_composite_over_n_8888 (pixman_implementation_t *imp,
2916                             pixman_op_t              op,
2917                             pixman_image_t *         src_image,
2918                             pixman_image_t *         mask_image,
2919                             pixman_image_t *         dst_image,
2920                             int32_t                  src_x,
2921                             int32_t                  src_y,
2922                             int32_t                  mask_x,
2923                             int32_t                  mask_y,
2924                             int32_t                  dest_x,
2925                             int32_t                  dest_y,
2926                             int32_t                  width,
2927                             int32_t                  height)
2928 {
2929     uint32_t src;
2930     uint32_t    *dst_line, *dst, d;
2931     uint16_t w;
2932     int dst_stride;
2933     __m128i xmm_src, xmm_alpha;
2934     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
2935
2936     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
2937
2938     if (src == 0)
2939         return;
2940
2941     PIXMAN_IMAGE_GET_LINE (
2942         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
2943
2944     xmm_src = expand_pixel_32_1x128 (src);
2945     xmm_alpha = expand_alpha_1x128 (xmm_src);
2946
2947     while (height--)
2948     {
2949         dst = dst_line;
2950
2951         /* call prefetch hint to optimize cache load*/
2952         cache_prefetch ((__m128i*)dst);
2953
2954         dst_line += dst_stride;
2955         w = width;
2956
2957         while (w && (unsigned long)dst & 15)
2958         {
2959             d = *dst;
2960             *dst++ = pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
2961                                               _mm_movepi64_pi64 (xmm_alpha),
2962                                               unpack_32_1x64 (d)));
2963             w--;
2964         }
2965
2966         cache_prefetch ((__m128i*)dst);
2967
2968         while (w >= 4)
2969         {
2970             /* fill cache line with next memory */
2971             cache_prefetch_next ((__m128i*)dst);
2972
2973             xmm_dst = load_128_aligned ((__m128i*)dst);
2974
2975             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
2976
2977             over_2x128 (&xmm_src, &xmm_src,
2978                         &xmm_alpha, &xmm_alpha,
2979                         &xmm_dst_lo, &xmm_dst_hi);
2980
2981             /* rebuild the 4 pixel data and save */
2982             save_128_aligned (
2983                 (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2984
2985             w -= 4;
2986             dst += 4;
2987         }
2988
2989         while (w)
2990         {
2991             d = *dst;
2992             *dst++ = pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
2993                                               _mm_movepi64_pi64 (xmm_alpha),
2994                                               unpack_32_1x64 (d)));
2995             w--;
2996         }
2997
2998     }
2999     _mm_empty ();
3000 }
3001
3002 /* ---------------------------------------------------------------------
3003  * composite_over_n_0565
3004  */
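/* Fast path: a solid source OVER an r5g6b5 destination.  Destination
 * pixels are expanded from 565 to 8888, blended, and repacked, eight
 * pixels per SSE2 iteration.
 */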
3005 static void
3006 sse2_composite_over_n_0565 (pixman_implementation_t *imp,
3007                             pixman_op_t              op,
3008                             pixman_image_t *         src_image,
3009                             pixman_image_t *         mask_image,
3010                             pixman_image_t *         dst_image,
3011                             int32_t                  src_x,
3012                             int32_t                  src_y,
3013                             int32_t                  mask_x,
3014                             int32_t                  mask_y,
3015                             int32_t                  dest_x,
3016                             int32_t                  dest_y,
3017                             int32_t                  width,
3018                             int32_t                  height)
3019 {
3020     uint32_t src;
3021     uint16_t    *dst_line, *dst, d;
3022     uint16_t w;
3023     int dst_stride;
3024     __m128i xmm_src, xmm_alpha;
3025     __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
3026
3027     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
3028
3029     if (src == 0)
3030         return;
3031
3032     PIXMAN_IMAGE_GET_LINE (
3033         dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
3034
3035     xmm_src = expand_pixel_32_1x128 (src);
3036     xmm_alpha = expand_alpha_1x128 (xmm_src);
3037
3038     while (height--)
3039     {
3040         dst = dst_line;
3041
3042         /* call prefetch hint to optimize cache load*/
3043         cache_prefetch ((__m128i*)dst);
3044
3045         dst_line += dst_stride;
3046         w = width;
3047
3048         while (w && (unsigned long)dst & 15)
3049         {
3050             d = *dst;
3051
3052             *dst++ = pack_565_32_16 (
3053                 pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
3054                                          _mm_movepi64_pi64 (xmm_alpha),
3055                                          expand565_16_1x64 (d))));
3056             w--;
3057         }
3058
3059         /* call prefetch hint to optimize cache load*/
3060         cache_prefetch ((__m128i*)dst);
3061
3062         while (w >= 8)
3063         {
3064             /* fill cache line with next memory */
3065             cache_prefetch_next ((__m128i*)dst);
3066
3067             xmm_dst = load_128_aligned ((__m128i*)dst);
3068
3069             unpack_565_128_4x128 (xmm_dst,
3070                                   &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
3071
3072             over_2x128 (&xmm_src, &xmm_src,
3073                         &xmm_alpha, &xmm_alpha,
3074                         &xmm_dst0, &xmm_dst1);
3075             over_2x128 (&xmm_src, &xmm_src,
3076                         &xmm_alpha, &xmm_alpha,
3077                         &xmm_dst2, &xmm_dst3);
3078
3079             xmm_dst = pack_565_4x128_128 (
3080                 &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
3081
3082             save_128_aligned ((__m128i*)dst, xmm_dst);
3083
3084             dst += 8;
3085             w -= 8;
3086         }
3087
3088         while (w--)
3089         {
3090             d = *dst;
3091             *dst++ = pack_565_32_16 (
3092                 pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
3093                                          _mm_movepi64_pi64 (xmm_alpha),
3094                                          expand565_16_1x64 (d))));
3095         }
3096     }
3097
3098     _mm_empty ();
3099 }
3100
3101 /* ---------------------------------------------------------------------------
3102  * composite_over_n_8888_8888_ca
3103  */
3104
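/* Fast path: a solid source with a per-component a8r8g8b8 mask
 * (component alpha) OVER an a8r8g8b8 destination.  Four-pixel blocks
 * whose mask words are all zero are detected with
 * _mm_cmpeq_epi32/_mm_movemask_epi8 and left untouched.
 */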
3105 static void
3106 sse2_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
3107                                     pixman_op_t              op,
3108                                     pixman_image_t *         src_image,
3109                                     pixman_image_t *         mask_image,
3110                                     pixman_image_t *         dst_image,
3111                                     int32_t                  src_x,
3112                                     int32_t                  src_y,
3113                                     int32_t                  mask_x,
3114                                     int32_t                  mask_y,
3115                                     int32_t                  dest_x,
3116                                     int32_t                  dest_y,
3117                                     int32_t                  width,
3118                                     int32_t                  height)
3119 {
3120     uint32_t src;
3121     uint32_t    *dst_line, d;
3122     uint32_t    *mask_line, m;
3123     uint32_t pack_cmp;
3124     int dst_stride, mask_stride;
3125
3126     __m128i xmm_src, xmm_alpha;
3127     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
3128     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
3129
3130     __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;
3131
3132     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
3133
3134     if (src == 0)
3135         return;
3136
3137     PIXMAN_IMAGE_GET_LINE (
3138         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3139     PIXMAN_IMAGE_GET_LINE (
3140         mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
3141
3142     xmm_src = _mm_unpacklo_epi8 (
3143         create_mask_2x32_128 (src, src), _mm_setzero_si128 ());
3144     xmm_alpha = expand_alpha_1x128 (xmm_src);
3145     mmx_src   = _mm_movepi64_pi64 (xmm_src);
3146     mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
3147
3148     while (height--)
3149     {
3150         int w = width;
3151         const uint32_t *pm = (uint32_t *)mask_line;
3152         uint32_t *pd = (uint32_t *)dst_line;
3153
3154         dst_line += dst_stride;
3155         mask_line += mask_stride;
3156
3157         /* call prefetch hint to optimize cache load*/
3158         cache_prefetch ((__m128i*)pd);
3159         cache_prefetch ((__m128i*)pm);
3160
3161         while (w && (unsigned long)pd & 15)
3162         {
3163             m = *pm++;
3164
3165             if (m)
3166             {
3167                 d = *pd;
3168                 mmx_mask = unpack_32_1x64 (m);
3169                 mmx_dest = unpack_32_1x64 (d);
3170
3171                 *pd = pack_1x64_32 (in_over_1x64 (&mmx_src,
3172                                                   &mmx_alpha,
3173                                                   &mmx_mask,
3174                                                   &mmx_dest));
3175             }
3176
3177             pd++;
3178             w--;
3179         }
3180
3181         /* call prefetch hint to optimize cache load*/
3182         cache_prefetch ((__m128i*)pd);
3183         cache_prefetch ((__m128i*)pm);
3184
3185         while (w >= 4)
3186         {
3187             /* fill cache line with next memory */
3188             cache_prefetch_next ((__m128i*)pd);
3189             cache_prefetch_next ((__m128i*)pm);
3190
3191             xmm_mask = load_128_unaligned ((__m128i*)pm);
3192
3193             pack_cmp =
3194                 _mm_movemask_epi8 (
3195                     _mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128 ()));
3196
3197             /* if all bits in the mask are zero, pack_cmp is equal to 0xffff */
3198             if (pack_cmp != 0xffff)
3199             {
3200                 xmm_dst = load_128_aligned ((__m128i*)pd);
3201
3202                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
3203                 unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
3204
3205                 in_over_2x128 (&xmm_src, &xmm_src,
3206                                &xmm_alpha, &xmm_alpha,
3207                                &xmm_mask_lo, &xmm_mask_hi,
3208                                &xmm_dst_lo, &xmm_dst_hi);
3209
3210                 save_128_aligned (
3211                     (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
3212             }
3213
3214             pd += 4;
3215             pm += 4;
3216             w -= 4;
3217         }
3218
3219         while (w)
3220         {
3221             m = *pm++;
3222
3223             if (m)
3224             {
3225                 d = *pd;
3226                 mmx_mask = unpack_32_1x64 (m);
3227                 mmx_dest = unpack_32_1x64 (d);
3228
3229                 *pd = pack_1x64_32 (
3230                     in_over_1x64 (&mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest));
3231             }
3232
3233             pd++;
3234             w--;
3235         }
3236     }
3237
3238     _mm_empty ();
3239 }
3240
3241 /* -------------------------------------------------------------------
3242  * composite_over_8888_n_8888
3243  */
3244
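/* Fast path: an a8r8g8b8 source with a solid mask OVER an a8r8g8b8
 * destination.  Only the alpha byte of the solid mask is used; it is
 * replicated into every 16-bit lane of xmm_mask up front.
 */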
3245 static void
3246 sse2_composite_over_8888_n_8888 (pixman_implementation_t *imp,
3247                                  pixman_op_t              op,
3248                                  pixman_image_t *         src_image,
3249                                  pixman_image_t *         mask_image,
3250                                  pixman_image_t *         dst_image,
3251                                  int32_t                  src_x,
3252                                  int32_t                  src_y,
3253                                  int32_t                  mask_x,
3254                                  int32_t                  mask_y,
3255                                  int32_t                  dest_x,
3256                                  int32_t                  dest_y,
3257                                  int32_t                  width,
3258                                  int32_t                  height)
3259 {
3260     uint32_t    *dst_line, *dst;
3261     uint32_t    *src_line, *src;
3262     uint32_t mask;
3263     uint16_t w;
3264     int dst_stride, src_stride;
3265
3266     __m128i xmm_mask;
3267     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
3268     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
3269     __m128i xmm_alpha_lo, xmm_alpha_hi;
3270
3271     PIXMAN_IMAGE_GET_LINE (
3272         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3273     PIXMAN_IMAGE_GET_LINE (
3274         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
3275
3276     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
3277
3278     xmm_mask = create_mask_16_128 (mask >> 24);
3279
3280     while (height--)
3281     {
3282         dst = dst_line;
3283         dst_line += dst_stride;
3284         src = src_line;
3285         src_line += src_stride;
3286         w = width;
3287
3288         /* call prefetch hint to optimize cache load*/
3289         cache_prefetch ((__m128i*)dst);
3290         cache_prefetch ((__m128i*)src);
3291
3292         while (w && (unsigned long)dst & 15)
3293         {
3294             uint32_t s = *src++;
3295             uint32_t d = *dst;
3296
3297             __m64 ms = unpack_32_1x64 (s);
3298             __m64 alpha = expand_alpha_1x64 (ms);
3299             __m64 mask  = _mm_movepi64_pi64 (xmm_mask);
3300             __m64 dest  = unpack_32_1x64 (d);
3301
3302             *dst++ = pack_1x64_32 (
3303                 in_over_1x64 (&ms, &alpha, &mask, &dest));
3304
3305             w--;
3306         }
3307
3308         /* call prefetch hint to optimize cache load*/
3309         cache_prefetch ((__m128i*)dst);
3310         cache_prefetch ((__m128i*)src);
3311
3312         while (w >= 4)
3313         {
3314             /* fill cache line with next memory */
3315             cache_prefetch_next ((__m128i*)dst);
3316             cache_prefetch_next ((__m128i*)src);
3317
3318             xmm_src = load_128_unaligned ((__m128i*)src);
3319             xmm_dst = load_128_aligned ((__m128i*)dst);
3320
3321             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
3322             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
3323             expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
3324                                 &xmm_alpha_lo, &xmm_alpha_hi);
3325
3326             in_over_2x128 (&xmm_src_lo, &xmm_src_hi,
3327                            &xmm_alpha_lo, &xmm_alpha_hi,
3328                            &xmm_mask, &xmm_mask,
3329                            &xmm_dst_lo, &xmm_dst_hi);
3330
3331             save_128_aligned (
3332                 (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
3333
3334             dst += 4;
3335             src += 4;
3336             w -= 4;
3337         }
3338
3339         while (w)
3340         {
3341             uint32_t s = *src++;
3342             uint32_t d = *dst;
3343
3344             __m64 ms = unpack_32_1x64 (s);
3345             __m64 alpha = expand_alpha_1x64 (ms);
3346             __m64 mask  = _mm_movepi64_pi64 (xmm_mask);
3347             __m64 dest  = unpack_32_1x64 (d);
3348
3349             *dst++ = pack_1x64_32 (
3350                 in_over_1x64 (&ms, &alpha, &mask, &dest));
3351
3352             w--;
3353         }
3354     }
3355
3356     _mm_empty ();
3357 }
3358
3359 /* ---------------------------------------------------------------------
3360  * composite_over_x888_n_8888
3361  */
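/* Same as composite_over_8888_n_8888, but the x8r8g8b8 source carries no
 * alpha, so 0xff000000 is OR-ed into each source pixel before blending.
 */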
3362 static void
3363 sse2_composite_over_x888_n_8888 (pixman_implementation_t *imp,
3364                                  pixman_op_t              op,
3365                                  pixman_image_t *         src_image,
3366                                  pixman_image_t *         mask_image,
3367                                  pixman_image_t *         dst_image,
3368                                  int32_t                  src_x,
3369                                  int32_t                  src_y,
3370                                  int32_t                  mask_x,
3371                                  int32_t                  mask_y,
3372                                  int32_t                  dest_x,
3373                                  int32_t                  dest_y,
3374                                  int32_t                  width,
3375                                  int32_t                  height)
3376 {
3377     uint32_t    *dst_line, *dst;
3378     uint32_t    *src_line, *src;
3379     uint32_t mask;
3380     int dst_stride, src_stride;
3381     uint16_t w;
3382
3383     __m128i xmm_mask, xmm_alpha;
3384     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
3385     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
3386
3387     PIXMAN_IMAGE_GET_LINE (
3388         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3389     PIXMAN_IMAGE_GET_LINE (
3390         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
3391
3392     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
3393
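    /* The x888 source is treated as fully opaque, so its expanded alpha is
     * simply the 0x00ff constant.
     */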
3394     xmm_mask = create_mask_16_128 (mask >> 24);
3395     xmm_alpha = mask_00ff;
3396
3397     while (height--)
3398     {
3399         dst = dst_line;
3400         dst_line += dst_stride;
3401         src = src_line;
3402         src_line += src_stride;
3403         w = width;
3404
3405         /* call prefetch hint to optimize cache load*/
3406         cache_prefetch ((__m128i*)dst);
3407         cache_prefetch ((__m128i*)src);
3408
3409         while (w && (unsigned long)dst & 15)
3410         {
3411             uint32_t s = (*src++) | 0xff000000;
3412             uint32_t d = *dst;
3413
3414             __m64 src   = unpack_32_1x64 (s);
3415             __m64 alpha = _mm_movepi64_pi64 (xmm_alpha);
3416             __m64 mask  = _mm_movepi64_pi64 (xmm_mask);
3417             __m64 dest  = unpack_32_1x64 (d);
3418
3419             *dst++ = pack_1x64_32 (
3420                 in_over_1x64 (&src, &alpha, &mask, &dest));
3421
3422             w--;
3423         }
3424
3425         /* call prefetch hint to optimize cache load*/
3426         cache_prefetch ((__m128i*)dst);
3427         cache_prefetch ((__m128i*)src);
3428
3429         while (w >= 4)
3430         {
3431             /* fill cache line with next memory */
3432             cache_prefetch_next ((__m128i*)dst);
3433             cache_prefetch_next ((__m128i*)src);
3434
3435             xmm_src = _mm_or_si128 (
3436                 load_128_unaligned ((__m128i*)src), mask_ff000000);
3437             xmm_dst = load_128_aligned ((__m128i*)dst);
3438
3439             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
3440             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
3441
3442             in_over_2x128 (&xmm_src_lo, &xmm_src_hi,
3443                            &xmm_alpha, &xmm_alpha,
3444                            &xmm_mask, &xmm_mask,
3445                            &xmm_dst_lo, &xmm_dst_hi);
3446
3447             save_128_aligned (
3448                 (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
3449
3450             dst += 4;
3451             src += 4;
3452             w -= 4;
3453
3454         }
3455
3456         while (w)
3457         {
3458             uint32_t s = (*src++) | 0xff000000;
3459             uint32_t d = *dst;
3460
3461             __m64 src  = unpack_32_1x64 (s);
3462             __m64 alpha = _mm_movepi64_pi64 (xmm_alpha);
3463             __m64 mask  = _mm_movepi64_pi64 (xmm_mask);
3464             __m64 dest  = unpack_32_1x64 (d);
3465
3466             *dst++ = pack_1x64_32 (
3467                 in_over_1x64 (&src, &alpha, &mask, &dest));
3468
3469             w--;
3470         }
3471     }
3472
3473     _mm_empty ();
3474 }
3475
3476 /* --------------------------------------------------------------------
3477  * composite_over_8888_8888
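 *
 * Plain OVER between two a8r8g8b8 images; each scanline is handed to
 * core_combine_over_u_sse2 ().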
3478  */
3479 static void
3480 sse2_composite_over_8888_8888 (pixman_implementation_t *imp,
3481                                pixman_op_t              op,
3482                                pixman_image_t *         src_image,
3483                                pixman_image_t *         mask_image,
3484                                pixman_image_t *         dst_image,
3485                                int32_t                  src_x,
3486                                int32_t                  src_y,
3487                                int32_t                  mask_x,
3488                                int32_t                  mask_y,
3489                                int32_t                  dest_x,
3490                                int32_t                  dest_y,
3491                                int32_t                  width,
3492                                int32_t                  height)
3493 {
3494     int dst_stride, src_stride;
3495     uint32_t    *dst_line, *dst;
3496     uint32_t    *src_line, *src;
3497
3498     PIXMAN_IMAGE_GET_LINE (
3499         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3500     PIXMAN_IMAGE_GET_LINE (
3501         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
3502
3503     dst = dst_line;
3504     src = src_line;
3505
3506     while (height--)
3507     {
3508         core_combine_over_u_sse2 (dst, src, NULL, width);
3509
3510         dst += dst_stride;
3511         src += src_stride;
3512     }
3513     _mm_empty ();
3514 }
3515
3516 /* ------------------------------------------------------------------
3517  * composite_over_8888_0565
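 *
 * a8r8g8b8 source OVER an r5g6b5 destination; the helper below handles a
 * single pixel, the main loop works on eight pixels per aligned store.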
3518  */
3519 static force_inline uint16_t
3520 composite_over_8888_0565pixel (uint32_t src, uint16_t dst)
3521 {
3522     __m64 ms;
3523
3524     ms = unpack_32_1x64 (src);
3525     return pack_565_32_16 (
3526         pack_1x64_32 (
3527             over_1x64 (
3528                 ms, expand_alpha_1x64 (ms), expand565_16_1x64 (dst))));
3529 }
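
#if 0
/* Illustrative scalar sketch (not part of pixman) of the same per-pixel
 * operation as composite_over_8888_0565pixel () above, using plain shifts
 * for the 565 expansion; the SIMD helpers may round the expansion slightly
 * differently.  Names below are hypothetical.
 */
static uint32_t
ref_mul_un8 (uint32_t x, uint32_t a)
{
    uint32_t t = x * a + 0x80;

    return (t + (t >> 8)) >> 8;            /* x * a / 255, rounded */
}

static uint16_t
ref_over_8888_0565 (uint32_t src, uint16_t dst)
{
    uint32_t ia = 255 - (src >> 24);

    /* expand r5g6b5 to 8 bits per channel */
    uint32_t dr = ((dst >> 11) & 0x1f) << 3;
    uint32_t dg = ((dst >> 5) & 0x3f) << 2;
    uint32_t db = (dst & 0x1f) << 3;

    /* OVER with a premultiplied source: d = s + d * (1 - srca) */
    uint32_t r = ((src >> 16) & 0xff) + ref_mul_un8 (dr, ia);
    uint32_t g = ((src >> 8) & 0xff) + ref_mul_un8 (dg, ia);
    uint32_t b = (src & 0xff) + ref_mul_un8 (db, ia);

    /* repack to r5g6b5 */
    return ((r & 0xf8) << 8) | ((g & 0xfc) << 3) | (b >> 3);
}
#endif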
3530
3531 static void
3532 sse2_composite_over_8888_0565 (pixman_implementation_t *imp,
3533                                pixman_op_t              op,
3534                                pixman_image_t *         src_image,
3535                                pixman_image_t *         mask_image,
3536                                pixman_image_t *         dst_image,
3537                                int32_t                  src_x,
3538                                int32_t                  src_y,
3539                                int32_t                  mask_x,
3540                                int32_t                  mask_y,
3541                                int32_t                  dest_x,
3542                                int32_t                  dest_y,
3543                                int32_t                  width,
3544                                int32_t                  height)
3545 {
3546     uint16_t    *dst_line, *dst, d;
3547     uint32_t    *src_line, *src, s;
3548     int dst_stride, src_stride;
3549     uint16_t w;
3550
3551     __m128i xmm_alpha_lo, xmm_alpha_hi;
3552     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
3553     __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
3554
3555     PIXMAN_IMAGE_GET_LINE (
3556         dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
3557     PIXMAN_IMAGE_GET_LINE (
3558         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
3559
3560 #if 0
3561     /* FIXME
3562      *
3563      * This code was copied from the MMX implementation, FIXME included.
3564      * If it's a problem there, it's probably a problem here too.
3565      */
3566     assert (src_image->drawable == mask_image->drawable);
3567 #endif
3568
3569     while (height--)
3570     {
3571         dst = dst_line;
3572         src = src_line;
3573
3574         /* call prefetch hint to optimize cache load*/
3575         cache_prefetch ((__m128i*)src);
3576         cache_prefetch ((__m128i*)dst);
3577
3578         dst_line += dst_stride;
3579         src_line += src_stride;
3580         w = width;
3581
3582         /* Align dst on a 16-byte boundary */
3583         while (w &&
3584                ((unsigned long)dst & 15))
3585         {
3586             s = *src++;
3587             d = *dst;
3588
3589             *dst++ = composite_over_8888_0565pixel (s, d);
3590             w--;
3591         }
3592
3593         /* call prefetch hint to optimize cache load*/
3594         cache_prefetch ((__m128i*)src);
3595         cache_prefetch ((__m128i*)dst);
3596
3597         /* It's an 8-pixel loop */
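        /* Eight 565 pixels fill one aligned 16-byte destination store, while
         * the 8888 source side needs two unaligned 128-bit loads per pass.
         */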
3598         while (w >= 8)
3599         {
3600             /* fill cache line with next memory */
3601             cache_prefetch_next ((__m128i*)src);
3602             cache_prefetch_next ((__m128i*)dst);
3603
3604             /* I'm loading unaligned because I'm not sure
3605              * about the address alignment.
3606              */
3607             xmm_src = load_128_unaligned ((__m128i*) src);
3608             xmm_dst = load_128_aligned ((__m128i*) dst);
3609
3610             /* Unpacking */
3611             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
3612             unpack_565_128_4x128 (xmm_dst,
3613                                   &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
3614             expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
3615                                 &xmm_alpha_lo, &xmm_alpha_hi);
3616
3617             /* Load the next 4 source pixels ahead of time
3618              * so the memory read overlaps the blend below.
3619              */
3620             xmm_src = load_128_unaligned ((__m128i*) (src + 4));
3621
3622             over_2x128 (&xmm_src_lo, &xmm_src_hi,
3623                         &xmm_alpha_lo, &xmm_alpha_hi,
3624                         &xmm_dst0, &xmm_dst1);
3625
3626             /* Unpacking */
3627             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
3628             expand_alpha_2x128 (xmm_src_lo, xmm_src_hi,
3629                                 &xmm_alpha_lo, &xmm_alpha_hi);
3630
3631             over_2x128 (&xmm_src_lo, &xmm_src_hi,
3632                         &xmm_alpha_lo, &xmm_alpha_hi,
3633                         &xmm_dst2, &xmm_dst3);
3634
3635             save_128_aligned (
3636                 (__m128i*)dst, pack_565_4x128_128 (
3637                     &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
3638
3639             w -= 8;
3640             dst += 8;
3641             src += 8;
3642         }
3643
3644         while (w--)
3645         {
3646             s = *src++;
3647             d = *dst;
3648
3649             *dst++ = composite_over_8888_0565pixel (s, d);
3650         }
3651     }
3652
3653     _mm_empty ();
3654 }
3655
3656 /* -----------------------------------------------------------------
3657  * composite_over_n_8_8888
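 *
 * Solid source IN an a8 mask, composited OVER an a8r8g8b8 destination.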
3658  */
3659
3660 static void
3661 sse2_composite_over_n_8_8888 (pixman_implementation_t *imp,
3662                               pixman_op_t              op,
3663                               pixman_image_t *         src_image,
3664                               pixman_image_t *         mask_image,
3665                               pixman_image_t *         dst_image,
3666                               int32_t                  src_x,
3667                               int32_t                  src_y,
3668                               int32_t                  mask_x,
3669                               int32_t                  mask_y,
3670                               int32_t                  dest_x,
3671                               int32_t                  dest_y,
3672                               int32_t                  width,
3673                               int32_t                  height)
3674 {
3675     uint32_t src, srca;
3676     uint32_t *dst_line, *dst;
3677     uint8_t *mask_line, *mask;
3678     int dst_stride, mask_stride;
3679     uint16_t w;
3680     uint32_t m, d;
3681
3682     __m128i xmm_src, xmm_alpha, xmm_def;
3683     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
3684     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
3685
3686     __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;
3687
3688     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
3689
3690     srca = src >> 24;
3691     if (src == 0)
3692         return;
3693
3694     PIXMAN_IMAGE_GET_LINE (
3695         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3696     PIXMAN_IMAGE_GET_LINE (
3697         mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
3698
3699     xmm_def = create_mask_2x32_128 (src, src);
3700     xmm_src = expand_pixel_32_1x128 (src);
3701     xmm_alpha = expand_alpha_1x128 (xmm_src);
3702     mmx_src   = _mm_movepi64_pi64 (xmm_src);
3703     mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
3704
3705     while (height--)
3706     {
3707         dst = dst_line;
3708         dst_line += dst_stride;
3709         mask = mask_line;
3710         mask_line += mask_stride;
3711         w = width;
3712
3713         /* call prefetch hint to optimize cache load*/
3714         cache_prefetch ((__m128i*)mask);
3715         cache_prefetch ((__m128i*)dst);
3716
3717         while (w && (unsigned long)dst & 15)
3718         {
3719             uint8_t m = *mask++;
3720
3721             if (m)
3722             {
3723                 d = *dst;
3724                 mmx_mask = expand_pixel_8_1x64 (m);
3725                 mmx_dest = unpack_32_1x64 (d);
3726
3727                 *dst = pack_1x64_32 (in_over_1x64 (&mmx_src,
3728                                                    &mmx_alpha,
3729                                                    &mmx_mask,
3730                                                    &mmx_dest));
3731             }
3732
3733             w--;
3734             dst++;
3735         }
3736
3737         /* call prefetch hint to optimize cache load*/
3738         cache_prefetch ((__m128i*)mask);
3739         cache_prefetch ((__m128i*)dst);
3740
3741         while (w >= 4)
3742         {
3743             /* fill cache line with next memory */
3744             cache_prefetch_next ((__m128i*)mask);
3745             cache_prefetch_next ((__m128i*)dst);
3746
3747             m = *((uint32_t*)mask);
3748
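            /* Four mask bytes at once: an opaque source under a fully-set
             * mask word is a plain store of the solid colour, an all-zero
             * mask word leaves the destination untouched.
             */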
3749             if (srca == 0xff && m == 0xffffffff)
3750             {
3751                 save_128_aligned ((__m128i*)dst, xmm_def);
3752             }
3753             else if (m)
3754             {
3755                 xmm_dst = load_128_aligned ((__m128i*) dst);
3756                 xmm_mask = unpack_32_1x128 (m);
3757                 xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ());
3758
3759                 /* Unpacking */
3760                 unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
3761                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
3762
3763                 expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi,
3764                                         &xmm_mask_lo, &xmm_mask_hi);
3765
3766                 in_over_2x128 (&xmm_src, &xmm_src,
3767                                &xmm_alpha, &xmm_alpha,
3768                                &xmm_mask_lo, &xmm_mask_hi,
3769                                &xmm_dst_lo, &xmm_dst_hi);
3770
3771                 save_128_aligned (
3772                     (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
3773             }
3774
3775             w -= 4;
3776             dst += 4;
3777             mask += 4;
3778         }
3779
3780         while (w)
3781         {
3782             uint8_t m = *mask++;
3783
3784             if (m)
3785             {
3786                 d = *dst;
3787                 mmx_mask = expand_pixel_8_1x64 (m);
3788                 mmx_dest = unpack_32_1x64 (d);
3789
3790                 *dst = pack_1x64_32 (in_over_1x64 (&mmx_src,
3791                                                    &mmx_alpha,
3792                                                    &mmx_mask,
3793                                                    &mmx_dest));
3794             }
3795
3796             w--;
3797             dst++;
3798         }
3799     }
3800
3801     _mm_empty ();
3802 }
3803
3804 /* ----------------------------------------------------------------
3805  * pixman_fill_sse2
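 *
 * Solid fill of a 16 or 32 bpp buffer: align the destination with 2- and
 * 4-byte stores, write the replicated pattern in 128-, 64-, 32- and 16-byte
 * aligned blocks, then finish the tail with small stores again.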
3806  */
3807
3808 pixman_bool_t
3809 pixman_fill_sse2 (uint32_t *bits,
3810                   int       stride,
3811                   int       bpp,
3812                   int       x,
3813                   int       y,
3814                   int       width,
3815                   int       height,
3816                   uint32_t  data)
3817 {
3818     uint32_t byte_width;
3819     uint8_t         *byte_line;
3820
3821     __m128i xmm_def;
3822
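    /* A 16 bpp fill requires the 16-bit pattern to be replicated into both
     * halves of 'data'; otherwise bail out so a generic fill path can
     * handle it.
     */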
3823     if (bpp == 16 && (data >> 16 != (data & 0xffff)))
3824         return FALSE;
3825
3826     if (bpp != 16 && bpp != 32)
3827         return FALSE;
3828
3829     if (bpp == 16)
3830     {
3831         stride = stride * (int) sizeof (uint32_t) / 2;
3832         byte_line = (uint8_t *)(((uint16_t *)bits) + stride * y + x);
3833         byte_width = 2 * width;
3834         stride *= 2;
3835     }
3836     else
3837     {
3838         stride = stride * (int) sizeof (uint32_t) / 4;
3839         byte_line = (uint8_t *)(((uint32_t *)bits) + stride * y + x);
3840         byte_width = 4 * width;
3841         stride *= 4;
3842     }
3843
3844     cache_prefetch ((__m128i*)byte_line);
3845     xmm_def = create_mask_2x32_128 (data, data);
3846
3847     while (height--)
3848     {
3849         int w;
3850         uint8_t *d = byte_line;
3851         byte_line += stride;
3852         w = byte_width;
3853
3854
3855         cache_prefetch_next ((__m128i*)d);
3856
3857         while (w >= 2 && ((unsigned long)d & 3))
3858         {
3859             *(uint16_t *)d = data;
3860             w -= 2;
3861             d += 2;
3862         }
3863
3864         while (w >= 4 && ((unsigned long)d & 15))
3865         {
3866             *(uint32_t *)d = data;
3867
3868             w -= 4;
3869             d += 4;
3870         }
3871
3872         cache_prefetch_next ((__m128i*)d);
3873
3874         while (w >= 128)
3875         {
3876             cache_prefetch (((__m128i*)d) + 12);
3877
3878             save_128_aligned ((__m128i*)(d),     xmm_def);
3879             save_128_aligned ((__m128i*)(d + 16),  xmm_def);
3880             save_128_aligned ((__m128i*)(d + 32),  xmm_def);
3881             save_128_aligned ((__m128i*)(d + 48),  xmm_def);
3882             save_128_aligned ((__m128i*)(d + 64),  xmm_def);
3883             save_128_aligned ((__m128i*)(d + 80),  xmm_def);
3884             save_128_aligned ((__m128i*)(d + 96),  xmm_def);
3885             save_128_aligned ((__m128i*)(d + 112), xmm_def);
3886
3887             d += 128;
3888             w -= 128;
3889         }
3890
3891         if (w >= 64)
3892         {
3893             cache_prefetch (((__m128i*)d) + 8);
3894
3895             save_128_aligned ((__m128i*)(d),     xmm_def);
3896             save_128_aligned ((__m128i*)(d + 16),  xmm_def);
3897             save_128_aligned ((__m128i*)(d + 32),  xmm_def);
3898             save_128_aligned ((__m128i*)(d + 48),  xmm_def);
3899
3900             d += 64;
3901             w -= 64;
3902         }
3903
3904         cache_prefetch_next ((__m128i*)d);
3905
3906         if (w >= 32)
3907         {
3908             save_128_aligned ((__m128i*)(d),     xmm_def);
3909             save_128_aligned ((__m128i*)(d + 16),  xmm_def);
3910
3911             d += 32;
3912             w -= 32;
3913         }
3914
3915         if (w >= 16)
3916         {
3917             save_128_aligned ((__m128i*)(d),     xmm_def);
3918
3919             d += 16;
3920             w -= 16;
3921         }
3922
3923         cache_prefetch_next ((__m128i*)d);
3924
3925         while (w >= 4)
3926         {
3927             *(uint32_t *)d = data;
3928
3929             w -= 4;
3930             d += 4;
3931         }
3932
3933         if (w >= 2)
3934         {
3935             *(uint16_t *)d = data;
3936             w -= 2;
3937             d += 2;
3938         }
3939     }
3940
3941     _mm_empty ();
3942     return TRUE;
3943 }
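
#if 0
/* Illustrative sketch (not part of pixman): for a 16 bpp fill,
 * pixman_fill_sse2 () expects the 16-bit pattern replicated into both
 * halves of 'data'.  A hypothetical caller filling an r5g6b5 buffer might
 * prepare the argument like this.
 */
static pixman_bool_t
example_fill_0565 (uint32_t *bits, int stride, int x, int y,
                   int width, int height, uint16_t color565)
{
    /* 'stride' is in uint32_t units, matching pixman's rowstride convention */
    uint32_t data = ((uint32_t) color565 << 16) | color565;

    return pixman_fill_sse2 (bits, stride, 16, x, y, width, height, data);
}
#endif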
3944
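/* -----------------------------------------------------------------------
 * composite_src_n_8_8888
 *
 * Solid source scaled by an a8 mask and written with the SRC operator:
 * zero mask bytes clear the destination, and a fully-set mask word over an
 * opaque source is a plain store of the replicated colour.
 */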
3945 static void
3946 sse2_composite_src_n_8_8888 (pixman_implementation_t *imp,
3947                              pixman_op_t              op,
3948                              pixman_image_t *         src_image,
3949                              pixman_image_t *         mask_image,
3950                              pixman_image_t *         dst_image,
3951                              int32_t                  src_x,
3952                              int32_t                  src_y,
3953                              int32_t                  mask_x,
3954                              int32_t                  mask_y,
3955                              int32_t                  dest_x,
3956                              int32_t                  dest_y,
3957                              int32_t                  width,
3958                              int32_t                  height)
3959 {
3960     uint32_t src, srca;
3961     uint32_t    *dst_line, *dst;
3962     uint8_t     *mask_line, *mask;
3963     int dst_stride, mask_stride;
3964     uint16_t w;
3965     uint32_t m;
3966
3967     __m128i xmm_src, xmm_def;
3968     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
3969
3970     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
3971
3972     srca = src >> 24;
3973     if (src == 0)
3974     {
3975         pixman_fill_sse2 (dst_image->bits.bits, dst_image->bits.rowstride,
3976                           PIXMAN_FORMAT_BPP (dst_image->bits.format),
3977                           dest_x, dest_y, width, height, 0);
3978         return;
3979     }
3980
3981     PIXMAN_IMAGE_GET_LINE (
3982         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3983     PIXMAN_IMAGE_GET_LINE (
3984         mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
3985
3986     xmm_def = create_mask_2x32_128 (src, src);
3987     xmm_src = expand_pixel_32_1x128 (src);
3988
3989     while (height--)
3990     {
3991         dst = dst_line;
3992         dst_line += dst_stride;
3993         mask = mask_line;
3994         mask_line += mask_stride;
3995         w = width;
3996
3997         /* call prefetch hint to optimize cache load*/
3998         cache_prefetch ((__m128i*)mask);
3999         cache_prefetch ((__m128i*)dst);
4000
4001         while (w && (unsigned long)dst & 15)
4002         {
4003             uint8_t m = *mask++;
4004
4005             if (m)
4006             {
4007                 *dst = pack_1x64_32 (
4008                     pix_multiply_1x64 (
4009                         _mm_movepi64_pi64 (xmm_src), expand_pixel_8_1x64 (m)));
4010             }
4011             else
4012             {
4013                 *dst = 0;
4014             }
4015
4016             w--;
4017             dst++;
4018         }
4019
4020         /* call prefetch hint to optimize cache load*/
4021         cache_prefetch ((__m128i*)mask);
4022         cache_prefetch ((__m128i*)dst);
4023
4024         while (w >= 4)
4025         {
4026             /* fill cache line with next memory */
4027             cache_prefetch_next ((__m128i*)mask);
4028             cache_prefetch_next ((__m128i*)dst);
4029
4030             m = *((uint32_t*)mask);
4031
4032             if (srca == 0xff && m == 0xffffffff)
4033             {
4034                 save_128_aligned ((__m128i*)dst, xmm_def);
4035             }
4036             else if (m)
4037             {
4038                 xmm_mask = unpack_32_1x128 (m);
4039                 xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ());
4040
4041                 /* Unpacking */
4042                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4043
4044                 expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi,
4045                                         &xmm_mask_lo, &xmm_mask_hi);
4046
4047                 pix_multiply_2x128 (&xmm_src, &xmm_src,
4048                                     &xmm_mask_lo, &xmm_mask_hi,
4049                                     &xmm_mask_lo, &xmm_mask_hi);
4050
4051                 save_128_aligned (
4052                     (__m128i*)dst, pack_2x128_128 (xmm_mask_lo, xmm_mask_hi));
4053             }
4054             else
4055             {
4056                 save_128_aligned ((__m128i*)dst, _mm_setzero_si128 ());
4057             }
4058
4059             w -= 4;
4060             dst += 4;
4061             mask += 4;
4062         }
4063
4064         while (w)
4065         {
4066             uint8_t m = *mask++;
4067
4068             if (m)
4069             {
4070                 *dst = pack_1x64_32 (
4071                     pix_multiply_1x64 (
4072                         _mm_movepi64_pi64 (xmm_src), expand_pixel_8_1x64 (m)));
4073             }
4074             else
4075             {
4076                 *dst = 0;
4077             }
4078
4079             w--;
4080             dst++;
4081         }
4082     }
4083
4084     _mm_empty ();
4085 }
4086
4087 /*-----------------------------------------------------------------------
4088  * composite_over_n_8_0565
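 *
 * Solid source IN an a8 mask, OVER an r5g6b5 destination; the main loop
 * expands eight 565 pixels to 8888 precision, blends, and repacks.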
4089  */
4090
4091 static void
4092 sse2_composite_over_n_8_0565 (pixman_implementation_t *imp,
4093                               pixman_op_t              op,
4094                               pixman_image_t *         src_image,
4095                               pixman_image_t *         mask_image,
4096                               pixman_image_t *         dst_image,
4097                               int32_t                  src_x,
4098                               int32_t                  src_y,
4099                               int32_t                  mask_x,
4100                               int32_t                  mask_y,
4101                               int32_t                  dest_x,
4102                               int32_t                  dest_y,
4103                               int32_t                  width,
4104                               int32_t                  height)
4105 {
4106     uint32_t src, srca;
4107     uint16_t    *dst_line, *dst, d;
4108     uint8_t     *mask_line, *mask;
4109     int dst_stride, mask_stride;
4110     uint16_t w;
4111     uint32_t m;
4112     __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;
4113
4114     __m128i xmm_src, xmm_alpha;
4115     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
4116     __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
4117
4118     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
4119
4120     srca = src >> 24;
4121     if (src == 0)
4122         return;
4123
4124     PIXMAN_IMAGE_GET_LINE (
4125         dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
4126     PIXMAN_IMAGE_GET_LINE (
4127         mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
4128
4129     xmm_src = expand_pixel_32_1x128 (src);
4130     xmm_alpha = expand_alpha_1x128 (xmm_src);
4131     mmx_src = _mm_movepi64_pi64 (xmm_src);
4132     mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
4133
4134     while (height--)
4135     {
4136         dst = dst_line;
4137         dst_line += dst_stride;
4138         mask = mask_line;
4139         mask_line += mask_stride;
4140         w = width;
4141
4142         /* call prefetch hint to optimize cache load*/
4143         cache_prefetch ((__m128i*)mask);
4144         cache_prefetch ((__m128i*)dst);
4145
4146         while (w && (unsigned long)dst & 15)
4147         {
4148             m = *mask++;
4149
4150             if (m)
4151             {
4152                 d = *dst;
4153                 mmx_mask = expand_alpha_rev_1x64 (unpack_32_1x64 (m));
4154                 mmx_dest = expand565_16_1x64 (d);
4155
4156                 *dst = pack_565_32_16 (
4157                     pack_1x64_32 (
4158                         in_over_1x64 (
4159                             &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest)));
4160             }
4161
4162             w--;
4163             dst++;
4164         }
4165
4166         /* call prefetch hint to optimize cache load*/
4167         cache_prefetch ((__m128i*)mask);
4168         cache_prefetch ((__m128i*)dst);
4169
4170         while (w >= 8)
4171         {
4172             /* fill cache line with next memory */
4173             cache_prefetch_next ((__m128i*)mask);
4174             cache_prefetch_next ((__m128i*)dst);
4175
4176             xmm_dst = load_128_aligned ((__m128i*) dst);
4177             unpack_565_128_4x128 (xmm_dst,
4178                                   &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
4179
4180             m = *((uint32_t*)mask);
4181             mask += 4;
4182
4183             if (m)
4184             {
4185                 xmm_mask = unpack_32_1x128 (m);
4186                 xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ());
4187
4188                 /* Unpacking */
4189                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4190
4191                 expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi,
4192                                         &xmm_mask_lo, &xmm_mask_hi);
4193
4194                 in_over_2x128 (&xmm_src, &xmm_src,
4195                                &xmm_alpha, &xmm_alpha,
4196                                &xmm_mask_lo, &xmm_mask_hi,
4197                                &xmm_dst0, &xmm_dst1);
4198             }
4199
4200             m = *((uint32_t*)mask);
4201             mask += 4;
4202
4203             if (m)
4204             {
4205                 xmm_mask = unpack_32_1x128 (m);
4206                 xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128 ());
4207
4208                 /* Unpacking */
4209                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4210
4211                 expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi,
4212                                         &xmm_mask_lo, &xmm_mask_hi);
4213                 in_over_2x128 (&xmm_src, &xmm_src,
4214                                &xmm_alpha, &xmm_alpha,
4215                                &xmm_mask_lo, &xmm_mask_hi,
4216                                &xmm_dst2, &xmm_dst3);
4217             }
4218
4219             save_128_aligned (
4220                 (__m128i*)dst, pack_565_4x128_128 (
4221                     &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
4222
4223             w -= 8;
4224             dst += 8;
4225         }
4226
4227         while (w)
4228         {
4229             m = *mask++;
4230
4231             if (m)
4232             {
4233                 d = *dst;
4234                 mmx_mask = expand_alpha_rev_1x64 (unpack_32_1x64 (m));
4235                 mmx_dest = expand565_16_1x64 (d);
4236
4237                 *dst = pack_565_32_16 (
4238                     pack_1x64_32 (
4239                         in_over_1x64 (
4240                             &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest)));
4241             }
4242
4243             w--;
4244             dst++;
4245         }
4246     }
4247
4248     _mm_empty ();
4249 }
4250
4251 /* -----------------------------------------------------------------------
4252  * composite_over_pixbuf_0565
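 *
 * Non-premultiplied a8b8g8r8 ("pixbuf") source OVER an r5g6b5 destination.
 * Each block of four source pixels is tested: fully opaque blocks only need
 * the channel swap, fully transparent blocks are skipped, and the rest go
 * through over_rev_non_pre_2x128 ().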
4253  */
4254
4255 static void
4256 sse2_composite_over_pixbuf_0565 (pixman_implementation_t *imp,
4257                                  pixman_op_t              op,
4258                                  pixman_image_t *         src_image,
4259                                  pixman_image_t *         mask_image,
4260                                  pixman_image_t *         dst_image,
4261                                  int32_t                  src_x,
4262                                  int32_t                  src_y,
4263                                  int32_t                  mask_x,
4264                                  int32_t                  mask_y,
4265                                  int32_t                  dest_x,
4266                                  int32_t                  dest_y,
4267                                  int32_t                  width,
4268                                  int32_t                  height)
4269 {
4270     uint16_t    *dst_line, *dst, d;
4271     uint32_t    *src_line, *src, s;
4272     int dst_stride, src_stride;
4273     uint16_t w;
4274     uint32_t opaque, zero;
4275
4276     __m64 ms;
4277     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
4278     __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
4279
4280     PIXMAN_IMAGE_GET_LINE (
4281         dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
4282     PIXMAN_IMAGE_GET_LINE (
4283         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
4284
4285 #if 0
4286     /* FIXME
4287      *
4288      * This code was copied from the MMX implementation, FIXME included.
4289      * If it's a problem there, it's probably a problem here too.
4290      */
4291     assert (src_image->drawable == mask_image->drawable);
4292 #endif
4293
4294     while (height--)
4295     {
4296         dst = dst_line;
4297         dst_line += dst_stride;
4298         src = src_line;
4299         src_line += src_stride;
4300         w = width;
4301
4302         /* call prefetch hint to optimize cache load*/
4303         cache_prefetch ((__m128i*)src);
4304         cache_prefetch ((__m128i*)dst);
4305
4306         while (w && (unsigned long)dst & 15)
4307         {
4308             s = *src++;
4309             d = *dst;
4310
4311             ms = unpack_32_1x64 (s);
4312
4313             *dst++ = pack_565_32_16 (
4314                 pack_1x64_32 (
4315                     over_rev_non_pre_1x64 (ms, expand565_16_1x64 (d))));
4316             w--;
4317         }
4318
4319         /* call prefetch hint to optimize cache load*/
4320         cache_prefetch ((__m128i*)src);
4321         cache_prefetch ((__m128i*)dst);
4322
4323         while (w >= 8)
4324         {
4325             /* fill cache line with next memory */
4326             cache_prefetch_next ((__m128i*)src);
4327             cache_prefetch_next ((__m128i*)dst);
4328
4329             /* First round */
4330             xmm_src = load_128_unaligned ((__m128i*)src);
4331             xmm_dst = load_128_aligned  ((__m128i*)dst);
4332
4333             opaque = is_opaque (xmm_src);
4334             zero = is_zero (xmm_src);
4335
4336             unpack_565_128_4x128 (xmm_dst,
4337                                   &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
4338             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
4339
4340             /* preload next round*/
4341             xmm_src = load_128_unaligned ((__m128i*)(src + 4));
4342
4343             if (opaque)
4344             {
4345                 invert_colors_2x128 (xmm_src_lo, xmm_src_hi,
4346                                      &xmm_dst0, &xmm_dst1);
4347             }
4348             else if (!zero)
4349             {
4350                 over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi,
4351                                         &xmm_dst0, &xmm_dst1);
4352             }
4353
4354             /* Second round */
4355             opaque = is_opaque (xmm_src);
4356             zero = is_zero (xmm_src);
4357
4358             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
4359
4360             if (opaque)
4361             {
4362                 invert_colors_2x128 (xmm_src_lo, xmm_src_hi,
4363                                      &xmm_dst2, &xmm_dst3);
4364             }
4365             else if (!zero)
4366             {
4367                 over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi,
4368                                         &xmm_dst2, &xmm_dst3);
4369             }
4370
4371             save_128_aligned (
4372                 (__m128i*)dst, pack_565_4x128_128 (
4373                     &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
4374
4375             w -= 8;
4376             src += 8;
4377             dst += 8;
4378         }
4379
4380         while (w)
4381         {
4382             s = *src++;
4383             d = *dst;
4384
4385             ms = unpack_32_1x64 (s);
4386
4387             *dst++ = pack_565_32_16 (
4388                 pack_1x64_32 (
4389                     over_rev_non_pre_1x64 (ms, expand565_16_1x64 (d))));
4390             w--;
4391         }
4392     }
4393
4394     _mm_empty ();
4395 }
4396
4397 /* -------------------------------------------------------------------------
4398  * composite_over_pixbuf_8888
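 *
 * Same pixbuf handling as above, but with an a8r8g8b8 destination: opaque
 * blocks are colour-swapped and stored directly, transparent blocks leave
 * the destination untouched, the rest are blended with
 * over_rev_non_pre_2x128 ().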
4399  */
4400
4401 static void
4402 sse2_composite_over_pixbuf_8888 (pixman_implementation_t *imp,
4403                                  pixman_op_t              op,
4404                                  pixman_image_t *         src_image,
4405                                  pixman_image_t *         mask_image,
4406                                  pixman_image_t *         dst_image,
4407                                  int32_t                  src_x,
4408                                  int32_t                  src_y,
4409                                  int32_t                  mask_x,
4410                                  int32_t                  mask_y,
4411                                  int32_t                  dest_x,
4412                                  int32_t                  dest_y,
4413                                  int32_t                  width,
4414                                  int32_t                  height)
4415 {
4416     uint32_t    *dst_line, *dst, d;
4417     uint32_t    *src_line, *src, s;
4418     int dst_stride, src_stride;
4419     uint16_t w;
4420     uint32_t opaque, zero;
4421
4422     __m128i xmm_src_lo, xmm_src_hi;
4423     __m128i xmm_dst_lo, xmm_dst_hi;
4424
4425     PIXMAN_IMAGE_GET_LINE (
4426         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
4427     PIXMAN_IMAGE_GET_LINE (
4428         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
4429
4430 #if 0
4431     /* FIXME
4432      *
4433      * This code was copied from the MMX implementation, FIXME included.
4434      * If it's a problem there, it's probably a problem here too.
4435      */
4436     assert (src_image->drawable == mask_image->drawable);
4437 #endif
4438
4439     while (height--)
4440     {
4441         dst = dst_line;
4442         dst_line += dst_stride;
4443         src = src_line;
4444         src_line += src_stride;
4445         w = width;
4446
4447         /* call prefetch hint to optimize cache load*/
4448         cache_prefetch ((__m128i*)src);
4449         cache_prefetch ((__m128i*)dst);
4450
4451         while (w && (unsigned long)dst & 15)
4452         {
4453             s = *src++;
4454             d = *dst;
4455
4456             *dst++ = pack_1x64_32 (
4457                 over_rev_non_pre_1x64 (
4458                     unpack_32_1x64 (s), unpack_32_1x64 (d)));
4459
4460             w--;
4461         }
4462
4463         /* call prefetch hint to optimize cache load*/
4464         cache_prefetch ((__m128i*)src);
4465         cache_prefetch ((__m128i*)dst);
4466
4467         while (w >= 4)
4468         {
4469             /* fill cache line with next memory */
4470             cache_prefetch_next ((__m128i*)src);
4471             cache_prefetch_next ((__m128i*)dst);
4472
4473             xmm_src_hi = load_128_unaligned ((__m128i*)src);
4474
4475             opaque = is_opaque (xmm_src_hi);
4476             zero = is_zero (xmm_src_hi);
4477
4478             unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
4479
4480             if (opaque)
4481             {
4482                 invert_colors_2x128 (xmm_src_lo, xmm_src_hi,
4483                                      &xmm_dst_lo, &xmm_dst_hi);
4484
4485                 save_128_aligned (
4486                     (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
4487             }
4488             else if (!zero)
4489             {
4490                 xmm_dst_hi = load_128_aligned  ((__m128i*)dst);
4491
4492                 unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
4493
4494                 over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi,
4495                                         &xmm_dst_lo, &xmm_dst_hi);
4496
4497                 save_128_aligned (
4498                     (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
4499             }
4500
4501             w -= 4;
4502             dst += 4;
4503             src += 4;
4504         }
4505
4506         while (w)
4507         {
4508             s = *src++;
4509             d = *dst;
4510
4511             *dst++ = pack_1x64_32 (
4512                 over_rev_non_pre_1x64 (
4513                     unpack_32_1x64 (s), unpack_32_1x64 (d)));
4514
4515             w--;
4516         }
4517     }
4518
4519     _mm_empty ();
4520 }
4521
4522 /* -------------------------------------------------------------------------------------------------
4523  * composite_over_n_8888_0565_ca
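 *
 * Component-alpha case: the a8r8g8b8 mask modulates each channel of the
 * solid source separately before the OVER onto the r5g6b5 destination;
 * the pack_cmp test skips blocks whose mask pixels are all zero.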
4524  */
4525
4526 static void
4527 sse2_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
4528                                     pixman_op_t              op,
4529                                     pixman_image_t *         src_image,
4530                                     pixman_image_t *         mask_image,
4531                                     pixman_image_t *         dst_image,
4532                                     int32_t                  src_x,
4533                                     int32_t                  src_y,
4534                                     int32_t                  mask_x,
4535                                     int32_t                  mask_y,
4536                                     int32_t                  dest_x,
4537                                     int32_t                  dest_y,
4538                                     int32_t                  width,
4539                                     int32_t                  height)
4540 {
4541     uint32_t src;
4542     uint16_t    *dst_line, *dst, d;
4543     uint32_t    *mask_line, *mask, m;
4544     int dst_stride, mask_stride;
4545     int w;
4546     uint32_t pack_cmp;
4547
4548     __m128i xmm_src, xmm_alpha;
4549     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
4550     __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
4551
4552     __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;
4553
4554     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
4555
4556     if (src == 0)
4557         return;
4558
4559     PIXMAN_IMAGE_GET_LINE (
4560         dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
4561     PIXMAN_IMAGE_GET_LINE (
4562         mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
4563
4564     xmm_src = expand_pixel_32_1x128 (src);
4565     xmm_alpha = expand_alpha_1x128 (xmm_src);
4566     mmx_src = _mm_movepi64_pi64 (xmm_src);
4567     mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
4568
4569     while (height--)
4570     {
4571         w = width;
4572         mask = mask_line;
4573         dst = dst_line;
4574         mask_line += mask_stride;
4575         dst_line += dst_stride;
4576
4577         /* call prefetch hint to optimize cache load*/
4578         cache_prefetch ((__m128i*)mask);
4579         cache_prefetch ((__m128i*)dst);
4580
4581         while (w && ((unsigned long)dst & 15))
4582         {
4583             m = *(uint32_t *) mask;
4584
4585             if (m)
4586             {
4587                 d = *dst;
4588                 mmx_mask = unpack_32_1x64 (m);
4589                 mmx_dest = expand565_16_1x64 (d);
4590
4591                 *dst = pack_565_32_16 (
4592                     pack_1x64_32 (
4593                         in_over_1x64 (
4594                             &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest)));
4595             }
4596
4597             w--;
4598             dst++;
4599             mask++;
4600         }
4601
4602         /* call prefetch hint to optimize cache load*/
4603         cache_prefetch ((__m128i*)mask);
4604         cache_prefetch ((__m128i*)dst);
4605
4606         while (w >= 8)
4607         {
4608             /* fill cache line with next memory */
4609             cache_prefetch_next ((__m128i*)mask);
4610             cache_prefetch_next ((__m128i*)dst);
4611
4612             /* First round */
4613             xmm_mask = load_128_unaligned ((__m128i*)mask);
4614             xmm_dst = load_128_aligned ((__m128i*)dst);
4615
4616             pack_cmp = _mm_movemask_epi8 (
4617                 _mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128 ()));
4618
4619             unpack_565_128_4x128 (xmm_dst,
4620                                   &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
4621             unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4622
4623             /* preload next round */
4624             xmm_mask = load_128_unaligned ((__m128i*)(mask + 4));
4625
4626             /* blend the first four pixels unless their mask is all zero */
4627             if (pack_cmp != 0xffff)
4628             {
4629                 in_over_2x128 (&xmm_src, &xmm_src,
4630                                &xmm_alpha, &xmm_alpha,
4631                                &xmm_mask_lo, &xmm_mask_hi,
4632                                &xmm_dst0, &xmm_dst1);
4633             }
4634
4635             /* Second round */
4636             pack_cmp = _mm_movemask_epi8 (
4637                 _mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128 ()));
4638
4639             unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4640
4641             if (pack_cmp != 0xffff)
4642             {
4643                 in_over_2x128 (&xmm_src, &xmm_src,
4644                                &xmm_alpha, &xmm_alpha,
4645                                &xmm_mask_lo, &xmm_mask_hi,
4646                                &xmm_dst2, &xmm_dst3);
4647             }
4648
4649             save_128_aligned (
4650                 (__m128i*)dst, pack_565_4x128_128 (
4651                     &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
4652
4653             w -= 8;
4654             dst += 8;
4655             mask += 8;
4656         }
4657
4658         while (w)
4659         {
4660             m = *(uint32_t *) mask;
4661
4662             if (m)
4663             {
4664                 d = *dst;
4665                 mmx_mask = unpack_32_1x64 (m);
4666                 mmx_dest = expand565_16_1x64 (d);
4667
4668                 *dst = pack_565_32_16 (
4669                     pack_1x64_32 (
4670                         in_over_1x64 (
4671                             &mmx_src, &mmx_alpha, &mmx_mask, &mmx_dest)));
4672             }
4673
4674             w--;
4675             dst++;
4676             mask++;
4677         }
4678     }
4679
4680     _mm_empty ();
4681 }
4682
4683 /* -----------------------------------------------------------------------
4684  * composite_in_n_8_8
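 *
 * IN with a solid source on a8 data: dst = srca * mask * dst, 16 pixels
 * per pass in the main loop.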
4685  */
4686
4687 static void
4688 sse2_composite_in_n_8_8 (pixman_implementation_t *imp,
4689                          pixman_op_t              op,
4690                          pixman_image_t *         src_image,
4691                          pixman_image_t *         mask_image,
4692                          pixman_image_t *         dst_image,
4693                          int32_t                  src_x,
4694                          int32_t                  src_y,
4695                          int32_t                  mask_x,
4696                          int32_t                  mask_y,
4697                          int32_t                  dest_x,
4698                          int32_t                  dest_y,
4699                          int32_t                  width,
4700                          int32_t                  height)
4701 {
4702     uint8_t     *dst_line, *dst;
4703     uint8_t     *mask_line, *mask;
4704     int dst_stride, mask_stride;
4705     uint16_t w, d, m;
4706     uint32_t src;
4707     uint8_t sa;
4708
4709     __m128i xmm_alpha;
4710     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
4711     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
4712
4713     PIXMAN_IMAGE_GET_LINE (
4714         dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
4715     PIXMAN_IMAGE_GET_LINE (
4716         mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
4717
4718     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
4719
4720     sa = src >> 24;
4721     if (sa == 0)
4722         return;
4723
4724     xmm_alpha = expand_alpha_1x128 (expand_pixel_32_1x128 (src));
4725
4726     while (height--)
4727     {
4728         dst = dst_line;
4729         dst_line += dst_stride;
4730         mask = mask_line;
4731         mask_line += mask_stride;
4732         w = width;
4733
4734         /* call prefetch hint to optimize cache load*/
4735         cache_prefetch ((__m128i*)mask);
4736         cache_prefetch ((__m128i*)dst);
4737
4738         while (w && ((unsigned long)dst & 15))
4739         {
4740             m = (uint32_t) *mask++;
4741             d = (uint32_t) *dst;
4742
4743             *dst++ = (uint8_t) pack_1x64_32 (
4744                 pix_multiply_1x64 (
4745                     pix_multiply_1x64 (_mm_movepi64_pi64 (xmm_alpha),
4746                                        unpack_32_1x64 (m)),
4747                     unpack_32_1x64 (d)));
4748             w--;
4749         }
4750
4751         /* call prefetch hint to optimize cache load*/
4752         cache_prefetch ((__m128i*)mask);
4753         cache_prefetch ((__m128i*)dst);
4754
4755         while (w >= 16)
4756         {
4757             /* fill cache line with next memory */
4758             cache_prefetch_next ((__m128i*)mask);
4759             cache_prefetch_next ((__m128i*)dst);
4760
4761             xmm_mask = load_128_unaligned ((__m128i*)mask);
4762             xmm_dst = load_128_aligned ((__m128i*)dst);
4763
4764             unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4765             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
4766
4767             pix_multiply_2x128 (&xmm_alpha, &xmm_alpha,
4768                                 &xmm_mask_lo, &xmm_mask_hi,
4769                                 &xmm_mask_lo, &xmm_mask_hi);
4770
4771             pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi,
4772                                 &xmm_dst_lo, &xmm_dst_hi,
4773                                 &xmm_dst_lo, &xmm_dst_hi);
4774
4775             save_128_aligned (
4776                 (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
4777
4778             mask += 16;
4779             dst += 16;
4780             w -= 16;
4781         }
4782
4783         while (w)
4784         {
4785             m = (uint32_t) *mask++;
4786             d = (uint32_t) *dst;
4787
4788             *dst++ = (uint8_t) pack_1x64_32 (
4789                 pix_multiply_1x64 (
4790                     pix_multiply_1x64 (
4791                         _mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)),
4792                     unpack_32_1x64 (d)));
4793             w--;
4794         }
4795     }
4796
4797     _mm_empty ();
4798 }
4799
4800 /* ---------------------------------------------------------------------------
4801  * composite_in_8_8
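 *
 * IN between two a8 images: dst = src * dst, 16 pixels per 128-bit pass.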
4802  */
4803
4804 static void
4805 sse2_composite_in_8_8 (pixman_implementation_t *imp,
4806                        pixman_op_t              op,
4807                        pixman_image_t *         src_image,
4808                        pixman_image_t *         mask_image,
4809                        pixman_image_t *         dst_image,
4810                        int32_t                  src_x,
4811                        int32_t                  src_y,
4812                        int32_t                  mask_x,
4813                        int32_t                  mask_y,
4814                        int32_t                  dest_x,
4815                        int32_t                  dest_y,
4816                        int32_t                  width,
4817                        int32_t                  height)
4818 {
4819     uint8_t     *dst_line, *dst;
4820     uint8_t     *src_line, *src;
4821     int src_stride, dst_stride;
4822     uint16_t w;
4823     uint32_t s, d;
4824
4825     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
4826     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
4827
4828     PIXMAN_IMAGE_GET_LINE (
4829         dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
4830     PIXMAN_IMAGE_GET_LINE (
4831         src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
4832
4833     while (height--)
4834     {
4835         dst = dst_line;
4836         dst_line += dst_stride;
4837         src = src_line;
4838         src_line += src_stride;
4839         w = width;
4840
4841         /* call prefetch hint to optimize cache load*/
4842         cache_prefetch ((__m128i*)src);
4843         cache_prefetch ((__m128i*)dst);
4844
4845         while (w && ((unsigned long)dst & 15))
4846         {
4847             s = (uint32_t) *src++;
4848             d = (uint32_t) *dst;
4849
4850             *dst++ = (uint8_t) pack_1x64_32 (
4851                 pix_multiply_1x64 (
4852                     unpack_32_1x64 (s), unpack_32_1x64 (d)));
4853             w--;
4854         }
4855
4856         /* call prefetch hint to optimize cache load*/
4857         cache_prefetch ((__m128i*)src);
4858         cache_prefetch ((__m128i*)dst);
4859
4860         while (w >= 16)
4861         {
4862             /* fill cache line with next memory */
4863             cache_prefetch_next ((__m128i*)src);
4864             cache_prefetch_next ((__m128i*)dst);
4865
4866             xmm_src = load_128_unaligned ((__m128i*)src);
4867             xmm_dst = load_128_aligned ((__m128i*)dst);
4868
4869             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
4870             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
4871
4872             pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi,
4873                                 &xmm_dst_lo, &xmm_dst_hi,
4874                                 &xmm_dst_lo, &xmm_dst_hi);
4875
4876             save_128_aligned (
4877                 (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
4878
4879             src += 16;
4880             dst += 16;
4881             w -= 16;
4882         }
4883
4884         while (w)
4885         {
4886             s = (uint32_t) *src++;
4887             d = (uint32_t) *dst;
4888
4889             *dst++ = (uint8_t) pack_1x64_32 (
4890                 pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (d)));
4891             w--;
4892         }
4893     }
4894
4895     _mm_empty ();
4896 }
4897
4898 /* -------------------------------------------------------------------------
4899  * composite_add_8888_8_8
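 *
 * ADD with a solid source and an a8 mask onto an a8 destination:
 * dst = saturate (dst + srca * mask).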
4900  */
4901
4902 static void
4903 sse2_composite_add_8888_8_8 (pixman_implementation_t *imp,
4904                              pixman_op_t              op,
4905                              pixman_image_t *         src_image,
4906                              pixman_image_t *         mask_image,
4907                              pixman_image_t *         dst_image,
4908                              int32_t                  src_x,
4909                              int32_t                  src_y,
4910                              int32_t                  mask_x,
4911                              int32_t                  mask_y,
4912                              int32_t                  dest_x,
4913                              int32_t                  dest_y,
4914                              int32_t                  width,
4915                              int32_t                  height)
4916 {
4917     uint8_t     *dst_line, *dst;
4918     uint8_t     *mask_line, *mask;
4919     int dst_stride, mask_stride;
4920     uint16_t w;
4921     uint32_t src;
4922     uint8_t sa;
4923     uint32_t m, d;
4924
4925     __m128i xmm_alpha;
4926     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
4927     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
4928
4929     PIXMAN_IMAGE_GET_LINE (
4930         dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
4931     PIXMAN_IMAGE_GET_LINE (
4932         mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
4933
4934     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
4935
4936     sa = src >> 24;
4937     if (sa == 0)
4938         return;
4939
4940     xmm_alpha = expand_alpha_1x128 (expand_pixel_32_1x128 (src));
4941
4942     while (height--)
4943     {
4944         dst = dst_line;
4945         dst_line += dst_stride;
4946         mask = mask_line;
4947         mask_line += mask_stride;
4948         w = width;
4949
4950         /* call prefetch hint to optimize cache load*/
4951         cache_prefetch ((__m128i*)mask);
4952         cache_prefetch ((__m128i*)dst);
4953
4954         while (w && ((unsigned long)dst & 15))
4955         {
4956             m = (uint32_t) *mask++;
4957             d = (uint32_t) *dst;
4958
4959             *dst++ = (uint8_t) pack_1x64_32 (
4960                 _mm_adds_pu16 (
4961                     pix_multiply_1x64 (
4962                         _mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)),
4963                     unpack_32_1x64 (d)));
4964             w--;
4965         }
4966
4967         /* call prefetch hint to optimize cache load*/
4968         cache_prefetch ((__m128i*)mask);
4969         cache_prefetch ((__m128i*)dst);
4970
4971         while (w >= 16)
4972         {
4973             /* fill cache line with next memory */
4974             cache_prefetch_next ((__m128i*)mask);
4975             cache_prefetch_next ((__m128i*)dst);
4976
4977             xmm_mask = load_128_unaligned ((__m128i*)mask);
4978             xmm_dst = load_128_aligned ((__m128i*)dst);
4979
4980             unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4981             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
4982
4983             pix_multiply_2x128 (&xmm_alpha, &xmm_alpha,
4984                                 &xmm_mask_lo, &xmm_mask_hi,
4985                                 &xmm_mask_lo, &xmm_mask_hi);
4986
4987             xmm_dst_lo = _mm_adds_epu16 (xmm_mask_lo, xmm_dst_lo);
4988             xmm_dst_hi = _mm_adds_epu16 (xmm_mask_hi, xmm_dst_hi);
4989
4990             save_128_aligned (
4991                 (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
4992
4993             mask += 16;
4994             dst += 16;
4995             w -= 16;
4996         }
4997
4998         while (w)
4999         {
5000             m = (uint32_t) *mask++;
5001             d = (uint32_t) *dst;
5002
5003             *dst++ = (uint8_t) pack_1x64_32 (
5004                 _mm_adds_pu16 (
5005                     pix_multiply_1x64 (
5006                         _mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)),
5007                     unpack_32_1x64 (d)));
5008
5009             w--;
5010         }
5011     }
5012
5013     _mm_empty ();
5014 }
5015
5016 /* ----------------------------------------------------------------------
5017  * composite_add_8000_8000
5018  */
5019
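/* ADD between two a8 images.  Unaligned leading and trailing bytes are
 * handled with a scalar saturating add; the aligned middle of each scanline
 * goes through the SSE2 ADD combiner four bytes at a time.
 */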
5020 static void
5021 sse2_composite_add_8000_8000 (pixman_implementation_t *imp,
5022                               pixman_op_t              op,
5023                               pixman_image_t *         src_image,
5024                               pixman_image_t *         mask_image,
5025                               pixman_image_t *         dst_image,
5026                               int32_t                  src_x,
5027                               int32_t                  src_y,
5028                               int32_t                  mask_x,
5029                               int32_t                  mask_y,
5030                               int32_t                  dest_x,
5031                               int32_t                  dest_y,
5032                               int32_t                  width,
5033                               int32_t                  height)
5034 {
5035     uint8_t     *dst_line, *dst;
5036     uint8_t     *src_line, *src;
5037     int dst_stride, src_stride;
5038     uint16_t w;
5039     uint16_t t;
5040
5041     PIXMAN_IMAGE_GET_LINE (
5042         src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
5043     PIXMAN_IMAGE_GET_LINE (
5044         dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
5045
5046     while (height--)
5047     {
5048         dst = dst_line;
5049         src = src_line;
5050
5051         /* issue prefetch hints to optimize cache loads */
5052         cache_prefetch ((__m128i*)src);
5053         cache_prefetch ((__m128i*)dst);
5054
5055         dst_line += dst_stride;
5056         src_line += src_stride;
5057         w = width;
5058
5059         /* Small head */
5060         while (w && (unsigned long)dst & 3)
5061         {
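            /* t holds the 16-bit sum; when it exceeds 255, (t >> 8) is 1 and
             * (0 - (t >> 8)) ORs in 0xff, clamping the stored byte to 255.
             */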
5062             t = (*dst) + (*src++);
5063             *dst++ = t | (0 - (t >> 8));
5064             w--;
5065         }
5066
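        /* The aligned middle of the scanline reuses the 32-bit ADD combiner:
         * saturated addition is independent per byte, so four a8 pixels can
         * be processed as one 32-bit pixel.
         */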
5067         core_combine_add_u_sse2 ((uint32_t*)dst, (uint32_t*)src, NULL, w >> 2);
5068
5069         /* Small tail */
5070         dst += w & 0xfffc;
5071         src += w & 0xfffc;
5072
5073         w &= 3;
5074
5075         while (w)
5076         {
5077             t = (*dst) + (*src++);
5078             *dst++ = t | (0 - (t >> 8));
5079             w--;
5080         }
5081     }
5082
5083     _mm_empty ();
5084 }
5085
5086 /* ---------------------------------------------------------------------
5087  * composite_add_8888_8888
5088  */
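/* ADD between two 8888 images: each scanline is handed directly to the
 * SSE2 ADD combiner.
 */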
5089 static void
5090 sse2_composite_add_8888_8888 (pixman_implementation_t *imp,
5091                               pixman_op_t              op,
5092                               pixman_image_t *         src_image,
5093                               pixman_image_t *         mask_image,
5094                               pixman_image_t *         dst_image,
5095                               int32_t                  src_x,
5096                               int32_t                  src_y,
5097                               int32_t                  mask_x,
5098                               int32_t                  mask_y,
5099                               int32_t                  dest_x,
5100                               int32_t                  dest_y,
5101                               int32_t                  width,
5102                               int32_t                  height)
5103 {
5104     uint32_t    *dst_line, *dst;
5105     uint32_t    *src_line, *src;
5106     int dst_stride, src_stride;
5107
5108     PIXMAN_IMAGE_GET_LINE (
5109         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
5110     PIXMAN_IMAGE_GET_LINE (
5111         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
5112
5113     while (height--)
5114     {
5115         dst = dst_line;
5116         dst_line += dst_stride;
5117         src = src_line;
5118         src_line += src_stride;
5119
5120         core_combine_add_u_sse2 (dst, src, NULL, width);
5121     }
5122
5123     _mm_empty ();
5124 }
5125
5126 /* ---------------------------------------------------------------------
5127  * sse2_composite_copy_area
5128  */
5129
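/* Rectangle copy between two images of equal depth (16 or 32 bpp).
 * Returns FALSE for other depths so that the caller can fall back to a
 * delegate implementation.
 */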
5130 static pixman_bool_t
5131 pixman_blt_sse2 (uint32_t *src_bits,
5132                  uint32_t *dst_bits,
5133                  int       src_stride,
5134                  int       dst_stride,
5135                  int       src_bpp,
5136                  int       dst_bpp,
5137                  int       src_x,
5138                  int       src_y,
5139                  int       dst_x,
5140                  int       dst_y,
5141                  int       width,
5142                  int       height)
5143 {
5144     uint8_t *   src_bytes;
5145     uint8_t *   dst_bytes;
5146     int byte_width;
5147
5148     if (src_bpp != dst_bpp)
5149         return FALSE;
5150
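    /* The strides are given in uint32_t units; convert them to units of
     * the pixel size here and to bytes further down.
     */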
5151     if (src_bpp == 16)
5152     {
5153         src_stride = src_stride * (int) sizeof (uint32_t) / 2;
5154         dst_stride = dst_stride * (int) sizeof (uint32_t) / 2;
5155         src_bytes = (uint8_t *)(((uint16_t *)src_bits) + src_stride * (src_y) + (src_x));
5156         dst_bytes = (uint8_t *)(((uint16_t *)dst_bits) + dst_stride * (dst_y) + (dst_x));
5157         byte_width = 2 * width;
5158         src_stride *= 2;
5159         dst_stride *= 2;
5160     }
5161     else if (src_bpp == 32)
5162     {
5163         src_stride = src_stride * (int) sizeof (uint32_t) / 4;
5164         dst_stride = dst_stride * (int) sizeof (uint32_t) / 4;
5165         src_bytes = (uint8_t *)(((uint32_t *)src_bits) + src_stride * (src_y) + (src_x));
5166         dst_bytes = (uint8_t *)(((uint32_t *)dst_bits) + dst_stride * (dst_y) + (dst_x));
5167         byte_width = 4 * width;
5168         src_stride *= 4;
5169         dst_stride *= 4;
5170     }
5171     else
5172     {
5173         return FALSE;
5174     }
5175
5176     cache_prefetch ((__m128i*)src_bytes);
5177     cache_prefetch ((__m128i*)dst_bytes);
5178
5179     while (height--)
5180     {
5181         int w;
5182         uint8_t *s = src_bytes;
5183         uint8_t *d = dst_bytes;
5184         src_bytes += src_stride;
5185         dst_bytes += dst_stride;
5186         w = byte_width;
5187
5188         cache_prefetch_next ((__m128i*)s);
5189         cache_prefetch_next ((__m128i*)d);
5190
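        /* Align the destination: copy 2 bytes at a time up to a 4-byte
         * boundary, then 4 bytes at a time up to a 16-byte boundary.
         */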
5191         while (w >= 2 && ((unsigned long)d & 3))
5192         {
5193             *(uint16_t *)d = *(uint16_t *)s;
5194             w -= 2;
5195             s += 2;
5196             d += 2;
5197         }
5198
5199         while (w >= 4 && ((unsigned long)d & 15))
5200         {
5201             *(uint32_t *)d = *(uint32_t *)s;
5202
5203             w -= 4;
5204             s += 4;
5205             d += 4;
5206         }
5207
5208         cache_prefetch_next ((__m128i*)s);
5209         cache_prefetch_next ((__m128i*)d);
5210
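        /* Bulk copy: 64 bytes per iteration using four unaligned loads and
         * four aligned stores, prefetching 128 bytes ahead.
         */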
5211         while (w >= 64)
5212         {
5213             __m128i xmm0, xmm1, xmm2, xmm3;
5214
5215             /* 128 bytes ahead */
5216             cache_prefetch (((__m128i*)s) + 8);
5217             cache_prefetch (((__m128i*)d) + 8);
5218
5219             xmm0 = load_128_unaligned ((__m128i*)(s));
5220             xmm1 = load_128_unaligned ((__m128i*)(s + 16));
5221             xmm2 = load_128_unaligned ((__m128i*)(s + 32));
5222             xmm3 = load_128_unaligned ((__m128i*)(s + 48));
5223
5224             save_128_aligned ((__m128i*)(d),    xmm0);
5225             save_128_aligned ((__m128i*)(d + 16), xmm1);
5226             save_128_aligned ((__m128i*)(d + 32), xmm2);
5227             save_128_aligned ((__m128i*)(d + 48), xmm3);
5228
5229             s += 64;
5230             d += 64;
5231             w -= 64;
5232         }
5233
5234         cache_prefetch_next ((__m128i*)s);
5235         cache_prefetch_next ((__m128i*)d);
5236
5237         while (w >= 16)
5238         {
5239             save_128_aligned ((__m128i*)d, load_128_unaligned ((__m128i*)s) );
5240
5241             w -= 16;
5242             d += 16;
5243             s += 16;
5244         }
5245
5246         cache_prefetch_next ((__m128i*)s);
5247         cache_prefetch_next ((__m128i*)d);
5248
5249         while (w >= 4)
5250         {
5251             *(uint32_t *)d = *(uint32_t *)s;
5252
5253             w -= 4;
5254             s += 4;
5255             d += 4;
5256         }
5257
5258         if (w >= 2)
5259         {
5260             *(uint16_t *)d = *(uint16_t *)s;
5261             w -= 2;
5262             s += 2;
5263             d += 2;
5264         }
5265     }
5266
5267     _mm_empty ();
5268
5269     return TRUE;
5270 }
5271
5272 static void
5273 sse2_composite_copy_area (pixman_implementation_t *imp,
5274                           pixman_op_t              op,
5275                           pixman_image_t *         src_image,
5276                           pixman_image_t *         mask_image,
5277                           pixman_image_t *         dst_image,
5278                           int32_t                  src_x,
5279                           int32_t                  src_y,
5280                           int32_t                  mask_x,
5281                           int32_t                  mask_y,
5282                           int32_t                  dest_x,
5283                           int32_t                  dest_y,
5284                           int32_t                  width,
5285                           int32_t                  height)
5286 {
5287     pixman_blt_sse2 (src_image->bits.bits,
5288                      dst_image->bits.bits,
5289                      src_image->bits.rowstride,
5290                      dst_image->bits.rowstride,
5291                      PIXMAN_FORMAT_BPP (src_image->bits.format),
5292                      PIXMAN_FORMAT_BPP (dst_image->bits.format),
5293                      src_x, src_y, dest_x, dest_y, width, height);
5294 }
5295
5296 #if 0
5297 /* This code was buggy in the MMX version, and the bug was carried over to this SSE2 version */
5298 static void
5299 sse2_composite_over_x888_8_8888 (pixman_implementation_t *imp,
5300                                  pixman_op_t              op,
5301                                  pixman_image_t *         src_image,
5302                                  pixman_image_t *         mask_image,
5303                                  pixman_image_t *         dst_image,
5304                                  int32_t                  src_x,
5305                                  int32_t                  src_y,
5306                                  int32_t                  mask_x,
5307                                  int32_t                  mask_y,
5308                                  int32_t                  dest_x,
5309                                  int32_t                  dest_y,
5310                                  int32_t                  width,
5311                                  int32_t                  height)
5312 {
5313     uint32_t    *src, *src_line, s;
5314     uint32_t    *dst, *dst_line, d;
5315     uint8_t         *mask, *mask_line;
5316     uint32_t m;
5317     int src_stride, mask_stride, dst_stride;
5318     uint16_t w;
5319
5320     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
5321     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
5322     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
5323
5324     PIXMAN_IMAGE_GET_LINE (
5325         dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
5326     PIXMAN_IMAGE_GET_LINE (
5327         mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
5328     PIXMAN_IMAGE_GET_LINE (
5329         src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
5330
5331     while (height--)
5332     {
5333         src = src_line;
5334         src_line += src_stride;
5335         dst = dst_line;
5336         dst_line += dst_stride;
5337         mask = mask_line;
5338         mask_line += mask_stride;
5339
5340         w = width;
5341
5342         /* issue prefetch hints to optimize cache loads */
5343         cache_prefetch ((__m128i*)src);
5344         cache_prefetch ((__m128i*)dst);
5345         cache_prefetch ((__m128i*)mask);
5346
5347         while (w && (unsigned long)dst & 15)
5348         {
5349             s = 0xff000000 | *src++;
5350             m = (uint32_t) *mask++;
5351             d = *dst;
5352
5353             __m64 ms = unpack_32_1x64 (s);
5354
5355             if (m != 0xff)
5356             {
5357                 ms = in_over_1x64 (ms,
5358                                    mask_x00ff,
5359                                    expand_alpha_rev_1x64 (unpack_32_1x64 (m)),
5360                                    unpack_32_1x64 (d));
5361             }
5362
5363             *dst++ = pack_1x64_32 (ms);
5364             w--;
5365         }
5366
5367         /* issue prefetch hints to optimize cache loads */
5368         cache_prefetch ((__m128i*)src);
5369         cache_prefetch ((__m128i*)dst);
5370         cache_prefetch ((__m128i*)mask);
5371
5372         while (w >= 4)
5373         {
5374             /* prefetch the next cache lines */
5375             cache_prefetch_next ((__m128i*)src);
5376             cache_prefetch_next ((__m128i*)dst);
5377             cache_prefetch_next ((__m128i*)mask);
5378
5379             m = *(uint32_t*) mask;
5380             xmm_src = _mm_or_si128 (load_128_unaligned ((__m128i*)src), mask_ff000000);
5381
5382             if (m == 0xffffffff)
5383             {
5384                 save_128_aligned ((__m128i*)dst, xmm_src);
5385             }
5386             else
5387             {
5388                 xmm_dst = load_128_aligned ((__m128i*)dst);
5389
5390                 xmm_mask = _mm_unpacklo_epi16 (
5391                     unpack_32_1x128 (m), _mm_setzero_si128 ());
5392
5393                 unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
5394                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
5395                 unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
5396
5397                 expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi,
5398                                         &xmm_mask_lo, &xmm_mask_hi);
5399
5400                 in_over_2x128 (xmm_src_lo, xmm_src_hi,
5401                                mask_00ff, mask_00ff,
5402                                xmm_mask_lo, xmm_mask_hi,
5403                                &xmm_dst_lo, &xmm_dst_hi);
5404
5405                 save_128_aligned (
5406                     (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
5407             }
5408
5409             src += 4;
5410             dst += 4;
5411             mask += 4;
5412             w -= 4;
5413         }
5414
5415         while (w)
5416         {
5417             m = (uint32_t) *mask++;
5418
5419             if (m)
5420             {
5421                 s = 0xff000000 | *src;
5422
5423                 if (m == 0xff)
5424                 {
5425                     *dst = s;
5426                 }
5427                 else
5428                 {
5429                     d = *dst;
5430
5431                     *dst = pack_1x64_32 (
5432                         in_over_1x64 (
5433                             unpack_32_1x64 (s),
5434                             mask_x00ff,
5435                             expand_alpha_rev_1x64 (unpack_32_1x64 (m)),
5436                             unpack_32_1x64 (d)));
5437                 }
5438
5439             }
5440
5441             src++;
5442             dst++;
5443             w--;
5444         }
5445     }
5446
5447     _mm_empty ();
5448 }
5449
5450 #endif
5451
5452 static const pixman_fast_path_t sse2_fast_paths[] =
5453 {
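    /* { operator, source format, mask format, destination format, fast path, flags } */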
5454     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   sse2_composite_over_n_8_0565,       0 },
5455     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   sse2_composite_over_n_8_0565,       0 },
5456     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_composite_over_n_8888,         0 },
5457     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_over_n_8888,         0 },
5458     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_r5g6b5,   sse2_composite_over_n_0565,         0 },
5459     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_composite_over_8888_8888,      0 },
5460     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_over_8888_8888,      0 },
5461     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, sse2_composite_over_8888_8888,      0 },
5462     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_composite_over_8888_8888,      0 },
5463     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   sse2_composite_over_8888_0565,      0 },
5464     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   sse2_composite_over_8888_0565,      0 },
5465     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_n_8_8888,       0 },
5466     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_over_n_8_8888,       0 },
5467     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_composite_over_n_8_8888,       0 },
5468     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_over_n_8_8888,       0 },
5469 #if 0
5470     /* FIXME: This code was buggy in the MMX version, and the bug was carried over to this SSE2 version */
5471     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_over_x888_8_8888,    0 },
5472     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_x888_8_8888,    0 },
5473     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_over_x888_8_8888,    0 },
5474     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_x888_8_8888,    0 },
5475 #endif
5476     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_x888_n_8888,    NEED_SOLID_MASK },
5477     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_over_x888_n_8888,    NEED_SOLID_MASK },
5478     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_composite_over_x888_n_8888,    NEED_SOLID_MASK },
5479     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_over_x888_n_8888,    NEED_SOLID_MASK },
5480     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_8888_n_8888,    NEED_SOLID_MASK },
5481     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_over_8888_n_8888,    NEED_SOLID_MASK },
5482     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_composite_over_8888_n_8888,    NEED_SOLID_MASK },
5483     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_over_8888_n_8888,    NEED_SOLID_MASK },
5484     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
5485     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
5486     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
5487     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
5488     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   sse2_composite_over_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
5489     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   sse2_composite_over_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
5490     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5491     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_a8r8g8b8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5492     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5493     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_x8r8g8b8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5494     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_a8b8g8r8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5495     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5496     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_x8b8g8r8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5497     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, sse2_composite_over_pixbuf_8888,    NEED_PIXBUF },
5498     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   sse2_composite_over_pixbuf_0565,    NEED_PIXBUF },
5499     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_r5g6b5,   sse2_composite_over_pixbuf_0565,    NEED_PIXBUF },
5500     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_b5g6r5,   sse2_composite_over_pixbuf_0565,    NEED_PIXBUF },
5501     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   sse2_composite_over_pixbuf_0565,    NEED_PIXBUF },
5502     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_copy_area,           0 },
5503     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_composite_copy_area,           0 },
5504
5505     { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_null,     PIXMAN_a8,       sse2_composite_add_8000_8000,       0 },
5506     { PIXMAN_OP_ADD,  PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_composite_add_8888_8888,       0 },
5507     { PIXMAN_OP_ADD,  PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, sse2_composite_add_8888_8888,       0 },
5508     { PIXMAN_OP_ADD,  PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8,       sse2_composite_add_8888_8_8,        0 },
5509
5510     { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_src_n_8_8888,        0 },
5511     { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_src_n_8_8888,        0 },
5512     { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_composite_src_n_8_8888,        0 },
5513     { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_src_n_8_8888,        0 },
5514     { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_composite_copy_area,           0 },
5515     { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,     PIXMAN_a8b8g8r8, sse2_composite_copy_area,           0 },
5516     { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_copy_area,           0 },
5517     { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_composite_copy_area,           0 },
5518     { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_copy_area,           0 },
5519     { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8,  PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_composite_copy_area,           0 },
5520     { PIXMAN_OP_SRC, PIXMAN_r5g6b5,    PIXMAN_null,     PIXMAN_r5g6b5,   sse2_composite_copy_area,           0 },
5521     { PIXMAN_OP_SRC, PIXMAN_b5g6r5,    PIXMAN_null,     PIXMAN_b5g6r5,   sse2_composite_copy_area,           0 },
5522
5523     { PIXMAN_OP_IN,  PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       sse2_composite_in_8_8,              0 },
5524     { PIXMAN_OP_IN,  PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8,       sse2_composite_in_n_8_8,            0 },
5525
5526     { PIXMAN_OP_NONE },
5527 };
5528
5529 /*
5530  * Work around GCC bug causing crashes in Mozilla with SSE2
5531  *
5532  * When using -msse, gcc generates movdqa instructions assuming that
5533  * the stack is 16 byte aligned. Unfortunately some applications, such
5534  * as Mozilla and Mono, end up aligning the stack to 4 bytes, which
5535  * causes the movdqa instructions to fail.
5536  *
5537  * The __force_align_arg_pointer__ attribute makes gcc generate a prologue that
5538  * realigns the stack pointer to 16 bytes.
5539  *
5540  * On x86-64 this is not necessary because the standard ABI already
5541  * calls for a 16 byte aligned stack.
5542  *
5543  * See https://bugs.freedesktop.org/show_bug.cgi?id=15693
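 *
 * The attribute is therefore applied to each entry point defined below:
 * sse2_composite, sse2_blt, sse2_fill and _pixman_implementation_create_sse2.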
5544  */
5545 #if defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__)
5546 __attribute__((__force_align_arg_pointer__))
5547 #endif
5548 static void
5549 sse2_composite (pixman_implementation_t *imp,
5550                 pixman_op_t              op,
5551                 pixman_image_t *         src,
5552                 pixman_image_t *         mask,
5553                 pixman_image_t *         dest,
5554                 int32_t                  src_x,
5555                 int32_t                  src_y,
5556                 int32_t                  mask_x,
5557                 int32_t                  mask_y,
5558                 int32_t                  dest_x,
5559                 int32_t                  dest_y,
5560                 int32_t                  width,
5561                 int32_t                  height)
5562 {
5563     if (_pixman_run_fast_path (sse2_fast_paths, imp,
5564                                op, src, mask, dest,
5565                                src_x, src_y,
5566                                mask_x, mask_y,
5567                                dest_x, dest_y,
5568                                width, height))
5569     {
5570         return;
5571     }
5572
5573     _pixman_implementation_composite (imp->delegate, op,
5574                                       src, mask, dest,
5575                                       src_x, src_y,
5576                                       mask_x, mask_y,
5577                                       dest_x, dest_y,
5578                                       width, height);
5579 }
5580
5581 #if defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__)
5582 __attribute__((__force_align_arg_pointer__))
5583 #endif
5584 static pixman_bool_t
5585 sse2_blt (pixman_implementation_t *imp,
5586           uint32_t *               src_bits,
5587           uint32_t *               dst_bits,
5588           int                      src_stride,
5589           int                      dst_stride,
5590           int                      src_bpp,
5591           int                      dst_bpp,
5592           int                      src_x,
5593           int                      src_y,
5594           int                      dst_x,
5595           int                      dst_y,
5596           int                      width,
5597           int                      height)
5598 {
5599     if (!pixman_blt_sse2 (
5600             src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
5601             src_x, src_y, dst_x, dst_y, width, height))
5603     {
5604         return _pixman_implementation_blt (
5605             imp->delegate,
5606             src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
5607             src_x, src_y, dst_x, dst_y, width, height);
5608     }
5609
5610     return TRUE;
5611 }
5612
5613 #if defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__)
5614 __attribute__((__force_align_arg_pointer__))
5615 #endif
5616 static pixman_bool_t
5617 sse2_fill (pixman_implementation_t *imp,
5618            uint32_t *               bits,
5619            int                      stride,
5620            int                      bpp,
5621            int                      x,
5622            int                      y,
5623            int                      width,
5624            int                      height,
5625            uint32_t xor)
5626 {
5627     if (!pixman_fill_sse2 (bits, stride, bpp, x, y, width, height, xor))
5628     {
5629         return _pixman_implementation_fill (
5630             imp->delegate, bits, stride, bpp, x, y, width, height, xor);
5631     }
5632
5633     return TRUE;
5634 }
5635
5636 #if defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__)
5637 __attribute__((__force_align_arg_pointer__))
5638 #endif
5639 pixman_implementation_t *
5640 _pixman_implementation_create_sse2 (void)
5641 {
5642     pixman_implementation_t *mmx = _pixman_implementation_create_mmx ();
5643     pixman_implementation_t *imp = _pixman_implementation_create (mmx);
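    /* The MMX implementation is installed as the delegate: operations with
     * no SSE2 fast path or combiner fall through to it (and on to its own
     * delegates).
     */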
5644
5645     /* SSE2 constants */
5646     mask_565_r  = create_mask_2x32_128 (0x00f80000, 0x00f80000);
5647     mask_565_g1 = create_mask_2x32_128 (0x00070000, 0x00070000);
5648     mask_565_g2 = create_mask_2x32_128 (0x000000e0, 0x000000e0);
5649     mask_565_b  = create_mask_2x32_128 (0x0000001f, 0x0000001f);
5650     mask_red   = create_mask_2x32_128 (0x00f80000, 0x00f80000);
5651     mask_green = create_mask_2x32_128 (0x0000fc00, 0x0000fc00);
5652     mask_blue  = create_mask_2x32_128 (0x000000f8, 0x000000f8);
5653     mask_565_fix_rb = create_mask_2x32_128 (0x00e000e0, 0x00e000e0);
5654     mask_565_fix_g = create_mask_2x32_128  (0x0000c000, 0x0000c000);
5655     mask_0080 = create_mask_16_128 (0x0080);
5656     mask_00ff = create_mask_16_128 (0x00ff);
5657     mask_0101 = create_mask_16_128 (0x0101);
5658     mask_ffff = create_mask_16_128 (0xffff);
5659     mask_ff000000 = create_mask_2x32_128 (0xff000000, 0xff000000);
5660     mask_alpha = create_mask_2x32_128 (0x00ff0000, 0x00000000);
5661
5662     /* MMX constants */
5663     mask_x565_rgb = create_mask_2x32_64 (0x000001f0, 0x003f001f);
5664     mask_x565_unpack = create_mask_2x32_64 (0x00000084, 0x04100840);
5665
5666     mask_x0080 = create_mask_16_64 (0x0080);
5667     mask_x00ff = create_mask_16_64 (0x00ff);
5668     mask_x0101 = create_mask_16_64 (0x0101);
5669     mask_x_alpha = create_mask_2x32_64 (0x00ff0000, 0x00000000);
5670
5671     _mm_empty ();
5672
5673     /* Set up function pointers */
5674
5675     /* SSE2 replacements for the generic combine functions (fbcompose.c) */
5676     imp->combine_32[PIXMAN_OP_OVER] = sse2_combine_over_u;
5677     imp->combine_32[PIXMAN_OP_OVER_REVERSE] = sse2_combine_over_reverse_u;
5678     imp->combine_32[PIXMAN_OP_IN] = sse2_combine_in_u;
5679     imp->combine_32[PIXMAN_OP_IN_REVERSE] = sse2_combine_in_reverse_u;
5680     imp->combine_32[PIXMAN_OP_OUT] = sse2_combine_out_u;
5681     imp->combine_32[PIXMAN_OP_OUT_REVERSE] = sse2_combine_out_reverse_u;
5682     imp->combine_32[PIXMAN_OP_ATOP] = sse2_combine_atop_u;
5683     imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = sse2_combine_atop_reverse_u;
5684     imp->combine_32[PIXMAN_OP_XOR] = sse2_combine_xor_u;
5685     imp->combine_32[PIXMAN_OP_ADD] = sse2_combine_add_u;
5686
5687     imp->combine_32[PIXMAN_OP_SATURATE] = sse2_combine_saturate_u;
5688
5689     imp->combine_32_ca[PIXMAN_OP_SRC] = sse2_combine_src_ca;
5690     imp->combine_32_ca[PIXMAN_OP_OVER] = sse2_combine_over_ca;
5691     imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = sse2_combine_over_reverse_ca;
5692     imp->combine_32_ca[PIXMAN_OP_IN] = sse2_combine_in_ca;
5693     imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = sse2_combine_in_reverse_ca;
5694     imp->combine_32_ca[PIXMAN_OP_OUT] = sse2_combine_out_ca;
5695     imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = sse2_combine_out_reverse_ca;
5696     imp->combine_32_ca[PIXMAN_OP_ATOP] = sse2_combine_atop_ca;
5697     imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = sse2_combine_atop_reverse_ca;
5698     imp->combine_32_ca[PIXMAN_OP_XOR] = sse2_combine_xor_ca;
5699     imp->combine_32_ca[PIXMAN_OP_ADD] = sse2_combine_add_ca;
5700
5701     imp->composite = sse2_composite;
5702     imp->blt = sse2_blt;
5703     imp->fill = sse2_fill;
5704
5705     return imp;
5706 }
5707
5708 #endif /* USE_SSE2 */