Convert CamelCase names to underscore_names.
[profile/ivi/pixman.git] / pixman / pixman-sse2.c
1 /*
2  * Copyright © 2008 Rodrigo Kumpera
3  * Copyright © 2008 André Tupinambá
4  *
5  * Permission to use, copy, modify, distribute, and sell this software and its
6  * documentation for any purpose is hereby granted without fee, provided that
7  * the above copyright notice appear in all copies and that both that
8  * copyright notice and this permission notice appear in supporting
9  * documentation, and that the name of Red Hat not be used in advertising or
10  * publicity pertaining to distribution of the software without specific,
11  * written prior permission.  Red Hat makes no representations about the
12  * suitability of this software for any purpose.  It is provided "as is"
13  * without express or implied warranty.
14  *
15  * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
16  * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
17  * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
18  * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
19  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
20  * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
21  * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
22  * SOFTWARE.
23  *
24  * Author:  Rodrigo Kumpera (kumpera@gmail.com)
25  *          André Tupinambá (andrelrt@gmail.com)
26  * 
27  * Based on work by Owen Taylor and Søren Sandmann
28  */
29 #ifdef HAVE_CONFIG_H
30 #include <config.h>
31 #endif
32
33 #include <mmintrin.h>
34 #include <xmmintrin.h> /* for _mm_shuffle_pi16 and _MM_SHUFFLE */
35 #include <emmintrin.h> /* for SSE2 intrinsics */
36 #include "pixman-private.h"
37 #include "pixman-combine32.h"
38
39 #ifdef USE_SSE2
40
41 /* -------------------------------------------------------------------------------------------------
42  * Locals
43  */
44
45 static __m64 mask_x0080;
46 static __m64 mask_x00ff;
47 static __m64 mask_x0101;
48 static __m64 mask_x_alpha;
49
50 static __m64 mask_x565_rgb;
51 static __m64 mask_x565_unpack;
52
53 static __m128i mask_0080;
54 static __m128i mask_00ff;
55 static __m128i mask_0101;
56 static __m128i mask_ffff;
57 static __m128i mask_ff000000;
58 static __m128i mask_alpha;
59
60 static __m128i mask_565_r;
61 static __m128i mask_565_g1, mask_565_g2;
62 static __m128i mask_565_b;
63 static __m128i mask_red;
64 static __m128i mask_green;
65 static __m128i mask_blue;
66
67 static __m128i mask_565_fix_rb;
68 static __m128i mask_565_fix_g;
69
70 /* -------------------------------------------------------------------------------------------------
71  * SSE2 Inlines
72  */
73 static force_inline __m128i
74 unpack_32_1x128 (uint32_t data)
75 {
76     return _mm_unpacklo_epi8 (_mm_cvtsi32_si128 (data), _mm_setzero_si128());
77 }
78
79 static force_inline void
80 unpack_128_2x128 (__m128i data, __m128i* data_lo, __m128i* data_hi)
81 {
82     *data_lo = _mm_unpacklo_epi8 (data, _mm_setzero_si128 ());
83     *data_hi = _mm_unpackhi_epi8 (data, _mm_setzero_si128 ());
84 }
85
86 static force_inline __m128i
87 unpack_565to8888 (__m128i lo)
88 {
89     __m128i r, g, b, rb, t;
90     
91     r = _mm_and_si128 (_mm_slli_epi32 (lo, 8), mask_red);
92     g = _mm_and_si128 (_mm_slli_epi32 (lo, 5), mask_green);
93     b = _mm_and_si128 (_mm_slli_epi32 (lo, 3), mask_blue);
94
95     rb = _mm_or_si128 (r, b);
96     t  = _mm_and_si128 (rb, mask_565_fix_rb);
97     t  = _mm_srli_epi32 (t, 5);
98     rb = _mm_or_si128 (rb, t);
99
100     t  = _mm_and_si128 (g, mask_565_fix_g);
101     t  = _mm_srli_epi32 (t, 6);
102     g  = _mm_or_si128 (g, t);
103     
104     return _mm_or_si128 (rb, g);
105 }
106
107 static force_inline void
108 unpack_565_128_4x128 (__m128i data, __m128i* data0, __m128i* data1, __m128i* data2, __m128i* data3)
109 {
110     __m128i lo, hi;
111
112     lo = _mm_unpacklo_epi16 (data, _mm_setzero_si128 ());
113     hi = _mm_unpackhi_epi16 (data, _mm_setzero_si128 ());
114
115     lo = unpack_565to8888 (lo);
116     hi = unpack_565to8888 (hi);
117
118     unpack_128_2x128 (lo, data0, data1);
119     unpack_128_2x128 (hi, data2, data3);
120 }
121
122 static force_inline uint16_t
123 pack_565_32_16 (uint32_t pixel)
124 {
125     return (uint16_t) (((pixel>>8) & 0xf800) | ((pixel>>5) & 0x07e0) | ((pixel>>3) & 0x001f));
126 }
127
128 static force_inline __m128i
129 pack_2x128_128 (__m128i lo, __m128i hi)
130 {
131     return _mm_packus_epi16 (lo, hi);
132 }
133
134 static force_inline __m128i
135 pack_565_2x128_128 (__m128i lo, __m128i hi)
136 {
137     __m128i data;
138     __m128i r, g1, g2, b;
139
140     data = pack_2x128_128 ( lo, hi );
141
142     r  = _mm_and_si128 (data , mask_565_r);
143     g1 = _mm_and_si128 (_mm_slli_epi32 (data , 3), mask_565_g1);
144     g2 = _mm_and_si128 (_mm_srli_epi32 (data , 5), mask_565_g2);
145     b  = _mm_and_si128 (_mm_srli_epi32 (data , 3), mask_565_b);
146
147     return _mm_or_si128 (_mm_or_si128 (_mm_or_si128 (r, g1), g2), b);
148 }
149
150 static force_inline __m128i
151 pack_565_4x128_128 (__m128i* xmm0, __m128i* xmm1, __m128i* xmm2, __m128i* xmm3)
152 {
153     return _mm_packus_epi16 (pack_565_2x128_128 (*xmm0, *xmm1), pack_565_2x128_128 (*xmm2, *xmm3));
154 }
155
156 static force_inline int
157 is_opaque (__m128i x)
158 {
159     __m128i ffs = _mm_cmpeq_epi8 (x, x);
160     return (_mm_movemask_epi8 (_mm_cmpeq_epi8 (x, ffs)) & 0x8888) == 0x8888;
161 }
162
163 static force_inline int
164 is_zero (__m128i x)
165 {
166     return _mm_movemask_epi8 (_mm_cmpeq_epi8 (x, _mm_setzero_si128())) == 0xffff;
167 }
168
169 static force_inline int
170 is_transparent (__m128i x)
171 {
172     return (_mm_movemask_epi8 (_mm_cmpeq_epi8 (x, _mm_setzero_si128())) & 0x8888) == 0x8888;
173 }
174
175 static force_inline __m128i
176 expand_pixel_32_1x128 (uint32_t data)
177 {
178     return _mm_shuffle_epi32 (unpack_32_1x128 (data), _MM_SHUFFLE(1, 0, 1, 0));
179 }
180
181 static force_inline __m128i
182 expand_alpha_1x128 (__m128i data)
183 {
184     return _mm_shufflehi_epi16 (_mm_shufflelo_epi16 (data, _MM_SHUFFLE(3, 3, 3, 3)), _MM_SHUFFLE(3, 3, 3, 3));
185 }
186
187 static force_inline void
188 expand_alpha_2x128 (__m128i data_lo, __m128i data_hi, __m128i* alpha_lo, __m128i* alpha_hi)
189 {
190     __m128i lo, hi;
191
192     lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE(3, 3, 3, 3));
193     hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE(3, 3, 3, 3));
194     *alpha_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE(3, 3, 3, 3));
195     *alpha_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE(3, 3, 3, 3));
196 }
197
198 static force_inline void
199 expand_alpha_rev_2x128 (__m128i data_lo, __m128i data_hi, __m128i* alpha_lo, __m128i* alpha_hi)
200 {
201     __m128i lo, hi;
202
203     lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE(0, 0, 0, 0));
204     hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE(0, 0, 0, 0));
205     *alpha_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE(0, 0, 0, 0));
206     *alpha_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE(0, 0, 0, 0));
207 }
208
209 static force_inline void
210 pix_multiply_2x128 (__m128i* data_lo, __m128i* data_hi, __m128i* alpha_lo, __m128i* alpha_hi, __m128i* ret_lo, __m128i* ret_hi)
211 {
212     __m128i lo, hi;
213
214     lo = _mm_mullo_epi16 (*data_lo, *alpha_lo);
215     hi = _mm_mullo_epi16 (*data_hi, *alpha_hi);
216     lo = _mm_adds_epu16 (lo, mask_0080);
217     hi = _mm_adds_epu16 (hi, mask_0080);
218     *ret_lo = _mm_mulhi_epu16 (lo, mask_0101);
219     *ret_hi = _mm_mulhi_epu16 (hi, mask_0101);
220 }
221
222 static force_inline void
223 pix_add_multiply_2x128 (__m128i* src_lo, __m128i* src_hi, __m128i* alpha_dst_lo, __m128i* alpha_dst_hi,
224                       __m128i* dst_lo, __m128i* dst_hi, __m128i* alpha_src_lo, __m128i* alpha_src_hi,
225                       __m128i* ret_lo, __m128i* ret_hi)
226 {
227     __m128i lo, hi;
228     __m128i mul_lo, mul_hi;
229
230     lo = _mm_mullo_epi16 (*src_lo, *alpha_dst_lo);
231     hi = _mm_mullo_epi16 (*src_hi, *alpha_dst_hi);
232     mul_lo = _mm_mullo_epi16 (*dst_lo, *alpha_src_lo);
233     mul_hi = _mm_mullo_epi16 (*dst_hi, *alpha_src_hi);
234     lo = _mm_adds_epu16 (lo, mask_0080);
235     hi = _mm_adds_epu16 (hi, mask_0080);
236     lo = _mm_adds_epu16 (lo, mul_lo);
237     hi = _mm_adds_epu16 (hi, mul_hi);
238     *ret_lo = _mm_mulhi_epu16 (lo, mask_0101);
239     *ret_hi = _mm_mulhi_epu16 (hi, mask_0101);
240 }
241
242 static force_inline void
243 negate_2x128 (__m128i data_lo, __m128i data_hi, __m128i* neg_lo, __m128i* neg_hi)
244 {
245     *neg_lo = _mm_xor_si128 (data_lo, mask_00ff);
246     *neg_hi = _mm_xor_si128 (data_hi, mask_00ff);
247 }
248
249 static force_inline void
250 invert_colors_2x128 (__m128i data_lo, __m128i data_hi, __m128i* inv_lo, __m128i* inv_hi)
251 {
252     __m128i lo, hi;
253
254     lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE(3, 0, 1, 2));
255     hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE(3, 0, 1, 2));
256     *inv_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE(3, 0, 1, 2));
257     *inv_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE(3, 0, 1, 2));
258 }
259
260 static force_inline void
261 over_2x128 (__m128i* src_lo, __m128i* src_hi, __m128i* alpha_lo, __m128i* alpha_hi, __m128i* dst_lo, __m128i* dst_hi)
262 {
263     __m128i t1, t2;
264
265     negate_2x128 (*alpha_lo, *alpha_hi, &t1, &t2);
266
267     pix_multiply_2x128 (dst_lo, dst_hi, &t1, &t2, dst_lo, dst_hi);
268
269     *dst_lo = _mm_adds_epu8 (*src_lo, *dst_lo);
270     *dst_hi = _mm_adds_epu8 (*src_hi, *dst_hi);
271 }
272
273 static force_inline void
274 over_rev_non_pre_2x128 (__m128i src_lo, __m128i src_hi, __m128i* dst_lo, __m128i* dst_hi)
275 {
276     __m128i lo, hi;
277     __m128i alpha_lo, alpha_hi;
278
279     expand_alpha_2x128 (src_lo, src_hi, &alpha_lo, &alpha_hi);
280
281     lo = _mm_or_si128 (alpha_lo, mask_alpha);
282     hi = _mm_or_si128 (alpha_hi, mask_alpha);
283
284     invert_colors_2x128 (src_lo, src_hi, &src_lo, &src_hi);
285
286     pix_multiply_2x128 (&src_lo, &src_hi, &lo, &hi, &lo, &hi);
287
288     over_2x128 (&lo, &hi, &alpha_lo, &alpha_hi, dst_lo, dst_hi);
289 }
290
291 static force_inline void
292 in_over_2x128 (__m128i* src_lo,  __m128i* src_hi,  __m128i*  alpha_lo, __m128i*  alpha_hi,
293               __m128i* mask_lo, __m128i* mask_hi, __m128i* dst_lo,   __m128i* dst_hi)
294 {
295     __m128i s_lo, s_hi;
296     __m128i a_lo, a_hi;
297
298     pix_multiply_2x128 (  src_lo,   src_hi, mask_lo, mask_hi, &s_lo, &s_hi);
299     pix_multiply_2x128 (alpha_lo, alpha_hi, mask_lo, mask_hi, &a_lo, &a_hi);
300
301     over_2x128 (&s_lo, &s_hi, &a_lo, &a_hi, dst_lo, dst_hi);
302 }
303
304 static force_inline void
305 cache_prefetch (__m128i* addr)
306 {
307     _mm_prefetch (addr, _MM_HINT_T0);
308 }
309
310 static force_inline void
311 cache_prefetch_next (__m128i* addr)
312 {
313     _mm_prefetch (addr + 4, _MM_HINT_T0); // 64 bytes ahead
314 }
315
316 /* load 4 pixels from a 16-byte boundary aligned address */
317 static force_inline __m128i
318 load_128_aligned (__m128i* src)
319 {
320     return _mm_load_si128 (src);
321 }
322
323 /* load 4 pixels from a unaligned address */
324 static force_inline __m128i
325 load_128_unaligned (const __m128i* src)
326 {
327     return _mm_loadu_si128 (src);
328 }
329
330 /* save 4 pixels using Write Combining memory on a 16-byte boundary aligned address */
331 static force_inline void
332 save128write_combining (__m128i* dst, __m128i data)
333 {
334     _mm_stream_si128 (dst, data);
335 }
336
337 /* save 4 pixels on a 16-byte boundary aligned address */
338 static force_inline void
339 save_128_aligned (__m128i* dst, __m128i data)
340 {
341     _mm_store_si128 (dst, data);
342 }
343
344 /* save 4 pixels on a unaligned address */
345 static force_inline void
346 save_128_unaligned (__m128i* dst, __m128i data)
347 {
348     _mm_storeu_si128 (dst, data);
349 }
350
351 /* -------------------------------------------------------------------------------------------------
352  * MMX inlines
353  */
354
355 static force_inline __m64
356 unpack_32_1x64 (uint32_t data)
357 {
358     return _mm_unpacklo_pi8 (_mm_cvtsi32_si64 (data), _mm_setzero_si64());
359 }
360
361 static force_inline __m64
362 expand_alpha_1x64 (__m64 data)
363 {
364     return _mm_shuffle_pi16 (data, _MM_SHUFFLE(3, 3, 3, 3));
365 }
366
367 static force_inline __m64
368 expand_alpha_rev_1x64 (__m64 data)
369 {
370     return _mm_shuffle_pi16 (data, _MM_SHUFFLE(0, 0, 0, 0));
371 }
372
373 static force_inline __m64
374 expand_pixel_8_1x64 (uint8_t data)
375 {
376     return _mm_shuffle_pi16 (unpack_32_1x64 ((uint32_t)data), _MM_SHUFFLE(0, 0, 0, 0));
377 }
378
379 static force_inline __m64
380 pix_multiply_1x64 (__m64 data, __m64 alpha)
381 {
382     return _mm_mulhi_pu16 (_mm_adds_pu16 (_mm_mullo_pi16 (data, alpha),
383                                           mask_x0080),
384                            mask_x0101);
385 }
386
387 static force_inline __m64
388 pix_add_multiply_1x64 (__m64* src, __m64* alpha_dst, __m64* dst, __m64* alpha_src)
389 {
390     return _mm_mulhi_pu16 (_mm_adds_pu16 (_mm_adds_pu16 (_mm_mullo_pi16 (*src, *alpha_dst),
391                                                          mask_x0080),
392                                           _mm_mullo_pi16 (*dst, *alpha_src)),
393                            mask_x0101);
394 }
395
396 static force_inline __m64
397 negate_1x64 (__m64 data)
398 {
399     return _mm_xor_si64 (data, mask_x00ff);
400 }
401
402 static force_inline __m64
403 invert_colors_1x64 (__m64 data)
404 {
405     return _mm_shuffle_pi16 (data, _MM_SHUFFLE(3, 0, 1, 2));
406 }
407
408 static force_inline __m64
409 over_1x64 (__m64 src, __m64 alpha, __m64 dst)
410 {
411     return _mm_adds_pu8 (src, pix_multiply_1x64 (dst, negate_1x64 (alpha)));
412 }
413
414 static force_inline __m64
415 in_over_1x64 (__m64* src, __m64* alpha, __m64* mask, __m64* dst)
416 {
417     return over_1x64 (pix_multiply_1x64 (*src, *mask),
418                       pix_multiply_1x64 (*alpha, *mask),
419                       *dst);
420 }
421
422 static force_inline __m64
423 over_rev_non_pre_1x64 (__m64 src, __m64 dst)
424 {
425     __m64 alpha = expand_alpha_1x64 (src);
426
427     return over_1x64 (pix_multiply_1x64 (invert_colors_1x64 (src),
428                                         _mm_or_si64 (alpha, mask_x_alpha)),
429                       alpha,
430                       dst);
431 }
432
433 static force_inline uint32_t
434 pack_1x64_32( __m64 data )
435 {
436     return _mm_cvtsi64_si32 (_mm_packs_pu16 (data, _mm_setzero_si64()));
437 }
438
439 /* Expand 16 bits positioned at @pos (0-3) of a mmx register into
440  *
441  *    00RR00GG00BB
442  *
443  * --- Expanding 565 in the low word ---
444  *
445  * m = (m << (32 - 3)) | (m << (16 - 5)) | m;
446  * m = m & (01f0003f001f);
447  * m = m * (008404100840);
448  * m = m >> 8;
449  *
450  * Note the trick here - the top word is shifted by another nibble to
451  * avoid it bumping into the middle word
452  */
453 static force_inline __m64
454 expand565_16_1x64 (uint16_t pixel)
455 {
456     __m64 p;
457     __m64 t1, t2;
458
459     p = _mm_cvtsi32_si64 ((uint32_t) pixel);
460
461     t1 = _mm_slli_si64 (p, 36 - 11);
462     t2 = _mm_slli_si64 (p, 16 - 5);
463
464     p = _mm_or_si64 (t1, p);
465     p = _mm_or_si64 (t2, p);
466     p = _mm_and_si64 (p, mask_x565_rgb);
467     p = _mm_mullo_pi16 (p, mask_x565_unpack);
468
469     return _mm_srli_pi16 (p, 8);
470 }
471
472 /* -------------------------------------------------------------------------------------------------
473  * Compose Core transformations
474  */
475 static force_inline uint32_t
476 core_combine_over_u_pixel_sse2 (uint32_t src, uint32_t dst)
477 {
478     uint8_t     a;
479     __m64       ms;
480
481     a = src >> 24;
482
483     if (a == 0xff)
484     {
485         return src;
486     }
487     else if (src)
488     {
489         ms = unpack_32_1x64 (src);
490         return pack_1x64_32 (over_1x64 (ms, expand_alpha_1x64 (ms), unpack_32_1x64 (dst)));
491     }
492
493     return dst;
494 }
495
496 static force_inline uint32_t
497 combine1 (const uint32_t *ps, const uint32_t *pm)
498 {
499     uint32_t s = *ps;
500
501     if (pm)
502     {
503         __m64 ms, mm;
504
505         mm = unpack_32_1x64 (*pm);
506         mm = expand_alpha_1x64 (mm);
507         
508         ms = unpack_32_1x64 (s);
509         ms = pix_multiply_1x64 (ms, mm);
510
511         s = pack_1x64_32 (ms);
512     }
513
514     return s;
515 }
516
517 static force_inline __m128i
518 combine4 (const __m128i *ps, const __m128i *pm)
519 {
520     __m128i xmm_src_lo, xmm_src_hi;
521     __m128i xmm_msk_lo, xmm_msk_hi;
522     __m128i s;
523     
524     if (pm)
525     {
526         xmm_msk_lo = load_128_unaligned (pm);
527
528         if (is_transparent (xmm_msk_lo))
529             return _mm_setzero_si128 ();
530     }
531     
532     s = load_128_unaligned (ps);
533         
534     if (pm)
535     {
536         unpack_128_2x128 (s, &xmm_src_lo, &xmm_src_hi);
537         unpack_128_2x128 (xmm_msk_lo, &xmm_msk_lo, &xmm_msk_hi);
538         
539         expand_alpha_2x128 (xmm_msk_lo, xmm_msk_hi, &xmm_msk_lo, &xmm_msk_hi);
540         
541         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_msk_lo, &xmm_msk_hi, &xmm_src_lo, &xmm_src_hi);
542         
543         s = pack_2x128_128 (xmm_src_lo, xmm_src_hi);
544     }
545
546     return s;
547 }
548
549 static force_inline void
550 core_combine_over_u_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
551 {
552     uint32_t s, d;
553
554     __m128i xmm_dst_lo, xmm_dst_hi;
555     __m128i xmm_src_lo, xmm_src_hi;
556     __m128i xmm_alpha_lo, xmm_alpha_hi;
557
558     /* call prefetch hint to optimize cache load*/
559     cache_prefetch ((__m128i*)ps);
560     cache_prefetch ((__m128i*)pd);
561     cache_prefetch ((__m128i*)pm);
562
563     /* Align dst on a 16-byte boundary */
564     while (w &&
565            ((unsigned long)pd & 15))
566     {
567         d = *pd;
568         s = combine1 (ps, pm);
569
570         *pd++ = core_combine_over_u_pixel_sse2 (s, d);
571         ps++;
572         if (pm)
573             pm++;
574         w--;
575     }
576
577     /* call prefetch hint to optimize cache load*/
578     cache_prefetch ((__m128i*)ps);
579     cache_prefetch ((__m128i*)pd);
580     cache_prefetch ((__m128i*)pm);
581
582     while (w >= 4)
583     {
584         /* fill cache line with next memory */
585         cache_prefetch_next ((__m128i*)ps);
586         cache_prefetch_next ((__m128i*)pd);
587         cache_prefetch_next ((__m128i*)pm);
588
589         /* I'm loading unaligned because I'm not sure about the address alignment. */
590         xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
591
592         if (is_opaque (xmm_src_hi))
593         {
594             save_128_aligned ((__m128i*)pd, xmm_src_hi);
595         }
596         else if (!is_zero (xmm_src_hi))
597         {
598             xmm_dst_hi = load_128_aligned ((__m128i*) pd);
599
600             unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
601             unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
602
603             expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi);
604
605             over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_dst_lo, &xmm_dst_hi);
606
607             /* rebuid the 4 pixel data and save*/
608             save_128_aligned ((__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
609         }
610
611         w -= 4;
612         ps += 4;
613         pd += 4;
614         if (pm)
615             pm += 4;
616     }
617
618     while (w)
619     {
620         d = *pd;
621         s = combine1 (ps, pm);
622
623         *pd++ = core_combine_over_u_pixel_sse2 (s, d);
624         ps++;
625         if (pm)
626             pm++;
627         w--;
628     }
629 }
630
631 static force_inline void
632 core_combine_over_reverse_u_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
633 {
634     uint32_t s, d;
635
636     __m128i xmm_dst_lo, xmm_dst_hi;
637     __m128i xmm_src_lo, xmm_src_hi;
638     __m128i xmm_alpha_lo, xmm_alpha_hi;
639
640     /* call prefetch hint to optimize cache load*/
641     cache_prefetch ((__m128i*)ps);
642     cache_prefetch ((__m128i*)pd);
643     cache_prefetch ((__m128i*)pm);
644
645     /* Align dst on a 16-byte boundary */
646     while (w &&
647            ((unsigned long)pd & 15))
648     {
649         d = *pd;
650         s = combine1 (ps, pm);
651
652         *pd++ = core_combine_over_u_pixel_sse2 (d, s);
653         w--;
654         ps++;
655         if (pm)
656             pm++;
657     }
658
659     /* call prefetch hint to optimize cache load*/
660     cache_prefetch ((__m128i*)ps);
661     cache_prefetch ((__m128i*)pd);
662     cache_prefetch ((__m128i*)pm);
663
664     while (w >= 4)
665     {
666         /* fill cache line with next memory */
667         cache_prefetch_next ((__m128i*)ps);
668         cache_prefetch_next ((__m128i*)pd);
669         cache_prefetch_next ((__m128i*)pm);
670
671         /* I'm loading unaligned because I'm not sure about the address alignment. */
672         xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
673         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
674
675         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
676         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
677
678         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi);
679
680         over_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_src_lo, &xmm_src_hi);
681
682         /* rebuid the 4 pixel data and save*/
683         save_128_aligned ((__m128i*)pd, pack_2x128_128 (xmm_src_lo, xmm_src_hi));
684
685         w -= 4;
686         ps += 4;
687         pd += 4;
688         if (pm)
689             pm += 4;
690     }
691
692     while (w)
693     {
694         d = *pd;
695         s = combine1 (ps, pm);
696
697         *pd++ = core_combine_over_u_pixel_sse2 (d, s);
698         ps++;
699         w--;
700         if (pm)
701             pm++;
702     }
703 }
704
705 static force_inline uint32_t
706 core_combine_in_u_pixelsse2 (uint32_t src, uint32_t dst)
707 {
708     uint32_t maska = src >> 24;
709
710     if (maska == 0)
711     {
712         return 0;
713     }
714     else if (maska != 0xff)
715     {
716         return pack_1x64_32(pix_multiply_1x64 (unpack_32_1x64 (dst), expand_alpha_1x64 (unpack_32_1x64 (src))));
717     }
718
719     return dst;
720 }
721
722 static force_inline void
723 core_combine_in_u_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
724 {
725     uint32_t s, d;
726
727     __m128i xmm_src_lo, xmm_src_hi;
728     __m128i xmm_dst_lo, xmm_dst_hi;
729
730     /* call prefetch hint to optimize cache load*/
731     cache_prefetch ((__m128i*)ps);
732     cache_prefetch ((__m128i*)pd);
733     cache_prefetch ((__m128i*)pm);
734
735     while (w && ((unsigned long) pd & 15))
736     {
737         s = combine1 (ps, pm);
738         d = *pd;
739
740         *pd++ = core_combine_in_u_pixelsse2 (d, s);
741         w--;
742         ps++;
743         if (pm)
744             pm++;
745     }
746
747     /* call prefetch hint to optimize cache load*/
748     cache_prefetch ((__m128i*)ps);
749     cache_prefetch ((__m128i*)pd);
750     cache_prefetch ((__m128i*)pm);
751
752     while (w >= 4)
753     {
754         /* fill cache line with next memory */
755         cache_prefetch_next ((__m128i*)ps);
756         cache_prefetch_next ((__m128i*)pd);
757         cache_prefetch_next ((__m128i*)pm);
758
759         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
760         xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*) pm);
761
762         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
763         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
764
765         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
766         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_dst_lo, &xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
767
768         save_128_aligned ((__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
769
770         ps += 4;
771         pd += 4;
772         w -= 4;
773         if (pm)
774             pm += 4;
775     }
776
777     while (w)
778     {
779         s = combine1 (ps, pm);
780         d = *pd;
781
782         *pd++ = core_combine_in_u_pixelsse2 (d, s);
783         w--;
784         ps++;
785         if (pm)
786             pm++;
787     }
788 }
789
790 static force_inline void
791 core_combine_reverse_in_u_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm, int w)
792 {
793     uint32_t s, d;
794
795     __m128i xmm_src_lo, xmm_src_hi;
796     __m128i xmm_dst_lo, xmm_dst_hi;
797
798     /* call prefetch hint to optimize cache load*/
799     cache_prefetch ((__m128i*)ps);
800     cache_prefetch ((__m128i*)pd);
801     cache_prefetch ((__m128i*)pm);
802
803     while (w && ((unsigned long) pd & 15))
804     {
805         s = combine1 (ps, pm);
806         d = *pd;
807
808         *pd++ = core_combine_in_u_pixelsse2 (s, d);
809         ps++;
810         w--;
811         if (pm)
812             pm++;
813     }
814
815     /* call prefetch hint to optimize cache load*/
816     cache_prefetch ((__m128i*)ps);
817     cache_prefetch ((__m128i*)pd);
818     cache_prefetch ((__m128i*)pm);
819
820     while (w >= 4)
821     {
822         /* fill cache line with next memory */
823         cache_prefetch_next ((__m128i*)ps);
824         cache_prefetch_next ((__m128i*)pd);
825         cache_prefetch_next ((__m128i*)pm);
826
827         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
828         xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*)pm);
829
830         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
831         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
832
833         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
834         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_src_lo, &xmm_src_hi, &xmm_dst_lo, &xmm_dst_hi);
835
836         save_128_aligned ((__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
837
838         ps += 4;
839         pd += 4;
840         w -= 4;
841         if (pm)
842             pm += 4;
843     }
844
845     while (w)
846     {
847         s = combine1 (ps, pm);
848         d = *pd;
849
850         *pd++ = core_combine_in_u_pixelsse2 (s, d);
851         w--;
852         ps++;
853         if (pm)
854             pm++;
855     }
856 }
857
858 static force_inline void
859 core_combine_reverse_out_u_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
860 {
861     /* call prefetch hint to optimize cache load*/
862     cache_prefetch ((__m128i*)ps);
863     cache_prefetch ((__m128i*)pd);
864     cache_prefetch ((__m128i*)pm);
865
866     while (w && ((unsigned long) pd & 15))
867     {
868         uint32_t s = combine1 (ps, pm);
869         uint32_t d = *pd;
870
871         *pd++ = pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (d), negate_1x64 (expand_alpha_1x64 (unpack_32_1x64 (s)))));
872         if (pm)
873             pm++;
874         ps++;
875         w--;
876     }
877
878     /* call prefetch hint to optimize cache load*/
879     cache_prefetch ((__m128i*)ps);
880     cache_prefetch ((__m128i*)pd);
881     cache_prefetch ((__m128i*)pm);
882
883     while (w >= 4)
884     {
885         __m128i xmm_src_lo, xmm_src_hi;
886         __m128i xmm_dst_lo, xmm_dst_hi;
887
888         /* fill cache line with next memory */
889         cache_prefetch_next ((__m128i*)ps);
890         cache_prefetch_next ((__m128i*)pd);
891         cache_prefetch_next ((__m128i*)pm);
892
893         xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
894         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
895
896         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
897         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
898
899         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
900         negate_2x128      (xmm_src_lo, xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
901
902         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_src_lo, &xmm_src_hi, &xmm_dst_lo, &xmm_dst_hi);
903
904         save_128_aligned ((__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
905
906         ps += 4;
907         pd += 4;
908         if (pm)
909             pm += 4;
910         w -= 4;
911     }
912
913     while (w)
914     {
915         uint32_t s = combine1 (ps, pm);
916         uint32_t d = *pd;
917
918         *pd++ = pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (d), negate_1x64 (expand_alpha_1x64 (unpack_32_1x64 (s)))));
919         ps++;
920         if (pm)
921             pm++;
922         w--;
923     }
924 }
925
926 static force_inline void
927 core_combine_out_u_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
928 {
929     /* call prefetch hint to optimize cache load*/
930     cache_prefetch ((__m128i*)ps);
931     cache_prefetch ((__m128i*)pd);
932     cache_prefetch ((__m128i*)pm);
933
934     while (w && ((unsigned long) pd & 15))
935     {
936         uint32_t s = combine1 (ps, pm);
937         uint32_t d = *pd;
938
939         *pd++ = pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (s), negate_1x64 (expand_alpha_1x64 (unpack_32_1x64 (d)))));
940         w--;
941         ps++;
942         if (pm)
943             pm++;
944     }
945
946     /* call prefetch hint to optimize cache load*/
947     cache_prefetch ((__m128i*)ps);
948     cache_prefetch ((__m128i*)pd);
949     cache_prefetch ((__m128i*)pm);
950
951     while (w >= 4)
952     {
953         __m128i xmm_src_lo, xmm_src_hi;
954         __m128i xmm_dst_lo, xmm_dst_hi;
955
956         /* fill cache line with next memory */
957         cache_prefetch_next ((__m128i*)ps);
958         cache_prefetch_next ((__m128i*)pd);
959         cache_prefetch_next ((__m128i*)pm);
960
961         xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*)pm);
962         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
963
964         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
965         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
966
967         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
968         negate_2x128      (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
969
970         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_dst_lo, &xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
971
972         save_128_aligned ((__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
973
974         ps += 4;
975         pd += 4;
976         w -= 4;
977         if (pm)
978             pm += 4;
979     }
980
981     while (w)
982     {
983         uint32_t s = combine1 (ps, pm);
984         uint32_t d = *pd;
985
986         *pd++ = pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (s), negate_1x64 (expand_alpha_1x64 (unpack_32_1x64 (d)))));
987         w--;
988         ps++;
989         if (pm)
990             pm++;
991     }
992 }
993
994 static force_inline uint32_t
995 core_combine_atop_u_pixel_sse2 (uint32_t src, uint32_t dst)
996 {
997     __m64 s = unpack_32_1x64 (src);
998     __m64 d = unpack_32_1x64 (dst);
999
1000     __m64 sa = negate_1x64 (expand_alpha_1x64 (s));
1001     __m64 da = expand_alpha_1x64 (d);
1002
1003     return pack_1x64_32 (pix_add_multiply_1x64 (&s, &da, &d, &sa));
1004 }
1005
1006 static force_inline void
1007 core_combine_atop_u_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
1008 {
1009     uint32_t s, d;
1010
1011     __m128i xmm_src_lo, xmm_src_hi;
1012     __m128i xmm_dst_lo, xmm_dst_hi;
1013     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
1014     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
1015
1016     /* call prefetch hint to optimize cache load*/
1017     cache_prefetch ((__m128i*)ps);
1018     cache_prefetch ((__m128i*)pd);
1019     cache_prefetch ((__m128i*)pm);
1020
1021     while (w && ((unsigned long) pd & 15))
1022     {
1023         s = combine1 (ps, pm);
1024         d = *pd;
1025
1026         *pd++ = core_combine_atop_u_pixel_sse2 (s, d);
1027         w--;
1028         ps++;
1029         if (pm)
1030             pm++;
1031     }
1032
1033     /* call prefetch hint to optimize cache load*/
1034     cache_prefetch ((__m128i*)ps);
1035     cache_prefetch ((__m128i*)pd);
1036     cache_prefetch ((__m128i*)pm);
1037
1038     while (w >= 4)
1039     {
1040         /* fill cache line with next memory */
1041         cache_prefetch_next ((__m128i*)ps);
1042         cache_prefetch_next ((__m128i*)pd);
1043         cache_prefetch_next ((__m128i*)pm);
1044
1045         xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
1046         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
1047
1048         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1049         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1050
1051         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi);
1052         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
1053
1054         negate_2x128 (xmm_alpha_src_lo, xmm_alpha_src_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi);
1055
1056         pix_add_multiply_2x128 ( &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
1057                                &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi,
1058                                &xmm_dst_lo, &xmm_dst_hi );
1059
1060         save_128_aligned ((__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1061
1062         ps += 4;
1063         pd += 4;
1064         w -= 4;
1065         if (pm)
1066             pm += 4;
1067     }
1068
1069     while (w)
1070     {
1071         s = combine1 (ps, pm);
1072         d = *pd;
1073
1074         *pd++ = core_combine_atop_u_pixel_sse2 (s, d);
1075         w--;
1076         ps++;
1077         if (pm)
1078             pm++;
1079     }
1080 }
1081
1082 static force_inline uint32_t
1083 core_combine_reverse_atop_u_pixel_sse2 (uint32_t src, uint32_t dst)
1084 {
1085     __m64 s = unpack_32_1x64 (src);
1086     __m64 d = unpack_32_1x64 (dst);
1087
1088     __m64 sa = expand_alpha_1x64 (s);
1089     __m64 da = negate_1x64 (expand_alpha_1x64 (d));
1090
1091     return pack_1x64_32 (pix_add_multiply_1x64 (&s, &da, &d, &sa));
1092 }
1093
1094 static force_inline void
1095 core_combine_reverse_atop_u_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
1096 {
1097     uint32_t s, d;
1098
1099     __m128i xmm_src_lo, xmm_src_hi;
1100     __m128i xmm_dst_lo, xmm_dst_hi;
1101     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
1102     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
1103
1104     /* call prefetch hint to optimize cache load*/
1105     cache_prefetch ((__m128i*)ps);
1106     cache_prefetch ((__m128i*)pd);
1107     cache_prefetch ((__m128i*)pm);
1108
1109     while (w && ((unsigned long) pd & 15))
1110     {
1111         s = combine1 (ps, pm);
1112         d = *pd;
1113
1114         *pd++ = core_combine_reverse_atop_u_pixel_sse2 (s, d);
1115         ps++;
1116         w--;
1117         if (pm)
1118             pm++;
1119     }
1120
1121     /* call prefetch hint to optimize cache load*/
1122     cache_prefetch ((__m128i*)ps);
1123     cache_prefetch ((__m128i*)pd);
1124     cache_prefetch ((__m128i*)pm);
1125
1126     while (w >= 4)
1127     {
1128         /* fill cache line with next memory */
1129         cache_prefetch_next ((__m128i*)ps);
1130         cache_prefetch_next ((__m128i*)pd);
1131         cache_prefetch_next ((__m128i*)pm);
1132
1133         xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
1134         xmm_dst_hi = load_128_aligned ((__m128i*) pd);
1135
1136         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1137         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1138
1139         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi);
1140         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
1141
1142         negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
1143
1144         pix_add_multiply_2x128 ( &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
1145                                &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi,
1146                                &xmm_dst_lo, &xmm_dst_hi );
1147
1148         save_128_aligned ((__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1149
1150         ps += 4;
1151         pd += 4;
1152         w -= 4;
1153         if (pm)
1154             pm += 4;
1155     }
1156
1157     while (w)
1158     {
1159         s = combine1 (ps, pm);
1160         d = *pd;
1161
1162         *pd++ = core_combine_reverse_atop_u_pixel_sse2 (s, d);
1163         ps++;
1164         w--;
1165         if (pm)
1166             pm++;
1167     }
1168 }
1169
1170 static force_inline uint32_t
1171 core_combine_xor_u_pixel_sse2 (uint32_t src, uint32_t dst)
1172 {
1173     __m64 s = unpack_32_1x64 (src);
1174     __m64 d = unpack_32_1x64 (dst);
1175
1176     __m64 neg_d = negate_1x64 (expand_alpha_1x64 (d));
1177     __m64 neg_s = negate_1x64 (expand_alpha_1x64 (s));
1178
1179     return pack_1x64_32 (pix_add_multiply_1x64 (&s, &neg_d, &d, &neg_s));
1180 }
1181
1182 static force_inline void
1183 core_combine_xor_u_sse2 (uint32_t* dst, const uint32_t* src, const uint32_t *mask, int width)
1184 {
1185     int w = width;
1186     uint32_t s, d;
1187     uint32_t* pd = dst;
1188     const uint32_t* ps = src;
1189     const uint32_t* pm = mask;
1190     
1191     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
1192     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
1193     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
1194     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
1195
1196     /* call prefetch hint to optimize cache load*/
1197     cache_prefetch ((__m128i*)ps);
1198     cache_prefetch ((__m128i*)pd);
1199     cache_prefetch ((__m128i*)pm);
1200
1201     while (w && ((unsigned long) pd & 15))
1202     {
1203         s = combine1 (ps, pm);
1204         d = *pd;
1205
1206         *pd++ = core_combine_xor_u_pixel_sse2 (s, d);
1207         w--;
1208         ps++;
1209         if (pm)
1210             pm++;
1211     }
1212
1213     /* call prefetch hint to optimize cache load*/
1214     cache_prefetch ((__m128i*)ps);
1215     cache_prefetch ((__m128i*)pd);
1216     cache_prefetch ((__m128i*)pm);
1217
1218     while (w >= 4)
1219     {
1220         /* fill cache line with next memory */
1221         cache_prefetch_next ((__m128i*)ps);
1222         cache_prefetch_next ((__m128i*)pd);
1223         cache_prefetch_next ((__m128i*)pm);
1224
1225         xmm_src = combine4 ((__m128i*) ps, (__m128i*) pm);
1226         xmm_dst = load_128_aligned ((__m128i*) pd);
1227
1228         unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
1229         unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
1230
1231         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi);
1232         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
1233
1234         negate_2x128 (xmm_alpha_src_lo, xmm_alpha_src_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi);
1235         negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
1236
1237         pix_add_multiply_2x128 ( &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
1238                                &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi,
1239                                &xmm_dst_lo, &xmm_dst_hi );
1240
1241         save_128_aligned ((__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1242
1243         ps += 4;
1244         pd += 4;
1245         w -= 4;
1246         if (pm)
1247             pm += 4;
1248     }
1249
1250     while (w)
1251     {
1252         s = combine1 (ps, pm);
1253         d = *pd;
1254
1255         *pd++ = core_combine_xor_u_pixel_sse2 (s, d);
1256         w--;
1257         ps++;
1258         if (pm)
1259             pm++;
1260     }
1261 }
1262
1263 static force_inline void
1264 core_combine_add_u_sse2 (uint32_t* dst, const uint32_t* src, const uint32_t* mask, int width)
1265 {
1266     int w = width;
1267     uint32_t s,d;
1268     uint32_t* pd = dst;
1269     const uint32_t* ps = src;
1270     const uint32_t* pm = mask;
1271
1272     /* call prefetch hint to optimize cache load*/
1273     cache_prefetch ((__m128i*)ps);
1274     cache_prefetch ((__m128i*)pd);
1275     cache_prefetch ((__m128i*)pm);
1276
1277     while (w && (unsigned long)pd & 15)
1278     {
1279         s = combine1 (ps, pm);
1280         d = *pd;
1281         ps++;
1282         if (pm)
1283             pm++;
1284         *pd++ = _mm_cvtsi64_si32 (_mm_adds_pu8 (_mm_cvtsi32_si64 (s), _mm_cvtsi32_si64 (d)));
1285         w--;
1286     }
1287
1288     /* call prefetch hint to optimize cache load*/
1289     cache_prefetch ((__m128i*)ps);
1290     cache_prefetch ((__m128i*)pd);
1291     cache_prefetch ((__m128i*)pm);
1292
1293     while (w >= 4)
1294     {
1295         __m128i s;
1296         
1297         /* fill cache line with next memory */
1298         cache_prefetch_next ((__m128i*)ps);
1299         cache_prefetch_next ((__m128i*)pd);
1300         cache_prefetch_next ((__m128i*)pm);
1301
1302         s = combine4((__m128i*)ps,(__m128i*)pm);
1303         
1304         save_128_aligned( (__m128i*)pd,
1305                         _mm_adds_epu8( s, load_128_aligned  ((__m128i*)pd)) );
1306         pd += 4;
1307         ps += 4;
1308         if (pm)
1309             pm += 4;
1310         w -= 4;
1311     }
1312
1313     while (w--)
1314     {
1315         s = combine1 (ps, pm);
1316         d = *pd;
1317         ps++;
1318         *pd++ = _mm_cvtsi64_si32 (_mm_adds_pu8 (_mm_cvtsi32_si64 (s), _mm_cvtsi32_si64 (d)));
1319         if (pm)
1320             pm++;
1321     }
1322 }
1323
1324 static force_inline uint32_t
1325 core_combine_saturate_u_pixel_sse2 (uint32_t src, uint32_t dst)
1326 {
1327     __m64 ms = unpack_32_1x64 (src);
1328     __m64 md = unpack_32_1x64 (dst);
1329     uint32_t sa = src >> 24;
1330     uint32_t da = ~dst >> 24;
1331
1332     if (sa > da)
1333     {
1334         ms = pix_multiply_1x64 (ms, expand_alpha_1x64 (unpack_32_1x64 (DIV_UN8(da, sa) << 24)));
1335     }
1336
1337     return pack_1x64_32 (_mm_adds_pu16 (md, ms));
1338 }
1339
1340 static force_inline void
1341 core_combine_saturate_u_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
1342 {
1343     uint32_t s,d;
1344
1345     uint32_t pack_cmp;
1346     __m128i xmm_src, xmm_dst;
1347
1348     /* call prefetch hint to optimize cache load*/
1349     cache_prefetch ((__m128i*)ps);
1350     cache_prefetch ((__m128i*)pd);
1351     cache_prefetch ((__m128i*)pm);
1352
1353     while (w && (unsigned long)pd & 15)
1354     {
1355         s = combine1 (ps, pm);
1356         d = *pd;
1357         *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1358         w--;
1359         ps++;
1360         if (pm)
1361             pm++;
1362     }
1363
1364     /* call prefetch hint to optimize cache load*/
1365     cache_prefetch ((__m128i*)ps);
1366     cache_prefetch ((__m128i*)pd);
1367     cache_prefetch ((__m128i*)pm);
1368
1369     while (w >= 4)
1370     {
1371         /* fill cache line with next memory */
1372         cache_prefetch_next ((__m128i*)ps);
1373         cache_prefetch_next ((__m128i*)pd);
1374         cache_prefetch_next ((__m128i*)pm);
1375
1376         xmm_dst = load_128_aligned  ((__m128i*)pd);
1377         xmm_src = combine4 ((__m128i*)ps, (__m128i*)pm);
1378
1379         pack_cmp = _mm_movemask_epi8 (_mm_cmpgt_epi32 (_mm_srli_epi32 (xmm_src, 24),
1380                                                       _mm_srli_epi32 (_mm_xor_si128 (xmm_dst, mask_ff000000), 24)));
1381
1382         /* if some alpha src is grater than respective ~alpha dst */
1383         if (pack_cmp)
1384         {
1385             s = combine1 (ps++, pm);
1386             d = *pd;
1387             *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1388             if (pm)
1389                 pm++;
1390
1391             s = combine1 (ps++, pm);
1392             d = *pd;
1393             *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1394             if (pm)
1395                 pm++;
1396
1397             s = combine1 (ps++, pm);
1398             d = *pd;
1399             *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1400             if (pm)
1401                 pm++;
1402
1403             s = combine1 (ps++, pm);
1404             d = *pd;
1405             *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1406             if (pm)
1407                 pm++;
1408         }
1409         else
1410         {
1411             save_128_aligned ((__m128i*)pd, _mm_adds_epu8 (xmm_dst, xmm_src));
1412
1413             pd += 4;
1414             ps += 4;
1415             if (pm)
1416                 pm += 4;
1417         }
1418
1419         w -= 4;
1420     }
1421
1422     while (w--)
1423     {
1424         s = combine1 (ps, pm);
1425         d = *pd;
1426         *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
1427         ps++;
1428         if (pm)
1429             pm++;
1430     }
1431 }
1432
1433 static force_inline void
1434 core_combine_src_c_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm, int w)
1435 {
1436     uint32_t s, m;
1437
1438     __m128i xmm_src_lo, xmm_src_hi;
1439     __m128i xmm_mask_lo, xmm_mask_hi;
1440     __m128i xmm_dst_lo, xmm_dst_hi;
1441
1442     /* call prefetch hint to optimize cache load*/
1443     cache_prefetch ((__m128i*)ps);
1444     cache_prefetch ((__m128i*)pd);
1445     cache_prefetch ((__m128i*)pm);
1446
1447     while (w && (unsigned long)pd & 15)
1448     {
1449         s = *ps++;
1450         m = *pm++;
1451         *pd++ = pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)));
1452         w--;
1453     }
1454
1455     /* call prefetch hint to optimize cache load*/
1456     cache_prefetch ((__m128i*)ps);
1457     cache_prefetch ((__m128i*)pd);
1458     cache_prefetch ((__m128i*)pm);
1459
1460     while (w >= 4)
1461     {
1462         /* fill cache line with next memory */
1463         cache_prefetch_next ((__m128i*)ps);
1464         cache_prefetch_next ((__m128i*)pd);
1465         cache_prefetch_next ((__m128i*)pm);
1466
1467         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1468         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1469
1470         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1471         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1472
1473         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi);
1474
1475         save_128_aligned( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1476
1477         ps += 4;
1478         pd += 4;
1479         pm += 4;
1480         w -= 4;
1481     }
1482
1483     while (w)
1484     {
1485         s = *ps++;
1486         m = *pm++;
1487         *pd++ = pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)));
1488         w--;
1489     }
1490 }
1491
1492 static force_inline uint32_t
1493 core_combine_over_c_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
1494 {
1495     __m64 s = unpack_32_1x64 (src);
1496     __m64 expAlpha = expand_alpha_1x64 (s);
1497     __m64 unpk_mask = unpack_32_1x64 (mask);
1498     __m64 unpk_dst  = unpack_32_1x64 (dst);
1499
1500     return pack_1x64_32 (in_over_1x64 (&s, &expAlpha, &unpk_mask, &unpk_dst));
1501 }
1502
1503 static force_inline void
1504 core_combine_over_c_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm, int w)
1505 {
1506     uint32_t s, m, d;
1507
1508     __m128i xmm_alpha_lo, xmm_alpha_hi;
1509     __m128i xmm_src_lo, xmm_src_hi;
1510     __m128i xmm_dst_lo, xmm_dst_hi;
1511     __m128i xmm_mask_lo, xmm_mask_hi;
1512
1513     /* call prefetch hint to optimize cache load*/
1514     cache_prefetch ((__m128i*)ps);
1515     cache_prefetch ((__m128i*)pd);
1516     cache_prefetch ((__m128i*)pm);
1517
1518     while (w && (unsigned long)pd & 15)
1519     {
1520         s = *ps++;
1521         m = *pm++;
1522         d = *pd;
1523
1524         *pd++ = core_combine_over_c_pixel_sse2 (s, m, d);
1525         w--;
1526     }
1527
1528     /* call prefetch hint to optimize cache load*/
1529     cache_prefetch ((__m128i*)ps);
1530     cache_prefetch ((__m128i*)pd);
1531     cache_prefetch ((__m128i*)pm);
1532
1533     while (w >= 4)
1534     {
1535         /* fill cache line with next memory */
1536         cache_prefetch_next ((__m128i*)ps);
1537         cache_prefetch_next ((__m128i*)pd);
1538         cache_prefetch_next ((__m128i*)pm);
1539
1540         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
1541         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1542         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1543
1544         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1545         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1546         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1547
1548         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi);
1549
1550         in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi);
1551
1552         save_128_aligned( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1553
1554         ps += 4;
1555         pd += 4;
1556         pm += 4;
1557         w -= 4;
1558     }
1559
1560     while (w)
1561     {
1562         s = *ps++;
1563         m = *pm++;
1564         d = *pd;
1565
1566         *pd++ = core_combine_over_c_pixel_sse2 (s, m, d);
1567         w--;
1568     }
1569 }
1570
1571 static force_inline uint32_t
1572 core_combine_over_reverse_c_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
1573 {
1574     __m64 d = unpack_32_1x64 (dst);
1575
1576         return pack_1x64_32(over_1x64 (d, expand_alpha_1x64 (d), pix_multiply_1x64 (unpack_32_1x64 (src), unpack_32_1x64 (mask))));
1577 }
1578
1579 static force_inline void
1580 core_combine_over_reverse_c_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm, int w)
1581 {
1582     uint32_t s, m, d;
1583
1584     __m128i xmm_alpha_lo, xmm_alpha_hi;
1585     __m128i xmm_src_lo, xmm_src_hi;
1586     __m128i xmm_dst_lo, xmm_dst_hi;
1587     __m128i xmm_mask_lo, xmm_mask_hi;
1588
1589     /* call prefetch hint to optimize cache load*/
1590     cache_prefetch ((__m128i*)ps);
1591     cache_prefetch ((__m128i*)pd);
1592     cache_prefetch ((__m128i*)pm);
1593
1594     while (w && (unsigned long)pd & 15)
1595     {
1596         s = *ps++;
1597         m = *pm++;
1598         d = *pd;
1599
1600         *pd++ = core_combine_over_reverse_c_pixel_sse2 (s, m, d);
1601         w--;
1602     }
1603
1604     /* call prefetch hint to optimize cache load*/
1605     cache_prefetch ((__m128i*)ps);
1606     cache_prefetch ((__m128i*)pd);
1607     cache_prefetch ((__m128i*)pm);
1608
1609     while (w >= 4)
1610     {
1611         /* fill cache line with next memory */
1612         cache_prefetch_next ((__m128i*)ps);
1613         cache_prefetch_next ((__m128i*)pd);
1614         cache_prefetch_next ((__m128i*)pm);
1615
1616         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
1617         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1618         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1619
1620         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1621         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1622         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1623
1624         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi);
1625         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1626
1627         over_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_mask_lo, &xmm_mask_hi);
1628
1629         save_128_aligned( (__m128i*)pd, pack_2x128_128 (xmm_mask_lo, xmm_mask_hi));
1630
1631         ps += 4;
1632         pd += 4;
1633         pm += 4;
1634         w -= 4;
1635     }
1636
1637     while (w)
1638     {
1639         s = *ps++;
1640         m = *pm++;
1641         d = *pd;
1642
1643         *pd++ = core_combine_over_reverse_c_pixel_sse2 (s, m, d);
1644         w--;
1645     }
1646 }
1647
1648 static force_inline void
1649 core_combine_in_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
1650 {
1651     uint32_t s, m, d;
1652
1653     __m128i xmm_alpha_lo, xmm_alpha_hi;
1654     __m128i xmm_src_lo, xmm_src_hi;
1655     __m128i xmm_dst_lo, xmm_dst_hi;
1656     __m128i xmm_mask_lo, xmm_mask_hi;
1657
1658     /* call prefetch hint to optimize cache load*/
1659     cache_prefetch ((__m128i*)ps);
1660     cache_prefetch ((__m128i*)pd);
1661     cache_prefetch ((__m128i*)pm);
1662
1663     while (w && (unsigned long)pd & 15)
1664     {
1665         s = *ps++;
1666         m = *pm++;
1667         d = *pd;
1668
1669         *pd++ = pack_1x64_32 (pix_multiply_1x64 (pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)),
1670                                                 expand_alpha_1x64 (unpack_32_1x64 (d))));
1671         w--;
1672     }
1673
1674     /* call prefetch hint to optimize cache load*/
1675     cache_prefetch ((__m128i*)ps);
1676     cache_prefetch ((__m128i*)pd);
1677     cache_prefetch ((__m128i*)pm);
1678
1679     while (w >= 4)
1680     {
1681         /* fill cache line with next memory */
1682         cache_prefetch_next ((__m128i*)ps);
1683         cache_prefetch_next ((__m128i*)pd);
1684         cache_prefetch_next ((__m128i*)pm);
1685
1686         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
1687         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1688         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1689
1690         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1691         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1692         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1693
1694         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi);
1695         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi);
1696
1697         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_dst_lo, &xmm_dst_hi);
1698
1699         save_128_aligned( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1700
1701         ps += 4;
1702         pd += 4;
1703         pm += 4;
1704         w -= 4;
1705     }
1706
1707     while (w)
1708     {
1709         s = *ps++;
1710         m = *pm++;
1711         d = *pd;
1712
1713         *pd++ = pack_1x64_32 (pix_multiply_1x64 (pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)),
1714                                                 expand_alpha_1x64 (unpack_32_1x64 (d))));
1715         w--;
1716     }
1717 }
1718
1719 static force_inline void
1720 core_combine_in_reverse_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
1721 {
1722     uint32_t s, m, d;
1723
1724     __m128i xmm_alpha_lo, xmm_alpha_hi;
1725     __m128i xmm_src_lo, xmm_src_hi;
1726     __m128i xmm_dst_lo, xmm_dst_hi;
1727     __m128i xmm_mask_lo, xmm_mask_hi;
1728
1729     /* call prefetch hint to optimize cache load*/
1730     cache_prefetch ((__m128i*)ps);
1731     cache_prefetch ((__m128i*)pd);
1732     cache_prefetch ((__m128i*)pm);
1733
1734     while (w && (unsigned long)pd & 15)
1735     {
1736         s = *ps++;
1737         m = *pm++;
1738         d = *pd;
1739
1740         *pd++ = pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (d),
1741                                                 pix_multiply_1x64 (unpack_32_1x64 (m),
1742                                                                   expand_alpha_1x64 (unpack_32_1x64 (s)))));
1743         w--;
1744     }
1745
1746     /* call prefetch hint to optimize cache load*/
1747     cache_prefetch ((__m128i*)ps);
1748     cache_prefetch ((__m128i*)pd);
1749     cache_prefetch ((__m128i*)pm);
1750
1751     while (w >= 4)
1752     {
1753         /* fill cache line with next memory */
1754         cache_prefetch_next ((__m128i*)ps);
1755         cache_prefetch_next ((__m128i*)pd);
1756         cache_prefetch_next ((__m128i*)pm);
1757
1758         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
1759         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1760         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1761
1762         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1763         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1764         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1765
1766         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi);
1767         pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_alpha_lo, &xmm_alpha_hi);
1768
1769         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_dst_lo, &xmm_dst_hi);
1770
1771         save_128_aligned( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1772
1773         ps += 4;
1774         pd += 4;
1775         pm += 4;
1776         w -= 4;
1777     }
1778
1779     while (w)
1780     {
1781         s = *ps++;
1782         m = *pm++;
1783         d = *pd;
1784
1785         *pd++ = pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (d),
1786                                                 pix_multiply_1x64 (unpack_32_1x64 (m),
1787                                                                   expand_alpha_1x64 (unpack_32_1x64 (s)))));
1788         w--;
1789     }
1790 }
1791
1792 static force_inline void
1793 core_combine_out_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
1794 {
1795     uint32_t s, m, d;
1796
1797     __m128i xmm_alpha_lo, xmm_alpha_hi;
1798     __m128i xmm_src_lo, xmm_src_hi;
1799     __m128i xmm_dst_lo, xmm_dst_hi;
1800     __m128i xmm_mask_lo, xmm_mask_hi;
1801
1802     /* call prefetch hint to optimize cache load*/
1803     cache_prefetch ((__m128i*)ps);
1804     cache_prefetch ((__m128i*)pd);
1805     cache_prefetch ((__m128i*)pm);
1806
1807     while (w && (unsigned long)pd & 15)
1808     {
1809         s = *ps++;
1810         m = *pm++;
1811         d = *pd;
1812
1813         *pd++ = pack_1x64_32 (pix_multiply_1x64 (pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)),
1814                                                 negate_1x64 (expand_alpha_1x64 (unpack_32_1x64 (d)))));
1815         w--;
1816     }
1817
1818     /* call prefetch hint to optimize cache load*/
1819     cache_prefetch ((__m128i*)ps);
1820     cache_prefetch ((__m128i*)pd);
1821     cache_prefetch ((__m128i*)pm);
1822
1823     while (w >= 4)
1824     {
1825         /* fill cache line with next memory */
1826         cache_prefetch_next ((__m128i*)ps);
1827         cache_prefetch_next ((__m128i*)pd);
1828         cache_prefetch_next ((__m128i*)pm);
1829
1830         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
1831         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1832         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1833
1834         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1835         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1836         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1837
1838         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi);
1839         negate_2x128 (xmm_alpha_lo, xmm_alpha_hi, &xmm_alpha_lo, &xmm_alpha_hi);
1840
1841         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi);
1842         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_dst_lo, &xmm_dst_hi);
1843
1844         save_128_aligned( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1845
1846         ps += 4;
1847         pd += 4;
1848         pm += 4;
1849         w -= 4;
1850     }
1851
1852     while (w)
1853     {
1854         s = *ps++;
1855         m = *pm++;
1856         d = *pd;
1857
1858         *pd++ = pack_1x64_32 (pix_multiply_1x64 (pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)),
1859                                                 negate_1x64 (expand_alpha_1x64 (unpack_32_1x64 (d)))));
1860         w--;
1861     }
1862 }
1863
1864 static force_inline void
1865 core_combine_out_reverse_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
1866 {
1867     uint32_t s, m, d;
1868
1869     __m128i xmm_alpha_lo, xmm_alpha_hi;
1870     __m128i xmm_src_lo, xmm_src_hi;
1871     __m128i xmm_dst_lo, xmm_dst_hi;
1872     __m128i xmm_mask_lo, xmm_mask_hi;
1873
1874     /* call prefetch hint to optimize cache load*/
1875     cache_prefetch ((__m128i*)ps);
1876     cache_prefetch ((__m128i*)pd);
1877     cache_prefetch ((__m128i*)pm);
1878
1879     while (w && (unsigned long)pd & 15)
1880     {
1881         s = *ps++;
1882         m = *pm++;
1883         d = *pd;
1884
1885         *pd++ = pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (d),
1886                                                 negate_1x64 (pix_multiply_1x64 (unpack_32_1x64 (m),
1887                                                                                expand_alpha_1x64 (unpack_32_1x64 (s))))));
1888         w--;
1889     }
1890
1891     /* call prefetch hint to optimize cache load*/
1892     cache_prefetch ((__m128i*)ps);
1893     cache_prefetch ((__m128i*)pd);
1894     cache_prefetch ((__m128i*)pm);
1895
1896     while (w >= 4)
1897     {
1898         /* fill cache line with next memory */
1899         cache_prefetch_next ((__m128i*)ps);
1900         cache_prefetch_next ((__m128i*)pd);
1901         cache_prefetch_next ((__m128i*)pm);
1902
1903         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
1904         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1905         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1906
1907         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1908         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1909         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1910
1911         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi);
1912
1913         pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_mask_lo, &xmm_mask_hi);
1914
1915         negate_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
1916
1917         pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi);
1918
1919         save_128_aligned( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
1920
1921         ps += 4;
1922         pd += 4;
1923         pm += 4;
1924         w -= 4;
1925     }
1926
1927     while (w)
1928     {
1929         s = *ps++;
1930         m = *pm++;
1931         d = *pd;
1932
1933         *pd++ = pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (d),
1934                                                 negate_1x64 (pix_multiply_1x64 (unpack_32_1x64 (m),
1935                                                                                expand_alpha_1x64 (unpack_32_1x64 (s))))));
1936         w--;
1937     }
1938 }
1939
1940 static force_inline uint32_t
1941 core_combine_atop_c_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
1942 {
1943     __m64 m = unpack_32_1x64 (mask);
1944     __m64 s = unpack_32_1x64 (src);
1945     __m64 d = unpack_32_1x64 (dst);
1946     __m64 sa = expand_alpha_1x64 (s);
1947     __m64 da = expand_alpha_1x64 (d);
1948
1949     s = pix_multiply_1x64 (s, m);
1950     m = negate_1x64 (pix_multiply_1x64 (m, sa));
1951
1952     return pack_1x64_32 (pix_add_multiply_1x64 (&d, &m, &s, &da));
1953 }
1954
1955 static force_inline void
1956 core_combine_atop_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
1957 {
1958     uint32_t s, m, d;
1959
1960     __m128i xmm_src_lo, xmm_src_hi;
1961     __m128i xmm_dst_lo, xmm_dst_hi;
1962     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
1963     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
1964     __m128i xmm_mask_lo, xmm_mask_hi;
1965
1966     /* call prefetch hint to optimize cache load*/
1967     cache_prefetch ((__m128i*)ps);
1968     cache_prefetch ((__m128i*)pd);
1969     cache_prefetch ((__m128i*)pm);
1970
1971     while (w && (unsigned long)pd & 15)
1972     {
1973         s = *ps++;
1974         m = *pm++;
1975         d = *pd;
1976
1977         *pd++ = core_combine_atop_c_pixel_sse2 (s, m, d);
1978         w--;
1979     }
1980
1981     /* call prefetch hint to optimize cache load*/
1982     cache_prefetch ((__m128i*)ps);
1983     cache_prefetch ((__m128i*)pd);
1984     cache_prefetch ((__m128i*)pm);
1985
1986     while (w >= 4)
1987     {
1988         /* fill cache line with next memory */
1989         cache_prefetch_next ((__m128i*)ps);
1990         cache_prefetch_next ((__m128i*)pd);
1991         cache_prefetch_next ((__m128i*)pm);
1992
1993         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
1994         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
1995         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
1996
1997         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
1998         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
1999         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2000
2001         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi);
2002         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
2003
2004         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_src_lo, &xmm_src_hi);
2005         pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi, &xmm_mask_lo, &xmm_mask_hi);
2006
2007         negate_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2008
2009         pix_add_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi,
2010                               &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
2011                               &xmm_dst_lo, &xmm_dst_hi);
2012
2013         save_128_aligned( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2014
2015         ps += 4;
2016         pd += 4;
2017         pm += 4;
2018         w -= 4;
2019     }
2020
2021     while (w)
2022     {
2023         s = *ps++;
2024         m = *pm++;
2025         d = *pd;
2026
2027         *pd++ = core_combine_atop_c_pixel_sse2 (s, m, d);
2028         w--;
2029     }
2030 }
2031
2032 static force_inline uint32_t
2033 core_combine_reverse_atop_c_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
2034 {
2035     __m64 m = unpack_32_1x64 (mask);
2036     __m64 s = unpack_32_1x64 (src);
2037     __m64 d = unpack_32_1x64 (dst);
2038
2039     __m64 da = negate_1x64 (expand_alpha_1x64 (d));
2040     __m64 sa = expand_alpha_1x64 (s);
2041
2042     s = pix_multiply_1x64 (s, m);
2043     m = pix_multiply_1x64 (m, sa);
2044
2045     return pack_1x64_32 (pix_add_multiply_1x64 (&d, &m, &s, &da));
2046 }
2047
2048 static force_inline void
2049 core_combine_reverse_atop_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
2050 {
2051     uint32_t s, m, d;
2052
2053     __m128i xmm_src_lo, xmm_src_hi;
2054     __m128i xmm_dst_lo, xmm_dst_hi;
2055     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
2056     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
2057     __m128i xmm_mask_lo, xmm_mask_hi;
2058
2059     /* call prefetch hint to optimize cache load*/
2060     cache_prefetch ((__m128i*)ps);
2061     cache_prefetch ((__m128i*)pd);
2062     cache_prefetch ((__m128i*)pm);
2063
2064     while (w && (unsigned long)pd & 15)
2065     {
2066         s = *ps++;
2067         m = *pm++;
2068         d = *pd;
2069
2070         *pd++ = core_combine_reverse_atop_c_pixel_sse2 (s, m, d);
2071         w--;
2072     }
2073
2074     /* call prefetch hint to optimize cache load*/
2075     cache_prefetch ((__m128i*)ps);
2076     cache_prefetch ((__m128i*)pd);
2077     cache_prefetch ((__m128i*)pm);
2078
2079     while (w >= 4)
2080     {
2081         /* fill cache line with next memory */
2082         cache_prefetch_next ((__m128i*)ps);
2083         cache_prefetch_next ((__m128i*)pd);
2084         cache_prefetch_next ((__m128i*)pm);
2085
2086         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
2087         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
2088         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
2089
2090         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
2091         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
2092         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2093
2094         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi);
2095         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
2096
2097         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_src_lo, &xmm_src_hi);
2098         pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi, &xmm_mask_lo, &xmm_mask_hi);
2099
2100         negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
2101
2102         pix_add_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi,
2103                               &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
2104                               &xmm_dst_lo, &xmm_dst_hi);
2105
2106         save_128_aligned( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2107
2108         ps += 4;
2109         pd += 4;
2110         pm += 4;
2111         w -= 4;
2112     }
2113
2114     while (w)
2115     {
2116         s = *ps++;
2117         m = *pm++;
2118         d = *pd;
2119
2120         *pd++ = core_combine_reverse_atop_c_pixel_sse2 (s, m, d);
2121         w--;
2122     }
2123 }
2124
2125 static force_inline uint32_t
2126 core_combine_xor_c_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
2127 {
2128     __m64 a = unpack_32_1x64 (mask);
2129     __m64 s = unpack_32_1x64 (src);
2130     __m64 d = unpack_32_1x64 (dst);
2131
2132     __m64 alpha_dst = negate_1x64 (pix_multiply_1x64 (a, expand_alpha_1x64 (s)));
2133     __m64 dest      = pix_multiply_1x64 (s, a);
2134     __m64 alpha_src = negate_1x64 (expand_alpha_1x64 (d));
2135
2136     return pack_1x64_32 (pix_add_multiply_1x64 (&d,
2137                                               &alpha_dst,
2138                                               &dest,
2139                                               &alpha_src));
2140 }
2141
2142 static force_inline void
2143 core_combine_xor_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
2144 {
2145     uint32_t s, m, d;
2146
2147     __m128i xmm_src_lo, xmm_src_hi;
2148     __m128i xmm_dst_lo, xmm_dst_hi;
2149     __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
2150     __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
2151     __m128i xmm_mask_lo, xmm_mask_hi;
2152
2153     /* call prefetch hint to optimize cache load*/
2154     cache_prefetch ((__m128i*)ps);
2155     cache_prefetch ((__m128i*)pd);
2156     cache_prefetch ((__m128i*)pm);
2157
2158     while (w && (unsigned long)pd & 15)
2159     {
2160         s = *ps++;
2161         m = *pm++;
2162         d = *pd;
2163
2164         *pd++ = core_combine_xor_c_pixel_sse2 (s, m, d);
2165         w--;
2166     }
2167
2168     /* call prefetch hint to optimize cache load*/
2169     cache_prefetch ((__m128i*)ps);
2170     cache_prefetch ((__m128i*)pd);
2171     cache_prefetch ((__m128i*)pm);
2172
2173     while (w >= 4)
2174     {
2175         /* fill cache line with next memory */
2176         cache_prefetch_next ((__m128i*)ps);
2177         cache_prefetch_next ((__m128i*)pd);
2178         cache_prefetch_next ((__m128i*)pm);
2179
2180         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
2181         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
2182         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
2183
2184         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
2185         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
2186         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2187
2188         expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi);
2189         expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
2190
2191         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_src_lo, &xmm_src_hi);
2192         pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi, &xmm_mask_lo, &xmm_mask_hi);
2193
2194         negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
2195         negate_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2196
2197         pix_add_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi,
2198                               &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
2199                               &xmm_dst_lo, &xmm_dst_hi);
2200
2201         save_128_aligned( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2202
2203         ps += 4;
2204         pd += 4;
2205         pm += 4;
2206         w -= 4;
2207     }
2208
2209     while (w)
2210     {
2211         s = *ps++;
2212         m = *pm++;
2213         d = *pd;
2214
2215         *pd++ = core_combine_xor_c_pixel_sse2 (s, m, d);
2216         w--;
2217     }
2218 }
2219
2220 static force_inline void
2221 core_combine_add_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
2222 {
2223     uint32_t s, m, d;
2224
2225     __m128i xmm_src_lo, xmm_src_hi;
2226     __m128i xmm_dst_lo, xmm_dst_hi;
2227     __m128i xmm_mask_lo, xmm_mask_hi;
2228
2229     /* call prefetch hint to optimize cache load*/
2230     cache_prefetch ((__m128i*)ps);
2231     cache_prefetch ((__m128i*)pd);
2232     cache_prefetch ((__m128i*)pm);
2233
2234     while (w && (unsigned long)pd & 15)
2235     {
2236         s = *ps++;
2237         m = *pm++;
2238         d = *pd;
2239
2240         *pd++ = pack_1x64_32 (_mm_adds_pu8 (pix_multiply_1x64 (unpack_32_1x64 (s),
2241                                                               unpack_32_1x64 (m)),
2242                                             unpack_32_1x64 (d)));
2243         w--;
2244     }
2245
2246     /* call prefetch hint to optimize cache load*/
2247     cache_prefetch ((__m128i*)ps);
2248     cache_prefetch ((__m128i*)pd);
2249     cache_prefetch ((__m128i*)pm);
2250
2251     while (w >= 4)
2252     {
2253         /* fill cache line with next memory */
2254         cache_prefetch_next ((__m128i*)ps);
2255         cache_prefetch_next ((__m128i*)pd);
2256         cache_prefetch_next ((__m128i*)pm);
2257
2258         xmm_src_hi = load_128_unaligned ((__m128i*)ps);
2259         xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
2260         xmm_dst_hi = load_128_aligned ((__m128i*)pd);
2261
2262         unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
2263         unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
2264         unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
2265
2266         pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_src_lo, &xmm_src_hi);
2267
2268         save_128_aligned( (__m128i*)pd, pack_2x128_128 (_mm_adds_epu8 (xmm_src_lo, xmm_dst_lo),
2269                                                       _mm_adds_epu8 (xmm_src_hi, xmm_dst_hi)));
2270
2271         ps += 4;
2272         pd += 4;
2273         pm += 4;
2274         w -= 4;
2275     }
2276
2277     while (w)
2278     {
2279         s = *ps++;
2280         m = *pm++;
2281         d = *pd;
2282
2283         *pd++ = pack_1x64_32 (_mm_adds_pu8 (pix_multiply_1x64 (unpack_32_1x64 (s),
2284                                                               unpack_32_1x64 (m)),
2285                                             unpack_32_1x64 (d)));
2286         w--;
2287     }
2288 }
2289
2290 /* -------------------------------------------------------------------------------------------------
2291  * fb_compose_setup_sSE2
2292  */
2293 static force_inline __m64
2294 create_mask_16_64 (uint16_t mask)
2295 {
2296     return _mm_set1_pi16 (mask);
2297 }
2298
2299 static force_inline __m128i
2300 create_mask_16_128 (uint16_t mask)
2301 {
2302     return _mm_set1_epi16 (mask);
2303 }
2304
2305 static force_inline __m64
2306 create_mask_2x32_64 (uint32_t mask0, uint32_t mask1)
2307 {
2308     return _mm_set_pi32 (mask0, mask1);
2309 }
2310
2311 static force_inline __m128i
2312 create_mask_2x32_128 (uint32_t mask0, uint32_t mask1)
2313 {
2314     return _mm_set_epi32 (mask0, mask1, mask0, mask1);
2315 }
2316
2317 /* SSE2 code patch for fbcompose.c */
2318
2319 static void
2320 sse2combine_over_u (pixman_implementation_t *imp, pixman_op_t op,
2321                   uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
2322 {
2323     core_combine_over_u_sse2 (dst, src, mask, width);
2324     _mm_empty();
2325 }
2326
2327 static void
2328 sse2combine_over_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
2329                          uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
2330 {
2331     core_combine_over_reverse_u_sse2 (dst, src, mask, width);
2332     _mm_empty();
2333 }
2334
2335 static void
2336 sse2combine_in_u (pixman_implementation_t *imp, pixman_op_t op,
2337                 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
2338 {
2339     core_combine_in_u_sse2 (dst, src, mask, width);
2340     _mm_empty();
2341 }
2342
2343 static void
2344 sse2combine_in_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
2345                        uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
2346 {
2347     core_combine_reverse_in_u_sse2 (dst, src, mask, width);
2348     _mm_empty();
2349 }
2350
2351 static void
2352 sse2combine_out_u (pixman_implementation_t *imp, pixman_op_t op,
2353                  uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
2354 {
2355     core_combine_out_u_sse2 (dst, src, mask, width);
2356     _mm_empty();
2357 }
2358
2359 static void
2360 sse2combine_out_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
2361                         uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
2362 {
2363     core_combine_reverse_out_u_sse2 (dst, src, mask, width);
2364     _mm_empty();
2365 }
2366
2367 static void
2368 sse2combine_atop_u (pixman_implementation_t *imp, pixman_op_t op,
2369                   uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
2370 {
2371     core_combine_atop_u_sse2 (dst, src, mask, width);
2372     _mm_empty();
2373 }
2374
2375 static void
2376 sse2combine_atop_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
2377                          uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
2378 {
2379     core_combine_reverse_atop_u_sse2 (dst, src, mask, width);
2380     _mm_empty();
2381 }
2382
2383 static void
2384 sse2combine_xor_u (pixman_implementation_t *imp, pixman_op_t op,
2385                  uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
2386 {
2387     core_combine_xor_u_sse2 (dst, src, mask, width);
2388     _mm_empty();
2389 }
2390
2391 static void
2392 sse2combine_add_u (pixman_implementation_t *imp, pixman_op_t op,
2393                  uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
2394 {
2395     core_combine_add_u_sse2 (dst, src, mask, width);
2396     _mm_empty();
2397 }
2398
2399 static void
2400 sse2combine_saturate_u (pixman_implementation_t *imp, pixman_op_t op,
2401                       uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
2402 {
2403     core_combine_saturate_u_sse2 (dst, src, mask, width);
2404     _mm_empty();
2405 }
2406
2407 static void
2408 sse2combine_src_c (pixman_implementation_t *imp, pixman_op_t op,
2409                  uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
2410 {
2411     core_combine_src_c_sse2 (dst, src, mask, width);
2412     _mm_empty();
2413 }
2414
2415 static void
2416 sse2combine_over_c (pixman_implementation_t *imp, pixman_op_t op,
2417                   uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
2418 {
2419     core_combine_over_c_sse2 (dst, src, mask, width);
2420     _mm_empty();
2421 }
2422
2423 static void
2424 sse2combine_over_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
2425                          uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
2426 {
2427     core_combine_over_reverse_c_sse2 (dst, src, mask, width);
2428     _mm_empty();
2429 }
2430
2431 static void
2432 sse2combine_in_c (pixman_implementation_t *imp, pixman_op_t op,
2433                 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
2434 {
2435     core_combine_in_c_sse2 (dst, src, mask, width);
2436     _mm_empty();
2437 }
2438
2439 static void
2440 sse2combine_in_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
2441                        uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
2442 {
2443     core_combine_in_reverse_c_sse2 (dst, src, mask, width);
2444     _mm_empty();
2445 }
2446
2447 static void
2448 sse2combine_out_c (pixman_implementation_t *imp, pixman_op_t op,
2449                  uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
2450 {
2451     core_combine_out_c_sse2 (dst, src, mask, width);
2452     _mm_empty();
2453 }
2454
2455 static void
2456 sse2combine_out_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
2457                         uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
2458 {
2459     core_combine_out_reverse_c_sse2 (dst, src, mask, width);
2460     _mm_empty();
2461 }
2462
2463 static void
2464 sse2combine_atop_c (pixman_implementation_t *imp, pixman_op_t op,
2465                   uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
2466 {
2467     core_combine_atop_c_sse2 (dst, src, mask, width);
2468     _mm_empty();
2469 }
2470
2471 static void
2472 sse2combine_atop_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
2473                          uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
2474 {
2475     core_combine_reverse_atop_c_sse2 (dst, src, mask, width);
2476     _mm_empty();
2477 }
2478
2479 static void
2480 sse2combine_xor_c (pixman_implementation_t *imp, pixman_op_t op,
2481                  uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
2482 {
2483     core_combine_xor_c_sse2 (dst, src, mask, width);
2484     _mm_empty();
2485 }
2486
2487 static void
2488 sse2combine_add_c (pixman_implementation_t *imp, pixman_op_t op,
2489                  uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
2490 {
2491     core_combine_add_c_sse2 (dst, src, mask, width);
2492     _mm_empty();
2493 }
2494
2495 /* -------------------------------------------------------------------------------------------------
2496  * fast_composite_over_n_8888
2497  */
2498
2499 static void
2500 sse2_composite_over_n_8888 (pixman_implementation_t *imp,
2501                              pixman_op_t op,
2502                             pixman_image_t * src_image,
2503                             pixman_image_t * mask_image,
2504                             pixman_image_t * dst_image,
2505                             int32_t     src_x,
2506                             int32_t     src_y,
2507                             int32_t     mask_x,
2508                             int32_t     mask_y,
2509                             int32_t     dest_x,
2510                             int32_t     dest_y,
2511                             int32_t     width,
2512                             int32_t     height)
2513 {
2514     uint32_t    src;
2515     uint32_t    *dst_line, *dst, d;
2516     uint16_t    w;
2517     int dst_stride;
2518     __m128i xmm_src, xmm_alpha;
2519     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
2520
2521     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
2522
2523     if (src == 0)
2524         return;
2525
2526     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
2527
2528     xmm_src = expand_pixel_32_1x128 (src);
2529     xmm_alpha = expand_alpha_1x128 (xmm_src);
2530
2531     while (height--)
2532     {
2533         dst = dst_line;
2534
2535         /* call prefetch hint to optimize cache load*/
2536         cache_prefetch ((__m128i*)dst);
2537
2538         dst_line += dst_stride;
2539         w = width;
2540
2541         while (w && (unsigned long)dst & 15)
2542         {
2543             d = *dst;
2544             *dst++ = pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
2545                                               _mm_movepi64_pi64 (xmm_alpha),
2546                                               unpack_32_1x64 (d)));
2547             w--;
2548         }
2549
2550         cache_prefetch ((__m128i*)dst);
2551
2552         while (w >= 4)
2553         {
2554             /* fill cache line with next memory */
2555             cache_prefetch_next ((__m128i*)dst);
2556
2557             xmm_dst = load_128_aligned ((__m128i*)dst);
2558
2559             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
2560
2561             over_2x128 (&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_dst_lo, &xmm_dst_hi);
2562
2563             /* rebuid the 4 pixel data and save*/
2564             save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2565
2566             w -= 4;
2567             dst += 4;
2568         }
2569
2570         while (w)
2571         {
2572             d = *dst;
2573             *dst++ = pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
2574                                               _mm_movepi64_pi64 (xmm_alpha),
2575                                               unpack_32_1x64 (d)));
2576             w--;
2577         }
2578
2579     }
2580     _mm_empty();
2581 }
2582
2583 /* -------------------------------------------------------------------------------------------------
2584  * fast_composite_over_n_0565
2585  */
2586 static void
2587 sse2_composite_over_n_0565 (pixman_implementation_t *imp,
2588                              pixman_op_t op,
2589                             pixman_image_t * src_image,
2590                             pixman_image_t * mask_image,
2591                             pixman_image_t * dst_image,
2592                             int32_t     src_x,
2593                             int32_t     src_y,
2594                             int32_t     mask_x,
2595                             int32_t     mask_y,
2596                             int32_t     dest_x,
2597                             int32_t     dest_y,
2598                             int32_t     width,
2599                             int32_t     height)
2600 {
2601     uint32_t    src;
2602     uint16_t    *dst_line, *dst, d;
2603     uint16_t    w;
2604     int         dst_stride;
2605     __m128i xmm_src, xmm_alpha;
2606     __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
2607
2608     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
2609
2610     if (src == 0)
2611         return;
2612
2613     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
2614
2615     xmm_src = expand_pixel_32_1x128 (src);
2616     xmm_alpha = expand_alpha_1x128 (xmm_src);
2617
2618     while (height--)
2619     {
2620         dst = dst_line;
2621
2622         /* call prefetch hint to optimize cache load*/
2623         cache_prefetch ((__m128i*)dst);
2624
2625         dst_line += dst_stride;
2626         w = width;
2627
2628         while (w && (unsigned long)dst & 15)
2629         {
2630             d = *dst;
2631
2632             *dst++ = pack_565_32_16 (pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
2633                                                              _mm_movepi64_pi64 (xmm_alpha),
2634                                                              expand565_16_1x64 (d))));
2635             w--;
2636         }
2637
2638         /* call prefetch hint to optimize cache load*/
2639         cache_prefetch ((__m128i*)dst);
2640
2641         while (w >= 8)
2642         {
2643             /* fill cache line with next memory */
2644             cache_prefetch_next ((__m128i*)dst);
2645
2646             xmm_dst = load_128_aligned ((__m128i*)dst);
2647             
2648             unpack_565_128_4x128 (xmm_dst, &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
2649             
2650             over_2x128 (&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_dst0, &xmm_dst1);
2651             over_2x128 (&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_dst2, &xmm_dst3);
2652
2653             xmm_dst = pack_565_4x128_128 (&xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
2654             save_128_aligned ((__m128i*)dst, xmm_dst);
2655
2656             dst += 8;
2657             w -= 8;
2658         }
2659
2660         while (w--)
2661         {
2662             d = *dst;
2663             *dst++ = pack_565_32_16 (pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
2664                                                              _mm_movepi64_pi64 (xmm_alpha),
2665                                                              expand565_16_1x64 (d))));
2666         }
2667     }
2668
2669     _mm_empty();
2670 }
2671
2672 /* -------------------------------------------------------------------------------------------------
2673  * fast_composite_over_n_8888_8888_ca
2674  */
2675
2676 static void
2677 sse2_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
2678                                        pixman_op_t op,
2679                                       pixman_image_t * src_image,
2680                                       pixman_image_t * mask_image,
2681                                       pixman_image_t * dst_image,
2682                                       int32_t   src_x,
2683                                       int32_t   src_y,
2684                                       int32_t   mask_x,
2685                                       int32_t   mask_y,
2686                                       int32_t   dest_x,
2687                                       int32_t   dest_y,
2688                                       int32_t   width,
2689                                       int32_t   height)
2690 {
2691     uint32_t    src;
2692     uint32_t    *dst_line, d;
2693     uint32_t    *mask_line, m;
2694     uint32_t    pack_cmp;
2695     int dst_stride, mask_stride;
2696
2697     __m128i xmm_src, xmm_alpha;
2698     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
2699     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
2700
2701     __m64 mmsrc_x, mmx_alpha, mmmask_x, mmdest_x;
2702
2703     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
2704
2705     if (src == 0)
2706         return;
2707
2708     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
2709     PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
2710
2711     xmm_src = _mm_unpacklo_epi8 (create_mask_2x32_128 (src, src), _mm_setzero_si128 ());
2712     xmm_alpha = expand_alpha_1x128 (xmm_src);
2713     mmsrc_x   = _mm_movepi64_pi64 (xmm_src);
2714     mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
2715
2716     while (height--)
2717     {
2718         int w = width;
2719         const uint32_t *pm = (uint32_t *)mask_line;
2720         uint32_t *pd = (uint32_t *)dst_line;
2721
2722         dst_line += dst_stride;
2723         mask_line += mask_stride;
2724
2725         /* call prefetch hint to optimize cache load*/
2726         cache_prefetch ((__m128i*)pd);
2727         cache_prefetch ((__m128i*)pm);
2728
2729         while (w && (unsigned long)pd & 15)
2730         {
2731             m = *pm++;
2732
2733             if (m)
2734             {
2735                 d = *pd;
2736                 mmmask_x = unpack_32_1x64 (m);
2737                 mmdest_x = unpack_32_1x64 (d);
2738
2739                 *pd = pack_1x64_32 (in_over_1x64 (&mmsrc_x,
2740                                                  &mmx_alpha,
2741                                                  &mmmask_x,
2742                                                  &mmdest_x));
2743             }
2744
2745             pd++;
2746             w--;
2747         }
2748
2749         /* call prefetch hint to optimize cache load*/
2750         cache_prefetch ((__m128i*)pd);
2751         cache_prefetch ((__m128i*)pm);
2752
2753         while (w >= 4)
2754         {
2755             /* fill cache line with next memory */
2756             cache_prefetch_next ((__m128i*)pd);
2757             cache_prefetch_next ((__m128i*)pm);
2758
2759             xmm_mask = load_128_unaligned ((__m128i*)pm);
2760
2761             pack_cmp = _mm_movemask_epi8 (_mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128()));
2762
2763             /* if all bits in mask are zero, pack_cmp are equal to 0xffff */
2764             if (pack_cmp != 0xffff)
2765             {
2766                 xmm_dst = load_128_aligned ((__m128i*)pd);
2767
2768                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
2769                 unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
2770
2771                 in_over_2x128 (&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi);
2772
2773                 save_128_aligned ((__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2774             }
2775
2776             pd += 4;
2777             pm += 4;
2778             w -= 4;
2779         }
2780
2781         while (w)
2782         {
2783             m = *pm++;
2784
2785             if (m)
2786             {
2787                 d = *pd;
2788                 mmmask_x = unpack_32_1x64 (m);
2789                 mmdest_x = unpack_32_1x64 (d);
2790
2791                 *pd = pack_1x64_32 (in_over_1x64 (&mmsrc_x,
2792                                                  &mmx_alpha,
2793                                                  &mmmask_x,
2794                                                  &mmdest_x));
2795             }
2796
2797             pd++;
2798             w--;
2799         }
2800     }
2801
2802     _mm_empty();
2803 }
2804
2805
2806 /* -------------------------------------------------------------------------------------------------
2807  * fast_composite_over_8888_n_8888
2808  */
2809
2810 static void
2811 sse2_composite_over_8888_n_8888 (pixman_implementation_t *imp,
2812                                 pixman_op_t op,
2813                                pixman_image_t * src_image,
2814                                pixman_image_t * mask_image,
2815                                pixman_image_t * dst_image,
2816                                int32_t  src_x,
2817                                int32_t  src_y,
2818                                int32_t      mask_x,
2819                                int32_t      mask_y,
2820                                int32_t      dest_x,
2821                                int32_t      dest_y,
2822                                int32_t     width,
2823                                int32_t     height)
2824 {
2825     uint32_t    *dst_line, *dst;
2826     uint32_t    *src_line, *src;
2827     uint32_t    mask;
2828     uint16_t    w;
2829     int dst_stride, src_stride;
2830
2831     __m128i xmm_mask;
2832     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
2833     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
2834     __m128i xmm_alpha_lo, xmm_alpha_hi;
2835
2836     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
2837     PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
2838     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
2839
2840     xmm_mask = create_mask_16_128 (mask >> 24);
2841
2842     while (height--)
2843     {
2844         dst = dst_line;
2845         dst_line += dst_stride;
2846         src = src_line;
2847         src_line += src_stride;
2848         w = width;
2849
2850         /* call prefetch hint to optimize cache load*/
2851         cache_prefetch ((__m128i*)dst);
2852         cache_prefetch ((__m128i*)src);
2853
2854         while (w && (unsigned long)dst & 15)
2855         {
2856             uint32_t s = *src++;
2857             uint32_t d = *dst;
2858
2859             __m64 ms = unpack_32_1x64 (s);
2860             __m64 alpha    = expand_alpha_1x64 (ms);
2861             __m64 dest     = _mm_movepi64_pi64 (xmm_mask);
2862             __m64 alpha_dst = unpack_32_1x64 (d);
2863
2864             *dst++ = pack_1x64_32 (in_over_1x64 (&ms,
2865                                                 &alpha,
2866                                                 &dest,
2867                                                 &alpha_dst));
2868
2869             w--;
2870         }
2871
2872         /* call prefetch hint to optimize cache load*/
2873         cache_prefetch ((__m128i*)dst);
2874         cache_prefetch ((__m128i*)src);
2875
2876         while (w >= 4)
2877         {
2878             /* fill cache line with next memory */
2879             cache_prefetch_next ((__m128i*)dst);
2880             cache_prefetch_next ((__m128i*)src);
2881
2882             xmm_src = load_128_unaligned ((__m128i*)src);
2883             xmm_dst = load_128_aligned ((__m128i*)dst);
2884
2885             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
2886             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
2887             expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi);
2888
2889             in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_mask, &xmm_mask, &xmm_dst_lo, &xmm_dst_hi);
2890
2891             save_128_aligned( (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
2892
2893             dst += 4;
2894             src += 4;
2895             w -= 4;
2896         }
2897
2898         while (w)
2899         {
2900             uint32_t s = *src++;
2901             uint32_t d = *dst;
2902
2903             __m64 ms = unpack_32_1x64 (s);
2904             __m64 alpha = expand_alpha_1x64 (ms);
2905             __m64 mask  = _mm_movepi64_pi64 (xmm_mask);
2906             __m64 dest  = unpack_32_1x64 (d);
2907
2908             *dst++ = pack_1x64_32 (in_over_1x64 (&ms,
2909                                                 &alpha,
2910                                                 &mask,
2911                                                 &dest));
2912
2913             w--;
2914         }
2915     }
2916
2917     _mm_empty();
2918 }
2919
2920 /* -------------------------------------------------------------------------------------------------
2921  * fast_composite_over_x888_n_8888
2922  */
2923 static void
2924 sse2_composite_over_x888_n_8888 (pixman_implementation_t *imp,
2925                                 pixman_op_t op,
2926                                pixman_image_t * src_image,
2927                                pixman_image_t * mask_image,
2928                                pixman_image_t * dst_image,
2929                                int32_t  src_x,
2930                                int32_t  src_y,
2931                                int32_t      mask_x,
2932                                int32_t      mask_y,
2933                                int32_t      dest_x,
2934                                int32_t      dest_y,
2935                                int32_t     width,
2936                                int32_t     height)
2937 {
2938     uint32_t    *dst_line, *dst;
2939     uint32_t    *src_line, *src;
2940     uint32_t    mask;
2941     int dst_stride, src_stride;
2942     uint16_t    w;
2943
2944     __m128i xmm_mask, xmm_alpha;
2945     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
2946     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
2947
2948     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
2949     PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
2950     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
2951
2952     xmm_mask = create_mask_16_128 (mask >> 24);
2953     xmm_alpha = mask_00ff;
2954
2955     while (height--)
2956     {
2957         dst = dst_line;
2958         dst_line += dst_stride;
2959         src = src_line;
2960         src_line += src_stride;
2961         w = width;
2962
2963         /* call prefetch hint to optimize cache load*/
2964         cache_prefetch ((__m128i*)dst);
2965         cache_prefetch ((__m128i*)src);
2966
2967         while (w && (unsigned long)dst & 15)
2968         {
2969             uint32_t s = (*src++) | 0xff000000;
2970             uint32_t d = *dst;
2971
2972             __m64 src   = unpack_32_1x64 (s);
2973             __m64 alpha = _mm_movepi64_pi64 (xmm_alpha);
2974             __m64 mask  = _mm_movepi64_pi64 (xmm_mask);
2975             __m64 dest  = unpack_32_1x64 (d);
2976
2977             *dst++ = pack_1x64_32 (in_over_1x64 (&src,
2978                                                 &alpha,
2979                                                 &mask,
2980                                                 &dest));
2981
2982             w--;
2983         }
2984
2985         /* call prefetch hint to optimize cache load*/
2986         cache_prefetch ((__m128i*)dst);
2987         cache_prefetch ((__m128i*)src);
2988
2989         while (w >= 4)
2990         {
2991             /* fill cache line with next memory */
2992             cache_prefetch_next ((__m128i*)dst);
2993             cache_prefetch_next ((__m128i*)src);
2994
2995             xmm_src = _mm_or_si128 (load_128_unaligned ((__m128i*)src), mask_ff000000);
2996             xmm_dst = load_128_aligned ((__m128i*)dst);
2997
2998             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
2999             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
3000
3001             in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_alpha, &xmm_alpha, &xmm_mask, &xmm_mask, &xmm_dst_lo, &xmm_dst_hi);
3002
3003             save_128_aligned( (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
3004
3005             dst += 4;
3006             src += 4;
3007             w -= 4;
3008
3009         }
3010
3011         while (w)
3012         {
3013             uint32_t s = (*src++) | 0xff000000;
3014             uint32_t d = *dst;
3015
3016             __m64 src  = unpack_32_1x64 (s);
3017             __m64 alpha = _mm_movepi64_pi64 (xmm_alpha);
3018             __m64 mask  = _mm_movepi64_pi64 (xmm_mask);
3019             __m64 dest  = unpack_32_1x64 (d);
3020
3021             *dst++ = pack_1x64_32 (in_over_1x64 (&src,
3022                                                 &alpha,
3023                                                 &mask,
3024                                                 &dest));
3025
3026             w--;
3027         }
3028     }
3029
3030     _mm_empty();
3031 }
3032
3033 /* -------------------------------------------------------------------------------------------------
3034  * fast_composite_over_8888_8888
3035  */
3036 static void
3037 sse2_composite_over_8888_8888 (pixman_implementation_t *imp,
3038                               pixman_op_t op,
3039                              pixman_image_t * src_image,
3040                              pixman_image_t * mask_image,
3041                              pixman_image_t * dst_image,
3042                              int32_t    src_x,
3043                              int32_t    src_y,
3044                              int32_t      mask_x,
3045                              int32_t      mask_y,
3046                              int32_t      dest_x,
3047                              int32_t      dest_y,
3048                              int32_t     width,
3049                              int32_t     height)
3050 {
3051     int         dst_stride, src_stride;
3052     uint32_t    *dst_line, *dst;
3053     uint32_t    *src_line, *src;
3054
3055     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3056     PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
3057
3058     dst = dst_line;
3059     src = src_line;
3060
3061     while (height--)
3062     {
3063         core_combine_over_u_sse2 (dst, src, NULL, width);
3064
3065         dst += dst_stride;
3066         src += src_stride;
3067     }
3068     _mm_empty();
3069 }
3070
3071 /* -------------------------------------------------------------------------------------------------
3072  * fast_composite_over_8888_0565
3073  */
3074 static force_inline uint16_t
3075 fast_composite_over_8888_0565pixel (uint32_t src, uint16_t dst)
3076 {
3077     __m64       ms;
3078
3079     ms = unpack_32_1x64 (src);
3080     return pack_565_32_16( pack_1x64_32 (over_1x64 (ms,
3081                                                    expand_alpha_1x64 (ms),
3082                                                    expand565_16_1x64 (dst))));
3083 }
3084
3085 static void
3086 sse2_composite_over_8888_0565 (pixman_implementation_t *imp,
3087                               pixman_op_t op,
3088                              pixman_image_t * src_image,
3089                              pixman_image_t * mask_image,
3090                              pixman_image_t * dst_image,
3091                              int32_t      src_x,
3092                              int32_t      src_y,
3093                              int32_t      mask_x,
3094                              int32_t      mask_y,
3095                              int32_t      dest_x,
3096                              int32_t      dest_y,
3097                              int32_t     width,
3098                              int32_t     height)
3099 {
3100     uint16_t    *dst_line, *dst, d;
3101     uint32_t    *src_line, *src, s;
3102     int dst_stride, src_stride;
3103     uint16_t    w;
3104
3105     __m128i xmm_alpha_lo, xmm_alpha_hi;
3106     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
3107     __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
3108
3109     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
3110     PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
3111
3112 #if 0
3113     /* FIXME
3114      *
3115      * I copy the code from MMX one and keep the fixme.
3116      * If it's a problem there, probably is a problem here.
3117      */
3118     assert (src_image->drawable == mask_image->drawable);
3119 #endif
3120
3121     while (height--)
3122     {
3123         dst = dst_line;
3124         src = src_line;
3125
3126         /* call prefetch hint to optimize cache load*/
3127         cache_prefetch ((__m128i*)src);
3128         cache_prefetch ((__m128i*)dst);
3129
3130         dst_line += dst_stride;
3131         src_line += src_stride;
3132         w = width;
3133
3134         /* Align dst on a 16-byte boundary */
3135         while (w &&
3136                ((unsigned long)dst & 15))
3137         {
3138             s = *src++;
3139             d = *dst;
3140
3141             *dst++ = fast_composite_over_8888_0565pixel (s, d);
3142             w--;
3143         }
3144
3145         /* call prefetch hint to optimize cache load*/
3146         cache_prefetch ((__m128i*)src);
3147         cache_prefetch ((__m128i*)dst);
3148
3149         /* It's a 8 pixel loop */
3150         while (w >= 8)
3151         {
3152             /* fill cache line with next memory */
3153             cache_prefetch_next ((__m128i*)src);
3154             cache_prefetch_next ((__m128i*)dst);
3155
3156             /* I'm loading unaligned because I'm not sure about the address alignment. */
3157             xmm_src = load_128_unaligned ((__m128i*) src);
3158             xmm_dst = load_128_aligned ((__m128i*) dst);
3159
3160             /* Unpacking */
3161             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
3162             unpack_565_128_4x128 (xmm_dst, &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
3163             expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi);
3164
3165             /* I'm loading next 4 pixels from memory before to optimze the memory read. */
3166             xmm_src = load_128_unaligned ((__m128i*) (src+4));
3167
3168             over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_dst0, &xmm_dst1);
3169
3170             /* Unpacking */
3171             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
3172             expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi);
3173
3174             over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_dst2, &xmm_dst3);
3175
3176             save_128_aligned ((__m128i*)dst, pack_565_4x128_128 (&xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
3177
3178             w -= 8;
3179             dst += 8;
3180             src += 8;
3181         }
3182
3183         while (w--)
3184         {
3185             s = *src++;
3186             d = *dst;
3187
3188             *dst++ = fast_composite_over_8888_0565pixel (s, d);
3189         }
3190     }
3191
3192     _mm_empty();
3193 }
3194
3195 /* -------------------------------------------------------------------------------------------------
3196  * fast_composite_over_n_8_8888
3197  */
3198
3199 static void
3200 sse2_composite_over_n_8_8888 (pixman_implementation_t *imp,
3201                                    pixman_op_t op,
3202                                   pixman_image_t * src_image,
3203                                   pixman_image_t * mask_image,
3204                                   pixman_image_t * dst_image,
3205                                   int32_t      src_x,
3206                                   int32_t      src_y,
3207                                   int32_t      mask_x,
3208                                   int32_t      mask_y,
3209                                   int32_t      dest_x,
3210                                   int32_t      dest_y,
3211                                   int32_t     width,
3212                                   int32_t     height)
3213 {
3214     uint32_t    src, srca;
3215     uint32_t    *dst_line, *dst;
3216     uint8_t     *mask_line, *mask;
3217     int dst_stride, mask_stride;
3218     uint16_t    w;
3219     uint32_t m, d;
3220
3221     __m128i xmm_src, xmm_alpha, xmm_def;
3222     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
3223     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
3224
3225     __m64 mmsrc_x, mmx_alpha, mmmask_x, mmx_dest;
3226
3227     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
3228
3229     srca = src >> 24;
3230     if (src == 0)
3231         return;
3232
3233     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3234     PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
3235
3236     xmm_def = create_mask_2x32_128 (src, src);
3237     xmm_src = expand_pixel_32_1x128 (src);
3238     xmm_alpha = expand_alpha_1x128 (xmm_src);
3239     mmsrc_x   = _mm_movepi64_pi64 (xmm_src);
3240     mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
3241
3242     while (height--)
3243     {
3244         dst = dst_line;
3245         dst_line += dst_stride;
3246         mask = mask_line;
3247         mask_line += mask_stride;
3248         w = width;
3249
3250         /* call prefetch hint to optimize cache load*/
3251         cache_prefetch ((__m128i*)mask);
3252         cache_prefetch ((__m128i*)dst);
3253
3254         while (w && (unsigned long)dst & 15)
3255         {
3256             uint8_t m = *mask++;
3257
3258             if (m)
3259             {
3260                 d = *dst;
3261                 mmmask_x = expand_pixel_8_1x64 (m);
3262                 mmx_dest = unpack_32_1x64 (d);
3263
3264                 *dst = pack_1x64_32 (in_over_1x64 (&mmsrc_x,
3265                                                   &mmx_alpha,
3266                                                   &mmmask_x,
3267                                                   &mmx_dest));
3268             }
3269
3270             w--;
3271             dst++;
3272         }
3273
3274         /* call prefetch hint to optimize cache load*/
3275         cache_prefetch ((__m128i*)mask);
3276         cache_prefetch ((__m128i*)dst);
3277
3278         while (w >= 4)
3279         {
3280             /* fill cache line with next memory */
3281             cache_prefetch_next ((__m128i*)mask);
3282             cache_prefetch_next ((__m128i*)dst);
3283
3284             m = *((uint32_t*)mask);
3285
3286             if (srca == 0xff && m == 0xffffffff)
3287             {
3288                 save_128_aligned ((__m128i*)dst, xmm_def);
3289             }
3290             else if (m)
3291             {
3292                 xmm_dst = load_128_aligned ((__m128i*) dst);
3293                 xmm_mask = unpack_32_1x128 (m);
3294                 xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128());
3295
3296                 /* Unpacking */
3297                 unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
3298                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
3299
3300                 expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
3301
3302                 in_over_2x128 (&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi);
3303
3304                 save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
3305             }
3306
3307             w -= 4;
3308             dst += 4;
3309             mask += 4;
3310         }
3311
3312         while (w)
3313         {
3314             uint8_t m = *mask++;
3315
3316             if (m)
3317             {
3318                 d = *dst;
3319                 mmmask_x = expand_pixel_8_1x64 (m);
3320                 mmx_dest = unpack_32_1x64 (d);
3321
3322                 *dst = pack_1x64_32 (in_over_1x64 (&mmsrc_x,
3323                                                   &mmx_alpha,
3324                                                   &mmmask_x,
3325                                                   &mmx_dest));
3326             }
3327
3328             w--;
3329             dst++;
3330         }
3331     }
3332
3333     _mm_empty();
3334 }
3335
3336 /* -------------------------------------------------------------------------------------------------
3337  * fast_composite_over_n_8_8888
3338  */
3339
3340 pixman_bool_t
3341 pixman_fill_sse2 (uint32_t *bits,
3342                  int stride,
3343                  int bpp,
3344                  int x,
3345                  int y,
3346                  int width,
3347                  int height,
3348                  uint32_t data)
3349 {
3350     uint32_t    byte_width;
3351     uint8_t         *byte_line;
3352
3353     __m128i xmm_def;
3354
3355     if (bpp == 16 && (data >> 16 != (data & 0xffff)))
3356         return FALSE;
3357
3358     if (bpp != 16 && bpp != 32)
3359         return FALSE;
3360
3361     if (bpp == 16)
3362     {
3363         stride = stride * (int) sizeof (uint32_t) / 2;
3364         byte_line = (uint8_t *)(((uint16_t *)bits) + stride * y + x);
3365         byte_width = 2 * width;
3366         stride *= 2;
3367     }
3368     else
3369     {
3370         stride = stride * (int) sizeof (uint32_t) / 4;
3371         byte_line = (uint8_t *)(((uint32_t *)bits) + stride * y + x);
3372         byte_width = 4 * width;
3373         stride *= 4;
3374     }
3375
3376     cache_prefetch ((__m128i*)byte_line);
3377     xmm_def = create_mask_2x32_128 (data, data);
3378
3379     while (height--)
3380     {
3381         int w;
3382         uint8_t *d = byte_line;
3383         byte_line += stride;
3384         w = byte_width;
3385
3386
3387         cache_prefetch_next ((__m128i*)d);
3388
3389         while (w >= 2 && ((unsigned long)d & 3))
3390         {
3391             *(uint16_t *)d = data;
3392             w -= 2;
3393             d += 2;
3394         }
3395
3396         while (w >= 4 && ((unsigned long)d & 15))
3397         {
3398             *(uint32_t *)d = data;
3399
3400             w -= 4;
3401             d += 4;
3402         }
3403
3404         cache_prefetch_next ((__m128i*)d);
3405
3406         while (w >= 128)
3407         {
3408             cache_prefetch (((__m128i*)d) + 12);
3409
3410             save_128_aligned ((__m128i*)(d),     xmm_def);
3411             save_128_aligned ((__m128i*)(d+16),  xmm_def);
3412             save_128_aligned ((__m128i*)(d+32),  xmm_def);
3413             save_128_aligned ((__m128i*)(d+48),  xmm_def);
3414             save_128_aligned ((__m128i*)(d+64),  xmm_def);
3415             save_128_aligned ((__m128i*)(d+80),  xmm_def);
3416             save_128_aligned ((__m128i*)(d+96),  xmm_def);
3417             save_128_aligned ((__m128i*)(d+112), xmm_def);
3418
3419             d += 128;
3420             w -= 128;
3421         }
3422
3423         if (w >= 64)
3424         {
3425             cache_prefetch (((__m128i*)d) + 8);
3426
3427             save_128_aligned ((__m128i*)(d),     xmm_def);
3428             save_128_aligned ((__m128i*)(d+16),  xmm_def);
3429             save_128_aligned ((__m128i*)(d+32),  xmm_def);
3430             save_128_aligned ((__m128i*)(d+48),  xmm_def);
3431
3432             d += 64;
3433             w -= 64;
3434         }
3435
3436         cache_prefetch_next ((__m128i*)d);
3437
3438         if (w >= 32)
3439         {
3440             save_128_aligned ((__m128i*)(d),     xmm_def);
3441             save_128_aligned ((__m128i*)(d+16),  xmm_def);
3442
3443             d += 32;
3444             w -= 32;
3445         }
3446
3447         if (w >= 16)
3448         {
3449             save_128_aligned ((__m128i*)(d),     xmm_def);
3450
3451             d += 16;
3452             w -= 16;
3453         }
3454
3455         cache_prefetch_next ((__m128i*)d);
3456
3457         while (w >= 4)
3458         {
3459             *(uint32_t *)d = data;
3460
3461             w -= 4;
3462             d += 4;
3463         }
3464
3465         if (w >= 2)
3466         {
3467             *(uint16_t *)d = data;
3468             w -= 2;
3469             d += 2;
3470         }
3471     }
3472
3473     _mm_empty();
3474     return TRUE;
3475 }
3476
3477 static void
3478 sse2_composite_src_n_8_8888 (pixman_implementation_t *imp,
3479                                       pixman_op_t op,
3480                                      pixman_image_t * src_image,
3481                                      pixman_image_t * mask_image,
3482                                      pixman_image_t * dst_image,
3483                                      int32_t      src_x,
3484                                      int32_t      src_y,
3485                                      int32_t      mask_x,
3486                                      int32_t      mask_y,
3487                                      int32_t      dest_x,
3488                                      int32_t      dest_y,
3489                                      int32_t     width,
3490                                      int32_t     height)
3491 {
3492     uint32_t    src, srca;
3493     uint32_t    *dst_line, *dst;
3494     uint8_t     *mask_line, *mask;
3495     int dst_stride, mask_stride;
3496     uint16_t    w;
3497     uint32_t    m;
3498
3499     __m128i xmm_src, xmm_def;
3500     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
3501
3502     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
3503
3504     srca = src >> 24;
3505     if (src == 0)
3506     {
3507         pixman_fill_sse2 (dst_image->bits.bits, dst_image->bits.rowstride,
3508                         PIXMAN_FORMAT_BPP (dst_image->bits.format),
3509                         dest_x, dest_y, width, height, 0);
3510         return;
3511     }
3512
3513     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3514     PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
3515
3516     xmm_def = create_mask_2x32_128 (src, src);
3517     xmm_src = expand_pixel_32_1x128 (src);
3518
3519     while (height--)
3520     {
3521         dst = dst_line;
3522         dst_line += dst_stride;
3523         mask = mask_line;
3524         mask_line += mask_stride;
3525         w = width;
3526
3527         /* call prefetch hint to optimize cache load*/
3528         cache_prefetch ((__m128i*)mask);
3529         cache_prefetch ((__m128i*)dst);
3530
3531         while (w && (unsigned long)dst & 15)
3532         {
3533             uint8_t m = *mask++;
3534
3535             if (m)
3536             {
3537                 *dst = pack_1x64_32 (pix_multiply_1x64 (_mm_movepi64_pi64 (xmm_src), expand_pixel_8_1x64 (m)));
3538             }
3539             else
3540             {
3541                 *dst = 0;
3542             }
3543
3544             w--;
3545             dst++;
3546         }
3547
3548         /* call prefetch hint to optimize cache load*/
3549         cache_prefetch ((__m128i*)mask);
3550         cache_prefetch ((__m128i*)dst);
3551
3552         while (w >= 4)
3553         {
3554             /* fill cache line with next memory */
3555             cache_prefetch_next ((__m128i*)mask);
3556             cache_prefetch_next ((__m128i*)dst);
3557
3558             m = *((uint32_t*)mask);
3559
3560             if (srca == 0xff && m == 0xffffffff)
3561             {
3562                 save_128_aligned ((__m128i*)dst, xmm_def);
3563             }
3564             else if (m)
3565             {
3566                 xmm_mask = unpack_32_1x128 (m);
3567                 xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128());
3568
3569                 /* Unpacking */
3570                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
3571
3572                 expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
3573
3574                 pix_multiply_2x128 (&xmm_src, &xmm_src, &xmm_mask_lo, &xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
3575
3576                 save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_mask_lo, xmm_mask_hi));
3577             }
3578             else
3579             {
3580                 save_128_aligned ((__m128i*)dst, _mm_setzero_si128());
3581             }
3582
3583             w -= 4;
3584             dst += 4;
3585             mask += 4;
3586         }
3587
3588         while (w)
3589         {
3590             uint8_t m = *mask++;
3591
3592             if (m)
3593             {
3594                 *dst = pack_1x64_32 (pix_multiply_1x64 (_mm_movepi64_pi64 (xmm_src), expand_pixel_8_1x64 (m)));
3595             }
3596             else
3597             {
3598                 *dst = 0;
3599             }
3600
3601             w--;
3602             dst++;
3603         }
3604     }
3605
3606     _mm_empty();
3607 }
3608
3609 /* -------------------------------------------------------------------------------------------------
3610  * fast_composite_over_n_8_0565
3611  */
3612
3613 static void
3614 sse2_composite_over_n_8_0565 (pixman_implementation_t *imp,
3615                                    pixman_op_t op,
3616                                   pixman_image_t * src_image,
3617                                   pixman_image_t * mask_image,
3618                                   pixman_image_t * dst_image,
3619                                   int32_t      src_x,
3620                                   int32_t      src_y,
3621                                   int32_t      mask_x,
3622                                   int32_t      mask_y,
3623                                   int32_t      dest_x,
3624                                   int32_t      dest_y,
3625                                   int32_t     width,
3626                                   int32_t     height)
3627 {
3628     uint32_t    src, srca;
3629     uint16_t    *dst_line, *dst, d;
3630     uint8_t     *mask_line, *mask;
3631     int dst_stride, mask_stride;
3632     uint16_t    w;
3633     uint32_t m;
3634     __m64 mmsrc_x, mmx_alpha, mmmask_x, mmx_dest;
3635
3636     __m128i xmm_src, xmm_alpha;
3637     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
3638     __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
3639
3640     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
3641
3642     srca = src >> 24;
3643     if (src == 0)
3644         return;
3645
3646     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
3647     PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
3648
3649     xmm_src = expand_pixel_32_1x128 (src);
3650     xmm_alpha = expand_alpha_1x128 (xmm_src);
3651     mmsrc_x = _mm_movepi64_pi64 (xmm_src);
3652     mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
3653
3654     while (height--)
3655     {
3656         dst = dst_line;
3657         dst_line += dst_stride;
3658         mask = mask_line;
3659         mask_line += mask_stride;
3660         w = width;
3661
3662         /* call prefetch hint to optimize cache load*/
3663         cache_prefetch ((__m128i*)mask);
3664         cache_prefetch ((__m128i*)dst);
3665
3666         while (w && (unsigned long)dst & 15)
3667         {
3668             m = *mask++;
3669
3670             if (m)
3671             {
3672                 d = *dst;
3673                 mmmask_x = expand_alpha_rev_1x64 (unpack_32_1x64 (m));
3674                 mmx_dest = expand565_16_1x64 (d);
3675
3676                 *dst = pack_565_32_16 (pack_1x64_32 (in_over_1x64 (&mmsrc_x,
3677                                                                  &mmx_alpha,
3678                                                                  &mmmask_x,
3679                                                                  &mmx_dest)));
3680             }
3681
3682             w--;
3683             dst++;
3684         }
3685
3686         /* call prefetch hint to optimize cache load*/
3687         cache_prefetch ((__m128i*)mask);
3688         cache_prefetch ((__m128i*)dst);
3689
3690         while (w >= 8)
3691         {
3692             /* fill cache line with next memory */
3693             cache_prefetch_next ((__m128i*)mask);
3694             cache_prefetch_next ((__m128i*)dst);
3695
3696             xmm_dst = load_128_aligned ((__m128i*) dst);
3697             unpack_565_128_4x128 (xmm_dst, &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
3698
3699             m = *((uint32_t*)mask);
3700             mask += 4;
3701
3702             if (m)
3703             {
3704                 xmm_mask = unpack_32_1x128 (m);
3705                 xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128());
3706
3707                 /* Unpacking */
3708                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
3709
3710                 expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
3711                 in_over_2x128 (&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst0, &xmm_dst1);
3712             }
3713
3714             m = *((uint32_t*)mask);
3715             mask += 4;
3716
3717             if (m)
3718             {
3719                 xmm_mask = unpack_32_1x128 (m);
3720                 xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128());
3721
3722                 /* Unpacking */
3723                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
3724
3725                 expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
3726                 in_over_2x128 (&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst2, &xmm_dst3);
3727             }
3728
3729             save_128_aligned ((__m128i*)dst, pack_565_4x128_128 (&xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
3730
3731             w -= 8;
3732             dst += 8;
3733         }
3734
3735         while (w)
3736         {
3737             m = *mask++;
3738
3739             if (m)
3740             {
3741                 d = *dst;
3742                 mmmask_x = expand_alpha_rev_1x64 (unpack_32_1x64 (m));
3743                 mmx_dest = expand565_16_1x64 (d);
3744
3745                 *dst = pack_565_32_16 (pack_1x64_32 (in_over_1x64 (&mmsrc_x,
3746                                                                  &mmx_alpha,
3747                                                                  &mmmask_x,
3748                                                                  &mmx_dest)));
3749             }
3750
3751             w--;
3752             dst++;
3753         }
3754     }
3755
3756     _mm_empty();
3757 }
3758
3759 /* -------------------------------------------------------------------------------------------------
3760  * fast_composite_over_pixbuf_0565
3761  */
3762
3763 static void
3764 sse2_composite_over_pixbuf_0565 (pixman_implementation_t *imp,
3765                                    pixman_op_t op,
3766                                   pixman_image_t * src_image,
3767                                   pixman_image_t * mask_image,
3768                                   pixman_image_t * dst_image,
3769                                   int32_t      src_x,
3770                                   int32_t      src_y,
3771                                   int32_t      mask_x,
3772                                   int32_t      mask_y,
3773                                   int32_t      dest_x,
3774                                   int32_t      dest_y,
3775                                   int32_t     width,
3776                                   int32_t     height)
3777 {
3778     uint16_t    *dst_line, *dst, d;
3779     uint32_t    *src_line, *src, s;
3780     int         dst_stride, src_stride;
3781     uint16_t    w;
3782     uint32_t    opaque, zero;
3783
3784     __m64 ms;
3785     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
3786     __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
3787
3788     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
3789     PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
3790
3791 #if 0
3792     /* FIXME
3793      *
3794      * I copy the code from MMX one and keep the fixme.
3795      * If it's a problem there, probably is a problem here.
3796      */
3797     assert (src_image->drawable == mask_image->drawable);
3798 #endif
3799
3800     while (height--)
3801     {
3802         dst = dst_line;
3803         dst_line += dst_stride;
3804         src = src_line;
3805         src_line += src_stride;
3806         w = width;
3807
3808         /* call prefetch hint to optimize cache load*/
3809         cache_prefetch ((__m128i*)src);
3810         cache_prefetch ((__m128i*)dst);
3811
3812         while (w && (unsigned long)dst & 15)
3813         {
3814             s = *src++;
3815             d = *dst;
3816
3817             ms = unpack_32_1x64 (s);
3818
3819             *dst++ = pack_565_32_16 (pack_1x64_32 (over_rev_non_pre_1x64(ms, expand565_16_1x64 (d))));
3820             w--;
3821         }
3822
3823         /* call prefetch hint to optimize cache load*/
3824         cache_prefetch ((__m128i*)src);
3825         cache_prefetch ((__m128i*)dst);
3826
3827         while (w >= 8)
3828         {
3829             /* fill cache line with next memory */
3830             cache_prefetch_next ((__m128i*)src);
3831             cache_prefetch_next ((__m128i*)dst);
3832
3833             /* First round */
3834             xmm_src = load_128_unaligned((__m128i*)src);
3835             xmm_dst = load_128_aligned  ((__m128i*)dst);
3836
3837             opaque = is_opaque (xmm_src);
3838             zero = is_zero (xmm_src);
3839
3840             unpack_565_128_4x128 (xmm_dst, &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
3841             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
3842
3843             /* preload next round*/
3844             xmm_src = load_128_unaligned((__m128i*)(src+4));
3845             
3846             if (opaque)
3847             {
3848                 invert_colors_2x128 (xmm_src_lo, xmm_src_hi, &xmm_dst0, &xmm_dst1);
3849             }
3850             else if (!zero)
3851             {
3852                 over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi, &xmm_dst0, &xmm_dst1);
3853             }
3854
3855             /* Second round */
3856             opaque = is_opaque (xmm_src);
3857             zero = is_zero (xmm_src);
3858
3859             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
3860
3861             if (opaque)
3862             {
3863                 invert_colors_2x128 (xmm_src_lo, xmm_src_hi, &xmm_dst2, &xmm_dst3);
3864             }
3865             else if (zero)
3866             {
3867                 over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi, &xmm_dst2, &xmm_dst3);
3868             }
3869
3870             save_128_aligned ((__m128i*)dst, pack_565_4x128_128 (&xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
3871
3872             w -= 8;
3873             src += 8;
3874             dst += 8;
3875         }
3876
3877         while (w)
3878         {
3879             s = *src++;
3880             d = *dst;
3881
3882             ms = unpack_32_1x64 (s);
3883
3884             *dst++ = pack_565_32_16 (pack_1x64_32 (over_rev_non_pre_1x64(ms, expand565_16_1x64 (d))));
3885             w--;
3886         }
3887     }
3888
3889     _mm_empty();
3890 }
3891
3892 /* -------------------------------------------------------------------------------------------------
3893  * fast_composite_over_pixbuf_8888
3894  */
3895
3896 static void
3897 sse2_composite_over_pixbuf_8888 (pixman_implementation_t *imp,
3898                                    pixman_op_t op,
3899                                   pixman_image_t * src_image,
3900                                   pixman_image_t * mask_image,
3901                                   pixman_image_t * dst_image,
3902                                   int32_t      src_x,
3903                                   int32_t      src_y,
3904                                   int32_t      mask_x,
3905                                   int32_t      mask_y,
3906                                   int32_t      dest_x,
3907                                   int32_t      dest_y,
3908                                   int32_t     width,
3909                                   int32_t     height)
3910 {
3911     uint32_t    *dst_line, *dst, d;
3912     uint32_t    *src_line, *src, s;
3913     int dst_stride, src_stride;
3914     uint16_t    w;
3915     uint32_t    opaque, zero;
3916
3917     __m128i xmm_src_lo, xmm_src_hi;
3918     __m128i xmm_dst_lo, xmm_dst_hi;
3919
3920     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
3921     PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
3922
3923 #if 0
3924     /* FIXME
3925      *
3926      * I copy the code from MMX one and keep the fixme.
3927      * If it's a problem there, probably is a problem here.
3928      */
3929     assert (src_image->drawable == mask_image->drawable);
3930 #endif
3931
3932     while (height--)
3933     {
3934         dst = dst_line;
3935         dst_line += dst_stride;
3936         src = src_line;
3937         src_line += src_stride;
3938         w = width;
3939
3940         /* call prefetch hint to optimize cache load*/
3941         cache_prefetch ((__m128i*)src);
3942         cache_prefetch ((__m128i*)dst);
3943
3944         while (w && (unsigned long)dst & 15)
3945         {
3946             s = *src++;
3947             d = *dst;
3948
3949             *dst++ = pack_1x64_32 (over_rev_non_pre_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (d)));
3950
3951             w--;
3952         }
3953
3954         /* call prefetch hint to optimize cache load*/
3955         cache_prefetch ((__m128i*)src);
3956         cache_prefetch ((__m128i*)dst);
3957
3958         while (w >= 4)
3959         {
3960             /* fill cache line with next memory */
3961             cache_prefetch_next ((__m128i*)src);
3962             cache_prefetch_next ((__m128i*)dst);
3963
3964             xmm_src_hi = load_128_unaligned((__m128i*)src);
3965
3966             opaque = is_opaque (xmm_src_hi);
3967             zero = is_zero (xmm_src_hi);
3968
3969             unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
3970
3971             if (opaque)
3972             {
3973                 invert_colors_2x128( xmm_src_lo, xmm_src_hi, &xmm_dst_lo, &xmm_dst_hi);
3974
3975                 save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
3976             }
3977             else if (!zero)
3978             {
3979                 xmm_dst_hi = load_128_aligned  ((__m128i*)dst);
3980
3981                 unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
3982
3983                 over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi, &xmm_dst_lo, &xmm_dst_hi);
3984
3985                 save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
3986             }
3987
3988             w -= 4;
3989             dst += 4;
3990             src += 4;
3991         }
3992
3993         while (w)
3994         {
3995             s = *src++;
3996             d = *dst;
3997
3998             *dst++ = pack_1x64_32 (over_rev_non_pre_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (d)));
3999
4000             w--;
4001         }
4002     }
4003
4004     _mm_empty();
4005 }
4006
4007 /* -------------------------------------------------------------------------------------------------
4008  * fast_composite_over_n_8888_0565_ca
4009  */
4010
4011 static void
4012 sse2_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
4013                                        pixman_op_t op,
4014                                       pixman_image_t * src_image,
4015                                       pixman_image_t * mask_image,
4016                                       pixman_image_t * dst_image,
4017                                       int32_t      src_x,
4018                                       int32_t      src_y,
4019                                       int32_t      mask_x,
4020                                       int32_t      mask_y,
4021                                       int32_t      dest_x,
4022                                       int32_t      dest_y,
4023                                       int32_t     width,
4024                                       int32_t     height)
4025 {
4026     uint32_t    src;
4027     uint16_t    *dst_line, *dst, d;
4028     uint32_t    *mask_line, *mask, m;
4029     int dst_stride, mask_stride;
4030     int w;
4031     uint32_t pack_cmp;
4032
4033     __m128i xmm_src, xmm_alpha;
4034     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
4035     __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
4036
4037     __m64 mmsrc_x, mmx_alpha, mmmask_x, mmx_dest;
4038
4039     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
4040
4041     if (src == 0)
4042         return;
4043
4044     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
4045     PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
4046
4047     xmm_src = expand_pixel_32_1x128 (src);
4048     xmm_alpha = expand_alpha_1x128 (xmm_src);
4049     mmsrc_x = _mm_movepi64_pi64 (xmm_src);
4050     mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
4051
4052     while (height--)
4053     {
4054         w = width;
4055         mask = mask_line;
4056         dst = dst_line;
4057         mask_line += mask_stride;
4058         dst_line += dst_stride;
4059
4060         /* call prefetch hint to optimize cache load*/
4061         cache_prefetch ((__m128i*)mask);
4062         cache_prefetch ((__m128i*)dst);
4063
4064         while (w && ((unsigned long)dst & 15))
4065         {
4066             m = *(uint32_t *) mask;
4067
4068             if (m)
4069             {
4070                 d = *dst;
4071                 mmmask_x = unpack_32_1x64 (m);
4072                 mmx_dest = expand565_16_1x64 (d);
4073
4074                 *dst = pack_565_32_16 (pack_1x64_32 (in_over_1x64 (&mmsrc_x,
4075                                                                  &mmx_alpha,
4076                                                                  &mmmask_x,
4077                                                                  &mmx_dest)));
4078             }
4079
4080             w--;
4081             dst++;
4082             mask++;
4083         }
4084
4085         /* call prefetch hint to optimize cache load*/
4086         cache_prefetch ((__m128i*)mask);
4087         cache_prefetch ((__m128i*)dst);
4088
4089         while (w >= 8)
4090         {
4091             /* fill cache line with next memory */
4092             cache_prefetch_next ((__m128i*)mask);
4093             cache_prefetch_next ((__m128i*)dst);
4094
4095             /* First round */
4096             xmm_mask = load_128_unaligned((__m128i*)mask);
4097             xmm_dst = load_128_aligned((__m128i*)dst);
4098
4099             pack_cmp = _mm_movemask_epi8 (_mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128()));
4100
4101             unpack_565_128_4x128 (xmm_dst, &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
4102             unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4103
4104             /* preload next round*/
4105             xmm_mask = load_128_unaligned((__m128i*)(mask+4));
4106             /* preload next round*/
4107
4108             if (pack_cmp != 0xffff)
4109             {
4110                 in_over_2x128(&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst0, &xmm_dst1);
4111             }
4112
4113             /* Second round */
4114             pack_cmp = _mm_movemask_epi8 (_mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128()));
4115
4116             unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4117
4118             if (pack_cmp != 0xffff)
4119             {
4120                 in_over_2x128(&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst2, &xmm_dst3);
4121             }
4122
4123             save_128_aligned ((__m128i*)dst, pack_565_4x128_128 (&xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
4124
4125             w -= 8;
4126             dst += 8;
4127             mask += 8;
4128         }
4129
4130         while (w)
4131         {
4132             m = *(uint32_t *) mask;
4133
4134             if (m)
4135             {
4136                 d = *dst;
4137                 mmmask_x = unpack_32_1x64 (m);
4138                 mmx_dest = expand565_16_1x64 (d);
4139
4140                 *dst = pack_565_32_16 (pack_1x64_32 (in_over_1x64 (&mmsrc_x,
4141                                                                  &mmx_alpha,
4142                                                                  &mmmask_x,
4143                                                                  &mmx_dest)));
4144             }
4145
4146             w--;
4147             dst++;
4148             mask++;
4149         }
4150     }
4151
4152     _mm_empty ();
4153 }
4154
4155 /* -------------------------------------------------------------------------------------------------
4156  * fast_composite_in_n_8_8
4157  */
4158
4159 static void
4160 sse2_composite_in_n_8_8 (pixman_implementation_t *imp,
4161                          pixman_op_t op,
4162                         pixman_image_t * src_image,
4163                         pixman_image_t * mask_image,
4164                         pixman_image_t * dst_image,
4165                         int32_t      src_x,
4166                         int32_t      src_y,
4167                         int32_t      mask_x,
4168                         int32_t      mask_y,
4169                         int32_t      dest_x,
4170                         int32_t      dest_y,
4171                         int32_t     width,
4172                         int32_t     height)
4173 {
4174     uint8_t     *dst_line, *dst;
4175     uint8_t     *mask_line, *mask;
4176     int dst_stride, mask_stride;
4177     uint16_t    w, d, m;
4178     uint32_t    src;
4179     uint8_t     sa;
4180
4181     __m128i xmm_alpha;
4182     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
4183     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
4184
4185     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
4186     PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
4187
4188     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
4189
4190     sa = src >> 24;
4191     if (sa == 0)
4192         return;
4193
4194     xmm_alpha = expand_alpha_1x128 (expand_pixel_32_1x128 (src));
4195
4196     while (height--)
4197     {
4198         dst = dst_line;
4199         dst_line += dst_stride;
4200         mask = mask_line;
4201         mask_line += mask_stride;
4202         w = width;
4203
4204         /* call prefetch hint to optimize cache load*/
4205         cache_prefetch ((__m128i*)mask);
4206         cache_prefetch ((__m128i*)dst);
4207
4208         while (w && ((unsigned long)dst & 15))
4209         {
4210             m = (uint32_t) *mask++;
4211             d = (uint32_t) *dst;
4212
4213             *dst++ = (uint8_t) pack_1x64_32 (pix_multiply_1x64 (pix_multiply_1x64 (_mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)),
4214                                                                unpack_32_1x64 (d)));
4215             w--;
4216         }
4217
4218         /* call prefetch hint to optimize cache load*/
4219         cache_prefetch ((__m128i*)mask);
4220         cache_prefetch ((__m128i*)dst);
4221
4222         while (w >= 16)
4223         {
4224             /* fill cache line with next memory */
4225             cache_prefetch_next ((__m128i*)mask);
4226             cache_prefetch_next ((__m128i*)dst);
4227
4228             xmm_mask = load_128_unaligned((__m128i*)mask);
4229             xmm_dst = load_128_aligned((__m128i*)dst);
4230
4231             unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4232             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
4233
4234             pix_multiply_2x128 (&xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
4235             pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
4236
4237             save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
4238
4239             mask += 16;
4240             dst += 16;
4241             w -= 16;
4242         }
4243
4244         while (w)
4245         {
4246             m = (uint32_t) *mask++;
4247             d = (uint32_t) *dst;
4248
4249             *dst++ = (uint8_t) pack_1x64_32 (pix_multiply_1x64 (pix_multiply_1x64 (_mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)),
4250                                                                unpack_32_1x64 (d)));
4251             w--;
4252         }
4253     }
4254
4255     _mm_empty();
4256 }
4257
4258 /* -------------------------------------------------------------------------------------------------
4259  * fast_composite_in_8_8
4260  */
4261
4262 static void
4263 sse2_composite_in_8_8 (pixman_implementation_t *imp,
4264                        pixman_op_t op,
4265                       pixman_image_t * src_image,
4266                       pixman_image_t * mask_image,
4267                       pixman_image_t * dst_image,
4268                       int32_t      src_x,
4269                       int32_t      src_y,
4270                       int32_t      mask_x,
4271                       int32_t      mask_y,
4272                       int32_t      dest_x,
4273                       int32_t      dest_y,
4274                       int32_t     width,
4275                       int32_t     height)
4276 {
4277     uint8_t     *dst_line, *dst;
4278     uint8_t     *src_line, *src;
4279     int src_stride, dst_stride;
4280     uint16_t    w;
4281     uint32_t    s, d;
4282
4283     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
4284     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
4285
4286     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
4287     PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
4288
4289     while (height--)
4290     {
4291         dst = dst_line;
4292         dst_line += dst_stride;
4293         src = src_line;
4294         src_line += src_stride;
4295         w = width;
4296
4297         /* call prefetch hint to optimize cache load*/
4298         cache_prefetch ((__m128i*)src);
4299         cache_prefetch ((__m128i*)dst);
4300
4301         while (w && ((unsigned long)dst & 15))
4302         {
4303             s = (uint32_t) *src++;
4304             d = (uint32_t) *dst;
4305
4306             *dst++ = (uint8_t) pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (s),unpack_32_1x64 (d)));
4307             w--;
4308         }
4309
4310         /* call prefetch hint to optimize cache load*/
4311         cache_prefetch ((__m128i*)src);
4312         cache_prefetch ((__m128i*)dst);
4313
4314         while (w >= 16)
4315         {
4316             /* fill cache line with next memory */
4317             cache_prefetch_next ((__m128i*)src);
4318             cache_prefetch_next ((__m128i*)dst);
4319
4320             xmm_src = load_128_unaligned((__m128i*)src);
4321             xmm_dst = load_128_aligned((__m128i*)dst);
4322
4323             unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
4324             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
4325
4326             pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_dst_lo, &xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
4327
4328             save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
4329
4330             src += 16;
4331             dst += 16;
4332             w -= 16;
4333         }
4334
4335         while (w)
4336         {
4337             s = (uint32_t) *src++;
4338             d = (uint32_t) *dst;
4339
4340             *dst++ = (uint8_t) pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (s),unpack_32_1x64 (d)));
4341             w--;
4342         }
4343     }
4344
4345     _mm_empty ();
4346 }
4347
4348 /* -------------------------------------------------------------------------------------------------
4349  * fast_composite_add_8888_8_8
4350  */
4351
4352 static void
4353 sse2_composite_add_8888_8_8 (pixman_implementation_t *imp,
4354                                 pixman_op_t op,
4355                                pixman_image_t * src_image,
4356                                pixman_image_t * mask_image,
4357                                pixman_image_t * dst_image,
4358                                int32_t      src_x,
4359                                int32_t      src_y,
4360                                int32_t      mask_x,
4361                                int32_t      mask_y,
4362                                int32_t      dest_x,
4363                                int32_t      dest_y,
4364                                int32_t     width,
4365                                int32_t     height)
4366 {
4367     uint8_t     *dst_line, *dst;
4368     uint8_t     *mask_line, *mask;
4369     int dst_stride, mask_stride;
4370     uint16_t    w;
4371     uint32_t    src;
4372     uint8_t     sa;
4373     uint32_t m, d;
4374
4375     __m128i xmm_alpha;
4376     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
4377     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
4378
4379     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
4380     PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
4381
4382     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
4383
4384     sa = src >> 24;
4385     if (sa == 0)
4386         return;
4387
4388     xmm_alpha = expand_alpha_1x128 (expand_pixel_32_1x128 (src));
4389
4390     while (height--)
4391     {
4392         dst = dst_line;
4393         dst_line += dst_stride;
4394         mask = mask_line;
4395         mask_line += mask_stride;
4396         w = width;
4397
4398         /* call prefetch hint to optimize cache load*/
4399         cache_prefetch ((__m128i*)mask);
4400         cache_prefetch ((__m128i*)dst);
4401
4402         while (w && ((unsigned long)dst & 15))
4403         {
4404             m = (uint32_t) *mask++;
4405             d = (uint32_t) *dst;
4406
4407             *dst++ = (uint8_t) pack_1x64_32 (_mm_adds_pu16 (pix_multiply_1x64 (_mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)),
4408                                                                               unpack_32_1x64 (d)));
4409             w--;
4410         }
4411
4412         /* call prefetch hint to optimize cache load*/
4413         cache_prefetch ((__m128i*)mask);
4414         cache_prefetch ((__m128i*)dst);
4415
4416         while (w >= 16)
4417         {
4418             /* fill cache line with next memory */
4419             cache_prefetch_next ((__m128i*)mask);
4420             cache_prefetch_next ((__m128i*)dst);
4421
4422             xmm_mask = load_128_unaligned((__m128i*)mask);
4423             xmm_dst = load_128_aligned((__m128i*)dst);
4424
4425             unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4426             unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
4427
4428             pix_multiply_2x128 (&xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
4429
4430             xmm_dst_lo = _mm_adds_epu16 (xmm_mask_lo, xmm_dst_lo);
4431             xmm_dst_hi = _mm_adds_epu16 (xmm_mask_hi, xmm_dst_hi);
4432
4433             save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
4434
4435             mask += 16;
4436             dst += 16;
4437             w -= 16;
4438         }
4439
4440         while (w)
4441         {
4442             m = (uint32_t) *mask++;
4443             d = (uint32_t) *dst;
4444
4445             *dst++ = (uint8_t) pack_1x64_32 (_mm_adds_pu16 (pix_multiply_1x64 (_mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)),
4446                                                                               unpack_32_1x64 (d)));
4447             w--;
4448         }
4449     }
4450
4451     _mm_empty();
4452 }
4453
4454 /* -------------------------------------------------------------------------------------------------
4455  * fast_composite_add_8000_8000
4456  */
4457
4458 static void
4459 sse2_composite_add_8000_8000 (pixman_implementation_t *imp,
4460                                  pixman_op_t op,
4461                                 pixman_image_t * src_image,
4462                                 pixman_image_t * mask_image,
4463                                 pixman_image_t * dst_image,
4464                                 int32_t      src_x,
4465                                 int32_t      src_y,
4466                                 int32_t      mask_x,
4467                                 int32_t      mask_y,
4468                                 int32_t      dest_x,
4469                                 int32_t      dest_y,
4470                                 int32_t     width,
4471                                 int32_t     height)
4472 {
4473     uint8_t     *dst_line, *dst;
4474     uint8_t     *src_line, *src;
4475     int dst_stride, src_stride;
4476     uint16_t    w;
4477     uint16_t    t;
4478
4479     PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
4480     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
4481
4482     while (height--)
4483     {
4484         dst = dst_line;
4485         src = src_line;
4486
4487         /* call prefetch hint to optimize cache load*/
4488         cache_prefetch ((__m128i*)src);
4489         cache_prefetch ((__m128i*)dst);
4490
4491         dst_line += dst_stride;
4492         src_line += src_stride;
4493         w = width;
4494
4495         /* Small head */
4496         while (w && (unsigned long)dst & 3)
4497         {
4498             t = (*dst) + (*src++);
4499             *dst++ = t | (0 - (t >> 8));
4500             w--;
4501         }
4502
4503         core_combine_add_u_sse2 ((uint32_t*)dst, (uint32_t*)src, NULL, w >> 2);
4504
4505         /* Small tail */
4506         dst += w & 0xfffc;
4507         src += w & 0xfffc;
4508
4509         w &= 3;
4510
4511         while (w)
4512         {
4513             t = (*dst) + (*src++);
4514             *dst++ = t | (0 - (t >> 8));
4515             w--;
4516         }
4517     }
4518
4519     _mm_empty();
4520 }
4521
4522 /* -------------------------------------------------------------------------------------------------
4523  * fast_composite_add_8888_8888
4524  */
4525 static void
4526 sse2_composite_add_8888_8888 (pixman_implementation_t *imp,
4527                                  pixman_op_t    op,
4528                                 pixman_image_t *        src_image,
4529                                 pixman_image_t *        mask_image,
4530                                 pixman_image_t *         dst_image,
4531                                 int32_t          src_x,
4532                                 int32_t      src_y,
4533                                 int32_t      mask_x,
4534                                 int32_t      mask_y,
4535                                 int32_t      dest_x,
4536                                 int32_t      dest_y,
4537                                 int32_t     width,
4538                                 int32_t     height)
4539 {
4540     uint32_t    *dst_line, *dst;
4541     uint32_t    *src_line, *src;
4542     int dst_stride, src_stride;
4543
4544     PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
4545     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
4546
4547     while (height--)
4548     {
4549         dst = dst_line;
4550         dst_line += dst_stride;
4551         src = src_line;
4552         src_line += src_stride;
4553
4554         core_combine_add_u_sse2 (dst, src, NULL, width);
4555     }
4556
4557     _mm_empty();
4558 }
4559
4560 /* -------------------------------------------------------------------------------------------------
4561  * sse2_composite_copy_area
4562  */
4563
4564 static pixman_bool_t
4565 pixman_blt_sse2 (uint32_t *src_bits,
4566                uint32_t *dst_bits,
4567                int src_stride,
4568                int dst_stride,
4569                int src_bpp,
4570                int dst_bpp,
4571                int src_x, int src_y,
4572                int dst_x, int dst_y,
4573                int width, int height)
4574 {
4575     uint8_t *   src_bytes;
4576     uint8_t *   dst_bytes;
4577     int         byte_width;
4578
4579     if (src_bpp != dst_bpp)
4580         return FALSE;
4581
4582     if (src_bpp == 16)
4583     {
4584         src_stride = src_stride * (int) sizeof (uint32_t) / 2;
4585         dst_stride = dst_stride * (int) sizeof (uint32_t) / 2;
4586         src_bytes = (uint8_t *)(((uint16_t *)src_bits) + src_stride * (src_y) + (src_x));
4587         dst_bytes = (uint8_t *)(((uint16_t *)dst_bits) + dst_stride * (dst_y) + (dst_x));
4588         byte_width = 2 * width;
4589         src_stride *= 2;
4590         dst_stride *= 2;
4591     }
4592     else if (src_bpp == 32)
4593     {
4594         src_stride = src_stride * (int) sizeof (uint32_t) / 4;
4595         dst_stride = dst_stride * (int) sizeof (uint32_t) / 4;
4596         src_bytes = (uint8_t *)(((uint32_t *)src_bits) + src_stride * (src_y) + (src_x));
4597         dst_bytes = (uint8_t *)(((uint32_t *)dst_bits) + dst_stride * (dst_y) + (dst_x));
4598         byte_width = 4 * width;
4599         src_stride *= 4;
4600         dst_stride *= 4;
4601     }
4602     else
4603     {
4604         return FALSE;
4605     }
4606
4607     cache_prefetch ((__m128i*)src_bytes);
4608     cache_prefetch ((__m128i*)dst_bytes);
4609
4610     while (height--)
4611     {
4612         int w;
4613         uint8_t *s = src_bytes;
4614         uint8_t *d = dst_bytes;
4615         src_bytes += src_stride;
4616         dst_bytes += dst_stride;
4617         w = byte_width;
4618
4619         cache_prefetch_next ((__m128i*)s);
4620         cache_prefetch_next ((__m128i*)d);
4621
4622         while (w >= 2 && ((unsigned long)d & 3))
4623         {
4624             *(uint16_t *)d = *(uint16_t *)s;
4625             w -= 2;
4626             s += 2;
4627             d += 2;
4628         }
4629
4630         while (w >= 4 && ((unsigned long)d & 15))
4631         {
4632             *(uint32_t *)d = *(uint32_t *)s;
4633
4634             w -= 4;
4635             s += 4;
4636             d += 4;
4637         }
4638
4639         cache_prefetch_next ((__m128i*)s);
4640         cache_prefetch_next ((__m128i*)d);
4641
4642         while (w >= 64)
4643         {
4644             __m128i xmm0, xmm1, xmm2, xmm3;
4645
4646             /* 128 bytes ahead */
4647             cache_prefetch (((__m128i*)s) + 8);
4648             cache_prefetch (((__m128i*)d) + 8);
4649
4650             xmm0 = load_128_unaligned ((__m128i*)(s));
4651             xmm1 = load_128_unaligned ((__m128i*)(s+16));
4652             xmm2 = load_128_unaligned ((__m128i*)(s+32));
4653             xmm3 = load_128_unaligned ((__m128i*)(s+48));
4654
4655             save_128_aligned ((__m128i*)(d),    xmm0);
4656             save_128_aligned ((__m128i*)(d+16), xmm1);
4657             save_128_aligned ((__m128i*)(d+32), xmm2);
4658             save_128_aligned ((__m128i*)(d+48), xmm3);
4659
4660             s += 64;
4661             d += 64;
4662             w -= 64;
4663         }
4664
4665         cache_prefetch_next ((__m128i*)s);
4666         cache_prefetch_next ((__m128i*)d);
4667
4668         while (w >= 16)
4669         {
4670             save_128_aligned ((__m128i*)d, load_128_unaligned ((__m128i*)s) );
4671
4672             w -= 16;
4673             d += 16;
4674             s += 16;
4675         }
4676
4677         cache_prefetch_next ((__m128i*)s);
4678         cache_prefetch_next ((__m128i*)d);
4679
4680         while (w >= 4)
4681         {
4682             *(uint32_t *)d = *(uint32_t *)s;
4683
4684             w -= 4;
4685             s += 4;
4686             d += 4;
4687         }
4688
4689         if (w >= 2)
4690         {
4691             *(uint16_t *)d = *(uint16_t *)s;
4692             w -= 2;
4693             s += 2;
4694             d += 2;
4695         }
4696     }
4697
4698     _mm_empty();
4699
4700     return TRUE;
4701 }
4702
4703 static void
4704 sse2_composite_copy_area (pixman_implementation_t *imp,
4705                          pixman_op_t       op,
4706                         pixman_image_t *        src_image,
4707                         pixman_image_t *        mask_image,
4708                         pixman_image_t *        dst_image,
4709                         int32_t         src_x,
4710                         int32_t         src_y,
4711                         int32_t         mask_x,
4712                         int32_t         mask_y,
4713                         int32_t         dest_x,
4714                         int32_t         dest_y,
4715                         int32_t         width,
4716                         int32_t         height)
4717 {
4718     pixman_blt_sse2 (src_image->bits.bits,
4719                     dst_image->bits.bits,
4720                     src_image->bits.rowstride,
4721                     dst_image->bits.rowstride,
4722                     PIXMAN_FORMAT_BPP (src_image->bits.format),
4723                     PIXMAN_FORMAT_BPP (dst_image->bits.format),
4724                     src_x, src_y, dest_x, dest_y, width, height);
4725 }
4726
4727 #if 0
4728 /* This code are buggy in MMX version, now the bug was translated to SSE2 version */
4729 void
4730 sse2_composite_over_x888_8_8888 (pixman_implementation_t *imp,
4731                                  pixman_op_t      op,
4732                                 pixman_image_t * src_image,
4733                                 pixman_image_t * mask_image,
4734                                 pixman_image_t * dst_image,
4735                                 int32_t      src_x,
4736                                 int32_t      src_y,
4737                                 int32_t      mask_x,
4738                                 int32_t      mask_y,
4739                                 int32_t      dest_x,
4740                                 int32_t      dest_y,
4741                                 int32_t     width,
4742                                 int32_t     height)
4743 {
4744     uint32_t    *src, *src_line, s;
4745     uint32_t    *dst, *dst_line, d;
4746     uint8_t         *mask, *mask_line;
4747     uint32_t    m;
4748     int          src_stride, mask_stride, dst_stride;
4749     uint16_t w;
4750
4751     __m128i xmm_src, xmm_src_lo, xmm_src_hi;
4752     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
4753     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
4754
4755     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
4756     PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
4757     PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
4758
4759     while (height--)
4760     {
4761         src = src_line;
4762         src_line += src_stride;
4763         dst = dst_line;
4764         dst_line += dst_stride;
4765         mask = mask_line;
4766         mask_line += mask_stride;
4767
4768         w = width;
4769
4770         /* call prefetch hint to optimize cache load*/
4771         cache_prefetch ((__m128i*)src);
4772         cache_prefetch ((__m128i*)dst);
4773         cache_prefetch ((__m128i*)mask);
4774
4775         while (w && (unsigned long)dst & 15)
4776         {
4777             s = 0xff000000 | *src++;
4778             m = (uint32_t) *mask++;
4779             d = *dst;
4780
4781             __m64 ms = unpack_32_1x64 (s);
4782
4783             if (m != 0xff)
4784             {
4785                 ms = in_over_1x64 (ms,
4786                                   mask_x00ff,
4787                                   expand_alpha_rev_1x64 (unpack_32_1x64 (m)),
4788                                   unpack_32_1x64 (d));
4789             }
4790
4791             *dst++ = pack_1x64_32 (ms);
4792             w--;
4793         }
4794
4795         /* call prefetch hint to optimize cache load*/
4796         cache_prefetch ((__m128i*)src);
4797         cache_prefetch ((__m128i*)dst);
4798         cache_prefetch ((__m128i*)mask);
4799
4800         while (w >= 4)
4801         {
4802             /* fill cache line with next memory */
4803             cache_prefetch_next ((__m128i*)src);
4804             cache_prefetch_next ((__m128i*)dst);
4805             cache_prefetch_next ((__m128i*)mask);
4806
4807             m = *(uint32_t*) mask;
4808             xmm_src = _mm_or_si128 (load_128_unaligned ((__m128i*)src), mask_ff000000);
4809
4810             if (m == 0xffffffff)
4811             {
4812                 save_128_aligned ((__m128i*)dst, xmm_src);
4813             }
4814             else
4815             {
4816                 xmm_dst = load_128_aligned ((__m128i*)dst);
4817
4818                 xmm_mask = _mm_unpacklo_epi16 (unpack_32_1x128 (m), _mm_setzero_si128());
4819
4820                 unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
4821                 unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
4822                 unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
4823
4824                 expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
4825
4826                 in_over_2x128 (xmm_src_lo, xmm_src_hi, mask_00ff, mask_00ff, xmm_mask_lo, xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi);
4827
4828                 save_128_aligned( (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
4829             }
4830
4831             src += 4;
4832             dst += 4;
4833             mask += 4;
4834             w -= 4;
4835         }
4836
4837         while (w)
4838         {
4839             m = (uint32_t) *mask++;
4840
4841             if (m)
4842             {
4843                 s = 0xff000000 | *src;
4844
4845                 if (m == 0xff)
4846                 {
4847                     *dst = s;
4848                 }
4849                 else
4850                 {
4851                     d = *dst;
4852
4853                     *dst = pack_1x64_32 (in_over_1x64 (unpack_32_1x64 (s),
4854                                                       mask_x00ff,
4855                                                       expand_alpha_rev_1x64 (unpack_32_1x64 (m)),
4856                                                       unpack_32_1x64 (d)));
4857                 }
4858
4859             }
4860
4861             src++;
4862             dst++;
4863             w--;
4864         }
4865     }
4866
4867     _mm_empty();
4868 }
4869 #endif
4870
4871 static const pixman_fast_path_t sse2_fast_paths[] =
4872 {
4873     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   sse2_composite_over_n_8_0565,     0 },
4874     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   sse2_composite_over_n_8_0565,     0 },
4875     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_composite_over_n_8888,           0 },
4876     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_over_n_8888,           0 },
4877     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_r5g6b5,   sse2_composite_over_n_0565,           0 },
4878     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_composite_over_8888_8888,          0 },
4879     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_over_8888_8888,          0 },
4880     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, sse2_composite_over_8888_8888,          0 },
4881     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_composite_over_8888_8888,          0 },
4882     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   sse2_composite_over_8888_0565,          0 },
4883     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   sse2_composite_over_8888_0565,          0 },
4884     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_n_8_8888,     0 },
4885     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_over_n_8_8888,     0 },
4886     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_composite_over_n_8_8888,     0 },
4887     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_over_n_8_8888,     0 },
4888 #if 0
4889     /* FIXME: This code are buggy in MMX version, now the bug was translated to SSE2 version */
4890     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_over_x888_8_8888,       0 },
4891     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_x888_8_8888,       0 },
4892     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_over_x888_8_8888,       0 },
4893     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_x888_8_8888,       0 },
4894 #endif
4895     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_x888_n_8888,        NEED_SOLID_MASK },
4896     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_over_x888_n_8888,        NEED_SOLID_MASK },
4897     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_composite_over_x888_n_8888,        NEED_SOLID_MASK },
4898     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_over_x888_n_8888,        NEED_SOLID_MASK },
4899     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_8888_n_8888,        NEED_SOLID_MASK },
4900     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_over_8888_n_8888,        NEED_SOLID_MASK },
4901     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_composite_over_8888_n_8888,        NEED_SOLID_MASK },
4902     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_over_8888_n_8888,        NEED_SOLID_MASK },
4903     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
4904     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
4905     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
4906     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
4907     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   sse2_composite_over_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
4908     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   sse2_composite_over_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
4909     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, sse2_composite_over_pixbuf_8888,     NEED_PIXBUF },
4910     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_a8r8g8b8, sse2_composite_over_pixbuf_8888,     NEED_PIXBUF },
4911     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, sse2_composite_over_pixbuf_8888,     NEED_PIXBUF },
4912     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_x8r8g8b8, sse2_composite_over_pixbuf_8888,     NEED_PIXBUF },
4913     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_a8b8g8r8, sse2_composite_over_pixbuf_8888,     NEED_PIXBUF },
4914     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, sse2_composite_over_pixbuf_8888,     NEED_PIXBUF },
4915     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_x8b8g8r8, sse2_composite_over_pixbuf_8888,     NEED_PIXBUF },
4916     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, sse2_composite_over_pixbuf_8888,     NEED_PIXBUF },
4917     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   sse2_composite_over_pixbuf_0565,     NEED_PIXBUF },
4918     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_r5g6b5,   sse2_composite_over_pixbuf_0565,     NEED_PIXBUF },
4919     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_b5g6r5,   sse2_composite_over_pixbuf_0565,     NEED_PIXBUF },
4920     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   sse2_composite_over_pixbuf_0565,     NEED_PIXBUF },
4921     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_copy_area,               0 },
4922     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_composite_copy_area,               0 },
4923
4924     { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_null,     PIXMAN_a8,       sse2_composite_add_8000_8000,       0 },
4925     { PIXMAN_OP_ADD,  PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_composite_add_8888_8888,       0 },
4926     { PIXMAN_OP_ADD,  PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, sse2_composite_add_8888_8888,       0 },
4927     { PIXMAN_OP_ADD,  PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8,       sse2_composite_add_8888_8_8,        0 },
4928
4929     { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_src_n_8_8888,  0 },
4930     { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_src_n_8_8888,  0 },
4931     { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_composite_src_n_8_8888,  0 },
4932     { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_src_n_8_8888,  0 },
4933     { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_composite_copy_area,               0 },
4934     { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,     PIXMAN_a8b8g8r8, sse2_composite_copy_area,               0 },
4935     { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_copy_area,              0 },
4936     { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_composite_copy_area,              0 },
4937     { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_copy_area,               0 },
4938     { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8,  PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_composite_copy_area,               0 },
4939     { PIXMAN_OP_SRC, PIXMAN_r5g6b5,    PIXMAN_null,     PIXMAN_r5g6b5,   sse2_composite_copy_area,               0 },
4940     { PIXMAN_OP_SRC, PIXMAN_b5g6r5,    PIXMAN_null,     PIXMAN_b5g6r5,   sse2_composite_copy_area,               0 },
4941
4942     { PIXMAN_OP_IN,  PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       sse2_composite_in_8_8,                 0 },
4943     { PIXMAN_OP_IN,  PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8,       sse2_composite_in_n_8_8,               0 },
4944
4945     { PIXMAN_OP_NONE },
4946 };
4947
4948 /*
4949  * Work around GCC bug causing crashes in Mozilla with SSE2
4950  * 
4951  * When using -msse, gcc generates movdqa instructions assuming that
4952  * the stack is 16 byte aligned. Unfortunately some applications, such
4953  * as Mozilla and Mono, end up aligning the stack to 4 bytes, which
4954  * causes the movdqa instructions to fail.
4955  *
4956  * The __force_align_arg_pointer__ makes gcc generate a prologue that
4957  * realigns the stack pointer to 16 bytes.
4958  *
4959  * On x86-64 this is not necessary because the standard ABI already
4960  * calls for a 16 byte aligned stack.
4961  *
4962  * See https://bugs.freedesktop.org/show_bug.cgi?id=15693
4963  */
4964 #if defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__)
4965 __attribute__((__force_align_arg_pointer__))
4966 #endif
4967 static void
4968 sse2_composite (pixman_implementation_t *imp,
4969                 pixman_op_t     op,
4970                 pixman_image_t *src,
4971                 pixman_image_t *mask,
4972                 pixman_image_t *dest,
4973                 int32_t         src_x,
4974                 int32_t         src_y,
4975                 int32_t         mask_x,
4976                 int32_t         mask_y,
4977                 int32_t         dest_x,
4978                 int32_t         dest_y,
4979                 int32_t        width,
4980                 int32_t        height)
4981 {
4982     if (_pixman_run_fast_path (sse2_fast_paths, imp,
4983                                op, src, mask, dest,
4984                                src_x, src_y,
4985                                mask_x, mask_y,
4986                                dest_x, dest_y,
4987                                width, height))
4988     {
4989         return;
4990     }
4991
4992     _pixman_implementation_composite (imp->delegate, op,
4993                                       src, mask, dest,
4994                                       src_x, src_y,
4995                                       mask_x, mask_y,
4996                                       dest_x, dest_y,
4997                                       width, height);
4998 }
4999
5000 #if defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__)
5001 __attribute__((__force_align_arg_pointer__))
5002 #endif
5003 static pixman_bool_t
5004 sse2_blt (pixman_implementation_t *imp,
5005           uint32_t *src_bits,
5006           uint32_t *dst_bits,
5007           int src_stride,
5008           int dst_stride,
5009           int src_bpp,
5010           int dst_bpp,
5011           int src_x, int src_y,
5012           int dst_x, int dst_y,
5013           int width, int height)
5014 {
5015     if (!pixman_blt_sse2 (
5016             src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
5017             src_x, src_y, dst_x, dst_y, width, height))
5018
5019     {
5020         return _pixman_implementation_blt (
5021             imp->delegate,
5022             src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
5023             src_x, src_y, dst_x, dst_y, width, height);
5024     }
5025
5026     return TRUE;
5027 }
5028
5029 #if defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__)
5030 __attribute__((__force_align_arg_pointer__))
5031 #endif
5032 static pixman_bool_t
5033 sse2_fill (pixman_implementation_t *imp,
5034            uint32_t *bits,
5035            int stride,
5036            int bpp,
5037            int x,
5038            int y,
5039            int width,
5040            int height,
5041            uint32_t xor)
5042 {
5043     if (!pixman_fill_sse2 (bits, stride, bpp, x, y, width, height, xor))
5044     {
5045         return _pixman_implementation_fill (
5046             imp->delegate, bits, stride, bpp, x, y, width, height, xor);
5047     }
5048
5049     return TRUE;
5050 }
5051
5052 pixman_implementation_t *
5053 _pixman_implementation_create_sse2 (void)
5054 {
5055     pixman_implementation_t *mmx = _pixman_implementation_create_mmx ();
5056     pixman_implementation_t *imp = _pixman_implementation_create (mmx);
5057
5058     /* SSE2 constants */
5059     mask_565_r  = create_mask_2x32_128 (0x00f80000, 0x00f80000);
5060     mask_565_g1 = create_mask_2x32_128 (0x00070000, 0x00070000);
5061     mask_565_g2 = create_mask_2x32_128 (0x000000e0, 0x000000e0);
5062     mask_565_b  = create_mask_2x32_128 (0x0000001f, 0x0000001f);
5063     mask_red   = create_mask_2x32_128 (0x00f80000, 0x00f80000);
5064     mask_green = create_mask_2x32_128 (0x0000fc00, 0x0000fc00);
5065     mask_blue  = create_mask_2x32_128 (0x000000f8, 0x000000f8);
5066     mask_565_fix_rb = create_mask_2x32_128 (0x00e000e0, 0x00e000e0);
5067     mask_565_fix_g = create_mask_2x32_128  (0x0000c000, 0x0000c000);
5068     mask_0080 = create_mask_16_128 (0x0080);
5069     mask_00ff = create_mask_16_128 (0x00ff);
5070     mask_0101 = create_mask_16_128 (0x0101);
5071     mask_ffff = create_mask_16_128 (0xffff);
5072     mask_ff000000 = create_mask_2x32_128 (0xff000000, 0xff000000);
5073     mask_alpha = create_mask_2x32_128 (0x00ff0000, 0x00000000);
5074     
5075     /* MMX constants */
5076     mask_x565_rgb = create_mask_2x32_64 (0x000001f0, 0x003f001f);
5077     mask_x565_unpack = create_mask_2x32_64 (0x00000084, 0x04100840);
5078     
5079     mask_x0080 = create_mask_16_64 (0x0080);
5080     mask_x00ff = create_mask_16_64 (0x00ff);
5081     mask_x0101 = create_mask_16_64 (0x0101);
5082     mask_x_alpha = create_mask_2x32_64 (0x00ff0000, 0x00000000);
5083
5084     _mm_empty();
5085
5086     /* Set up function pointers */
5087     
5088     /* SSE code patch for fbcompose.c */
5089     imp->combine_32[PIXMAN_OP_OVER] = sse2combine_over_u;
5090     imp->combine_32[PIXMAN_OP_OVER_REVERSE] = sse2combine_over_reverse_u;
5091     imp->combine_32[PIXMAN_OP_IN] = sse2combine_in_u;
5092     imp->combine_32[PIXMAN_OP_IN_REVERSE] = sse2combine_in_reverse_u;
5093     imp->combine_32[PIXMAN_OP_OUT] = sse2combine_out_u;
5094     imp->combine_32[PIXMAN_OP_OUT_REVERSE] = sse2combine_out_reverse_u;
5095     imp->combine_32[PIXMAN_OP_ATOP] = sse2combine_atop_u;
5096     imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = sse2combine_atop_reverse_u;
5097     imp->combine_32[PIXMAN_OP_XOR] = sse2combine_xor_u;
5098     imp->combine_32[PIXMAN_OP_ADD] = sse2combine_add_u;
5099     
5100     imp->combine_32[PIXMAN_OP_SATURATE] = sse2combine_saturate_u;
5101     
5102     imp->combine_32_ca[PIXMAN_OP_SRC] = sse2combine_src_c;
5103     imp->combine_32_ca[PIXMAN_OP_OVER] = sse2combine_over_c;
5104     imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = sse2combine_over_reverse_c;
5105     imp->combine_32_ca[PIXMAN_OP_IN] = sse2combine_in_c;
5106     imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = sse2combine_in_reverse_c;
5107     imp->combine_32_ca[PIXMAN_OP_OUT] = sse2combine_out_c;
5108     imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = sse2combine_out_reverse_c;
5109     imp->combine_32_ca[PIXMAN_OP_ATOP] = sse2combine_atop_c;
5110     imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = sse2combine_atop_reverse_c;
5111     imp->combine_32_ca[PIXMAN_OP_XOR] = sse2combine_xor_c;
5112     imp->combine_32_ca[PIXMAN_OP_ADD] = sse2combine_add_c;
5113     
5114     imp->composite = sse2_composite;
5115     imp->blt = sse2_blt;
5116     imp->fill = sse2_fill;
5117     
5118     return imp;
5119 }
5120
5121 #endif /* USE_SSE2 */