2 * Copyright 2014 Google Inc.
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
8 #include "SkColorPriv.h"
9 #include "SkColor_opts_SSE2.h"
10 #include "SkMathPriv.h"
11 #include "SkMath_opts_SSE2.h"
12 #include "SkXfermode.h"
13 #include "SkXfermode_opts_SSE2.h"
14 #include "SkXfermode_proccoeff.h"
16 ////////////////////////////////////////////////////////////////////////////////
17 // 4 pixels SSE2 version functions
18 ////////////////////////////////////////////////////////////////////////////////
// Rounded divide-by-255 on four packed 32-bit lanes:
// (a + 128 + ((a + 128) >> 8)) >> 8 — the standard exact div255 trick.
// NOTE(review): listing appears truncated; the `return prod;`/closing brace
// are not visible here — confirm against the full source.
20 static inline __m128i SkDiv255Round_SSE2(const __m128i& a) {
21 __m128i prod = _mm_add_epi32(a, _mm_set1_epi32(128)); // prod += 128;
22 prod = _mm_add_epi32(prod, _mm_srli_epi32(prod, 8)); // prod + (prod >> 8)
23 prod = _mm_srli_epi32(prod, 8); // >> 8
// Per-lane 32-bit add clamped to 255, via compare+blend (SSE2 has no
// 32-bit saturating add). Assumes inputs are non-negative byte-range values.
28 static inline __m128i saturated_add_SSE2(const __m128i& a, const __m128i& b) {
29 __m128i sum = _mm_add_epi32(a, b);
30 __m128i cmp = _mm_cmpgt_epi32(sum, _mm_set1_epi32(255));
// Select 255 where sum overflowed, else sum (and/andnot/or blend idiom).
32 sum = _mm_or_si128(_mm_and_si128(cmp, _mm_set1_epi32(255)),
33 _mm_andnot_si128(cmp, sum));
// Clamps each signed 32-bit lane into [0, 255].
37 static inline __m128i clamp_signed_byte_SSE2(const __m128i& n) {
38 __m128i cmp1 = _mm_cmplt_epi32(n, _mm_setzero_si128());   // n < 0
39 __m128i cmp2 = _mm_cmpgt_epi32(n, _mm_set1_epi32(255));   // n > 255
40 __m128i ret = _mm_and_si128(cmp2, _mm_set1_epi32(255));   // 255 where too big, 0 where negative
// Out-of-range lanes take ret (0 or 255); in-range lanes keep n.
42 __m128i cmp = _mm_or_si128(cmp1, cmp2);
43 ret = _mm_or_si128(_mm_and_si128(cmp, ret), _mm_andnot_si128(cmp, n));
// Vector form of SkClampMax(SkDiv255Round(prod), 255) with sign handling:
// prod <= 0 -> 0;  prod >= 255*255 -> 255;  otherwise SkDiv255Round(prod).
48 static inline __m128i clamp_div255round_SSE2(const __m128i& prod) {
50 __m128i cmp1 = _mm_cmpgt_epi32(prod, _mm_setzero_si128());      // prod > 0
52 __m128i cmp2 = _mm_cmplt_epi32(prod, _mm_set1_epi32(255*255));  // prod < 255*255
54 __m128i ret = _mm_setzero_si128();
56 // if value >= 255*255, value = 255
57 ret = _mm_andnot_si128(cmp2, _mm_set1_epi32(255));
59 __m128i div = SkDiv255Round_SSE2(prod);
61 // test if > 0 && < 255*255
62 __m128i cmp = _mm_and_si128(cmp1, cmp2);
64 ret = _mm_or_si128(_mm_and_si128(cmp, div), _mm_andnot_si128(cmp, ret));
// Porter-Duff src-over for 4 packed SkPMColor pixels: src + dst*(256 - sa).
69 static __m128i srcover_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
70 __m128i isa = _mm_sub_epi32(_mm_set1_epi32(256), SkGetPackedA32_SSE2(src));
71 return _mm_add_epi32(src, SkAlphaMulQ_SSE2(dst, isa));
// Porter-Duff dst-over: dst + src*(256 - da). Mirror image of srcover.
74 static __m128i dstover_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
75 __m128i ida = _mm_sub_epi32(_mm_set1_epi32(256), SkGetPackedA32_SSE2(dst));
76 return _mm_add_epi32(dst, SkAlphaMulQ_SSE2(src, ida));
// Porter-Duff src-in: src scaled by dst alpha (alpha widened 255 -> 256 range).
79 static __m128i srcin_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
80 __m128i da = SkGetPackedA32_SSE2(dst);
81 return SkAlphaMulQ_SSE2(src, SkAlpha255To256_SSE2(da));
// Porter-Duff dst-in: dst scaled by src alpha.
84 static __m128i dstin_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
85 __m128i sa = SkGetPackedA32_SSE2(src);
86 return SkAlphaMulQ_SSE2(dst, SkAlpha255To256_SSE2(sa));
// Porter-Duff src-out: src * (256 - da).
89 static __m128i srcout_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
90 __m128i ida = _mm_sub_epi32(_mm_set1_epi32(256), SkGetPackedA32_SSE2(dst));
91 return SkAlphaMulQ_SSE2(src, ida);
// Porter-Duff dst-out: dst * (256 - sa).
94 static __m128i dstout_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
95 __m128i isa = _mm_sub_epi32(_mm_set1_epi32(256), SkGetPackedA32_SSE2(src));
96 return SkAlphaMulQ_SSE2(dst, isa);
// Porter-Duff src-atop per channel: c = sc*da + dc*(255 - sa), div255'd
// inside SkAlphaMulAlpha. NOTE(review): the line computing `a` (the result
// alpha, which for src-atop equals da) is not visible in this listing —
// confirm against the full source.
99 static __m128i srcatop_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
100 __m128i sa = SkGetPackedA32_SSE2(src);
101 __m128i da = SkGetPackedA32_SSE2(dst);
102 __m128i isa = _mm_sub_epi32(_mm_set1_epi32(255), sa);
106 __m128i r1 = SkAlphaMulAlpha_SSE2(da, SkGetPackedR32_SSE2(src));
107 __m128i r2 = SkAlphaMulAlpha_SSE2(isa, SkGetPackedR32_SSE2(dst));
108 __m128i r = _mm_add_epi32(r1, r2);
110 __m128i g1 = SkAlphaMulAlpha_SSE2(da, SkGetPackedG32_SSE2(src));
111 __m128i g2 = SkAlphaMulAlpha_SSE2(isa, SkGetPackedG32_SSE2(dst));
112 __m128i g = _mm_add_epi32(g1, g2);
114 __m128i b1 = SkAlphaMulAlpha_SSE2(da, SkGetPackedB32_SSE2(src));
115 __m128i b2 = SkAlphaMulAlpha_SSE2(isa, SkGetPackedB32_SSE2(dst));
116 __m128i b = _mm_add_epi32(b1, b2);
118 return SkPackARGB32_SSE2(a, r, g, b);
// Porter-Duff dst-atop per channel: c = sc*(255 - da) + dc*sa.
// NOTE(review): the line computing `a` (for dst-atop, a = sa) is not visible
// in this listing — confirm against the full source.
121 static __m128i dstatop_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
122 __m128i sa = SkGetPackedA32_SSE2(src);
123 __m128i da = SkGetPackedA32_SSE2(dst);
124 __m128i ida = _mm_sub_epi32(_mm_set1_epi32(255), da);
128 __m128i r1 = SkAlphaMulAlpha_SSE2(ida, SkGetPackedR32_SSE2(src));
129 __m128i r2 = SkAlphaMulAlpha_SSE2(sa, SkGetPackedR32_SSE2(dst));
130 __m128i r = _mm_add_epi32(r1, r2);
132 __m128i g1 = SkAlphaMulAlpha_SSE2(ida, SkGetPackedG32_SSE2(src));
133 __m128i g2 = SkAlphaMulAlpha_SSE2(sa, SkGetPackedG32_SSE2(dst));
134 __m128i g = _mm_add_epi32(g1, g2);
136 __m128i b1 = SkAlphaMulAlpha_SSE2(ida, SkGetPackedB32_SSE2(src));
137 __m128i b2 = SkAlphaMulAlpha_SSE2(sa, SkGetPackedB32_SSE2(dst));
138 __m128i b = _mm_add_epi32(b1, b2);
140 return SkPackARGB32_SSE2(a, r, g, b);
// Porter-Duff XOR: each pixel keeps only the non-overlapping parts.
// a = sa + da - 2*sa*da/255; c = sc*(255-da) + dc*(255-sa), div255'd.
143 static __m128i xor_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
144 __m128i sa = SkGetPackedA32_SSE2(src);
145 __m128i da = SkGetPackedA32_SSE2(dst);
146 __m128i isa = _mm_sub_epi32(_mm_set1_epi32(255), sa);
147 __m128i ida = _mm_sub_epi32(_mm_set1_epi32(255), da);
149 __m128i a1 = _mm_add_epi32(sa, da);
150 __m128i a2 = SkAlphaMulAlpha_SSE2(sa, da);
151 a2 = _mm_slli_epi32(a2, 1);          // 2 * SkAlphaMulAlpha(sa, da)
152 __m128i a = _mm_sub_epi32(a1, a2);
154 __m128i r1 = SkAlphaMulAlpha_SSE2(ida, SkGetPackedR32_SSE2(src));
155 __m128i r2 = SkAlphaMulAlpha_SSE2(isa, SkGetPackedR32_SSE2(dst));
156 __m128i r = _mm_add_epi32(r1, r2);
158 __m128i g1 = SkAlphaMulAlpha_SSE2(ida, SkGetPackedG32_SSE2(src));
159 __m128i g2 = SkAlphaMulAlpha_SSE2(isa, SkGetPackedG32_SSE2(dst));
160 __m128i g = _mm_add_epi32(g1, g2);
162 __m128i b1 = SkAlphaMulAlpha_SSE2(ida, SkGetPackedB32_SSE2(src));
163 __m128i b2 = SkAlphaMulAlpha_SSE2(isa, SkGetPackedB32_SSE2(dst));
164 __m128i b = _mm_add_epi32(b1, b2);
166 return SkPackARGB32_SSE2(a, r, g, b);
// Plus (additive) mode: per-channel saturating add of src and dst.
169 static __m128i plus_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
170 __m128i b = saturated_add_SSE2(SkGetPackedB32_SSE2(src),
171 SkGetPackedB32_SSE2(dst));
172 __m128i g = saturated_add_SSE2(SkGetPackedG32_SSE2(src),
173 SkGetPackedG32_SSE2(dst));
174 __m128i r = saturated_add_SSE2(SkGetPackedR32_SSE2(src),
175 SkGetPackedR32_SSE2(dst));
176 __m128i a = saturated_add_SSE2(SkGetPackedA32_SSE2(src),
177 SkGetPackedA32_SSE2(dst));
178 return SkPackARGB32_SSE2(a, r, g, b);
// Modulate mode: per-channel multiply (sc*dc/255) on all four channels.
181 static __m128i modulate_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
182 __m128i a = SkAlphaMulAlpha_SSE2(SkGetPackedA32_SSE2(src),
183 SkGetPackedA32_SSE2(dst));
184 __m128i r = SkAlphaMulAlpha_SSE2(SkGetPackedR32_SSE2(src),
185 SkGetPackedR32_SSE2(dst));
186 __m128i g = SkAlphaMulAlpha_SSE2(SkGetPackedG32_SSE2(src),
187 SkGetPackedG32_SSE2(dst));
188 __m128i b = SkAlphaMulAlpha_SSE2(SkGetPackedB32_SSE2(src),
189 SkGetPackedB32_SSE2(dst));
190 return SkPackARGB32_SSE2(a, r, g, b);
// Per-lane signed 32-bit min via compare+blend (SSE2 lacks _mm_min_epi32).
193 static inline __m128i SkMin32_SSE2(const __m128i& a, const __m128i& b) {
194 __m128i cmp = _mm_cmplt_epi32(a, b);
195 return _mm_or_si128(_mm_and_si128(cmp, a), _mm_andnot_si128(cmp, b));
// Single-channel src-over used for result alpha by the blend modes below.
198 static inline __m128i srcover_byte_SSE2(const __m128i& a, const __m128i& b) {
199 // a + b - SkAlphaMulAlpha(a, b);
200 return _mm_sub_epi32(_mm_add_epi32(a, b), SkAlphaMulAlpha_SSE2(a, b));
// Multiply blend for one channel:
// clamp_div255round(sc*(255 - da) + dc*(255 - sa) + sc*dc).
// _mm_mullo_epi16 is safe here: operands are <= 255 so products fit in
// the low 16 bits of each 32-bit lane.
204 static inline __m128i blendfunc_multiply_byte_SSE2(const __m128i& sc, const __m128i& dc,
205 const __m128i& sa, const __m128i& da) {
207 __m128i ret1 = _mm_sub_epi32(_mm_set1_epi32(255), da);
208 ret1 = _mm_mullo_epi16(sc, ret1);          // sc * (255 - da)
211 __m128i ret2 = _mm_sub_epi32(_mm_set1_epi32(255), sa);
212 ret2 = _mm_mullo_epi16(dc, ret2);          // dc * (255 - sa)
215 __m128i ret3 = _mm_mullo_epi16(sc, dc);    // sc * dc
217 __m128i ret = _mm_add_epi32(ret1, ret2);
218 ret = _mm_add_epi32(ret, ret3);
220 return clamp_div255round_SSE2(ret);
// Multiply mode: srcover alpha, blendfunc_multiply on each color channel.
223 static __m128i multiply_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
224 __m128i sa = SkGetPackedA32_SSE2(src);
225 __m128i da = SkGetPackedA32_SSE2(dst);
226 __m128i a = srcover_byte_SSE2(sa, da);
228 __m128i sr = SkGetPackedR32_SSE2(src);
229 __m128i dr = SkGetPackedR32_SSE2(dst);
230 __m128i r = blendfunc_multiply_byte_SSE2(sr, dr, sa, da);
232 __m128i sg = SkGetPackedG32_SSE2(src);
233 __m128i dg = SkGetPackedG32_SSE2(dst);
234 __m128i g = blendfunc_multiply_byte_SSE2(sg, dg, sa, da);
237 __m128i sb = SkGetPackedB32_SSE2(src);
238 __m128i db = SkGetPackedB32_SSE2(dst);
239 __m128i b = blendfunc_multiply_byte_SSE2(sb, db, sa, da);
241 return SkPackARGB32_SSE2(a, r, g, b);
// Screen mode: s + d - s*d/255 on every channel (same formula as the
// srcover alpha helper, applied channel-wise).
244 static __m128i screen_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
245 __m128i a = srcover_byte_SSE2(SkGetPackedA32_SSE2(src),
246 SkGetPackedA32_SSE2(dst));
247 __m128i r = srcover_byte_SSE2(SkGetPackedR32_SSE2(src),
248 SkGetPackedR32_SSE2(dst));
249 __m128i g = srcover_byte_SSE2(SkGetPackedG32_SSE2(src),
250 SkGetPackedG32_SSE2(dst));
251 __m128i b = srcover_byte_SSE2(SkGetPackedB32_SSE2(src),
252 SkGetPackedB32_SSE2(dst));
253 return SkPackARGB32_SSE2(a, r, g, b);
256 // Portable version overlay_byte() is in SkXfermode.cpp.
// Overlay blend for one channel. tmp = sc*(255-da) + dc*(255-sa) is the
// non-overlap term; the overlap term rc is selected per lane:
//   2*dc <= da : 2*sc*dc
//   2*dc >  da : sa*da - 2*(da - dc)*(sa - sc)
257 static inline __m128i overlay_byte_SSE2(const __m128i& sc, const __m128i& dc,
258 const __m128i& sa, const __m128i& da) {
259 __m128i ida = _mm_sub_epi32(_mm_set1_epi32(255), da);
260 __m128i tmp1 = _mm_mullo_epi16(sc, ida);
261 __m128i isa = _mm_sub_epi32(_mm_set1_epi32(255), sa);
262 __m128i tmp2 = _mm_mullo_epi16(dc, isa);
263 __m128i tmp = _mm_add_epi32(tmp1, tmp2);
265 __m128i cmp = _mm_cmpgt_epi32(_mm_slli_epi32(dc, 1), da);  // 2*dc > da ?
266 __m128i rc1 = _mm_slli_epi32(sc, 1);                       // 2 * sc
267 rc1 = Multiply32_SSE2(rc1, dc);                            // *dc
269 __m128i rc2 = _mm_mullo_epi16(sa, da);                     // sa * da
270 __m128i tmp3 = _mm_slli_epi32(_mm_sub_epi32(da, dc), 1);   // 2 * (da - dc)
271 tmp3 = Multiply32_SSE2(tmp3, _mm_sub_epi32(sa, sc));       // * (sa - sc)
272 rc2 = _mm_sub_epi32(rc2, tmp3);
274 __m128i rc = _mm_or_si128(_mm_andnot_si128(cmp, rc1),
275 _mm_and_si128(cmp, rc2));
276 return clamp_div255round_SSE2(_mm_add_epi32(rc, tmp));
// Overlay mode: srcover alpha, overlay_byte on each color channel.
279 static __m128i overlay_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
280 __m128i sa = SkGetPackedA32_SSE2(src);
281 __m128i da = SkGetPackedA32_SSE2(dst);
283 __m128i a = srcover_byte_SSE2(sa, da);
284 __m128i r = overlay_byte_SSE2(SkGetPackedR32_SSE2(src),
285 SkGetPackedR32_SSE2(dst), sa, da);
286 __m128i g = overlay_byte_SSE2(SkGetPackedG32_SSE2(src),
287 SkGetPackedG32_SSE2(dst), sa, da);
288 __m128i b = overlay_byte_SSE2(SkGetPackedB32_SSE2(src),
289 SkGetPackedB32_SSE2(dst), sa, da);
290 return SkPackARGB32_SSE2(a, r, g, b);
// Darken blend for one channel: sc + dc - max(sc*da, dc*sa)/255, selected
// per lane via the sd < ds comparison (keep the smaller premul product).
293 static inline __m128i darken_byte_SSE2(const __m128i& sc, const __m128i& dc,
294 const __m128i& sa, const __m128i& da) {
295 __m128i sd = _mm_mullo_epi16(sc, da);
296 __m128i ds = _mm_mullo_epi16(dc, sa);
298 __m128i cmp = _mm_cmplt_epi32(sd, ds);
300 __m128i tmp = _mm_add_epi32(sc, dc);
301 __m128i ret1 = _mm_sub_epi32(tmp, SkDiv255Round_SSE2(ds));
302 __m128i ret2 = _mm_sub_epi32(tmp, SkDiv255Round_SSE2(sd));
303 __m128i ret = _mm_or_si128(_mm_and_si128(cmp, ret1),
304 _mm_andnot_si128(cmp, ret2));
// Darken mode: srcover alpha, darken_byte on each color channel.
308 static __m128i darken_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
309 __m128i sa = SkGetPackedA32_SSE2(src);
310 __m128i da = SkGetPackedA32_SSE2(dst);
312 __m128i a = srcover_byte_SSE2(sa, da);
313 __m128i r = darken_byte_SSE2(SkGetPackedR32_SSE2(src),
314 SkGetPackedR32_SSE2(dst), sa, da);
315 __m128i g = darken_byte_SSE2(SkGetPackedG32_SSE2(src),
316 SkGetPackedG32_SSE2(dst), sa, da);
317 __m128i b = darken_byte_SSE2(SkGetPackedB32_SSE2(src),
318 SkGetPackedB32_SSE2(dst), sa, da);
319 return SkPackARGB32_SSE2(a, r, g, b);
// Lighten blend for one channel: identical to darken_byte except the
// comparison is inverted (keep the larger premul product).
322 static inline __m128i lighten_byte_SSE2(const __m128i& sc, const __m128i& dc,
323 const __m128i& sa, const __m128i& da) {
324 __m128i sd = _mm_mullo_epi16(sc, da);
325 __m128i ds = _mm_mullo_epi16(dc, sa);
327 __m128i cmp = _mm_cmpgt_epi32(sd, ds);
329 __m128i tmp = _mm_add_epi32(sc, dc);
330 __m128i ret1 = _mm_sub_epi32(tmp, SkDiv255Round_SSE2(ds));
331 __m128i ret2 = _mm_sub_epi32(tmp, SkDiv255Round_SSE2(sd));
332 __m128i ret = _mm_or_si128(_mm_and_si128(cmp, ret1),
333 _mm_andnot_si128(cmp, ret2));
// Lighten mode: srcover alpha, lighten_byte on each color channel.
337 static __m128i lighten_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
338 __m128i sa = SkGetPackedA32_SSE2(src);
339 __m128i da = SkGetPackedA32_SSE2(dst);
341 __m128i a = srcover_byte_SSE2(sa, da);
342 __m128i r = lighten_byte_SSE2(SkGetPackedR32_SSE2(src),
343 SkGetPackedR32_SSE2(dst), sa, da);
344 __m128i g = lighten_byte_SSE2(SkGetPackedG32_SSE2(src),
345 SkGetPackedG32_SSE2(dst), sa, da);
346 __m128i b = lighten_byte_SSE2(SkGetPackedB32_SSE2(src),
347 SkGetPackedB32_SSE2(dst), sa, da);
348 return SkPackARGB32_SSE2(a, r, g, b);
// Color-dodge blend for one channel, three mutually-exclusive lane cases
// combined with masks:
//   dc == 0       : sc * (255 - da) / 255
//   sc == sa      : (sa*da + sc*(255-da) + dc*(255-sa)) / 255, clamped
//   otherwise     : (sa*min(da, dc*sa/(sa-sc)) + sc*(255-da) + dc*(255-sa)) / 255
// shim_mm_div_epi32 performs the per-lane integer division; lanes where it
// is masked out afterwards still execute the division.
351 static inline __m128i colordodge_byte_SSE2(const __m128i& sc, const __m128i& dc,
352 const __m128i& sa, const __m128i& da) {
353 __m128i diff = _mm_sub_epi32(sa, sc);
354 __m128i ida = _mm_sub_epi32(_mm_set1_epi32(255), da);
355 __m128i isa = _mm_sub_epi32(_mm_set1_epi32(255), sa);
// Case 1: dc == 0.
358 __m128i cmp1 = _mm_cmpeq_epi32(dc, _mm_setzero_si128());
359 __m128i rc1 = _mm_and_si128(cmp1, SkAlphaMulAlpha_SSE2(sc, ida));
361 // else if (0 == diff)
362 __m128i cmp2 = _mm_cmpeq_epi32(diff, _mm_setzero_si128());
363 __m128i cmp = _mm_andnot_si128(cmp1, cmp2);
364 __m128i tmp1 = _mm_mullo_epi16(sa, da);
365 __m128i tmp2 = _mm_mullo_epi16(sc, ida);
366 __m128i tmp3 = _mm_mullo_epi16(dc, isa);
367 __m128i rc2 = _mm_add_epi32(tmp1, tmp2);
368 rc2 = _mm_add_epi32(rc2, tmp3);
369 rc2 = clamp_div255round_SSE2(rc2);
370 rc2 = _mm_and_si128(cmp, rc2);
// Case 3: neither of the above.
373 __m128i cmp3 = _mm_or_si128(cmp1, cmp2);
374 __m128i value = _mm_mullo_epi16(dc, sa);
375 diff = shim_mm_div_epi32(value, diff);  // dc*sa / (sa - sc)
377 __m128i tmp4 = SkMin32_SSE2(da, diff);
378 tmp4 = Multiply32_SSE2(sa, tmp4);
379 __m128i rc3 = _mm_add_epi32(tmp4, tmp2);
380 rc3 = _mm_add_epi32(rc3, tmp3);
381 rc3 = clamp_div255round_SSE2(rc3);
382 rc3 = _mm_andnot_si128(cmp3, rc3);
// Merge the disjoint per-lane results.
384 __m128i rc = _mm_or_si128(rc1, rc2);
385 rc = _mm_or_si128(rc, rc3);
// Color-dodge mode: srcover alpha, colordodge_byte on each color channel.
390 static __m128i colordodge_modeproc_SSE2(const __m128i& src,
391 const __m128i& dst) {
392 __m128i sa = SkGetPackedA32_SSE2(src);
393 __m128i da = SkGetPackedA32_SSE2(dst);
395 __m128i a = srcover_byte_SSE2(sa, da);
396 __m128i r = colordodge_byte_SSE2(SkGetPackedR32_SSE2(src),
397 SkGetPackedR32_SSE2(dst), sa, da);
398 __m128i g = colordodge_byte_SSE2(SkGetPackedG32_SSE2(src),
399 SkGetPackedG32_SSE2(dst), sa, da);
400 __m128i b = colordodge_byte_SSE2(SkGetPackedB32_SSE2(src),
401 SkGetPackedB32_SSE2(dst), sa, da);
402 return SkPackARGB32_SSE2(a, r, g, b);
// Color-burn blend for one channel, three mutually-exclusive lane cases:
//   dc == da : (sa*da + sc*(255-da) + dc*(255-sa)) / 255, clamped
//   sc == 0  : dc * (255 - sa) / 255
//   else     : (sa*(da - min(da, (da-dc)*sa/sc)) + sc*(255-da) + dc*(255-sa)) / 255
405 static inline __m128i colorburn_byte_SSE2(const __m128i& sc, const __m128i& dc,
406 const __m128i& sa, const __m128i& da) {
407 __m128i ida = _mm_sub_epi32(_mm_set1_epi32(255), da);
408 __m128i isa = _mm_sub_epi32(_mm_set1_epi32(255), sa);
// Case 1: dc == da.
411 __m128i cmp1 = _mm_cmpeq_epi32(dc, da);
412 __m128i tmp1 = _mm_mullo_epi16(sa, da);
413 __m128i tmp2 = _mm_mullo_epi16(sc, ida);
414 __m128i tmp3 = _mm_mullo_epi16(dc, isa);
415 __m128i rc1 = _mm_add_epi32(tmp1, tmp2);
416 rc1 = _mm_add_epi32(rc1, tmp3);
417 rc1 = clamp_div255round_SSE2(rc1);
418 rc1 = _mm_and_si128(cmp1, rc1);
// Case 2: sc == 0 (and not case 1).
421 __m128i cmp2 = _mm_cmpeq_epi32(sc, _mm_setzero_si128());
422 __m128i rc2 = SkAlphaMulAlpha_SSE2(dc, isa);
423 __m128i cmp = _mm_andnot_si128(cmp1, cmp2);
424 rc2 = _mm_and_si128(cmp, rc2);
// Case 3: general burn term.
427 __m128i cmp3 = _mm_or_si128(cmp1, cmp2);
428 __m128i tmp4 = _mm_sub_epi32(da, dc);
429 tmp4 = Multiply32_SSE2(tmp4, sa);
430 tmp4 = shim_mm_div_epi32(tmp4, sc);  // (da - dc) * sa / sc
432 __m128i tmp5 = _mm_sub_epi32(da, SkMin32_SSE2(da, tmp4));
433 tmp5 = Multiply32_SSE2(sa, tmp5);
434 __m128i rc3 = _mm_add_epi32(tmp5, tmp2);
435 rc3 = _mm_add_epi32(rc3, tmp3);
436 rc3 = clamp_div255round_SSE2(rc3);
437 rc3 = _mm_andnot_si128(cmp3, rc3);
// Merge the disjoint per-lane results.
439 __m128i rc = _mm_or_si128(rc1, rc2);
440 rc = _mm_or_si128(rc, rc3);
// Color-burn mode: srcover alpha, colorburn_byte on each color channel.
445 static __m128i colorburn_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
446 __m128i sa = SkGetPackedA32_SSE2(src);
447 __m128i da = SkGetPackedA32_SSE2(dst);
449 __m128i a = srcover_byte_SSE2(sa, da);
450 __m128i r = colorburn_byte_SSE2(SkGetPackedR32_SSE2(src),
451 SkGetPackedR32_SSE2(dst), sa, da);
452 __m128i g = colorburn_byte_SSE2(SkGetPackedG32_SSE2(src),
453 SkGetPackedG32_SSE2(dst), sa, da);
454 __m128i b = colorburn_byte_SSE2(SkGetPackedB32_SSE2(src),
455 SkGetPackedB32_SSE2(dst), sa, da);
456 return SkPackARGB32_SSE2(a, r, g, b);
// Hard-light blend for one channel (overlay with src/dst roles swapped in
// the branch condition):
//   2*sc <= sa : 2*sc*dc
//   2*sc >  sa : sa*da - 2*(da - dc)*(sa - sc)
// plus the usual non-overlap terms sc*(255-da) + dc*(255-sa).
459 static inline __m128i hardlight_byte_SSE2(const __m128i& sc, const __m128i& dc,
460 const __m128i& sa, const __m128i& da) {
462 __m128i tmp1 = _mm_slli_epi32(sc, 1);          // 2 * sc
463 __m128i cmp1 = _mm_cmpgt_epi32(tmp1, sa);      // 2*sc > sa ?
464 __m128i rc1 = _mm_mullo_epi16(sc, dc);         // sc * dc;
465 rc1 = _mm_slli_epi32(rc1, 1);                  // 2 * sc * dc
466 rc1 = _mm_andnot_si128(cmp1, rc1);
// Other branch: sa*da - 2*(da - dc)*(sa - sc).
469 tmp1 = _mm_mullo_epi16(sa, da);
470 __m128i tmp2 = Multiply32_SSE2(_mm_sub_epi32(da, dc),
471 _mm_sub_epi32(sa, sc));
472 tmp2 = _mm_slli_epi32(tmp2, 1);
473 __m128i rc2 = _mm_sub_epi32(tmp1, tmp2);
474 rc2 = _mm_and_si128(cmp1, rc2);
476 __m128i rc = _mm_or_si128(rc1, rc2);
// Add the non-overlap terms and normalize.
478 __m128i ida = _mm_sub_epi32(_mm_set1_epi32(255), da);
479 tmp1 = _mm_mullo_epi16(sc, ida);
480 __m128i isa = _mm_sub_epi32(_mm_set1_epi32(255), sa);
481 tmp2 = _mm_mullo_epi16(dc, isa);
482 rc = _mm_add_epi32(rc, tmp1);
483 rc = _mm_add_epi32(rc, tmp2);
484 return clamp_div255round_SSE2(rc);
// Hard-light mode: srcover alpha, hardlight_byte on each color channel.
487 static __m128i hardlight_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
488 __m128i sa = SkGetPackedA32_SSE2(src);
489 __m128i da = SkGetPackedA32_SSE2(dst);
491 __m128i a = srcover_byte_SSE2(sa, da);
492 __m128i r = hardlight_byte_SSE2(SkGetPackedR32_SSE2(src),
493 SkGetPackedR32_SSE2(dst), sa, da);
494 __m128i g = hardlight_byte_SSE2(SkGetPackedG32_SSE2(src),
495 SkGetPackedG32_SSE2(dst), sa, da);
496 __m128i b = hardlight_byte_SSE2(SkGetPackedB32_SSE2(src),
497 SkGetPackedB32_SSE2(dst), sa, da);
498 return SkPackARGB32_SSE2(a, r, g, b);
// sqrt of a fixed-point "unit byte" value; 15+4 is the bit count passed to
// SkSqrtBits (see that helper for the fixed-point convention).
501 static __m128i sqrt_unit_byte_SSE2(const __m128i& n) {
502 return SkSqrtBits_SSE2(n, 15+4);
// Soft-light blend for one channel. Follows the portable softlight_byte():
// m = dc*256/da (0 when da == 0), then three lane cases selected by
// 2*sc <= sa and 4*dc <= da, combined with masks, plus the usual
// sc*(255-da) + dc*(255-sa) non-overlap terms.
// NOTE(review): this listing appears to be missing a few original lines
// (e.g. around the rc3 case) — confirm intermediate masks against the
// portable implementation in SkXfermode.cpp.
505 static inline __m128i softlight_byte_SSE2(const __m128i& sc, const __m128i& dc,
506 const __m128i& sa, const __m128i& da) {
507 __m128i tmp1, tmp2, tmp3;
509 // int m = da ? dc * 256 / da : 0;
510 __m128i cmp = _mm_cmpeq_epi32(da, _mm_setzero_si128());
511 __m128i m = _mm_slli_epi32(dc, 8);
// Integer division done in float: SSE2 has no integer divide.
512 __m128 x = _mm_cvtepi32_ps(m);
513 __m128 y = _mm_cvtepi32_ps(da);
514 m = _mm_cvttps_epi32(_mm_div_ps(x, y));
515 m = _mm_andnot_si128(cmp, m);  // force m = 0 where da == 0
// Case 1: 2*sc <= sa -> dc * (sa + ((2*sc - sa) * (256 - m) >> 8))
518 tmp1 = _mm_slli_epi32(sc, 1);                  // 2 * sc
519 __m128i cmp1 = _mm_cmpgt_epi32(tmp1, sa);
520 tmp1 = _mm_sub_epi32(tmp1, sa);                // 2 * sc - sa
521 tmp2 = _mm_sub_epi32(_mm_set1_epi32(256), m);  // 256 - m
522 tmp1 = Multiply32_SSE2(tmp1, tmp2);
523 tmp1 = _mm_srai_epi32(tmp1, 8);
524 tmp1 = _mm_add_epi32(sa, tmp1);
525 tmp1 = Multiply32_SSE2(dc, tmp1);
526 __m128i rc1 = _mm_andnot_si128(cmp1, tmp1);
528 // else if (4 * dc <= da)
529 tmp2 = _mm_slli_epi32(dc, 2);                  // dc * 4
530 __m128i cmp2 = _mm_cmpgt_epi32(tmp2, da);
531 __m128i i = _mm_slli_epi32(m, 2);              // 4 * m
532 __m128i j = _mm_add_epi32(i, _mm_set1_epi32(256)); // 4 * m + 256
533 __m128i k = Multiply32_SSE2(i, j);             // 4 * m * (4 * m + 256)
534 __m128i t = _mm_sub_epi32(m, _mm_set1_epi32(256)); // m - 256
535 i = Multiply32_SSE2(k, t);                     // 4 * m * (4 * m + 256) * (m - 256)
536 i = _mm_srai_epi32(i, 16);                     // >> 16
537 j = Multiply32_SSE2(_mm_set1_epi32(7), m);     // 7 * m
538 tmp2 = _mm_add_epi32(i, j);
539 i = Multiply32_SSE2(dc, sa);                   // dc * sa
540 j = _mm_slli_epi32(sc, 1);                     // 2 * sc
541 j = _mm_sub_epi32(j, sa);                      // 2 * sc - sa
542 j = Multiply32_SSE2(da, j);                    // da * (2 * sc - sa)
543 tmp2 = Multiply32_SSE2(j, tmp2);               // * tmp
544 tmp2 = _mm_srai_epi32(tmp2, 8);                // >> 8
545 tmp2 = _mm_add_epi32(i, tmp2);
546 cmp = _mm_andnot_si128(cmp2, cmp1);            // 2*sc > sa && 4*dc <= da
547 __m128i rc2 = _mm_and_si128(cmp, tmp2);
548 __m128i rc = _mm_or_si128(rc1, rc2);
// Case 3: 2*sc > sa && 4*dc > da -> dc*sa + (sqrt(m) - m) * (da*(2*sc - sa)) >> 8
551 tmp3 = sqrt_unit_byte_SSE2(m);
552 tmp3 = _mm_sub_epi32(tmp3, m);
553 tmp3 = Multiply32_SSE2(j, tmp3);               // j = da * (2 * sc - sa)
554 tmp3 = _mm_srai_epi32(tmp3, 8);
555 tmp3 = _mm_add_epi32(i, tmp3);                 // i = dc * sa
556 cmp = _mm_and_si128(cmp1, cmp2);
557 __m128i rc3 = _mm_and_si128(cmp, tmp3);
558 rc = _mm_or_si128(rc, rc3);
// Non-overlap terms and final clamp/normalize.
560 tmp1 = _mm_sub_epi32(_mm_set1_epi32(255), da); // 255 - da
561 tmp1 = _mm_mullo_epi16(sc, tmp1);
562 tmp2 = _mm_sub_epi32(_mm_set1_epi32(255), sa); // 255 - sa
563 tmp2 = _mm_mullo_epi16(dc, tmp2);
564 rc = _mm_add_epi32(rc, tmp1);
565 rc = _mm_add_epi32(rc, tmp2);
566 return clamp_div255round_SSE2(rc);
// Soft-light mode: srcover alpha, softlight_byte on each color channel.
569 static __m128i softlight_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
570 __m128i sa = SkGetPackedA32_SSE2(src);
571 __m128i da = SkGetPackedA32_SSE2(dst);
573 __m128i a = srcover_byte_SSE2(sa, da);
574 __m128i r = softlight_byte_SSE2(SkGetPackedR32_SSE2(src),
575 SkGetPackedR32_SSE2(dst), sa, da);
576 __m128i g = softlight_byte_SSE2(SkGetPackedG32_SSE2(src),
577 SkGetPackedG32_SSE2(dst), sa, da);
578 __m128i b = softlight_byte_SSE2(SkGetPackedB32_SSE2(src),
579 SkGetPackedB32_SSE2(dst), sa, da);
580 return SkPackARGB32_SSE2(a, r, g, b);
// Difference blend for one channel:
// sc + dc - 2 * div255round(min(sc*da, dc*sa)), clamped to [0, 255].
583 static inline __m128i difference_byte_SSE2(const __m128i& sc, const __m128i& dc,
584 const __m128i& sa, const __m128i& da) {
585 __m128i tmp1 = _mm_mullo_epi16(sc, da);
586 __m128i tmp2 = _mm_mullo_epi16(dc, sa);
587 __m128i tmp = SkMin32_SSE2(tmp1, tmp2);
589 __m128i ret1 = _mm_add_epi32(sc, dc);
590 __m128i ret2 = _mm_slli_epi32(SkDiv255Round_SSE2(tmp), 1);
591 __m128i ret = _mm_sub_epi32(ret1, ret2);
// Result can go negative; clamp into byte range.
593 ret = clamp_signed_byte_SSE2(ret);
// Difference mode: srcover alpha, difference_byte on each color channel.
597 static __m128i difference_modeproc_SSE2(const __m128i& src,
598 const __m128i& dst) {
599 __m128i sa = SkGetPackedA32_SSE2(src);
600 __m128i da = SkGetPackedA32_SSE2(dst);
602 __m128i a = srcover_byte_SSE2(sa, da);
603 __m128i r = difference_byte_SSE2(SkGetPackedR32_SSE2(src),
604 SkGetPackedR32_SSE2(dst), sa, da);
605 __m128i g = difference_byte_SSE2(SkGetPackedG32_SSE2(src),
606 SkGetPackedG32_SSE2(dst), sa, da);
607 __m128i b = difference_byte_SSE2(SkGetPackedB32_SSE2(src),
608 SkGetPackedB32_SSE2(dst), sa, da);
609 return SkPackARGB32_SSE2(a, r, g, b);
// Exclusion blend for one channel: (255*sc + 255*dc - 2*sc*dc) / 255,
// clamped. The alpha parameters are unused (named-less on purpose).
// NOTE(review): the last parameter is a non-const reference, unlike every
// sibling *_byte_SSE2 helper — looks like a missing `const`; confirm.
612 static inline __m128i exclusion_byte_SSE2(const __m128i& sc, const __m128i& dc,
613 const __m128i&, __m128i&) {
614 __m128i tmp1 = _mm_mullo_epi16(_mm_set1_epi32(255), sc); // 255 * sc
615 __m128i tmp2 = _mm_mullo_epi16(_mm_set1_epi32(255), dc); // 255 * dc
616 tmp1 = _mm_add_epi32(tmp1, tmp2);
617 tmp2 = _mm_mullo_epi16(sc, dc);                          // sc * dc
618 tmp2 = _mm_slli_epi32(tmp2, 1);                          // 2 * sc * dc
620 __m128i r = _mm_sub_epi32(tmp1, tmp2);
621 return clamp_div255round_SSE2(r);
// Exclusion mode: srcover alpha, exclusion_byte on each color channel
// (sa/da are passed but ignored by the helper).
624 static __m128i exclusion_modeproc_SSE2(const __m128i& src, const __m128i& dst) {
625 __m128i sa = SkGetPackedA32_SSE2(src);
626 __m128i da = SkGetPackedA32_SSE2(dst);
628 __m128i a = srcover_byte_SSE2(sa, da);
629 __m128i r = exclusion_byte_SSE2(SkGetPackedR32_SSE2(src),
630 SkGetPackedR32_SSE2(dst), sa, da);
631 __m128i g = exclusion_byte_SSE2(SkGetPackedG32_SSE2(src),
632 SkGetPackedG32_SSE2(dst), sa, da);
633 __m128i b = exclusion_byte_SSE2(SkGetPackedB32_SSE2(src),
634 SkGetPackedB32_SSE2(dst), sa, da);
635 return SkPackARGB32_SSE2(a, r, g, b);
638 ////////////////////////////////////////////////////////////////////////////////
640 typedef __m128i (*SkXfermodeProcSIMD)(const __m128i& src, const __m128i& dst);
642 extern SkXfermodeProcSIMD gSSE2XfermodeProcs[];
644 #ifdef SK_SUPPORT_LEGACY_DEEPFLATTENING
// Legacy deep-flattening constructor: after the base class deserializes the
// mode, look up the matching SIMD proc and validate it exists.
645 SkSSE2ProcCoeffXfermode::SkSSE2ProcCoeffXfermode(SkReadBuffer& buffer) : INHERITED(buffer) {
646 fProcSIMD = reinterpret_cast<void*>(gSSE2XfermodeProcs[this->getMode()]);
647 buffer.validate(fProcSIMD != NULL);
// Transfers `count` 32-bit premultiplied pixels: scalar proc until dst is
// 16-byte aligned, then 4-at-a-time with the SIMD proc, then a scalar tail.
// The aa[] (per-pixel coverage) path interpolates each result toward dst.
// NOTE(review): this listing appears truncated — loop headers (e.g. the
// `count >= 4` / aa-null branches) and increments are not visible; the
// comments below describe only what the visible lines show.
651 void SkSSE2ProcCoeffXfermode::xfer32(SkPMColor dst[], const SkPMColor src[],
652 int count, const SkAlpha aa[]) const {
653 SkASSERT(dst && src && count >= 0);
655 SkXfermodeProc proc = this->getProc();
656 SkXfermodeProcSIMD procSIMD = reinterpret_cast<SkXfermodeProcSIMD>(fProcSIMD);
657 SkASSERT(procSIMD != NULL);
// Scalar-align dst to a 16-byte boundary before the SIMD loop.
661 while (((size_t)dst & 0x0F) != 0) {
662 *dst = proc(*src, *dst);
// SIMD main loop: unaligned src load, aligned dst load/store, 4 px/iter.
668 const __m128i* s = reinterpret_cast<const __m128i*>(src);
669 __m128i* d = reinterpret_cast<__m128i*>(dst);
672 __m128i src_pixel = _mm_loadu_si128(s++);
673 __m128i dst_pixel = _mm_load_si128(d);
675 dst_pixel = procSIMD(src_pixel, dst_pixel);
676 _mm_store_si128(d++, dst_pixel);
// Restore scalar pointers for the tail.
680 src = reinterpret_cast<const SkPMColor*>(s);
681 dst = reinterpret_cast<SkPMColor*>(d);
// Scalar tail for the remaining (< 4) pixels.
684 for (int i = count - 1; i >= 0; --i) {
685 *dst = proc(*src, *dst);
// Transfers `count` 565 (16-bit) dst pixels against 32-bit src: each dst
// pixel is widened to 8888, blended, then repacked to 565. Same structure
// as xfer32 — scalar align, 8 px/iter SIMD loop, scalar tail, aa path.
// NOTE(review): listing appears truncated (loop headers/increments and the
// aa-branch are partially missing); comments describe visible lines only.
704 void SkSSE2ProcCoeffXfermode::xfer16(uint16_t dst[], const SkPMColor src[],
705 int count, const SkAlpha aa[]) const {
706 SkASSERT(dst && src && count >= 0);
708 SkXfermodeProc proc = this->getProc();
709 SkXfermodeProcSIMD procSIMD = reinterpret_cast<SkXfermodeProcSIMD>(fProcSIMD);
710 SkASSERT(procSIMD != NULL);
// Scalar-align dst: widen 565 -> 8888, blend, repack.
714 while (((size_t)dst & 0x0F) != 0) {
715 SkPMColor dstC = SkPixel16ToPixel32(*dst);
716 *dst = SkPixel32ToPixel16_ToU16(proc(*src, dstC));
// SIMD loop: one aligned 128-bit dst load holds eight 565 pixels, which
// need two 32-bit src vectors and two proc calls.
722 const __m128i* s = reinterpret_cast<const __m128i*>(src);
723 __m128i* d = reinterpret_cast<__m128i*>(dst);
726 __m128i src_pixel1 = _mm_loadu_si128(s++);
727 __m128i src_pixel2 = _mm_loadu_si128(s++);
728 __m128i dst_pixel = _mm_load_si128(d);
730 __m128i dst_pixel1 = _mm_unpacklo_epi16(dst_pixel, _mm_setzero_si128());
731 __m128i dst_pixel2 = _mm_unpackhi_epi16(dst_pixel, _mm_setzero_si128());
733 __m128i dstC1 = SkPixel16ToPixel32_SSE2(dst_pixel1);
734 __m128i dstC2 = SkPixel16ToPixel32_SSE2(dst_pixel2);
736 dst_pixel1 = procSIMD(src_pixel1, dstC1);
737 dst_pixel2 = procSIMD(src_pixel2, dstC2);
738 dst_pixel = SkPixel32ToPixel16_ToU16_SSE2(dst_pixel1, dst_pixel2);
740 _mm_store_si128(d++, dst_pixel);
// Restore scalar pointers for the tail.
744 src = reinterpret_cast<const SkPMColor*>(s);
745 dst = reinterpret_cast<uint16_t*>(d);
// Scalar tail.
748 for (int i = count - 1; i >= 0; --i) {
749 SkPMColor dstC = SkPixel16ToPixel32(*dst);
750 *dst = SkPixel32ToPixel16_ToU16(proc(*src, dstC));
// Coverage (aa) path: blend, then interpolate toward dst by coverage.
755 for (int i = count - 1; i >= 0; --i) {
758 SkPMColor dstC = SkPixel16ToPixel32(dst[i]);
759 SkPMColor C = proc(src[i], dstC);
761 C = SkFourByteInterp(C, dstC, a);
763 dst[i] = SkPixel32ToPixel16_ToU16(C);
769 #ifndef SK_IGNORE_TO_STRING
// Debug-string support: delegates to the base class implementation.
770 void SkSSE2ProcCoeffXfermode::toString(SkString* str) const {
771 this->INHERITED::toString(str);
775 ////////////////////////////////////////////////////////////////////////////////
777 // 4 pixels modeprocs with SSE2
// Dispatch table indexed by SkXfermode::Mode. NULL entries fall back to the
// portable implementation. NOTE(review): this listing shows gaps in the
// entry numbering (e.g. the Clear/Src/Dst/SrcIn/DstIn/Xor/Plus and
// Hue/Color slots are not visible) — entry order must exactly match the
// SkXfermode::Mode enum; confirm against the full source.
778 SkXfermodeProcSIMD gSSE2XfermodeProcs[] = {
782 srcover_modeproc_SSE2,
783 dstover_modeproc_SSE2,
786 srcout_modeproc_SSE2,
787 dstout_modeproc_SSE2,
788 srcatop_modeproc_SSE2,
789 dstatop_modeproc_SSE2,
792 modulate_modeproc_SSE2,
793 screen_modeproc_SSE2,
795 overlay_modeproc_SSE2,
796 darken_modeproc_SSE2,
797 lighten_modeproc_SSE2,
798 colordodge_modeproc_SSE2,
799 colorburn_modeproc_SSE2,
800 hardlight_modeproc_SSE2,
801 softlight_modeproc_SSE2,
802 difference_modeproc_SSE2,
803 exclusion_modeproc_SSE2,
804 multiply_modeproc_SSE2,
807 NULL, // kSaturation_Mode
809 NULL, // kLuminosity_Mode
812 SkProcCoeffXfermode* SkPlatformXfermodeFactory_impl_SSE2(const ProcCoeff& rec,
813 SkXfermode::Mode mode) {
814 void* procSIMD = reinterpret_cast<void*>(gSSE2XfermodeProcs[mode]);
816 if (procSIMD != NULL) {
817 return SkNEW_ARGS(SkSSE2ProcCoeffXfermode, (rec, mode, procSIMD));