From: msarett
Date: Mon, 25 Jan 2016 16:54:50 +0000 (-0800)
Subject: Revert of AVX 2 SrcOver blits: color32, blitmask. (patchset #24 id:450001 of https...
X-Git-Tag: accepted/tizen/5.0/unified/20181102.025319~129^2~2404
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=0dfffbeeec3078f6a03e83a4efa4c45fefdd338d;p=platform%2Fupstream%2FlibSkiaSharp.git

Revert of AVX 2 SrcOver blits: color32, blitmask. (patchset #24 id:450001 of https://codereview.chromium.org/1532613002/ )

Reason for revert:
Bot failures

Original issue's description:
> AVX 2 SrcOver blits: color32, blitmask.
>
> As a follow up to the SSE 4.1 CL, this should look pretty familiar.
>
> I've made some organizational changes around how we load, store, pack, and unpack
> data that I think makes things clearer and more orthogonal, and it'll make it
> easier to try out a pmaddubsw lerp.  I have backported these changes to the
> SSE 4.1 code, and I hope that I can actually get a lot of this code templated
> for sharing between the two later.
>
> Perf changes (relative to SSE 4.1):
>   Xfermode_SrcOver:      1650 -> 1180 (0.71x)   // large opaque blit
>   Xfermode_SrcOver_aa:   1794 -> 1653 (0.92x)   // large opaque + small transparent
>   text_16_AA_{FF,BK,WT}: 1.72 -> 1.59  (0.92x)   // small opaque blit
>   text_16_AA_88:         1.83 -> 1.77  (0.97x)   // small transparent blit
>
> This should be a big throughput win, and a small latency win.
> This should all be pixel-exact to the previous SSE 4.1 code.
>
> GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1532613002
> CQ_EXTRA_TRYBOTS=client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-SKNX_NO_SIMD-Trybot;client.skia.compile:Build-Ubuntu-GCC-x86_64-Release-CMake-Trybot,Build-Mac10.9-Clang-x86_64-Release-CMake-Trybot
>
> Committed: https://skia.googlesource.com/skia/+/5d2117015eb271e09faf4a7ddd89093c9d618a36

TBR=herb@google.com,mtklein@google.com,mtklein@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true

Review URL: https://codereview.chromium.org/1632713002
---

diff --git a/gyp/opts.gyp b/gyp/opts.gyp
index ae4b294..396a37e 100644
--- a/gyp/opts.gyp
+++ b/gyp/opts.gyp
@@ -149,7 +149,7 @@
       ],
       'sources': [ '<@(avx_sources)' ],
       'msvs_settings': { 'VCCLCompilerTool': { 'EnableEnhancedInstructionSet': '3' } },
-      'xcode_settings': { 'OTHER_CPLUSPLUSFLAGS': [ '-mavx' ] },
+      'xcode_settings': { 'OTHER_CFLAGS': [ '-mavx' ] },
       'conditions': [
        [ 'not skia_android_framework', { 'cflags': [ '-mavx' ] }],
       ],
@@ -167,7 +167,7 @@
       ],
       'sources': [ '<@(avx2_sources)' ],
       'msvs_settings': { 'VCCLCompilerTool': { 'EnableEnhancedInstructionSet': '5' } },
-      'xcode_settings': { 'OTHER_CPLUSPLUSFLAGS': [ '-mavx2' ] },
+      'xcode_settings': { 'OTHER_CFLAGS': [ '-mavx2' ] },
       'conditions': [
        [ 'not skia_android_framework', { 'cflags': [ '-mavx2' ] }],
       ],
diff --git a/gyp/opts.gypi b/gyp/opts.gypi
index 1a0d030..f2d36b9 100644
--- a/gyp/opts.gypi
+++ b/gyp/opts.gypi
@@ -60,6 +60,6 @@
     '<(skia_src_path)/opts/SkOpts_avx.cpp',
   ],
   'avx2_sources': [
-    '<(skia_src_path)/opts/SkOpts_avx2.cpp',
+    '<(skia_src_path)/core/SkForceCPlusPlusLinking.cpp',
   ],
 }
diff --git a/src/core/SkOpts.cpp b/src/core/SkOpts.cpp
index 674a1b7..28dd1af 100644
--- a/src/core/SkOpts.cpp
+++ b/src/core/SkOpts.cpp
@@ -92,7 +92,7 @@ namespace SkOpts {
     void Init_sse41();
     void Init_sse42() {}
     void Init_avx();
-    void Init_avx2();
+    void Init_avx2() {}
    void Init_neon();
 
     static void init() {
diff --git a/src/opts/SkOpts_avx2.cpp b/src/opts/SkOpts_avx2.cpp
deleted file mode 100644
index b943317..0000000
--- a/src/opts/SkOpts_avx2.cpp
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "SkOpts.h"
-#define SK_OPTS_NS sk_avx2
-
-#ifndef SK_SUPPORT_LEGACY_X86_BLITS
-
-namespace sk_avx2 {
-
-// AVX2 has masked loads and stores.  We'll use them for N<4 pixels.
-static __m128i mask(int n) {
-    static const int masks[][4] = {
-        { 0, 0, 0, 0},
-        {~0, 0, 0, 0},
-        {~0,~0, 0, 0},
-        {~0,~0,~0, 0},
-    };
-    return _mm_load_si128((const __m128i*)masks+n);
-}
-
-// Load 8, 4, or 1-3 constant pixels or coverages (4x replicated).
-static __m256i next8(    uint32_t val) { return _mm256_set1_epi32(val); }
-static __m128i next4(    uint32_t val) { return _mm_set1_epi32(val); }
-static __m128i tail(int, uint32_t val) { return _mm_set1_epi32(val); }
-
-static __m256i next8(    uint8_t val) { return _mm256_set1_epi8(val); }
-static __m128i next4(    uint8_t val) { return _mm_set1_epi8(val); }
-static __m128i tail(int, uint8_t val) { return _mm_set1_epi8(val); }
-
-// Load 8, 4, or 1-3 variable pixels or coverages (4x replicated).
-// next8() and next4() increment their pointer past what they just read.  tail() doesn't bother.
-static __m256i next8(const uint32_t*& ptr) {
-    auto r = _mm256_loadu_si256((const __m256i*)ptr);
-    ptr += 8;
-    return r;
-}
-static __m128i next4(const uint32_t*& ptr) {
-    auto r = _mm_loadu_si128((const __m128i*)ptr);
-    ptr += 4;
-    return r;
-}
-static __m128i tail(int n, const uint32_t* ptr) {
-    return _mm_maskload_epi32((const int*)ptr, mask(n));
-}
-
-static __m256i next8(const uint8_t*& ptr) {
-    auto r = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*)ptr));
-    r = _mm256_shuffle_epi8(r, _mm256_setr_epi8(0,0,0,0, 4,4,4,4, 8,8,8,8, 12,12,12,12,
-                                                0,0,0,0, 4,4,4,4, 8,8,8,8, 12,12,12,12));
-    ptr += 8;
-    return r;
-}
-static __m128i next4(const uint8_t*& ptr) {
-    auto r = _mm_shuffle_epi8(_mm_cvtsi32_si128(*(const uint32_t*)ptr),
-                              _mm_setr_epi8(0,0,0,0, 1,1,1,1, 2,2,2,2, 3,3,3,3));
-    ptr += 4;
-    return r;
-}
-static __m128i tail(int n, const uint8_t* ptr) {
-    uint32_t x = 0;
-    switch (n) {
-        case 3: x |= (uint32_t)ptr[2] << 16;
-        case 2: x |= (uint32_t)ptr[1] <<  8;
-        case 1: x |= (uint32_t)ptr[0] <<  0;
-    }
-    auto p = (const uint8_t*)&x;
-    return next4(p);
-}
-
-// For i = 0...n, tgt = fn(dst,src,cov), where Dst,Src,and Cov can be constants or arrays.
-template <typename Dst, typename Src, typename Cov, typename Fn>
-static void loop(int n, uint32_t* t, const Dst dst, const Src src, const Cov cov, Fn&& fn) {
-    // We don't want to muck with the callers' pointers, so we make them const and copy here.
-    Dst d = dst;
-    Src s = src;
-    Cov c = cov;
-
-    // Writing this as a single while-loop helps hoist loop invariants from fn.
-    while (n) {
-        if (n >= 8) {
-            _mm256_storeu_si256((__m256i*)t, fn(next8(d), next8(s), next8(c)));
-            t += 8;
-            n -= 8;
-            continue;
-        }
-        if (n >= 4) {
-            _mm_storeu_si128((__m128i*)t, fn(next4(d), next4(s), next4(c)));
-            t += 4;
-            n -= 4;
-        }
-        if (n) {
-            _mm_maskstore_epi32((int*)t, mask(n), fn(tail(n,d), tail(n,s), tail(n,c)));
-        }
-        return;
-    }
-}
-
-//                                           packed                                          //
-// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
-//                                          unpacked                                          //
-
-// Everything on the packed side of the squiggly line deals with densely packed 8-bit data,
-// e.g [ BGRA bgra ... ] for pixels or [ CCCC cccc ... ] for coverage.
-//
-// Everything on the unpacked side of the squiggly line deals with unpacked 8-bit data,
-// e.g. [ B_G_ R_A_ b_g_ r_a_ ... ] for pixels or [ C_C_ C_C_ c_c_ c_c_ ... ] for coverage,
-// where _ is a zero byte.
-//
-// Adapt<Fn> / adapt(fn) allow the two sides to interoperate,
-// by unpacking arguments, calling fn, then packing the results.
-//
-// This lets us write most of our code in terms of unpacked inputs (considerably simpler)
-// and all the packing and unpacking is handled automatically.
-
-template <typename Fn>
-struct Adapt {
-    Fn fn;
-
-    __m256i operator()(__m256i d, __m256i s, __m256i c) {
-        auto lo = [](__m256i x) { return _mm256_unpacklo_epi8(x, _mm256_setzero_si256()); };
-        auto hi = [](__m256i x) { return _mm256_unpackhi_epi8(x, _mm256_setzero_si256()); };
-        return _mm256_packus_epi16(fn(lo(d), lo(s), lo(c)),
-                                   fn(hi(d), hi(s), hi(c)));
-    }
-
-    __m128i operator()(__m128i d, __m128i s, __m128i c) {
-        auto unpack = [](__m128i x) { return _mm256_cvtepu8_epi16(x); };
-        auto pack   = [](__m256i x) {
-            auto x01 = x,
-                 x23 = _mm256_permute4x64_epi64(x, 0xe);  // 0b1110
-            return _mm256_castsi256_si128(_mm256_packus_epi16(x01, x23));
-        };
-        return pack(fn(unpack(d), unpack(s), unpack(c)));
-    }
-};
-
-template <typename Fn>
-static Adapt<Fn> adapt(Fn&& fn) { return { fn }; }
-
-// These helpers all work exclusively with unpacked 8-bit values,
-// except div255() which is 16-bit -> unpacked 8-bit, and mul255() which is the reverse.
-
-// Divide by 255 with rounding.
-// (x+127)/255 == ((x+128)*257)>>16.
-// Sometimes we can be more efficient by breaking this into two parts.
-static __m256i div255_part1(__m256i x) { return _mm256_add_epi16 (x, _mm256_set1_epi16(128)); }
-static __m256i div255_part2(__m256i x) { return _mm256_mulhi_epu16(x, _mm256_set1_epi16(257)); }
-static __m256i div255(__m256i x) { return div255_part2(div255_part1(x)); }
-
-// (x*y+127)/255, a byte multiply.
-static __m256i scale(__m256i x, __m256i y) { return div255(_mm256_mullo_epi16(x, y)); }
-
-// (255 * x).
-static __m256i mul255(__m256i x) { return _mm256_sub_epi16(_mm256_slli_epi16(x, 8), x); }
-
-// (255 - x).
-static __m256i inv(__m256i x) { return _mm256_xor_si256(_mm256_set1_epi16(0x00ff), x); }
-
-// ARGB argb ... -> AAAA aaaa ...
-static __m256i alphas(__m256i px) {
-    const int a = 2 * (SK_A32_SHIFT/8);  // SK_A32_SHIFT is typically 24, so this is typically 6.
-    const int _ = ~0;
-    return _mm256_shuffle_epi8(px, _mm256_setr_epi8(a+0,_,a+0,_,a+0,_,a+0,_,
-                                                    a+8,_,a+8,_,a+8,_,a+8,_,
-                                                    a+0,_,a+0,_,a+0,_,a+0,_,
-                                                    a+8,_,a+8,_,a+8,_,a+8,_));
-}
-
-
-// SrcOver, with a constant source and full coverage.
-static void blit_row_color32(SkPMColor* tgt, const SkPMColor* dst, int n, SkPMColor src) {
-    // We want to calculate s + (d * inv(alphas(s)) + 127)/255.
-    // We'd generally do that div255 as s + ((d * inv(alphas(s)) + 128)*257)>>16.
-
-    // But we can go one step further to ((s*255 + 128 + d*inv(alphas(s)))*257)>>16.
-    // This lets us hoist (s*255+128) and inv(alphas(s)) out of the loop.
-    auto s         = _mm256_cvtepu8_epi16(_mm_set1_epi32(src)),
-         s_255_128 = div255_part1(mul255(s)),
-         A         = inv(alphas(s));
-
-    const uint8_t cov = 0xff;
-    loop(n, tgt, dst, src, cov, adapt([=](__m256i d, __m256i, __m256i) {
-        return div255_part2(_mm256_add_epi16(s_255_128, _mm256_mullo_epi16(d, A)));
-    }));
-}
-
-// SrcOver, with a constant source and variable coverage.
-// If the source is opaque, SrcOver becomes Src.
-static void blit_mask_d32_a8(SkPMColor* dst, size_t dstRB,
-                             const SkAlpha* cov, size_t covRB,
-                             SkColor color, int w, int h) {
-    if (SkColorGetA(color) == 0xFF) {
-        const SkPMColor src = SkSwizzle_BGRA_to_PMColor(color);
-        while (h --> 0) {
-            loop(w, dst, (const SkPMColor*)dst, src, cov,
-                 adapt([](__m256i d, __m256i s, __m256i c) {
-                // Src blend mode: a simple lerp from d to s by c.
-                // TODO: try a pmaddubsw version?
-                return div255(_mm256_add_epi16(_mm256_mullo_epi16(inv(c),d),
-                                               _mm256_mullo_epi16(    c ,s)));
-            }));
-            dst += dstRB / sizeof(*dst);
-            cov += covRB / sizeof(*cov);
-        }
-    } else {
-        const SkPMColor src = SkPreMultiplyColor(color);
-        while (h --> 0) {
-            loop(w, dst, (const SkPMColor*)dst, src, cov,
-                 adapt([](__m256i d, __m256i s, __m256i c) {
-                // SrcOver blend mode, with coverage folded into source alpha.
-                auto sc = scale(s,c),
-                     AC = inv(alphas(sc));
-                return _mm256_add_epi16(sc, scale(d,AC));
-            }));
-            dst += dstRB / sizeof(*dst);
-            cov += covRB / sizeof(*cov);
-        }
-    }
-}
-
-}  // namespace sk_avx2
-
-#endif
-
-namespace SkOpts {
-    void Init_avx2() {
-    #ifndef SK_SUPPORT_LEGACY_X86_BLITS
-        blit_row_color32 = sk_avx2::blit_row_color32;
-        blit_mask_d32_a8 = sk_avx2::blit_mask_d32_a8;
-    #endif
-    }
-}
diff --git a/src/opts/SkOpts_sse41.cpp b/src/opts/SkOpts_sse41.cpp
index f097e56..16ba87a 100644
--- a/src/opts/SkOpts_sse41.cpp
+++ b/src/opts/SkOpts_sse41.cpp
@@ -12,67 +12,88 @@
 #ifndef SK_SUPPORT_LEGACY_X86_BLITS
 
-namespace sk_sse41 {
+// This file deals mostly with unpacked 8-bit values,
+// i.e. values between 0 and 255, but in 16-bit lanes with 0 at the top.
+
+// So __m128i typically represents 1 or 2 pixels, and m128ix2 represents 4.
+struct m128ix2 { __m128i lo, hi; };
+
+// unpack{lo,hi}() get our raw pixels unpacked, from half of 4 packed pixels to 2 unpacked pixels.
+static inline __m128i unpacklo(__m128i x) { return _mm_cvtepu8_epi16(x); }
+static inline __m128i unpackhi(__m128i x) { return _mm_unpackhi_epi8(x, _mm_setzero_si128()); }
+
+// pack() converts back, from 4 unpacked pixels to 4 packed pixels.
+static inline __m128i pack(__m128i lo, __m128i hi) { return _mm_packus_epi16(lo, hi); }
+
+// These nextN() functions abstract over the difference between iterating over
+// an array of values and returning a constant value, for uint8_t and uint32_t.
+// The nextN() taking pointers increment that pointer past where they read.
+//
+// nextN() returns N unpacked pixels or 4N unpacked coverage values.
+
+static inline __m128i next1(uint8_t val) { return _mm_set1_epi16(val); }
+static inline __m128i next2(uint8_t val) { return _mm_set1_epi16(val); }
+static inline m128ix2 next4(uint8_t val) { return { next2(val), next2(val) }; }
+
+static inline __m128i next1(uint32_t val) { return unpacklo(_mm_cvtsi32_si128(val)); }
+static inline __m128i next2(uint32_t val) { return unpacklo(_mm_set1_epi32(val)); }
+static inline m128ix2 next4(uint32_t val) { return { next2(val), next2(val) }; }
 
-// An SSE register holding at most 64 bits of useful data in the low lanes.
-struct m64i {
-    __m128i v;
-    /*implicit*/ m64i(__m128i v) : v(v) {}
-    operator __m128i() const { return v; }
-};
-
-// Load 4, 2, or 1 constant pixels or coverages (4x replicated).
-static __m128i next4(uint32_t val) { return _mm_set1_epi32(val); }
-static m64i    next2(uint32_t val) { return _mm_set1_epi32(val); }
-static m64i    next1(uint32_t val) { return _mm_set1_epi32(val); }
-
-static __m128i next4(uint8_t val) { return _mm_set1_epi8(val); }
-static m64i    next2(uint8_t val) { return _mm_set1_epi8(val); }
-static m64i    next1(uint8_t val) { return _mm_set1_epi8(val); }
-
-// Load 4, 2, or 1 variable pixels or coverages (4x replicated),
-// incrementing the pointer past what we read.
-static __m128i next4(const uint32_t*& ptr) {
-    auto r = _mm_loadu_si128((const __m128i*)ptr);
+static inline __m128i next1(const uint8_t*& ptr) { return _mm_set1_epi16(*ptr++); }
+static inline __m128i next2(const uint8_t*& ptr) {
+    auto r = _mm_cvtsi32_si128(*(const uint16_t*)ptr);
+    ptr += 2;
+    const int _ = ~0;
+    return _mm_shuffle_epi8(r, _mm_setr_epi8(0,_,0,_,0,_,0,_, 1,_,1,_,1,_,1,_));
+}
+static inline m128ix2 next4(const uint8_t*& ptr) {
+    auto r = _mm_cvtsi32_si128(*(const uint32_t*)ptr);
     ptr += 4;
-    return r;
+    const int _ = ~0;
+    auto lo = _mm_shuffle_epi8(r, _mm_setr_epi8(0,_,0,_,0,_,0,_, 1,_,1,_,1,_,1,_)),
+         hi = _mm_shuffle_epi8(r, _mm_setr_epi8(2,_,2,_,2,_,2,_, 3,_,3,_,3,_,3,_));
+    return { lo, hi };
 }
-static m64i next2(const uint32_t*& ptr) {
-    auto r = _mm_loadl_epi64((const __m128i*)ptr);
+
+static inline __m128i next1(const uint32_t*& ptr) { return unpacklo(_mm_cvtsi32_si128(*ptr++)); }
+static inline __m128i next2(const uint32_t*& ptr) {
+    auto r = unpacklo(_mm_loadl_epi64((const __m128i*)ptr));
     ptr += 2;
     return r;
 }
-static m64i next1(const uint32_t*& ptr) {
-    auto r = _mm_cvtsi32_si128(*ptr);
-    ptr += 1;
-    return r;
+static inline m128ix2 next4(const uint32_t*& ptr) {
+    auto packed = _mm_loadu_si128((const __m128i*)ptr);
+    ptr += 4;
+    return { unpacklo(packed), unpackhi(packed) };
 }
 
-// xyzw -> xxxx yyyy zzzz wwww
-static __m128i replicate_coverage(__m128i xyzw) {
-    const uint8_t mask[] = { 0,0,0,0, 1,1,1,1, 2,2,2,2, 3,3,3,3 };
-    return _mm_shuffle_epi8(xyzw, _mm_load_si128((const __m128i*)mask));
-}
+// Divide by 255 with rounding.
+// (x+127)/255 == ((x+128)*257)>>16.
+// Sometimes we can be more efficient by breaking this into two parts.
+static inline __m128i div255_part1(__m128i x) { return _mm_add_epi16(x, _mm_set1_epi16(128)); }
+static inline __m128i div255_part2(__m128i x) { return _mm_mulhi_epu16(x, _mm_set1_epi16(257)); }
+static inline __m128i div255(__m128i x) { return div255_part2(div255_part1(x)); }
 
-static __m128i next4(const uint8_t*& ptr) {
-    auto r = replicate_coverage(_mm_cvtsi32_si128(*(const uint32_t*)ptr));
-    ptr += 4;
-    return r;
+// (x*y+127)/255, a byte multiply.
+static inline __m128i scale(__m128i x, __m128i y) {
+    return div255(_mm_mullo_epi16(x, y));
 }
-static m64i next2(const uint8_t*& ptr) {
-    auto r = replicate_coverage(_mm_cvtsi32_si128(*(const uint16_t*)ptr));
-    ptr += 2;
-    return r;
+
+// (255 - x).
+static inline __m128i inv(__m128i x) {
+    return _mm_xor_si128(_mm_set1_epi16(0x00ff), x);  // This seems a bit faster than _mm_sub_epi16.
 }
-static m64i next1(const uint8_t*& ptr) {
-    auto r = replicate_coverage(_mm_cvtsi32_si128(*ptr));
-    ptr += 1;
-    return r;
+
+// ARGB argb -> AAAA aaaa
+static inline __m128i alphas(__m128i px) {
+    const int a = 2 * (SK_A32_SHIFT/8);  // SK_A32_SHIFT is typically 24, so this is typically 6.
+    const int _ = ~0;
+    return _mm_shuffle_epi8(px, _mm_setr_epi8(a+0,_,a+0,_,a+0,_,a+0,_, a+8,_,a+8,_,a+8,_,a+8,_));
 }
 
 // For i = 0...n, tgt = fn(dst,src,cov), where Dst,Src,and Cov can be constants or arrays.
 template <typename Dst, typename Src, typename Cov, typename Fn>
-static void loop(int n, uint32_t* t, const Dst dst, const Src src, const Cov cov, Fn&& fn) {
+static inline void loop(int n, uint32_t* t, const Dst dst, const Src src, const Cov cov, Fn&& fn) {
     // We don't want to muck with the callers' pointers, so we make them const and copy here.
     Dst d = dst;
     Src s = src;
@@ -81,85 +102,30 @@
     // Writing this as a single while-loop helps hoist loop invariants from fn.
     while (n) {
         if (n >= 4) {
-            _mm_storeu_si128((__m128i*)t, fn(next4(d), next4(s), next4(c)));
+            auto d4 = next4(d),
+                 s4 = next4(s),
+                 c4 = next4(c);
+            auto lo = fn(d4.lo, s4.lo, c4.lo),
+                 hi = fn(d4.hi, s4.hi, c4.hi);
+            _mm_storeu_si128((__m128i*)t, pack(lo,hi));
             t += 4;
             n -= 4;
             continue;
         }
         if (n & 2) {
-            _mm_storel_epi64((__m128i*)t, fn(next2(d), next2(s), next2(c)));
+            auto r = fn(next2(d), next2(s), next2(c));
+            _mm_storel_epi64((__m128i*)t, pack(r,r));
             t += 2;
         }
         if (n & 1) {
-            *t = _mm_cvtsi128_si32(fn(next1(d), next1(s), next1(c)));
+            auto r = fn(next1(d), next1(s), next1(c));
+            *t = _mm_cvtsi128_si32(pack(r,r));
         }
         return;
     }
 }
 
-//                                           packed
-// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
-//                                          unpacked
-
-// Everything on the packed side of the squiggly line deals with densely packed 8-bit data,
-// e.g. [BGRA bgra ... ] for pixels or [ CCCC cccc ... ] for coverage.
-//
-// Everything on the unpacked side of the squiggly line deals with unpacked 8-bit data,
-// e.g [B_G_ R_A_ b_g_ r_a_ ] for pixels or [ C_C_ C_C_ c_c_ c_c_ c_c_ ] for coverage,
-// where _ is a zero byte.
-//
-// Adapt<Fn> / adapt(fn) allow the two sides to interoperate,
-// by unpacking arguments, calling fn, then packing the results.
-//
-// This lets us write most of our code in terms of unpacked inputs (considerably simpler)
-// and all the packing and unpacking is handled automatically.
-
-template <typename Fn>
-struct Adapt {
-    Fn fn;
-
-    __m128i operator()(__m128i d, __m128i s, __m128i c) {
-        auto lo = [](__m128i x) { return _mm_unpacklo_epi8(x, _mm_setzero_si128()); };
-        auto hi = [](__m128i x) { return _mm_unpackhi_epi8(x, _mm_setzero_si128()); };
-        return _mm_packus_epi16(fn(lo(d), lo(s), lo(c)),
-                                fn(hi(d), hi(s), hi(c)));
-    }
-
-    m64i operator()(const m64i& d, const m64i& s, const m64i& c) {
-        auto lo = [](__m128i x) { return _mm_unpacklo_epi8(x, _mm_setzero_si128()); };
-        auto r = fn(lo(d), lo(s), lo(c));
-        return _mm_packus_epi16(r, r);
-    }
-};
-
-template <typename Fn>
-static Adapt<Fn> adapt(Fn&& fn) { return { fn }; }
-
-// These helpers all work exclusively with unpacked 8-bit values,
-// except div255() with is 16-bit -> unpacked 8-bit, and mul255() which is the reverse.
-
-// Divide by 255 with rounding.
-// (x+127)/255 == ((x+128)*257)>>16.
-// Sometimes we can be more efficient by breaking this into two parts.
-static __m128i div255_part1(__m128i x) { return _mm_add_epi16(x, _mm_set1_epi16(128)); }
-static __m128i div255_part2(__m128i x) { return _mm_mulhi_epu16(x, _mm_set1_epi16(257)); }
-static __m128i div255(__m128i x) { return div255_part2(div255_part1(x)); }
-
-// (x*y+127)/255, a byte multiply.
-static __m128i scale(__m128i x, __m128i y) { return div255(_mm_mullo_epi16(x, y)); }
-
-// (255 * x).
-static __m128i mul255(__m128i x) { return _mm_sub_epi16(_mm_slli_epi16(x, 8), x); }
-
-// (255 - x).
-static __m128i inv(__m128i x) { return _mm_xor_si128(_mm_set1_epi16(0x00ff), x); }
-
-// ARGB argb -> AAAA aaaa
-static __m128i alphas(__m128i px) {
-    const int a = 2 * (SK_A32_SHIFT/8);  // SK_A32_SHIFT is typically 24, so this is typically 6.
-    const int _ = ~0;
-    return _mm_shuffle_epi8(px, _mm_setr_epi8(a+0,_,a+0,_,a+0,_,a+0,_, a+8,_,a+8,_,a+8,_,a+8,_));
-}
+namespace sk_sse41 {
 
 // SrcOver, with a constant source and full coverage.
 static void blit_row_color32(SkPMColor* tgt, const SkPMColor* dst, int n, SkPMColor src) {
@@ -168,14 +134,14 @@ static void blit_row_color32(SkPMColor* tgt, const SkPMColor* dst, int n, SkPMCo
     // But we can go one step further to ((s*255 + 128 + d*inv(alphas(s)))*257)>>16.
     // This lets us hoist (s*255+128) and inv(alphas(s)) out of the loop.
-    __m128i s         = _mm_unpacklo_epi8(_mm_set1_epi32(src), _mm_setzero_si128()),
-            s_255_128 = div255_part1(mul255(s)),
+    __m128i s         = next2(src),
+            s_255_128 = div255_part1(_mm_mullo_epi16(s, _mm_set1_epi16(255))),
             A         = inv(alphas(s));
 
     const uint8_t cov = 0xff;
-    loop(n, tgt, dst, src, cov, adapt([=](__m128i d, __m128i, __m128i) {
+    loop(n, tgt, dst, src, cov, [=](__m128i d, __m128i, __m128i) {
         return div255_part2(_mm_add_epi16(s_255_128, _mm_mullo_epi16(d, A)));
-    }));
+    });
 }
 
 // SrcOver, with a constant source and variable coverage.
@@ -186,26 +152,23 @@ static void blit_mask_d32_a8(SkPMColor* dst, size_t dstRB,
     if (SkColorGetA(color) == 0xFF) {
         const SkPMColor src = SkSwizzle_BGRA_to_PMColor(color);
         while (h --> 0) {
-            loop(w, dst, (const SkPMColor*)dst, src, cov,
-                 adapt([](__m128i d, __m128i s, __m128i c) {
+            loop(w, dst, (const SkPMColor*)dst, src, cov, [](__m128i d, __m128i s, __m128i c) {
                 // Src blend mode: a simple lerp from d to s by c.
                 // TODO: try a pmaddubsw version?
-                return div255(_mm_add_epi16(_mm_mullo_epi16(inv(c),d),
-                                            _mm_mullo_epi16(    c ,s)));
-            }));
+                return div255(_mm_add_epi16(_mm_mullo_epi16(inv(c),d), _mm_mullo_epi16(c,s)));
+            });
             dst += dstRB / sizeof(*dst);
             cov += covRB / sizeof(*cov);
         }
     } else {
         const SkPMColor src = SkPreMultiplyColor(color);
         while (h --> 0) {
-            loop(w, dst, (const SkPMColor*)dst, src, cov,
-                 adapt([](__m128i d, __m128i s, __m128i c) {
+            loop(w, dst, (const SkPMColor*)dst, src, cov, [](__m128i d, __m128i s, __m128i c) {
                 // SrcOver blend mode, with coverage folded into source alpha.
                 __m128i sc = scale(s,c),
                         AC = inv(alphas(sc));
                 return _mm_add_epi16(sc, scale(d,AC));
-            }));
+            });
             dst += dstRB / sizeof(*dst);
             cov += covRB / sizeof(*cov);
         }
@@ -213,7 +176,6 @@ static void blit_mask_d32_a8(SkPMColor* dst, size_t dstRB,
 }
 
 }  // namespace sk_sse41
-
 #endif
 
 namespace SkOpts {
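
The reverted code leans on one piece of integer arithmetic in two places: div255() computes (x+127)/255 as ((x+128)*257)>>16, and blit_row_color32 hoists that further into ((s*255 + 128 + d*inv(alphas(s)))*257)>>16. Both identities can be checked exhaustively in scalar C++. The sketch below is illustrative only, not part of the patch; note the hoisted form relies on the premultiplied invariant that each color channel s is at most its alpha a.

#include <cassert>
#include <cstdint>

int main() {
    // div255 with rounding: (x+127)/255 == ((x+128)*257)>>16
    // for every value a byte multiply can produce.
    for (uint32_t x = 0; x <= 255*255; x++) {
        assert((x + 127) / 255 == (x + 128) * 257 >> 16);
    }

    // Hoisted SrcOver: s + (d*inv(a)+127)/255 == ((s*255 + 128 + d*inv(a))*257)>>16.
    // This holds for premultiplied pixels, where s <= a per channel.
    for (uint32_t a = 0; a <= 255; a++)
    for (uint32_t s = 0; s <= a;   s++)
    for (uint32_t d = 0; d <= 255; d++) {
        uint32_t x = d * (255 - a);
        assert(s + (x + 127) / 255 == (s*255 + 128 + x) * 257 >> 16);
    }
    return 0;
}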
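
Likewise, the two lambdas passed to loop() in blit_mask_d32_a8 reduce to the following per-channel scalar blends. This is a sketch with illustrative names (blend_src and blend_srcover are not Skia functions), using the same rounding as the SIMD helpers.

#include <cstdint>

// Round-to-nearest division by 255, matching the SIMD div255().
static uint32_t div255(uint32_t x) { return (x + 128) * 257 >> 16; }

// Opaque source: Src blend mode, a simple lerp from d to s by coverage c.
static uint8_t blend_src(uint8_t d, uint8_t s, uint8_t c) {
    return (uint8_t)div255((255 - c) * d + c * s);
}

// Translucent source: SrcOver with coverage folded into source alpha.
// sc is the coverage-scaled source channel, sc_alpha the scaled source alpha.
static uint8_t blend_srcover(uint8_t d, uint8_t sc, uint8_t sc_alpha) {
    return (uint8_t)(sc + div255(d * (255 - sc_alpha)));
}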