From: mtklein
Date: Mon, 22 Jun 2015 17:39:38 +0000 (-0700)
Subject: Update some Sk4px APIs.
X-Git-Tag: accepted/tizen/5.0/unified/20181102.025319~1992
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=059ac00446404506a46cd303db15239c7aae49d5;p=platform%2Fupstream%2FlibSkiaSharp.git

Update some Sk4px APIs.

Mostly this is about ergonomics, making it easier to do good operations and
hard / impossible to do bad ones.

 - SkAlpha / SkPMColor constructors become static factories.
 - Remove div255TruncNarrow(), rename div255RoundNarrow() to div255().
   In practice we always want to round, and the narrowing to 8-bit is
   contextually obvious.
 - Rename fastMulDiv255Round() to approxMulDiv255() to stress its
   approximate-ness over its speed.  Drop Round for the same reason as
   above... we should always round.
 - Add operator overloads so we don't have to keep throwing in
   seemingly-random Sk4px() or Sk4px::Wide() casts.
 - Use operator*() for 8-bit x 8-bit -> 16-bit math.  It's always what we
   want, and there's generally no 8x8->8 alternative.
 - MapFoo can take a const Func&.  Don't think it makes a big difference,
   but nice to do.

BUG=skia:

Review URL: https://codereview.chromium.org/1202013002
---

diff --git a/src/core/Sk4px.h b/src/core/Sk4px.h
index 8fb546a..26d4d0f 100644
--- a/src/core/Sk4px.h
+++ b/src/core/Sk4px.h
@@ -21,8 +21,9 @@ namespace {
 // 1, 2 or 4 SkPMColors, generally vectorized.
 class Sk4px : public Sk16b {
 public:
-    Sk4px(SkAlpha a) : INHERITED(a) {}  // Duplicate 16x: a -> aaaa aaaa aaaa aaaa
-    Sk4px(SkPMColor);                   // Duplicate 4x: argb -> argb argb argb argb
+    static Sk4px DupAlpha(SkAlpha a) { return Sk16b(a); }  // a -> aaaa aaaa aaaa aaaa
+    static Sk4px DupPMColor(SkPMColor c);                  // argb -> argb argb argb argb
+
     Sk4px(const Sk16b& v) : INHERITED(v) {}

     Sk4px alphas() const;  // ARGB argb XYZW xyzw -> AAAA aaaa XXXX xxxx
@@ -55,11 +56,21 @@ public:
         // Pack the top byte of each component back down into 4 SkPMColors.
         Sk4px addNarrowHi(const Sk16h&) const;

-        Sk4px div255TruncNarrow() const { return this->addNarrowHi(*this >> 8); }
-        Sk4px div255RoundNarrow() const {
-            return Sk4px::Wide(*this + Sk16h(128)).div255TruncNarrow();
+        // Rounds, i.e. (x+127) / 255.
+        Sk4px div255() const {
+            // Calculated as ((x+128) + ((x+128)>>8)) >> 8.
+            auto v = *this + Sk16h(128);
+            return v.addNarrowHi(v >> 8);
         }

+        // These just keep the types as Wide so the user doesn't have to keep casting.
+        Wide operator * (const Wide& o) const { return INHERITED::operator*(o); }
+        Wide operator + (const Wide& o) const { return INHERITED::operator+(o); }
+        Wide operator - (const Wide& o) const { return INHERITED::operator-(o); }
+        Wide operator >> (int bits) const { return INHERITED::operator>>(bits); }
+        Wide operator << (int bits) const { return INHERITED::operator<<(bits); }
+        static Wide Min(const Wide& a, const Wide& b) { return INHERITED::Min(a,b); }
+
     private:
         typedef Sk16h INHERITED;
     };
@@ -67,45 +78,46 @@ public:
     Wide widenLo() const;               // ARGB -> 0A 0R 0G 0B
     Wide widenHi() const;               // ARGB -> A0 R0 G0 B0
     Wide mulWiden(const Sk16b&) const;  // 8-bit x 8-bit -> 16-bit components.
-    Wide mul255Widen() const {
-        // TODO: x*255 = x*256-x, so something like this->widenHi() - this->widenLo()?
-        return this->mulWiden(Sk16b(255));
-    }
-    // Generally faster than this->mulWiden(other).div255RoundNarrow().
-    // May be incorrect by +-1, but is always exactly correct when *this or other is 0 or 255.
-    Sk4px fastMulDiv255Round(const Sk16b& other) const {
+    // The only 8-bit multiply we use is 8-bit x 8-bit -> 16-bit.  Might as well make it pithy.
+    Wide operator * (const Sk4px& o) const { return this->mulWiden(o); }
+
+    // These just keep the types as Sk4px so the user doesn't have to keep casting.
+    Sk4px operator + (const Sk4px& o) const { return INHERITED::operator+(o); }
+    Sk4px operator - (const Sk4px& o) const { return INHERITED::operator-(o); }
+
+    // Generally faster than (*this * o).div255().
+    // May be incorrect by +-1, but is always exactly correct when *this or o is 0 or 255.
+    Sk4px approxMulDiv255(const Sk16b& o) const {
         // (x*y + x) / 256 meets these criteria.  (As of course does (x*y + y) / 256 by symmetry.)
-        Sk4px::Wide x  = this->widenLo(),
-                    xy = this->mulWiden(other);
-        return x.addNarrowHi(xy);
+        return this->widenLo().addNarrowHi(*this * o);
     }

     // A generic driver that maps fn over a src array into a dst array.
     // fn should take an Sk4px (4 src pixels) and return an Sk4px (4 dst pixels).
     template <typename Fn>
-    static void MapSrc(int count, SkPMColor* dst, const SkPMColor* src, Fn fn) {
+    static void MapSrc(int n, SkPMColor* dst, const SkPMColor* src, const Fn& fn) {
         // This looks a bit odd, but it helps loop-invariant hoisting across different calls to fn.
         // Basically, we need to make sure we keep things inside a single loop.
-        while (count > 0) {
-            if (count >= 8) {
+        while (n > 0) {
+            if (n >= 8) {
                 Sk4px dst0 = fn(Load4(src+0)),
                       dst4 = fn(Load4(src+4));
                 dst0.store4(dst+0);
                 dst4.store4(dst+4);
-                dst += 8; src += 8; count -= 8;
+                dst += 8; src += 8; n -= 8;
                 continue;  // Keep our stride at 8 pixels as long as possible.
             }
-            SkASSERT(count <= 7);
-            if (count >= 4) {
+            SkASSERT(n <= 7);
+            if (n >= 4) {
                 fn(Load4(src)).store4(dst);
-                dst += 4; src += 4; count -= 4;
+                dst += 4; src += 4; n -= 4;
             }
-            if (count >= 2) {
+            if (n >= 2) {
                 fn(Load2(src)).store2(dst);
-                dst += 2; src += 2; count -= 2;
+                dst += 2; src += 2; n -= 2;
             }
-            if (count >= 1) {
+            if (n >= 1) {
                 fn(Load1(src)).store1(dst);
             }
             break;
@@ -114,26 +126,26 @@ public:

     // As above, but with dst4' = fn(dst4, src4).
     template <typename Fn>
-    static void MapDstSrc(int count, SkPMColor* dst, const SkPMColor* src, Fn fn) {
-        while (count > 0) {
-            if (count >= 8) {
+    static void MapDstSrc(int n, SkPMColor* dst, const SkPMColor* src, const Fn& fn) {
+        while (n > 0) {
+            if (n >= 8) {
                 Sk4px dst0 = fn(Load4(dst+0), Load4(src+0)),
                       dst4 = fn(Load4(dst+4), Load4(src+4));
                 dst0.store4(dst+0);
                 dst4.store4(dst+4);
-                dst += 8; src += 8; count -= 8;
+                dst += 8; src += 8; n -= 8;
                 continue;  // Keep our stride at 8 pixels as long as possible.
             }
-            SkASSERT(count <= 7);
-            if (count >= 4) {
+            SkASSERT(n <= 7);
+            if (n >= 4) {
                 fn(Load4(dst), Load4(src)).store4(dst);
-                dst += 4; src += 4; count -= 4;
+                dst += 4; src += 4; n -= 4;
             }
-            if (count >= 2) {
+            if (n >= 2) {
                 fn(Load2(dst), Load2(src)).store2(dst);
-                dst += 2; src += 2; count -= 2;
+                dst += 2; src += 2; n -= 2;
             }
-            if (count >= 1) {
+            if (n >= 1) {
                 fn(Load1(dst), Load1(src)).store1(dst);
             }
             break;
@@ -142,33 +154,28 @@ public:

     // As above, but with dst4' = fn(dst4, src4, alpha4).
     template <typename Fn>
-    static void MapDstSrcAlpha(
-        int count, SkPMColor* dst, const SkPMColor* src, const SkAlpha* a, Fn fn) {
-        while (count > 0) {
-            if (count >= 8) {
-                Sk4px alpha0 = Load4Alphas(a+0),
-                      alpha4 = Load4Alphas(a+4);
-                Sk4px dst0 = fn(Load4(dst+0), Load4(src+0), alpha0),
-                      dst4 = fn(Load4(dst+4), Load4(src+4), alpha4);
+    static void MapDstSrcAlpha(int n, SkPMColor* dst, const SkPMColor* src, const SkAlpha* a,
+                               const Fn& fn) {
+        while (n > 0) {
+            if (n >= 8) {
+                Sk4px dst0 = fn(Load4(dst+0), Load4(src+0), Load4Alphas(a+0)),
+                      dst4 = fn(Load4(dst+4), Load4(src+4), Load4Alphas(a+4));
                 dst0.store4(dst+0);
                 dst4.store4(dst+4);
-                dst += 8; src += 8; a += 8; count -= 8;
+                dst += 8; src += 8; a += 8; n -= 8;
                 continue;  // Keep our stride at 8 pixels as long as possible.
             }
-            SkASSERT(count <= 7);
-            if (count >= 4) {
-                Sk4px alpha = Load4Alphas(a);
-                fn(Load4(dst), Load4(src), alpha).store4(dst);
-                dst += 4; src += 4; a += 4; count -= 4;
+            SkASSERT(n <= 7);
+            if (n >= 4) {
+                fn(Load4(dst), Load4(src), Load4Alphas(a)).store4(dst);
+                dst += 4; src += 4; a += 4; n -= 4;
             }
-            if (count >= 2) {
-                Sk4px alpha = Load2Alphas(a);
-                fn(Load2(dst), Load2(src), alpha).store2(dst);
-                dst += 2; src += 2; a += 2; count -= 2;
+            if (n >= 2) {
+                fn(Load2(dst), Load2(src), Load2Alphas(a)).store2(dst);
+                dst += 2; src += 2; a += 2; n -= 2;
             }
-            if (count >= 1) {
-                Sk4px alpha(*a);
-                fn(Load1(dst), Load1(src), alpha).store1(dst);
+            if (n >= 1) {
+                fn(Load1(dst), Load1(src), DupAlpha(*a)).store1(dst);
             }
             break;
         }
diff --git a/src/core/Sk4pxXfermode.h b/src/core/Sk4pxXfermode.h
index e8610ed..b4ebd85 100644
--- a/src/core/Sk4pxXfermode.h
+++ b/src/core/Sk4pxXfermode.h
@@ -21,53 +21,41 @@ namespace {
     };                                                      \
     inline Sk4px Name::Xfer(const Sk4px& s, const Sk4px& d)

-XFERMODE(Clear) { return Sk4px((SkPMColor)0); }
+XFERMODE(Clear) { return Sk4px::DupPMColor(0); }
 XFERMODE(Src)   { return s; }
 XFERMODE(Dst)   { return d; }
-XFERMODE(SrcIn)   { return s.fastMulDiv255Round(d.alphas()      ); }
-XFERMODE(SrcOut)  { return s.fastMulDiv255Round(d.alphas().inv()); }
-XFERMODE(SrcOver) { return s + d.fastMulDiv255Round(s.alphas().inv()); }
+XFERMODE(SrcIn)   { return s.approxMulDiv255(d.alphas()      ); }
+XFERMODE(SrcOut)  { return s.approxMulDiv255(d.alphas().inv()); }
+XFERMODE(SrcOver) { return s + d.approxMulDiv255(s.alphas().inv()); }
 XFERMODE(DstIn)   { return SrcIn  ::Xfer(d,s); }
 XFERMODE(DstOut)  { return SrcOut ::Xfer(d,s); }
 XFERMODE(DstOver) { return SrcOver::Xfer(d,s); }

 // [ S * Da + (1 - Sa) * D]
-XFERMODE(SrcATop) {
-    return Sk4px::Wide(s.mulWiden(d.alphas()) + d.mulWiden(s.alphas().inv()))
-        .div255RoundNarrow();
-}
+XFERMODE(SrcATop) { return (s * d.alphas() + d * s.alphas().inv()).div255(); }
 XFERMODE(DstATop) { return SrcATop::Xfer(d,s); }
 //[ S * (1 - Da) + (1 - Sa) * D ]
-XFERMODE(Xor) {
-    return Sk4px::Wide(s.mulWiden(d.alphas().inv()) + d.mulWiden(s.alphas().inv()))
-        .div255RoundNarrow();
-}
+XFERMODE(Xor) { return (s * d.alphas().inv() + d * s.alphas().inv()).div255(); }
 // [S + D ]
 XFERMODE(Plus) { return s.saturatedAdd(d); }
 // [S * D ]
-XFERMODE(Modulate) { return s.fastMulDiv255Round(d); }
+XFERMODE(Modulate) { return s.approxMulDiv255(d); }
 // [S + D - S * D]
 XFERMODE(Screen) {
     // Doing the math as S + (1-S)*D or S + (D - S*D) means the add and subtract can be done
     // in 8-bit space without overflow.  S + (1-S)*D is a touch faster because inv() is cheap.
-    return s + d.fastMulDiv255Round(s.inv());
-}
-XFERMODE(Multiply) {
-    return Sk4px::Wide(s.mulWiden(d.alphas().inv()) +
-                       d.mulWiden(s.alphas().inv()) +
-                       s.mulWiden(d))
-        .div255RoundNarrow();
+    return s + d.approxMulDiv255(s.inv());
 }
+XFERMODE(Multiply) { return (s * d.alphas().inv() + d * s.alphas().inv() + s*d).div255(); }
 // [ Sa + Da - Sa*Da, Sc + Dc - 2*min(Sc*Da, Dc*Sa) ]  (And notice Sa*Da == min(Sa*Da, Da*Sa).)
 XFERMODE(Difference) {
-    auto m = Sk4px::Wide(Sk16h::Min(s.mulWiden(d.alphas()), d.mulWiden(s.alphas())))
-        .div255RoundNarrow();
+    auto m = Sk4px::Wide::Min(s * d.alphas(), d * s.alphas()).div255();
     // There's no chance of underflow, and if we subtract m before adding s+d, no overflow.
     return (s - m) + (d - m.zeroAlphas());
 }
 // [ Sa + Da - Sa*Da, Sc + Dc - 2*Sc*Dc ]
 XFERMODE(Exclusion) {
-    auto p = s.fastMulDiv255Round(d);
+    auto p = s.approxMulDiv255(d);
     // There's no chance of underflow, and if we subtract p before adding src+dst, no overflow.
     return (s - p) + (d - p.zeroAlphas());
 }
@@ -77,20 +65,19 @@ XFERMODE(Exclusion) {
 // A reasonable fallback mode for doing AA is to simply apply the transfermode first,
 // then linearly interpolate the AA.
 template <typename Mode>
-static Sk4px xfer_aa(const Sk4px& s, const Sk4px& d, const Sk16b& aa) {
-    Sk4px noAA = Mode::Xfer(s, d);
-    return Sk4px::Wide(noAA.mulWiden(aa) + d.mulWiden(Sk4px(aa).inv()))
-        .div255RoundNarrow();
+static Sk4px xfer_aa(const Sk4px& s, const Sk4px& d, const Sk4px& aa) {
+    Sk4px bw = Mode::Xfer(s, d);
+    return (bw * aa + d * aa.inv()).div255();
 }

 // For some transfermodes we specialize AA, either for correctness or performance.
 #ifndef SK_NO_SPECIALIZED_AA_XFERMODES
     #define XFERMODE_AA(Name) \
-        template <> Sk4px xfer_aa<Name>(const Sk4px& s, const Sk4px& d, const Sk16b& aa)
+        template <> Sk4px xfer_aa<Name>(const Sk4px& s, const Sk4px& d, const Sk4px& aa)

     // Plus' clamp needs to happen after AA.  skia:3852
     XFERMODE_AA(Plus) {  // [ clamp( (1-AA)D + (AA)(S+D) ) == clamp(D + AA*S) ]
-        return d.saturatedAdd(s.fastMulDiv255Round(aa));
+        return d.saturatedAdd(s.approxMulDiv255(aa));
     }

     #undef XFERMODE_AA
@@ -110,7 +97,7 @@ public:
             });
         } else {
             Sk4px::MapDstSrcAlpha(n, dst, src, aa,
-                    [&](const Sk4px& dst4, const Sk4px& src4, const Sk16b& alpha) {
+                    [&](const Sk4px& dst4, const Sk4px& src4, const Sk4px& alpha) {
                 return xfer_aa(src4, dst4, alpha);
             });
         }
diff --git a/src/core/SkBlitRow_D32.cpp b/src/core/SkBlitRow_D32.cpp
index 07a5d87..57d0ead 100644
--- a/src/core/SkBlitRow_D32.cpp
+++ b/src/core/SkBlitRow_D32.cpp
@@ -153,10 +153,10 @@ void SkBlitRow::Color32(SkPMColor dst[], const SkPMColor src[], int count, SkPMC
     invA += invA >> 7;
     SkASSERT(invA < 256);  // We've already handled alpha == 0 above.

-    Sk16h colorHighAndRound = Sk4px(color).widenHi() + Sk16h(128);
+    Sk16h colorHighAndRound = Sk4px::DupPMColor(color).widenHi() + Sk16h(128);
     Sk16b invA_16x(invA);

     Sk4px::MapSrc(count, dst, src, [&](const Sk4px& src4) -> Sk4px {
-        return src4.mulWiden(invA_16x).addNarrowHi(colorHighAndRound);
+        return (src4 * invA_16x).addNarrowHi(colorHighAndRound);
     });
 }
diff --git a/src/opts/Sk4px_NEON.h b/src/opts/Sk4px_NEON.h
index 494052c..644a71f 100644
--- a/src/opts/Sk4px_NEON.h
+++ b/src/opts/Sk4px_NEON.h
@@ -7,7 +7,7 @@ namespace {  // See Sk4px.h

-inline Sk4px::Sk4px(SkPMColor px) : INHERITED((uint8x16_t)vdupq_n_u32(px)) {}
+inline Sk4px Sk4px::DupPMColor(SkPMColor px) { return Sk16b((uint8x16_t)vdupq_n_u32(px)); }

 inline Sk4px Sk4px::Load4(const SkPMColor px[4]) {
     return Sk16b((uint8x16_t)vld1q_u32(px));
diff --git a/src/opts/Sk4px_SSE2.h b/src/opts/Sk4px_SSE2.h
index ee58c38..74ccffc 100644
--- a/src/opts/Sk4px_SSE2.h
+++ b/src/opts/Sk4px_SSE2.h
@@ -7,7 +7,7 @@ namespace {  // See Sk4px.h

-inline Sk4px::Sk4px(SkPMColor px) : INHERITED(_mm_set1_epi32(px)) {}
+inline Sk4px Sk4px::DupPMColor(SkPMColor px) { return Sk16b(_mm_set1_epi32(px)); }

 inline Sk4px Sk4px::Load4(const SkPMColor px[4]) {
     return Sk16b(_mm_loadu_si128((const __m128i*)px));
diff --git a/src/opts/Sk4px_none.h b/src/opts/Sk4px_none.h
index d3ead31..ce2f845 100644
--- a/src/opts/Sk4px_none.h
+++ b/src/opts/Sk4px_none.h
@@ -11,8 +11,10 @@ namespace {  // See Sk4px.h

 static_assert(sizeof(Sk4px) == 16, "This file uses memcpy / sk_memset32, so exact size matters.");

-inline Sk4px::Sk4px(SkPMColor px) {
-    sk_memset32((uint32_t*)this, px, 4);
+inline Sk4px Sk4px::DupPMColor(SkPMColor px) {
+    Sk4px px4 = Sk16b();
+    sk_memset32((uint32_t*)&px4, px, 4);
+    return px4;
 }

 inline Sk4px Sk4px::Load4(const SkPMColor px[4]) {
diff --git a/src/opts/SkBlitRow_opts_arm_neon.cpp b/src/opts/SkBlitRow_opts_arm_neon.cpp
index 93bd851..0755f5d 100644
--- a/src/opts/SkBlitRow_opts_arm_neon.cpp
+++ b/src/opts/SkBlitRow_opts_arm_neon.cpp
@@ -1740,10 +1740,10 @@ void sk_blitrow_color32_arm_neon(SkPMColor* dst, const SkPMColor* src, int count
     invA += invA >> 7;
     SkASSERT(invA < 256);  // Our caller has already handled the alpha == 0 case.

-    Sk16h colorHighAndRound = Sk4px(color).widenHi() + Sk16h(128);
+    Sk16h colorHighAndRound = Sk4px::DupPMColor(color).widenHi() + Sk16h(128);
     Sk16b invA_16x(invA);

     Sk4px::MapSrc(count, dst, src, [&](const Sk4px& src4) -> Sk4px {
-        return src4.mulWiden(invA_16x).addNarrowHi(colorHighAndRound);
+        return (src4 * invA_16x).addNarrowHi(colorHighAndRound);
     });
 }
diff --git a/src/opts/SkNx_neon.h b/src/opts/SkNx_neon.h
index 2087b88..da926e0 100644
--- a/src/opts/SkNx_neon.h
+++ b/src/opts/SkNx_neon.h
@@ -361,10 +361,6 @@ public:
     SkNi operator + (const SkNi& o) const { return vaddq_u8(fVec, o.fVec); }
     SkNi operator - (const SkNi& o) const { return vsubq_u8(fVec, o.fVec); }

-    SkNi operator * (const SkNi& o) const { return vmulq_u8(fVec, o.fVec); }
-
-    SkNi operator << (int bits) const { SHIFT8(vshlq_n_u8, fVec, bits); }
-    SkNi operator >> (int bits) const { SHIFT8(vshrq_n_u8, fVec, bits); }

     static SkNi Min(const SkNi& a, const SkNi& b) { return vminq_u8(a.fVec, b.fVec); }

diff --git a/src/opts/SkNx_sse.h b/src/opts/SkNx_sse.h
index ed78892..12a4719 100644
--- a/src/opts/SkNx_sse.h
+++ b/src/opts/SkNx_sse.h
@@ -311,11 +311,6 @@ public:
     SkNi operator + (const SkNi& o) const { return _mm_add_epi8(fVec, o.fVec); }
     SkNi operator - (const SkNi& o) const { return _mm_sub_epi8(fVec, o.fVec); }

-    // SSE cannot multiply or shift vectors of uint8_t.
-    SkNi operator * (const SkNi& o) const { SkASSERT(false); return fVec; }
-    SkNi operator << (int bits) const { SkASSERT(false); return fVec; }
-    SkNi operator >> (int bits) const { SkASSERT(false); return fVec; }
-
     static SkNi Min(const SkNi& a, const SkNi& b) { return _mm_min_epu8(a.fVec, b.fVec); }

     template <int k> uint8_t kth() const {
diff --git a/tests/SkNxTest.cpp b/tests/SkNxTest.cpp
index eab625d..3719044 100644
--- a/tests/SkNxTest.cpp
+++ b/tests/SkNxTest.cpp
@@ -174,15 +174,15 @@ DEF_TEST(Sk4px_muldiv255round, r) {
             int exact = (a*b+127)/255;

             // Duplicate a and b 16x each.
-            Sk4px av((SkAlpha)a),
-                  bv((SkAlpha)b);
+            auto av = Sk4px::DupAlpha(a),
+                 bv = Sk4px::DupAlpha(b);

             // This way should always be exactly correct.
-            int correct = av.mulWiden(bv).div255RoundNarrow().kth<0>();
+            int correct = (av * bv).div255().kth<0>();
             REPORTER_ASSERT(r, correct == exact);

             // We're a bit more flexible on this method: correct for 0 or 255, otherwise off by <=1.
-            int fast = av.fastMulDiv255Round(bv).kth<0>();
+            int fast = av.approxMulDiv255(bv).kth<0>();
             REPORTER_ASSERT(r, fast-exact >= -1 && fast-exact <= 1);
             if (a == 0 || a == 255 || b == 0 || b == 255) {
                 REPORTER_ASSERT(r, fast == exact);
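
The arithmetic that the renamed helpers rely on can be sanity-checked with a small
scalar model. The sketch below is not part of the patch; it only mirrors the identities
quoted in the comments above (div255() as ((x+128) + ((x+128)>>8)) >> 8, i.e. (x+127)/255,
and approxMulDiv255() as (x*y + x)/256) and the tolerance the updated DEF_TEST asserts.
The names div255_round and approx_mul_div255 are invented for the example.

#include <cassert>
#include <cstdio>
#include <cstdint>

// Exact rounded division by 255, one 16-bit lane of Sk4px::Wide::div255():
// ((x+128) + ((x+128)>>8)) >> 8 equals (x+127)/255 for x in [0, 255*255].
static uint8_t div255_round(int x) {
    int v = x + 128;
    return (uint8_t)((v + (v >> 8)) >> 8);
}

// The approximation behind approxMulDiv255(): (x*y + x) / 256.
// Off by at most 1 from the rounded result; exact when x or y is 0 or 255.
static uint8_t approx_mul_div255(int x, int y) {
    return (uint8_t)((x*y + x) >> 8);
}

int main() {
    for (int a = 0; a < 256; a++) {
        for (int b = 0; b < 256; b++) {
            int exact  = (a*b + 127) / 255;        // Reference: rounded a*b/255.
            int viaDiv = div255_round(a*b);        // div255() on the widened product.
            int fast   = approx_mul_div255(a, b);  // approxMulDiv255().

            assert(viaDiv == exact);
            assert(fast - exact >= -1 && fast - exact <= 1);
            if (a == 0 || a == 255 || b == 0 || b == 255) {
                assert(fast == exact);
            }
        }
    }
    printf("div255 / approxMulDiv255 identities hold for all 8-bit pairs\n");
    return 0;
}

This is the same exhaustive check the tests/SkNxTest.cpp hunk performs with the vector
types, just without the Sk4px machinery.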