Revert "SkRasterPipeline: 8x pipelines, attempt 2"
author    Mike Klein <mtklein@chromium.org>    Mon, 10 Oct 2016 14:23:37 +0000 (14:23 +0000)
committer Mike Klein <mtklein@chromium.org>    Mon, 10 Oct 2016 14:32:01 +0000 (14:32 +0000)
This reverts commit Id0ba250037e271a9475fe2f0989d64f0aa909bae.

crbug.com/654213
Looks like Chrome Canary's picking up Haswell code on non-Haswell machines.

Change-Id: I16f976da24db86d5c99636c472ffad56db213a2a
Reviewed-on: https://skia-review.googlesource.com/3108
Commit-Queue: Mike Klein <mtklein@chromium.org>
Reviewed-by: Mike Klein <mtklein@chromium.org>
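
The failure mode here is the classic runtime-dispatch hazard: stages compiled with AVX2 (Haswell) instructions must only be installed after a CPU-feature check, or older CPUs hit illegal-instruction crashes. A minimal sketch of the intended guard, with hypothetical helper names (not verbatim Skia code):

    // Hedged sketch of the dispatch guard, assuming a runtime feature check.
    void Init() {
        if (cpu_supports_avx2()) {   // hypothetical CPU-feature query
            Init_hsw();              // only then install Haswell (AVX2) stages
        }
    }

The crash in crbug.com/654213 suggests AVX2 code reached non-AVX2 machines anyway, so the 8x (Sk8f) pipeline work is reverted below.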
src/core/SkHalf.h
src/core/SkNx.h
src/core/SkOpts.cpp
src/core/SkOpts.h
src/core/SkRasterPipeline.cpp
src/core/SkRasterPipeline.h
src/core/SkSRGB.h
src/opts/SkNx_sse.h
src/opts/SkOpts_hsw.cpp
src/opts/SkOpts_sse41.cpp
src/opts/SkRasterPipeline_opts.h

diff --git a/src/core/SkHalf.h b/src/core/SkHalf.h
index e71cb87..dd978a2 100644
 #include "SkNx.h"
 #include "SkTypes.h"
 
-#if !defined(_MSC_VER) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
-    #include <x86intrin.h>
-#endif
-
 // 16-bit floating point value
 // format is 1 bit sign, 5 bits exponent, 10 bits mantissa
 // only used for storage
@@ -89,29 +85,4 @@ static inline Sk4h SkFloatToHalf_finite_ftz(const Sk4f& fs) {
 #endif
 }
 
-static inline Sk8f SkHalfToFloat_finite_ftz(const Sk8h& hs) {
-#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
-    return _mm256_cvtph_ps(hs.fVec);
-
-#else
-    uint64_t parts[2];
-    hs.store(parts);
-    return SkNx_join(SkHalfToFloat_finite_ftz(parts[0]),
-                     SkHalfToFloat_finite_ftz(parts[1]));
-
-#endif
-}
-
-static inline Sk8h SkFloatToHalf_finite_ftz(const Sk8f& fs) {
-#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
-    return _mm256_cvtps_ph(fs.fVec, _MM_FROUND_CUR_DIRECTION);
-
-#else
-    uint64_t parts[2];
-    SkFloatToHalf_finite_ftz(fs.fLo).store(parts+0);
-    SkFloatToHalf_finite_ftz(fs.fHi).store(parts+1);
-    return Sk8h::Load(parts);
-#endif
-}
-
 #endif
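
As a worked example of the half-float layout noted in the header comment above (1 sign bit, 5 exponent bits with bias 15, 10 mantissa bits):

    //  1.0f -> sign 0, exponent 01111 (15), mantissa 0000000000 -> 0x3C00
    // -2.0f -> sign 1, exponent 10000 (16), mantissa 0000000000 -> 0xC000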
diff --git a/src/core/SkNx.h b/src/core/SkNx.h
index 6b63199..383f2aa 100644
@@ -307,11 +307,6 @@ SI SkNx<1,Dst> SkNx_cast(const SkNx<1,Src>& v) {
     return static_cast<Dst>(v.fVal);
 }
 
-template <int N, typename T>
-SI SkNx<N,T> SkNx_fma(const SkNx<N,T>& f, const SkNx<N,T>& m, const SkNx<N,T>& a) {
-    return f*m+a;
-}
-
 typedef SkNx<2,     float> Sk2f;
 typedef SkNx<4,     float> Sk4f;
 typedef SkNx<8,     float> Sk8f;
@@ -331,7 +326,6 @@ typedef SkNx<8,  uint16_t> Sk8h;
 typedef SkNx<16, uint16_t> Sk16h;
 
 typedef SkNx<4,  int32_t> Sk4i;
-typedef SkNx<8,  int32_t> Sk8i;
 typedef SkNx<4, uint32_t> Sk4u;
 
 // Include platform specific specializations if available.
diff --git a/src/core/SkOpts.cpp b/src/core/SkOpts.cpp
index 88261f6..7784e7f 100644
@@ -88,109 +88,105 @@ namespace SkOpts {
     DEFINE_DEFAULT(srcover_srgb_srgb);
 
     DEFINE_DEFAULT(hash_fn);
-
-    DEFINE_DEFAULT(run_pipeline);
 #undef DEFINE_DEFAULT
 
-    SkOpts::VoidFn body[] = {
-        (SkOpts::VoidFn)SK_OPTS_NS::just_return,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::store_565,
-        (SkOpts::VoidFn)SK_OPTS_NS::store_srgb,
-        (SkOpts::VoidFn)SK_OPTS_NS::store_f16,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::load_s_565,
-        (SkOpts::VoidFn)SK_OPTS_NS::load_s_srgb,
-        (SkOpts::VoidFn)SK_OPTS_NS::load_s_f16,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::load_d_565,
-        (SkOpts::VoidFn)SK_OPTS_NS::load_d_srgb,
-        (SkOpts::VoidFn)SK_OPTS_NS::load_d_f16,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::scale_u8,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::lerp_u8,
-        (SkOpts::VoidFn)SK_OPTS_NS::lerp_565,
-        (SkOpts::VoidFn)SK_OPTS_NS::lerp_constant_float,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::constant_color,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::dst,
-        (SkOpts::VoidFn)SK_OPTS_NS::dstatop,
-        (SkOpts::VoidFn)SK_OPTS_NS::dstin,
-        (SkOpts::VoidFn)SK_OPTS_NS::dstout,
-        (SkOpts::VoidFn)SK_OPTS_NS::dstover,
-        (SkOpts::VoidFn)SK_OPTS_NS::srcatop,
-        (SkOpts::VoidFn)SK_OPTS_NS::srcin,
-        (SkOpts::VoidFn)SK_OPTS_NS::srcout,
-        (SkOpts::VoidFn)SK_OPTS_NS::srcover,
-        (SkOpts::VoidFn)SK_OPTS_NS::clear,
-        (SkOpts::VoidFn)SK_OPTS_NS::modulate,
-        (SkOpts::VoidFn)SK_OPTS_NS::multiply,
-        (SkOpts::VoidFn)SK_OPTS_NS::plus_,
-        (SkOpts::VoidFn)SK_OPTS_NS::screen,
-        (SkOpts::VoidFn)SK_OPTS_NS::xor_,
-        (SkOpts::VoidFn)SK_OPTS_NS::colorburn,
-        (SkOpts::VoidFn)SK_OPTS_NS::colordodge,
-        (SkOpts::VoidFn)SK_OPTS_NS::darken,
-        (SkOpts::VoidFn)SK_OPTS_NS::difference,
-        (SkOpts::VoidFn)SK_OPTS_NS::exclusion,
-        (SkOpts::VoidFn)SK_OPTS_NS::hardlight,
-        (SkOpts::VoidFn)SK_OPTS_NS::lighten,
-        (SkOpts::VoidFn)SK_OPTS_NS::overlay,
-        (SkOpts::VoidFn)SK_OPTS_NS::softlight,
+    // TODO: might be nice to only create one instance of tail-insensitive stages.
+
+    SkRasterPipeline::Fn stages_4[] = {
+        stage_4<SK_OPTS_NS::store_565 , false>,
+        stage_4<SK_OPTS_NS::store_srgb, false>,
+        stage_4<SK_OPTS_NS::store_f16 , false>,
+
+        stage_4<SK_OPTS_NS::load_s_565 , true>,
+        stage_4<SK_OPTS_NS::load_s_srgb, true>,
+        stage_4<SK_OPTS_NS::load_s_f16 , true>,
+
+        stage_4<SK_OPTS_NS::load_d_565 , true>,
+        stage_4<SK_OPTS_NS::load_d_srgb, true>,
+        stage_4<SK_OPTS_NS::load_d_f16 , true>,
+
+        stage_4<SK_OPTS_NS::scale_u8, true>,
+
+        stage_4<SK_OPTS_NS::lerp_u8            , true>,
+        stage_4<SK_OPTS_NS::lerp_565           , true>,
+        stage_4<SK_OPTS_NS::lerp_constant_float, true>,
+
+        stage_4<SK_OPTS_NS::constant_color, true>,
+
+        SK_OPTS_NS::dst,
+        SK_OPTS_NS::dstatop,
+        SK_OPTS_NS::dstin,
+        SK_OPTS_NS::dstout,
+        SK_OPTS_NS::dstover,
+        SK_OPTS_NS::srcatop,
+        SK_OPTS_NS::srcin,
+        SK_OPTS_NS::srcout,
+        SK_OPTS_NS::srcover,
+        SK_OPTS_NS::clear,
+        SK_OPTS_NS::modulate,
+        SK_OPTS_NS::multiply,
+        SK_OPTS_NS::plus_,
+        SK_OPTS_NS::screen,
+        SK_OPTS_NS::xor_,
+        SK_OPTS_NS::colorburn,
+        SK_OPTS_NS::colordodge,
+        SK_OPTS_NS::darken,
+        SK_OPTS_NS::difference,
+        SK_OPTS_NS::exclusion,
+        SK_OPTS_NS::hardlight,
+        SK_OPTS_NS::lighten,
+        SK_OPTS_NS::overlay,
+        SK_OPTS_NS::softlight,
     };
-    static_assert(SK_ARRAY_COUNT(body) == SkRasterPipeline::kNumStockStages, "");
-
-    SkOpts::VoidFn tail[] = {
-        (SkOpts::VoidFn)SK_OPTS_NS::just_return,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::store_565_tail,
-        (SkOpts::VoidFn)SK_OPTS_NS::store_srgb_tail,
-        (SkOpts::VoidFn)SK_OPTS_NS::store_f16_tail,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::load_s_565_tail,
-        (SkOpts::VoidFn)SK_OPTS_NS::load_s_srgb_tail,
-        (SkOpts::VoidFn)SK_OPTS_NS::load_s_f16_tail,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::load_d_565_tail,
-        (SkOpts::VoidFn)SK_OPTS_NS::load_d_srgb_tail,
-        (SkOpts::VoidFn)SK_OPTS_NS::load_d_f16_tail,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::scale_u8_tail,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::lerp_u8_tail,
-        (SkOpts::VoidFn)SK_OPTS_NS::lerp_565_tail,
-        (SkOpts::VoidFn)SK_OPTS_NS::lerp_constant_float,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::constant_color,
-
-        (SkOpts::VoidFn)SK_OPTS_NS::dst,
-        (SkOpts::VoidFn)SK_OPTS_NS::dstatop,
-        (SkOpts::VoidFn)SK_OPTS_NS::dstin,
-        (SkOpts::VoidFn)SK_OPTS_NS::dstout,
-        (SkOpts::VoidFn)SK_OPTS_NS::dstover,
-        (SkOpts::VoidFn)SK_OPTS_NS::srcatop,
-        (SkOpts::VoidFn)SK_OPTS_NS::srcin,
-        (SkOpts::VoidFn)SK_OPTS_NS::srcout,
-        (SkOpts::VoidFn)SK_OPTS_NS::srcover,
-        (SkOpts::VoidFn)SK_OPTS_NS::clear,
-        (SkOpts::VoidFn)SK_OPTS_NS::modulate,
-        (SkOpts::VoidFn)SK_OPTS_NS::multiply,
-        (SkOpts::VoidFn)SK_OPTS_NS::plus_,
-        (SkOpts::VoidFn)SK_OPTS_NS::screen,
-        (SkOpts::VoidFn)SK_OPTS_NS::xor_,
-        (SkOpts::VoidFn)SK_OPTS_NS::colorburn,
-        (SkOpts::VoidFn)SK_OPTS_NS::colordodge,
-        (SkOpts::VoidFn)SK_OPTS_NS::darken,
-        (SkOpts::VoidFn)SK_OPTS_NS::difference,
-        (SkOpts::VoidFn)SK_OPTS_NS::exclusion,
-        (SkOpts::VoidFn)SK_OPTS_NS::hardlight,
-        (SkOpts::VoidFn)SK_OPTS_NS::lighten,
-        (SkOpts::VoidFn)SK_OPTS_NS::overlay,
-        (SkOpts::VoidFn)SK_OPTS_NS::softlight,
+    static_assert(SK_ARRAY_COUNT(stages_4) == SkRasterPipeline::kNumStockStages, "");
+
+    SkRasterPipeline::Fn stages_1_3[] = {
+        stage_1_3<SK_OPTS_NS::store_565 , false>,
+        stage_1_3<SK_OPTS_NS::store_srgb, false>,
+        stage_1_3<SK_OPTS_NS::store_f16 , false>,
+
+        stage_1_3<SK_OPTS_NS::load_s_565 , true>,
+        stage_1_3<SK_OPTS_NS::load_s_srgb, true>,
+        stage_1_3<SK_OPTS_NS::load_s_f16 , true>,
+
+        stage_1_3<SK_OPTS_NS::load_d_565 , true>,
+        stage_1_3<SK_OPTS_NS::load_d_srgb, true>,
+        stage_1_3<SK_OPTS_NS::load_d_f16 , true>,
+
+        stage_1_3<SK_OPTS_NS::scale_u8, true>,
+
+        stage_1_3<SK_OPTS_NS::lerp_u8            , true>,
+        stage_1_3<SK_OPTS_NS::lerp_565           , true>,
+        stage_1_3<SK_OPTS_NS::lerp_constant_float, true>,
+
+        stage_1_3<SK_OPTS_NS::constant_color, true>,
+
+        SK_OPTS_NS::dst,
+        SK_OPTS_NS::dstatop,
+        SK_OPTS_NS::dstin,
+        SK_OPTS_NS::dstout,
+        SK_OPTS_NS::dstover,
+        SK_OPTS_NS::srcatop,
+        SK_OPTS_NS::srcin,
+        SK_OPTS_NS::srcout,
+        SK_OPTS_NS::srcover,
+        SK_OPTS_NS::clear,
+        SK_OPTS_NS::modulate,
+        SK_OPTS_NS::multiply,
+        SK_OPTS_NS::plus_,
+        SK_OPTS_NS::screen,
+        SK_OPTS_NS::xor_,
+        SK_OPTS_NS::colorburn,
+        SK_OPTS_NS::colordodge,
+        SK_OPTS_NS::darken,
+        SK_OPTS_NS::difference,
+        SK_OPTS_NS::exclusion,
+        SK_OPTS_NS::hardlight,
+        SK_OPTS_NS::lighten,
+        SK_OPTS_NS::overlay,
+        SK_OPTS_NS::softlight,
     };
-    static_assert(SK_ARRAY_COUNT(tail) == SkRasterPipeline::kNumStockStages, "");
+    static_assert(SK_ARRAY_COUNT(stages_1_3) == SkRasterPipeline::kNumStockStages, "");
 
     // Each Init_foo() is defined in src/opts/SkOpts_foo.cpp.
     void Init_ssse3();
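
For reference, a hedged sketch of how a per-CPU Init_foo() swaps faster stages into the tables above; the real SSE4.1 version appears in the SkOpts_sse41.cpp hunk below:

    namespace SkOpts {
        void Init_sse41() {
            // Overwrite the portable entries with SSE4.1-compiled ones
            // (sse41::srcover stands in for SK_OPTS_NS::srcover below).
            stages_4  [SkRasterPipeline::srcover] = sse41::srcover;
            stages_1_3[SkRasterPipeline::srcover] = sse41::srcover;
        }
    }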
diff --git a/src/core/SkOpts.h b/src/core/SkOpts.h
index 4685d86..50de9c4 100644
@@ -73,13 +73,8 @@ namespace SkOpts {
         return hash_fn(data, bytes, seed);
     }
 
-    // SkRasterPipeline::Fn has different types in different files (notably, in SkOpts_hsw.cpp
-    // they're all in terms of Sk8f.)  We store them with a type everyone can agree on, void(*)().
-    using VoidFn = void(*)();
-    extern VoidFn body[SkRasterPipeline::kNumStockStages],
-                  tail[SkRasterPipeline::kNumStockStages];
-    extern void (*run_pipeline)(size_t, size_t, void(*)(), SkRasterPipeline::Stage*,
-                                                void(*)(), SkRasterPipeline::Stage*);
+    extern SkRasterPipeline::Fn stages_4  [SkRasterPipeline::kNumStockStages],
+                                stages_1_3[SkRasterPipeline::kNumStockStages];
 }
 
 #endif//SkOpts_DEFINED
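
The removed comment above describes a small type-erasure idiom: function pointers of mismatched types are stored under one agreed-upon type, void(*)(), and cast back before the call. A standalone sketch with hypothetical stand-ins, not Skia code:

    #include <cstdio>
    using VoidFn = void(*)();                     // one type everyone can agree on
    static void stage_f(float v) { std::printf("%g\n", v); }
    static VoidFn table[] = { (VoidFn)stage_f };  // stored type-erased
    int main() {
        auto fn = (void(*)(float))table[0];       // cast back to the true type...
        fn(0.5f);                                 // ...before calling: well-defined
    }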
diff --git a/src/core/SkRasterPipeline.cpp b/src/core/SkRasterPipeline.cpp
index bc7feac..72d5b7b 100644
@@ -8,12 +8,11 @@
 #include "SkOpts.h"
 #include "SkRasterPipeline.h"
 
-SkRasterPipeline::SkRasterPipeline() {
-    fBodyStart = SkOpts::body[just_return];
-    fTailStart = SkOpts::tail[just_return];
-}
+SkRasterPipeline::SkRasterPipeline() {}
 
-void SkRasterPipeline::append(void (*body)(), void (*tail)(), void* ctx) {
+void SkRasterPipeline::append(SkRasterPipeline::Fn body,
+                              SkRasterPipeline::Fn tail,
+                              void* ctx) {
     // Each stage holds its own context and the next function to call.
     // So the pipeline itself has to hold onto the first function that starts the pipeline.
     (fBody.empty() ? fBodyStart : fBody.back().fNext) = body;
@@ -21,19 +20,19 @@ void SkRasterPipeline::append(void (*body)(), void (*tail)(), void* ctx) {
 
     // Each last stage starts with its next function set to JustReturn as a safety net.
     // It'll be overwritten by the next call to append().
-    fBody.push_back({ SkOpts::body[just_return], ctx });
-    fTail.push_back({ SkOpts::tail[just_return], ctx });
+    fBody.push_back({ &JustReturn, ctx });
+    fTail.push_back({ &JustReturn, ctx });
 }
 
 void SkRasterPipeline::append(StockStage stage, void* ctx) {
-    this->append(SkOpts::body[stage], SkOpts::tail[stage], ctx);
+    this->append(SkOpts::stages_4[stage], SkOpts::stages_1_3[stage], ctx);
 }
 
 void SkRasterPipeline::extend(const SkRasterPipeline& src) {
     SkASSERT(src.fBody.count() == src.fTail.count());
 
-    auto body = src.fBodyStart,
-         tail = src.fTailStart;
+    Fn body = src.fBodyStart,
+       tail = src.fTailStart;
     for (int i = 0; i < src.fBody.count(); i++) {
         SkASSERT(src.fBody[i].fCtx == src.fTail[i].fCtx);
         this->append(body, tail, src.fBody[i].fCtx);
@@ -43,5 +42,18 @@ void SkRasterPipeline::extend(const SkRasterPipeline& src) {
 }
 
 void SkRasterPipeline::run(size_t x, size_t n) {
-    SkOpts::run_pipeline(x,n, fBodyStart,fBody.begin(), fTailStart,fTail.begin());
+    // It's fastest to start uninitialized if the compilers all let us.  If not, next fastest is 0.
+    Sk4f v;
+
+    while (n >= 4) {
+        fBodyStart(fBody.begin(), x,0, v,v,v,v, v,v,v,v);
+        x += 4;
+        n -= 4;
+    }
+    if (n > 0) {
+        fTailStart(fTail.begin(), x,n, v,v,v,v, v,v,v,v);
+    }
 }
+
+void SK_VECTORCALL SkRasterPipeline::JustReturn(Stage*, size_t, size_t, Sk4f,Sk4f,Sk4f,Sk4f,
+                                                                        Sk4f,Sk4f,Sk4f,Sk4f) {}
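
A worked trace of run()'s body/tail split above, for run(0, 10):

    // fBodyStart(fBody.begin(), x=0, tail=0, ...)   pixels 0-3
    // fBodyStart(fBody.begin(), x=4, tail=0, ...)   pixels 4-7
    // fTailStart(fTail.begin(), x=8, tail=2, ...)   pixels 8-9 (the 1-3 leftovers)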
diff --git a/src/core/SkRasterPipeline.h b/src/core/SkRasterPipeline.h
index 3ef8c50..996c783 100644
 class SkRasterPipeline {
 public:
     struct Stage;
-#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
-    using V = Sk8f;
-#else
-    using V = Sk4f;
-#endif
-    using Fn = void(SK_VECTORCALL *)(Stage*, size_t, size_t, V,V,V,V,
-                                                             V,V,V,V);
-
+    using Fn = void(SK_VECTORCALL *)(Stage*, size_t, size_t, Sk4f,Sk4f,Sk4f,Sk4f,
+                                                             Sk4f,Sk4f,Sk4f,Sk4f);
     struct Stage {
         template <typename T>
         T ctx() { return static_cast<T>(fCtx); }
 
-        void SK_VECTORCALL next(size_t x, size_t tail, V v0, V v1, V v2, V v3,
-                                                       V v4, V v5, V v6, V v7) {
+        void SK_VECTORCALL next(size_t x, size_t tail, Sk4f v0, Sk4f v1, Sk4f v2, Sk4f v3,
+                                                       Sk4f v4, Sk4f v5, Sk4f v6, Sk4f v7) {
             // Stages are logically a pipeline, and physically are contiguous in an array.
             // To get to the next stage, we just increment our pointer to the next array element.
-            ((Fn)fNext)(this+1, x,tail, v0,v1,v2,v3, v4,v5,v6,v7);
+            fNext(this+1, x,tail, v0,v1,v2,v3, v4,v5,v6,v7);
         }
 
         // It makes next() a good bit cheaper if we hold the next function to call here,
         // rather than logically simpler choice of the function implementing this stage.
-        void (*fNext)();
+        Fn fNext;
         void* fCtx;
     };
 
@@ -90,8 +84,6 @@ public:
     void run(size_t n) { this->run(0, n); }
 
     enum StockStage {
-        just_return,
-
         store_565,
         store_srgb,
         store_f16,
@@ -142,18 +134,24 @@ public:
     void append(StockStage, void* = nullptr);
     void append(StockStage stage, const void* ctx) { this->append(stage, const_cast<void*>(ctx)); }
 
+
     // Append all stages to this pipeline.
     void extend(const SkRasterPipeline&);
 
 private:
     using Stages = SkSTArray<10, Stage, /*MEM_COPY=*/true>;
 
-    void append(void (*body)(), void (*tail)(), void*);
+    void append(Fn body, Fn tail, void*);
 
+    // This no-op default makes fBodyStart and fTailStart unconditionally safe to call,
+    // and is always the last stage's fNext as a sort of safety net to make sure even a
+    // buggy pipeline can't walk off its own end.
+    static void SK_VECTORCALL JustReturn(Stage*, size_t, size_t, Sk4f,Sk4f,Sk4f,Sk4f,
+                                                                 Sk4f,Sk4f,Sk4f,Sk4f);
     Stages fBody,
            fTail;
-    void (*fBodyStart)() = nullptr;
-    void (*fTailStart)() = nullptr;
+    Fn fBodyStart = &JustReturn,
+       fTailStart = &JustReturn;
 };
 
 #endif//SkRasterPipeline_DEFINED
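
The pointer-bump dispatch in Stage::next() is the core trick: each array element stores the next stage's function, so advancing the pipeline is just this+1. A minimal standalone sketch with simplified, hypothetical types:

    #include <cstdio>
    struct Stage;
    using Fn = void (*)(Stage*, int);
    struct Stage {
        Fn    fNext;                              // the NEXT stage's function
        void* fCtx;
        void next(int x) { fNext(this + 1, x); }  // stages are contiguous in an array
    };
    static void print_stage(Stage* st, int x) { std::printf("x=%d\n", x); st->next(x); }
    static void just_return(Stage*, int) {}       // safety-net terminator
    int main() {
        Stage stages[] = { { print_stage, nullptr },    // after stage 0, run stage 1
                           { just_return, nullptr } };  // after stage 1, stop
        Fn start = print_stage;                   // the pipeline holds the first fn
        start(&stages[0], 7);                     // prints "x=7" twice
    }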
diff --git a/src/core/SkSRGB.h b/src/core/SkSRGB.h
index a12ce96..e60e288 100644
 
 extern const float sk_linear_from_srgb[256];
 
-template <typename V>
-static inline V sk_clamp_0_255(const V& x) {
+static inline Sk4f sk_clamp_0_255(const Sk4f& x) {
     // The order of the arguments is important here.  We want to make sure that NaN
     // clamps to zero.  Note that max(NaN, 0) = 0, while max(0, NaN) = NaN.
-    return V::Min(V::Max(x, 0.0f), 255.0f);
+    return Sk4f::Min(Sk4f::Max(x, 0.0f), 255.0f);
 }
 
 // This should probably only be called from sk_linear_to_srgb() or sk_linear_to_srgb_noclamp().
 // It generally doesn't make sense to work with sRGB floats.
-template <typename V>
-static inline V sk_linear_to_srgb_needs_trunc(const V& x) {
+static inline Sk4f sk_linear_to_srgb_needs_trunc(const Sk4f& x) {
     // Approximation of the sRGB gamma curve (within 1 when scaled to 8-bit pixels).
     //
     // Constants tuned by brute force to minimize (in order of importance) after truncation:
@@ -45,21 +43,19 @@ static inline V sk_linear_to_srgb_needs_trunc(const V& x) {
 
     auto lo = (13.0471f * 255.0f) * x;
 
-    auto hi = SkNx_fma(V{+0.412999f  * 255.0f}, ftrt,
-              SkNx_fma(V{+0.687999f  * 255.0f}, sqrt,
-                       V{-0.0974983f * 255.0f}));
+    auto hi = (-0.0974983f * 255.0f)
+            + (+0.687999f  * 255.0f) * sqrt
+            + (+0.412999f  * 255.0f) * ftrt;
     return (x < 0.0048f).thenElse(lo, hi);
 }
 
-template <int N>
-static inline SkNx<N,int> sk_linear_to_srgb(const SkNx<N,float>& x) {
-    auto f = sk_linear_to_srgb_needs_trunc(x);
+static inline Sk4i sk_linear_to_srgb(const Sk4f& x) {
+    Sk4f f = sk_linear_to_srgb_needs_trunc(x);
     return SkNx_cast<int>(sk_clamp_0_255(f));
 }
 
-template <int N>
-static inline SkNx<N,int> sk_linear_to_srgb_noclamp(const SkNx<N,float>& x) {
-    auto f = sk_linear_to_srgb_needs_trunc(x);
+static inline Sk4i sk_linear_to_srgb_noclamp(const Sk4f& x) {
+    Sk4f f = sk_linear_to_srgb_needs_trunc(x);
     for (int i = 0; i < 4; i++) {
         SkASSERTF(0.0f <= f[i] && f[i] < 256.0f, "f[%d] was %g, outside [0,256)\n", i, f[i]);
     }
@@ -67,18 +63,17 @@ static inline SkNx<N,int> sk_linear_to_srgb_noclamp(const SkNx<N,float>& x) {
 }
 
 // sRGB -> linear, using math instead of table lookups, scaling better to larger SIMD vectors.
-template <int N>
-static inline SkNx<N,float> sk_linear_from_srgb_math(const SkNx<N,int>& s) {
+static inline Sk4f sk_linear_from_srgb_math(const Sk4i& s) {
     auto x = SkNx_cast<float>(s);
 
     const float u = 1/255.0f;  // x is [0,255], so x^n needs scaling by u^n.
 
     // Non-linear segment of sRGB curve approximated by
     // l = 0.0025 + 0.6975x^2 + 0.3x^3
-    const SkNx<N,float> k0 = 0.0025f,
-                        k2 = 0.6975f * u*u,
-                        k3 = 0.3000f * u*u*u;
-    auto hi = SkNx_fma(x*x, SkNx_fma(x, k3, k2), k0);
+    const float k0 = 0.0025f,
+                k2 = 0.6975f * u*u,
+                k3 = 0.3000f * u*u*u;
+    auto hi = k0 + (k2 + k3*x) * (x*x);
 
     // Linear segment of sRGB curve: the normal slope, extended a little further than normal.
     auto lo = x * (u/12.92f);
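
The argument-order note in sk_clamp_0_255 above is worth a concrete check. SIMD-style max returns the second operand whenever the comparison is false, which it always is against NaN:

    static float simd_max(float a, float b) { return a > b ? a : b; }
    // simd_max(NaN, 0.0f) == 0.0f  -> Max(x, 0) maps NaN to 0, then Min caps at 255
    // simd_max(0.0f, NaN) == NaN   -> the other argument order lets NaN leak through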
diff --git a/src/opts/SkNx_sse.h b/src/opts/SkNx_sse.h
index 4546280..66b5f0e 100644
@@ -302,47 +302,12 @@ public:
 
     SkNx() {}
     SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
+    static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
     SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
          uint16_t e, uint16_t f, uint16_t g, uint16_t h) : fVec(_mm_setr_epi16(a,b,c,d,e,f,g,h)) {}
 
-    static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
     void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }
 
-    static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
-        // TODO: AVX2 version
-        __m128i _01 = _mm_loadu_si128(((__m128i*)ptr) + 0),
-                _23 = _mm_loadu_si128(((__m128i*)ptr) + 1),
-                _45 = _mm_loadu_si128(((__m128i*)ptr) + 2),
-                _67 = _mm_loadu_si128(((__m128i*)ptr) + 3);
-
-        __m128i _02 = _mm_unpacklo_epi16(_01, _23),  // r0 r2 g0 g2 b0 b2 a0 a2
-                _13 = _mm_unpackhi_epi16(_01, _23),  // r1 r3 g1 g3 b1 b3 a1 a3
-                _46 = _mm_unpacklo_epi16(_45, _67),
-                _57 = _mm_unpackhi_epi16(_45, _67);
-
-        __m128i rg0123 = _mm_unpacklo_epi16(_02, _13),  // r0 r1 r2 r3 g0 g1 g2 g3
-                ba0123 = _mm_unpackhi_epi16(_02, _13),  // b0 b1 b2 b3 a0 a1 a2 a3
-                rg4567 = _mm_unpacklo_epi16(_46, _57),
-                ba4567 = _mm_unpackhi_epi16(_46, _57);
-
-        *r = _mm_unpacklo_epi64(rg0123, rg4567);
-        *g = _mm_unpackhi_epi64(rg0123, rg4567);
-        *b = _mm_unpacklo_epi64(ba0123, ba4567);
-        *a = _mm_unpackhi_epi64(ba0123, ba4567);
-    }
-    static void Store4(void* ptr, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
-        // TODO: AVX2 version
-        __m128i rg0123 = _mm_unpacklo_epi16(r.fVec, g.fVec),  // r0 g0 r1 g1 r2 g2 r3 g3
-                rg4567 = _mm_unpackhi_epi16(r.fVec, g.fVec),  // r4 g4 r5 g5 r6 g6 r7 g7
-                ba0123 = _mm_unpacklo_epi16(b.fVec, a.fVec),
-                ba4567 = _mm_unpackhi_epi16(b.fVec, a.fVec);
-
-        _mm_storeu_si128((__m128i*)ptr + 0, _mm_unpacklo_epi32(rg0123, ba0123));
-        _mm_storeu_si128((__m128i*)ptr + 1, _mm_unpackhi_epi32(rg0123, ba0123));
-        _mm_storeu_si128((__m128i*)ptr + 2, _mm_unpacklo_epi32(rg4567, ba4567));
-        _mm_storeu_si128((__m128i*)ptr + 3, _mm_unpackhi_epi32(rg4567, ba4567));
-    }
-
     SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
     SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
     SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }
@@ -438,192 +403,6 @@ public:
     __m128i fVec;
 };
 
-#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
-
-// There are two different SkNx<8, uint8_t>, SkNx<8, int32_t>, SkNx<8, uint32_t>, SkNx<8, float>:
-//   - the default paired SkNx<4, ...> versions used without AVX2
-//   - the native AVX2 versions.
-// It is important that we don't call methods for one from the other.
-// Usually these methods inline, but they don't always in Debug builds.
-// For now, try to fix this by marking all the AVX2 versions as always-inline.
-// We may want or need to extend this strategy to all SkNx methods.
-#define I SK_ALWAYS_INLINE
-
-    template <>
-    class SkNx<8, uint8_t> {
-    public:
-        I SkNx(const __m128i& vec) : fVec(vec) {}
-
-        I SkNx() {}
-        I SkNx(uint8_t v) : fVec(_mm_set1_epi8(v)) {}
-        I SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
-               uint8_t e, uint8_t f, uint8_t g, uint8_t h)
-            : fVec(_mm_setr_epi8(a,b,c,d, e,f,g,h, 0,0,0,0, 0,0,0,0)) {}
-
-
-        I static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
-        I void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); }
-
-        I uint8_t operator[](int k) const {
-            SkASSERT(0 <= k && k < 8);
-            union { __m128i v; uint8_t us[16]; } pun = {fVec};
-            return pun.us[k&7];
-        }
-
-        __m128i fVec;
-    };
-
-    template <>
-    class SkNx<8, int32_t> {
-    public:
-        I SkNx(const __m256i& vec) : fVec(vec) {}
-
-        I SkNx() {}
-        I SkNx(int32_t v) : fVec(_mm256_set1_epi32(v)) {}
-        I SkNx(int32_t a, int32_t b, int32_t c, int32_t d,
-               int32_t e, int32_t f, int32_t g, int32_t h)
-            : fVec(_mm256_setr_epi32(a,b,c,d, e,f,g,h)) {}
-
-        I static SkNx Load(const void* ptr) { return _mm256_loadu_si256((const __m256i*)ptr); }
-        I void store(void* ptr) const { _mm256_storeu_si256((__m256i*)ptr, fVec); }
-
-        I SkNx operator + (const SkNx& o) const { return _mm256_add_epi32(fVec, o.fVec); }
-        I SkNx operator - (const SkNx& o) const { return _mm256_sub_epi32(fVec, o.fVec); }
-        I SkNx operator * (const SkNx& o) const { return _mm256_mullo_epi32(fVec, o.fVec); }
-
-        I SkNx operator & (const SkNx& o) const { return _mm256_and_si256(fVec, o.fVec); }
-        I SkNx operator | (const SkNx& o) const { return _mm256_or_si256(fVec, o.fVec); }
-        I SkNx operator ^ (const SkNx& o) const { return _mm256_xor_si256(fVec, o.fVec); }
-
-        I SkNx operator << (int bits) const { return _mm256_slli_epi32(fVec, bits); }
-        I SkNx operator >> (int bits) const { return _mm256_srai_epi32(fVec, bits); }
-
-        I int32_t operator[](int k) const {
-            SkASSERT(0 <= k && k < 8);
-            union { __m256i v; int32_t is[8]; } pun = {fVec};
-            return pun.is[k&7];
-        }
-
-        __m256i fVec;
-    };
-
-    template <>
-    class SkNx<8, uint32_t> {
-    public:
-        I SkNx(const __m256i& vec) : fVec(vec) {}
-
-        I SkNx() {}
-        I SkNx(uint32_t v) : fVec(_mm256_set1_epi32(v)) {}
-        I SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d,
-               uint32_t e, uint32_t f, uint32_t g, uint32_t h)
-            : fVec(_mm256_setr_epi32(a,b,c,d, e,f,g,h)) {}
-
-        I static SkNx Load(const void* ptr) { return _mm256_loadu_si256((const __m256i*)ptr); }
-        I void store(void* ptr) const { _mm256_storeu_si256((__m256i*)ptr, fVec); }
-
-        I SkNx operator + (const SkNx& o) const { return _mm256_add_epi32(fVec, o.fVec); }
-        I SkNx operator - (const SkNx& o) const { return _mm256_sub_epi32(fVec, o.fVec); }
-        I SkNx operator * (const SkNx& o) const { return _mm256_mullo_epi32(fVec, o.fVec); }
-
-        I SkNx operator & (const SkNx& o) const { return _mm256_and_si256(fVec, o.fVec); }
-        I SkNx operator | (const SkNx& o) const { return _mm256_or_si256(fVec, o.fVec); }
-        I SkNx operator ^ (const SkNx& o) const { return _mm256_xor_si256(fVec, o.fVec); }
-
-        I SkNx operator << (int bits) const { return _mm256_slli_epi32(fVec, bits); }
-        I SkNx operator >> (int bits) const { return _mm256_srli_epi32(fVec, bits); }
-
-        I uint32_t operator[](int k) const {
-            SkASSERT(0 <= k && k < 8);
-            union { __m256i v; uint32_t us[8]; } pun = {fVec};
-            return pun.us[k&7];
-        }
-
-        __m256i fVec;
-    };
-
-    template <>
-    class SkNx<8, float> {
-    public:
-        I SkNx(const __m256& vec) : fVec(vec) {}
-
-        I SkNx() {}
-        I SkNx(float val) : fVec(_mm256_set1_ps(val)) {}
-        I SkNx(float a, float b, float c, float d,
-               float e, float f, float g, float h) : fVec(_mm256_setr_ps(a,b,c,d,e,f,g,h)) {}
-
-        I static SkNx Load(const void* ptr) { return _mm256_loadu_ps((const float*)ptr); }
-        I void store(void* ptr) const { _mm256_storeu_ps((float*)ptr, fVec); }
-
-        I SkNx operator+(const SkNx& o) const { return _mm256_add_ps(fVec, o.fVec); }
-        I SkNx operator-(const SkNx& o) const { return _mm256_sub_ps(fVec, o.fVec); }
-        I SkNx operator*(const SkNx& o) const { return _mm256_mul_ps(fVec, o.fVec); }
-        I SkNx operator/(const SkNx& o) const { return _mm256_div_ps(fVec, o.fVec); }
-
-        I SkNx operator==(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_EQ_OQ); }
-        I SkNx operator!=(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_NEQ_OQ); }
-        I SkNx operator <(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_LT_OQ); }
-        I SkNx operator >(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_GT_OQ); }
-        I SkNx operator<=(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_LE_OQ); }
-        I SkNx operator>=(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_GE_OQ); }
-
-        I static SkNx Min(const SkNx& l, const SkNx& r) { return _mm256_min_ps(l.fVec, r.fVec); }
-        I static SkNx Max(const SkNx& l, const SkNx& r) { return _mm256_max_ps(l.fVec, r.fVec); }
-
-        I SkNx   sqrt() const { return _mm256_sqrt_ps (fVec); }
-        I SkNx  rsqrt() const { return _mm256_rsqrt_ps(fVec); }
-        I SkNx invert() const { return _mm256_rcp_ps  (fVec); }
-
-        I float operator[](int k) const {
-            SkASSERT(0 <= k && k < 8);
-            union { __m256 v; float fs[8]; } pun = {fVec};
-            return pun.fs[k&7];
-        }
-
-        I SkNx thenElse(const SkNx& t, const SkNx& e) const {
-            return _mm256_blendv_ps(e.fVec, t.fVec, fVec);
-        }
-
-        __m256 fVec;
-    };
-
-    static I void SkNx_split(const Sk8f& v, Sk4f* lo, Sk4f* hi) {
-        *lo = _mm256_extractf128_ps(v.fVec, 0);
-        *hi = _mm256_extractf128_ps(v.fVec, 1);
-    }
-
-    static I Sk8f SkNx_join(const Sk4f& lo, const Sk4f& hi) {
-        return _mm256_insertf128_ps(_mm256_castps128_ps256(lo.fVec), hi.fVec, 1);
-    }
-
-    static I Sk8f SkNx_fma(const Sk8f& a, const Sk8f& b, const Sk8f& c) {
-        return _mm256_fmadd_ps(a.fVec, b.fVec, c.fVec);
-    }
-
-    template<> /*static*/ I Sk8f SkNx_cast<float>(const Sk8b& src) {
-        return _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32(src.fVec));
-    }
-
-    template<> /*static*/ I Sk8f SkNx_cast<float>(const Sk8i& src) {
-        return _mm256_cvtepi32_ps(src.fVec);
-    }
-
-    template<> /*static*/ I Sk8i SkNx_cast<int>(const Sk8f& src) {
-        return _mm256_cvttps_epi32(src.fVec);
-    }
-
-    template<> /*static*/ I Sk8i SkNx_cast<int>(const Sk8h& src) {
-        return _mm256_cvtepu16_epi32(src.fVec);
-    }
-    template<> /*static*/ I Sk8h SkNx_cast<uint16_t>(const Sk8i& src) {
-        __m128i lo = _mm256_extractf128_si256(src.fVec, 0),
-                hi = _mm256_extractf128_si256(src.fVec, 1);
-        return _mm_packus_epi32(lo, hi);
-    }
-
-#undef I
-
-#endif
-
 template<> /*static*/ inline Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) {
     return _mm_cvtepi32_ps(src.fVec);
 }
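
The removed SkNx_split/SkNx_join helpers bridged the two vector widths; a hedged usage sketch, assuming an AVX2 build where these overloads existed:

    Sk8f v8(1,2,3,4,5,6,7,8);       // the AVX2-native 8-wide float vector above
    Sk4f lo, hi;
    SkNx_split(v8, &lo, &hi);       // Sk8f -> two Sk4f (extractf128 halves)
    Sk8f back = SkNx_join(lo, hi);  // two Sk4f -> one Sk8f (insertf128)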
diff --git a/src/opts/SkOpts_hsw.cpp b/src/opts/SkOpts_hsw.cpp
index 9f72480..53e2e5a 100644
@@ -7,70 +7,9 @@
 
 #include "SkOpts.h"
 
-
 #define SK_OPTS_NS hsw
-#include "SkRasterPipeline_opts.h"
 
 namespace SkOpts {
-    void Init_hsw() {
-
-        run_pipeline = SK_OPTS_NS::run_pipeline;
-
-    #define STAGE(stage)                                                       \
-        body[SkRasterPipeline::stage] = (SkOpts::VoidFn)SK_OPTS_NS::stage;      \
-        tail[SkRasterPipeline::stage] = (SkOpts::VoidFn)SK_OPTS_NS::stage##_tail
-
-        STAGE(store_565);
-        STAGE(store_srgb);
-        STAGE(store_f16);
-
-        STAGE(load_s_565);
-        STAGE(load_s_srgb);
-        STAGE(load_s_f16);
-
-        STAGE(load_d_565);
-        STAGE(load_d_srgb);
-        STAGE(load_d_f16);
-
-        STAGE(scale_u8);
-
-        STAGE(lerp_u8);
-        STAGE(lerp_565);
-    #undef STAGE
-
-    #define STAGE(stage) \
-        body[SkRasterPipeline::stage] = (SkOpts::VoidFn)SK_OPTS_NS::stage; \
-        tail[SkRasterPipeline::stage] = (SkOpts::VoidFn)SK_OPTS_NS::stage
-
-        STAGE(lerp_constant_float);
-        STAGE(constant_color);
-
-        STAGE(dst);
-        STAGE(dstatop);
-        STAGE(dstin);
-        STAGE(dstout);
-        STAGE(dstover);
-        STAGE(srcatop);
-        STAGE(srcin);
-        STAGE(srcout);
-        STAGE(srcover);
-        STAGE(clear);
-        STAGE(modulate);
-        STAGE(multiply);
-        STAGE(plus_);
-        STAGE(screen);
-        STAGE(xor_);
-        STAGE(colorburn);
-        STAGE(colordodge);
-        STAGE(darken);
-        STAGE(difference);
-        STAGE(exclusion);
-        STAGE(hardlight);
-        STAGE(lighten);
-        STAGE(overlay);
-        STAGE(softlight);
-    #undef STAGE
-
-    }
+    void Init_hsw() { }
 }
 
diff --git a/src/opts/SkOpts_sse41.cpp b/src/opts/SkOpts_sse41.cpp
index d55978c..3a37834 100644
@@ -21,60 +21,58 @@ namespace SkOpts {
         srcover_srgb_srgb    = sse41::srcover_srgb_srgb;
         blit_row_s32a_opaque = sse41::blit_row_s32a_opaque;
 
-    #define STAGE(stage)                                                       \
-        body[SkRasterPipeline::stage] = (SkOpts::VoidFn)SK_OPTS_NS::stage;      \
-        tail[SkRasterPipeline::stage] = (SkOpts::VoidFn)SK_OPTS_NS::stage##_tail
+    #define STAGE(stage, kCallNext) \
+        stages_4  [SkRasterPipeline::stage] = stage_4  <SK_OPTS_NS::stage, kCallNext>; \
+        stages_1_3[SkRasterPipeline::stage] = stage_1_3<SK_OPTS_NS::stage, kCallNext>
 
-        STAGE(store_565);
-        STAGE(store_srgb);
-        STAGE(store_f16);
+        STAGE(store_565 , false);
+        STAGE(store_srgb, false);
+        STAGE(store_f16 , false);
 
-        STAGE(load_s_565);
-        STAGE(load_s_srgb);
-        STAGE(load_s_f16);
+        STAGE(load_s_565 , true);
+        STAGE(load_s_srgb, true);
+        STAGE(load_s_f16 , true);
 
-        STAGE(load_d_565);
-        STAGE(load_d_srgb);
-        STAGE(load_d_f16);
+        STAGE(load_d_565 , true);
+        STAGE(load_d_srgb, true);
+        STAGE(load_d_f16 , true);
 
-        STAGE(scale_u8);
+        STAGE(scale_u8, true);
 
-        STAGE(lerp_u8);
-        STAGE(lerp_565);
-    #undef STAGE
+        STAGE(lerp_u8            , true);
+        STAGE(lerp_565           , true);
+        STAGE(lerp_constant_float, true);
 
-    #define STAGE(stage)                                                  \
-        body[SkRasterPipeline::stage] = (SkOpts::VoidFn)SK_OPTS_NS::stage; \
-        tail[SkRasterPipeline::stage] = (SkOpts::VoidFn)SK_OPTS_NS::stage
+        STAGE(constant_color, true);
 
-      // The commented-out stages don't actually benefit from SSE 4.1.
-      // To cut down on code bloat we skip them here, using the identical SSE2 defaults.
+    #undef STAGE
 
-      //STAGE(lerp_constant_float);
-      //STAGE(constant_color);
+    #define STAGE(stage) \
+        stages_4  [SkRasterPipeline::stage] = SK_OPTS_NS::stage; \
+        stages_1_3[SkRasterPipeline::stage] = SK_OPTS_NS::stage
 
-      //STAGE(dst);
-      //STAGE(dstatop);
-      //STAGE(dstin);
-      //STAGE(dstout);
-      //STAGE(dstover);
-      //STAGE(srcatop);
-      //STAGE(srcin);
-      //STAGE(srcout);
-      //STAGE(srcover);
-      //STAGE(clear);
-      //STAGE(modulate);
-      //STAGE(multiply);
-      //STAGE(plus_);
-      //STAGE(screen);
-      //STAGE(xor_);
+        STAGE(dst);
+        STAGE(dstatop);
+        STAGE(dstin);
+        STAGE(dstout);
+        STAGE(dstover);
+        STAGE(srcatop);
+        STAGE(srcin);
+        STAGE(srcout);
+        STAGE(srcover);
+        STAGE(clear);
+        STAGE(modulate);
+        STAGE(multiply);
+        STAGE(plus_);
+        STAGE(screen);
+        STAGE(xor_);
         STAGE(colorburn);
         STAGE(colordodge);
-      //STAGE(darken);
-      //STAGE(difference);
-      //STAGE(exclusion);
+        STAGE(darken);
+        STAGE(difference);
+        STAGE(exclusion);
         STAGE(hardlight);
-      //STAGE(lighten);
+        STAGE(lighten);
         STAGE(overlay);
         STAGE(softlight);
     #undef STAGE
diff --git a/src/opts/SkRasterPipeline_opts.h b/src/opts/SkRasterPipeline_opts.h
index 1d8b044..b0e6e1d 100644
 #include "SkRasterPipeline.h"
 #include "SkSRGB.h"
 
-using SkNf = SkRasterPipeline::V;
-static constexpr auto N = sizeof(SkNf) / sizeof(float);
-using SkNi = SkNx<N, int>;
-using SkNh = SkNx<N, uint16_t>;
-
-#define SI static inline
-
-#define STAGE(name, kCallNext)                                                             \
-    static SK_ALWAYS_INLINE void name##_kernel(void* ctx, size_t x, size_t tail,           \
-                                               SkNf&  r, SkNf&  g, SkNf&  b, SkNf&  a,     \
-                                               SkNf& dr, SkNf& dg, SkNf& db, SkNf& da);    \
-    SI void SK_VECTORCALL name(SkRasterPipeline::Stage* st, size_t x, size_t tail,         \
-                               SkNf  r, SkNf  g, SkNf  b, SkNf  a,                         \
-                               SkNf dr, SkNf dg, SkNf db, SkNf da) {                       \
-        name##_kernel(st->ctx<void*>(), x,0, r,g,b,a, dr,dg,db,da);                        \
-        if (kCallNext) {                                                                   \
-            st->next(x,tail, r,g,b,a, dr,dg,db,da);                                        \
-        }                                                                                  \
-    }                                                                                      \
-    SI void SK_VECTORCALL name##_tail(SkRasterPipeline::Stage* st, size_t x, size_t tail,  \
-                                      SkNf  r, SkNf  g, SkNf  b, SkNf  a,                  \
-                                      SkNf dr, SkNf dg, SkNf db, SkNf da) {                \
-        name##_kernel(st->ctx<void*>(), x,tail, r,g,b,a, dr,dg,db,da);                     \
-        if (kCallNext) {                                                                   \
-            st->next(x,tail, r,g,b,a, dr,dg,db,da);                                        \
-        }                                                                                  \
-    }                                                                                      \
-    static SK_ALWAYS_INLINE void name##_kernel(void* ctx, size_t x, size_t tail,           \
-                                               SkNf&  r, SkNf&  g, SkNf&  b, SkNf&  a,     \
-                                               SkNf& dr, SkNf& dg, SkNf& db, SkNf& da)
+using Kernel_Sk4f = void(void*, size_t, size_t, Sk4f&, Sk4f&, Sk4f&, Sk4f&,
+                                                Sk4f&, Sk4f&, Sk4f&, Sk4f&);
+
+// These are always static, and we _really_ want them to inline.
+// If you find yourself wanting a non-inline stage, write a SkRasterPipeline::Fn directly.
+#define KERNEL_Sk4f(name)                                                      \
+    static SK_ALWAYS_INLINE void name(void* ctx, size_t x, size_t tail,        \
+                                      Sk4f&  r, Sk4f&  g, Sk4f&  b, Sk4f&  a,  \
+                                      Sk4f& dr, Sk4f& dg, Sk4f& db, Sk4f& da)
+
+
+template <Kernel_Sk4f kernel, bool kCallNext>
+static inline void SK_VECTORCALL stage_4(SkRasterPipeline::Stage* st, size_t x, size_t tail,
+                                         Sk4f  r, Sk4f  g, Sk4f  b, Sk4f  a,
+                                         Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
+    // Passing 0 lets the optimizer completely drop any "if (tail) {...}" code in kernel.
+    kernel(st->ctx<void*>(), x,0, r,g,b,a, dr,dg,db,da);
+    if (kCallNext) {
+        st->next(x,tail, r,g,b,a, dr,dg,db,da);  // It's faster to pass tail here than 0.
+    }
+}
 
+template <Kernel_Sk4f kernel, bool kCallNext>
+static inline void SK_VECTORCALL stage_1_3(SkRasterPipeline::Stage* st, size_t x, size_t tail,
+                                           Sk4f  r, Sk4f  g, Sk4f  b, Sk4f  a,
+                                           Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {
+#if defined(__clang__)
+    __builtin_assume(tail > 0);  // This flourish lets Clang compile away any tail==0 code.
+#endif
+    kernel(st->ctx<void*>(), x,tail, r,g,b,a, dr,dg,db,da);
+    if (kCallNext) {
+        st->next(x,tail, r,g,b,a, dr,dg,db,da);
+    }
+}
 
 // Many xfermodes apply the same logic to each channel.
-#define RGBA_XFERMODE(name)                                                                \
-    static SK_ALWAYS_INLINE SkNf name##_kernel(const SkNf& s, const SkNf& sa,              \
-                                               const SkNf& d, const SkNf& da);             \
-    SI void SK_VECTORCALL name(SkRasterPipeline::Stage* st, size_t x, size_t tail,         \
-                               SkNf  r, SkNf  g, SkNf  b, SkNf  a,                         \
-                               SkNf dr, SkNf dg, SkNf db, SkNf da) {                       \
-        r = name##_kernel(r,a,dr,da);                                                      \
-        g = name##_kernel(g,a,dg,da);                                                      \
-        b = name##_kernel(b,a,db,da);                                                      \
-        a = name##_kernel(a,a,da,da);                                                      \
-        st->next(x,tail, r,g,b,a, dr,dg,db,da);                                            \
-    }                                                                                      \
-    static SK_ALWAYS_INLINE SkNf name##_kernel(const SkNf& s, const SkNf& sa,              \
-                                               const SkNf& d, const SkNf& da)
+#define RGBA_XFERMODE_Sk4f(name)                                                       \
+    static SK_ALWAYS_INLINE Sk4f name##_kernel(const Sk4f& s, const Sk4f& sa,          \
+                                               const Sk4f& d, const Sk4f& da);         \
+    static void SK_VECTORCALL name(SkRasterPipeline::Stage* st, size_t x, size_t tail, \
+                                   Sk4f  r, Sk4f  g, Sk4f  b, Sk4f  a,                 \
+                                   Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {               \
+        r = name##_kernel(r,a,dr,da);                                                  \
+        g = name##_kernel(g,a,dg,da);                                                  \
+        b = name##_kernel(b,a,db,da);                                                  \
+        a = name##_kernel(a,a,da,da);                                                  \
+        st->next(x,tail, r,g,b,a, dr,dg,db,da);                                        \
+    }                                                                                  \
+    static SK_ALWAYS_INLINE Sk4f name##_kernel(const Sk4f& s, const Sk4f& sa,          \
+                                               const Sk4f& d, const Sk4f& da)
 
 // Most of the rest apply the same logic to color channels and use srcover's alpha logic.
-#define RGB_XFERMODE(name)                                                                 \
-    static SK_ALWAYS_INLINE SkNf name##_kernel(const SkNf& s, const SkNf& sa,              \
-                                               const SkNf& d, const SkNf& da);             \
-    SI void SK_VECTORCALL name(SkRasterPipeline::Stage* st, size_t x, size_t tail,         \
-                               SkNf  r, SkNf  g, SkNf  b, SkNf  a,                         \
-                               SkNf dr, SkNf dg, SkNf db, SkNf da) {                       \
-        r = name##_kernel(r,a,dr,da);                                                      \
-        g = name##_kernel(g,a,dg,da);                                                      \
-        b = name##_kernel(b,a,db,da);                                                      \
-        a = a + (da * (1.0f-a));                                                           \
-        st->next(x,tail, r,g,b,a, dr,dg,db,da);                                            \
-    }                                                                                      \
-    static SK_ALWAYS_INLINE SkNf name##_kernel(const SkNf& s, const SkNf& sa,              \
-                                               const SkNf& d, const SkNf& da)
-
+#define RGB_XFERMODE_Sk4f(name)                                                        \
+    static SK_ALWAYS_INLINE Sk4f name##_kernel(const Sk4f& s, const Sk4f& sa,          \
+                                               const Sk4f& d, const Sk4f& da);         \
+    static void SK_VECTORCALL name(SkRasterPipeline::Stage* st, size_t x, size_t tail, \
+                                   Sk4f  r, Sk4f  g, Sk4f  b, Sk4f  a,                 \
+                                   Sk4f dr, Sk4f dg, Sk4f db, Sk4f da) {               \
+        r = name##_kernel(r,a,dr,da);                                                  \
+        g = name##_kernel(g,a,dg,da);                                                  \
+        b = name##_kernel(b,a,db,da);                                                  \
+        a = a + (da * (1.0f-a));                                                       \
+        st->next(x,tail, r,g,b,a, dr,dg,db,da);                                        \
+    }                                                                                  \
+    static SK_ALWAYS_INLINE Sk4f name##_kernel(const Sk4f& s, const Sk4f& sa,          \
+                                               const Sk4f& d, const Sk4f& da)
 
 namespace SK_OPTS_NS {
 
-    SI void run_pipeline(size_t x, size_t n,
-                         void (*vBodyStart)(), SkRasterPipeline::Stage* body,
-                         void (*vTailStart)(), SkRasterPipeline::Stage* tail) {
-        auto bodyStart = (SkRasterPipeline::Fn)vBodyStart,
-             tailStart = (SkRasterPipeline::Fn)vTailStart;
-        SkNf v;  // Fastest to start uninitialized.
-        while (n >= N) {
-            bodyStart(body, x,0, v,v,v,v, v,v,v,v);
-            x += N;
-            n -= N;
-        }
-        if (n > 0) {
-            tailStart(tail, x,n, v,v,v,v, v,v,v,v);
-        }
-    }
-
     // Clamp colors into [0,1] premul (e.g. just before storing back to memory).
-    SI void clamp_01_premul(SkNf& r, SkNf& g, SkNf& b, SkNf& a) {
-        a = SkNf::Max(a, 0.0f);
-        r = SkNf::Max(r, 0.0f);
-        g = SkNf::Max(g, 0.0f);
-        b = SkNf::Max(b, 0.0f);
-
-        a = SkNf::Min(a, 1.0f);
-        r = SkNf::Min(r, a);
-        g = SkNf::Min(g, a);
-        b = SkNf::Min(b, a);
+    static void clamp_01_premul(Sk4f& r, Sk4f& g, Sk4f& b, Sk4f& a) {
+        a = Sk4f::Max(a, 0.0f);
+        r = Sk4f::Max(r, 0.0f);
+        g = Sk4f::Max(g, 0.0f);
+        b = Sk4f::Max(b, 0.0f);
+
+        a = Sk4f::Min(a, 1.0f);
+        r = Sk4f::Min(r, a);
+        g = Sk4f::Min(g, a);
+        b = Sk4f::Min(b, a);
     }
 
-    SI SkNf inv(const SkNf& x) { return 1.0f - x; }
+    static Sk4f inv(const Sk4f& x) { return 1.0f - x; }
 
-    SI SkNf lerp(const SkNf& from, const SkNf& to, const SkNf& cov) {
-        return SkNx_fma(to-from, cov, from);
+    static Sk4f lerp(const Sk4f& from, const Sk4f& to, const Sk4f& cov) {
+        return from + (to-from)*cov;
     }
 
     template <typename T>
-    SI SkNx<N,T> load_tail(size_t tail, const T* src) {
-        // TODO: better tail, maskload for 32- and 64-bit T
-        T buf[N] = {0};
+    static SkNx<4,T> load_tail(size_t tail, const T* src) {
         if (tail) {
-            memcpy(buf, src, tail*sizeof(T));
-            src = buf;
+           return SkNx<4,T>(src[0], (tail>1 ? src[1] : 0), (tail>2 ? src[2] : 0), 0);
         }
-        return SkNx<N,T>::Load(src);
+        return SkNx<4,T>::Load(src);
     }
 
     template <typename T>
-    SI void store_tail(size_t tail, const SkNx<N,T>& v, T* dst) {
-        // TODO: better tail, maskstore for 32- and 64-bit T
-        T buf[N] = {0};
-        v.store(tail ? buf : dst);
-        if (tail) {
-            memcpy(dst, buf, tail*sizeof(T));
+    static void store_tail(size_t tail, const SkNx<4,T>& v, T* dst) {
+        switch (tail) {
+            case 0: return v.store(dst);
+            case 3: dst[2] = v[2];  // fall through
+            case 2: dst[1] = v[1];  // fall through
+            case 1: dst[0] = v[0];
         }
     }
 
-    SI void from_565(const SkNh& _565, SkNf* r, SkNf* g, SkNf* b) {
-        auto _32_bit = SkNx_cast<int>(_565);
+    static void from_565(const Sk4h& _565, Sk4f* r, Sk4f* g, Sk4f* b) {
+        Sk4i _32_bit = SkNx_cast<int>(_565);
 
         *r = SkNx_cast<float>(_32_bit & SK_R16_MASK_IN_PLACE) * (1.0f / SK_R16_MASK_IN_PLACE);
         *g = SkNx_cast<float>(_32_bit & SK_G16_MASK_IN_PLACE) * (1.0f / SK_G16_MASK_IN_PLACE);
         *b = SkNx_cast<float>(_32_bit & SK_B16_MASK_IN_PLACE) * (1.0f / SK_B16_MASK_IN_PLACE);
     }
 
-    SI SkNh to_565(const SkNf& r, const SkNf& g, const SkNf& b) {
-        return SkNx_cast<uint16_t>( SkNx_cast<int>(r * SK_R16_MASK + 0.5f) << SK_R16_SHIFT
-                                  | SkNx_cast<int>(g * SK_G16_MASK + 0.5f) << SK_G16_SHIFT
-                                  | SkNx_cast<int>(b * SK_B16_MASK + 0.5f) << SK_B16_SHIFT);
+    static Sk4h to_565(const Sk4f& r, const Sk4f& g, const Sk4f& b) {
+        return SkNx_cast<uint16_t>( Sk4f_round(r * SK_R16_MASK) << SK_R16_SHIFT
+                                  | Sk4f_round(g * SK_G16_MASK) << SK_G16_SHIFT
+                                  | Sk4f_round(b * SK_B16_MASK) << SK_B16_SHIFT);
     }
 
-    STAGE(just_return, false) { }
 
     // The default shader produces a constant color (from the SkPaint).
-    STAGE(constant_color, true) {
+    KERNEL_Sk4f(constant_color) {
         auto color = (const SkPM4f*)ctx;
         r = color->r();
         g = color->g();
@@ -162,8 +144,8 @@ namespace SK_OPTS_NS {
     }
 
     // s' = d(1-c) + sc, for a constant c.
-    STAGE(lerp_constant_float, true) {
-        SkNf c = *(const float*)ctx;
+    KERNEL_Sk4f(lerp_constant_float) {
+        Sk4f c = *(const float*)ctx;
 
         r = lerp(dr, r, c);
         g = lerp(dg, g, c);
@@ -172,10 +154,10 @@ namespace SK_OPTS_NS {
     }
 
     // s' = sc for 8-bit c.
-    STAGE(scale_u8, true) {
+    KERNEL_Sk4f(scale_u8) {
         auto ptr = (const uint8_t*)ctx + x;
 
-        SkNf c = SkNx_cast<float>(load_tail(tail, ptr)) * (1/255.0f);
+        Sk4f c = SkNx_cast<float>(load_tail(tail, ptr)) * (1/255.0f);
         r = r*c;
         g = g*c;
         b = b*c;
@@ -183,10 +165,10 @@ namespace SK_OPTS_NS {
     }
 
     // s' = d(1-c) + sc for 8-bit c.
-    STAGE(lerp_u8, true) {
+    KERNEL_Sk4f(lerp_u8) {
         auto ptr = (const uint8_t*)ctx + x;
 
-        SkNf c = SkNx_cast<float>(load_tail(tail, ptr)) * (1/255.0f);
+        Sk4f c = SkNx_cast<float>(load_tail(tail, ptr)) * (1/255.0f);
         r = lerp(dr, r, c);
         g = lerp(dg, g, c);
         b = lerp(db, b, c);
@@ -194,9 +176,9 @@ namespace SK_OPTS_NS {
     }
 
     // s' = d(1-c) + sc for 565 c.
-    STAGE(lerp_565, true) {
+    KERNEL_Sk4f(lerp_565) {
         auto ptr = (const uint16_t*)ctx + x;
-        SkNf cr, cg, cb;
+        Sk4f cr, cg, cb;
         from_565(load_tail(tail, ptr), &cr, &cg, &cb);
 
         r = lerp(dr, r, cr);
@@ -205,145 +187,155 @@ namespace SK_OPTS_NS {
         a = 1.0f;
     }
 
-    STAGE(load_d_565, true) {
+    KERNEL_Sk4f(load_d_565) {
         auto ptr = (const uint16_t*)ctx + x;
         from_565(load_tail(tail, ptr), &dr,&dg,&db);
         da = 1.0f;
     }
 
-    STAGE(load_s_565, true) {
+    KERNEL_Sk4f(load_s_565) {
         auto ptr = (const uint16_t*)ctx + x;
         from_565(load_tail(tail, ptr), &r,&g,&b);
         a = 1.0f;
     }
 
-    STAGE(store_565, false) {
+    KERNEL_Sk4f(store_565) {
         clamp_01_premul(r,g,b,a);
         auto ptr = (uint16_t*)ctx + x;
         store_tail(tail, to_565(r,g,b), ptr);
     }
 
-    STAGE(load_d_f16, true) {
+    KERNEL_Sk4f(load_d_f16) {
         auto ptr = (const uint64_t*)ctx + x;
 
-        uint64_t buf[N] = {0};
         if (tail) {
-            memcpy(buf, ptr, tail*sizeof(uint64_t));
-            ptr = buf;
+            auto p0 =          SkHalfToFloat_finite_ftz(ptr[0])          ,
+                 p1 = tail>1 ? SkHalfToFloat_finite_ftz(ptr[1]) : Sk4f{0},
+                 p2 = tail>2 ? SkHalfToFloat_finite_ftz(ptr[2]) : Sk4f{0};
+            dr = { p0[0],p1[0],p2[0],0 };
+            dg = { p0[1],p1[1],p2[1],0 };
+            db = { p0[2],p1[2],p2[2],0 };
+            da = { p0[3],p1[3],p2[3],0 };
+            return;
         }
 
-        SkNh rh, gh, bh, ah;
-        SkNh::Load4(ptr, &rh, &gh, &bh, &ah);
+        Sk4h rh, gh, bh, ah;
+        Sk4h::Load4(ptr, &rh, &gh, &bh, &ah);
         dr = SkHalfToFloat_finite_ftz(rh);
         dg = SkHalfToFloat_finite_ftz(gh);
         db = SkHalfToFloat_finite_ftz(bh);
         da = SkHalfToFloat_finite_ftz(ah);
     }
 
-    STAGE(load_s_f16, true) {
+    KERNEL_Sk4f(load_s_f16) {
         auto ptr = (const uint64_t*)ctx + x;
 
-        uint64_t buf[N] = {0};
         if (tail) {
-            memcpy(buf, ptr, tail*sizeof(uint64_t));
-            ptr = buf;
+            auto p0 =          SkHalfToFloat_finite_ftz(ptr[0])          ,
+                 p1 = tail>1 ? SkHalfToFloat_finite_ftz(ptr[1]) : Sk4f{0},
+                 p2 = tail>2 ? SkHalfToFloat_finite_ftz(ptr[2]) : Sk4f{0};
+            r = { p0[0],p1[0],p2[0],0 };
+            g = { p0[1],p1[1],p2[1],0 };
+            b = { p0[2],p1[2],p2[2],0 };
+            a = { p0[3],p1[3],p2[3],0 };
+            return;
         }
 
-        SkNh rh, gh, bh, ah;
-        SkNh::Load4(ptr, &rh, &gh, &bh, &ah);
+        Sk4h rh, gh, bh, ah;
+        Sk4h::Load4(ptr, &rh, &gh, &bh, &ah);
         r = SkHalfToFloat_finite_ftz(rh);
         g = SkHalfToFloat_finite_ftz(gh);
         b = SkHalfToFloat_finite_ftz(bh);
         a = SkHalfToFloat_finite_ftz(ah);
     }
 
-    STAGE(store_f16, false) {
+    KERNEL_Sk4f(store_f16) {
         clamp_01_premul(r,g,b,a);
         auto ptr = (uint64_t*)ctx + x;
 
-        uint64_t buf[N] = {0};
-        SkNh::Store4(tail ? buf : ptr, SkFloatToHalf_finite_ftz(r),
-                                       SkFloatToHalf_finite_ftz(g),
-                                       SkFloatToHalf_finite_ftz(b),
-                                       SkFloatToHalf_finite_ftz(a));
-        if (tail) {
-            memcpy(ptr, buf, tail*sizeof(uint64_t));
+        switch (tail) {
+            case 0: return Sk4h::Store4(ptr, SkFloatToHalf_finite_ftz(r),
+                                             SkFloatToHalf_finite_ftz(g),
+                                             SkFloatToHalf_finite_ftz(b),
+                                             SkFloatToHalf_finite_ftz(a));
+
+            case 3: SkFloatToHalf_finite_ftz({r[2], g[2], b[2], a[2]}).store(ptr+2);  // fall through
+            case 2: SkFloatToHalf_finite_ftz({r[1], g[1], b[1], a[1]}).store(ptr+1);  // fall through
+            case 1: SkFloatToHalf_finite_ftz({r[0], g[0], b[0], a[0]}).store(ptr+0);
         }
     }
 
 
     // Load 8-bit SkPMColor-order sRGB.
-    STAGE(load_d_srgb, true) {
+    KERNEL_Sk4f(load_d_srgb) {
         auto ptr = (const uint32_t*)ctx + x;
 
-        auto px = load_tail(tail, ptr);
-        auto to_int = [](const SkNx<N, uint32_t>& v) { return SkNi::Load(&v); };
-        dr =    sk_linear_from_srgb_math(to_int((px >> SK_R32_SHIFT) & 0xff));
-        dg =    sk_linear_from_srgb_math(to_int((px >> SK_G32_SHIFT) & 0xff));
-        db =    sk_linear_from_srgb_math(to_int((px >> SK_B32_SHIFT) & 0xff));
-        da = (1/255.0f)*SkNx_cast<float>(to_int( px >> SK_A32_SHIFT        ));
+        auto px = load_tail(tail, (const int*)ptr);
+        dr =    sk_linear_from_srgb_math((px >> SK_R32_SHIFT) & 0xff);
+        dg =    sk_linear_from_srgb_math((px >> SK_G32_SHIFT) & 0xff);
+        db =    sk_linear_from_srgb_math((px >> SK_B32_SHIFT) & 0xff);
+        da = (1/255.0f)*SkNx_cast<float>((px >> SK_A32_SHIFT) & 0xff);
     }
 
-    STAGE(load_s_srgb, true) {
+    KERNEL_Sk4f(load_s_srgb) {
         auto ptr = (const uint32_t*)ctx + x;
 
-        auto px = load_tail(tail, ptr);
-        auto to_int = [](const SkNx<N, uint32_t>& v) { return SkNi::Load(&v); };
-        r =    sk_linear_from_srgb_math(to_int((px >> SK_R32_SHIFT) & 0xff));
-        g =    sk_linear_from_srgb_math(to_int((px >> SK_G32_SHIFT) & 0xff));
-        b =    sk_linear_from_srgb_math(to_int((px >> SK_B32_SHIFT) & 0xff));
-        a = (1/255.0f)*SkNx_cast<float>(to_int( px >> SK_A32_SHIFT        ));
+        auto px = load_tail(tail, (const int*)ptr);
+        r =    sk_linear_from_srgb_math((px >> SK_R32_SHIFT) & 0xff);
+        g =    sk_linear_from_srgb_math((px >> SK_G32_SHIFT) & 0xff);
+        b =    sk_linear_from_srgb_math((px >> SK_B32_SHIFT) & 0xff);
+        a = (1/255.0f)*SkNx_cast<float>((px >> SK_A32_SHIFT) & 0xff);
     }
 
-    STAGE(store_srgb, false) {
+    KERNEL_Sk4f(store_srgb) {
         clamp_01_premul(r,g,b,a);
         auto ptr = (uint32_t*)ctx + x;
-        store_tail(tail, (      sk_linear_to_srgb_noclamp(r) << SK_R32_SHIFT
-                         |      sk_linear_to_srgb_noclamp(g) << SK_G32_SHIFT
-                         |      sk_linear_to_srgb_noclamp(b) << SK_B32_SHIFT
-                         | SkNx_cast<int>(255.0f * a + 0.5f) << SK_A32_SHIFT ), (int*)ptr);
+        store_tail(tail, ( sk_linear_to_srgb_noclamp(r) << SK_R32_SHIFT
+                         | sk_linear_to_srgb_noclamp(g) << SK_G32_SHIFT
+                         | sk_linear_to_srgb_noclamp(b) << SK_B32_SHIFT
+                         |       Sk4f_round(255.0f * a) << SK_A32_SHIFT), (int*)ptr);
     }
 
-    RGBA_XFERMODE(clear)    { return 0.0f; }
-  //RGBA_XFERMODE(src)      { return s; }   // This would be a no-op stage, so we just omit it.
-    RGBA_XFERMODE(dst)      { return d; }
-
-    RGBA_XFERMODE(srcatop)  { return s*da + d*inv(sa); }
-    RGBA_XFERMODE(srcin)    { return s * da; }
-    RGBA_XFERMODE(srcout)   { return s * inv(da); }
-    RGBA_XFERMODE(srcover)  { return SkNx_fma(d, inv(sa), s); }
-    RGBA_XFERMODE(dstatop)  { return srcatop_kernel(d,da,s,sa); }
-    RGBA_XFERMODE(dstin)    { return srcin_kernel  (d,da,s,sa); }
-    RGBA_XFERMODE(dstout)   { return srcout_kernel (d,da,s,sa); }
-    RGBA_XFERMODE(dstover)  { return srcover_kernel(d,da,s,sa); }
-
-    RGBA_XFERMODE(modulate) { return s*d; }
-    RGBA_XFERMODE(multiply) { return s*inv(da) + d*inv(sa) + s*d; }
-    RGBA_XFERMODE(plus_)    { return s + d; }
-    RGBA_XFERMODE(screen)   { return s + d - s*d; }
-    RGBA_XFERMODE(xor_)     { return s*inv(da) + d*inv(sa); }
-
-    RGB_XFERMODE(colorburn) {
+    RGBA_XFERMODE_Sk4f(clear)    { return 0.0f; }
+  //RGBA_XFERMODE_Sk4f(src)      { return s; }   // This would be a no-op stage, so we just omit it.
+    RGBA_XFERMODE_Sk4f(dst)      { return d; }
+
+    RGBA_XFERMODE_Sk4f(srcatop)  { return s*da + d*inv(sa); }
+    RGBA_XFERMODE_Sk4f(srcin)    { return s * da; }
+    RGBA_XFERMODE_Sk4f(srcout)   { return s * inv(da); }
+    RGBA_XFERMODE_Sk4f(srcover)  { return s + inv(sa)*d; }
+    RGBA_XFERMODE_Sk4f(dstatop)  { return srcatop_kernel(d,da,s,sa); }
+    RGBA_XFERMODE_Sk4f(dstin)    { return srcin_kernel  (d,da,s,sa); }
+    RGBA_XFERMODE_Sk4f(dstout)   { return srcout_kernel (d,da,s,sa); }
+    RGBA_XFERMODE_Sk4f(dstover)  { return srcover_kernel(d,da,s,sa); }
+
+    RGBA_XFERMODE_Sk4f(modulate) { return s*d; }
+    RGBA_XFERMODE_Sk4f(multiply) { return s*inv(da) + d*inv(sa) + s*d; }
+    RGBA_XFERMODE_Sk4f(plus_)    { return s + d; }
+    RGBA_XFERMODE_Sk4f(screen)   { return s + d - s*d; }
+    RGBA_XFERMODE_Sk4f(xor_)     { return s*inv(da) + d*inv(sa); }
+
+    RGB_XFERMODE_Sk4f(colorburn) {
         return (d == da  ).thenElse(d + s*inv(da),
                (s == 0.0f).thenElse(s + d*inv(sa),
-                                    sa*(da - SkNf::Min(da, (da-d)*sa/s)) + s*inv(da) + d*inv(sa)));
+                                    sa*(da - Sk4f::Min(da, (da-d)*sa/s)) + s*inv(da) + d*inv(sa)));
     }
-    RGB_XFERMODE(colordodge) {
+    RGB_XFERMODE_Sk4f(colordodge) {
         return (d == 0.0f).thenElse(d + s*inv(da),
                (s == sa  ).thenElse(s + d*inv(sa),
-                                    sa*SkNf::Min(da, (d*sa)/(sa - s)) + s*inv(da) + d*inv(sa)));
+                                    sa*Sk4f::Min(da, (d*sa)/(sa - s)) + s*inv(da) + d*inv(sa)));
     }
-    RGB_XFERMODE(darken)     { return s + d - SkNf::Max(s*da, d*sa); }
-    RGB_XFERMODE(difference) { return s + d - 2.0f*SkNf::Min(s*da,d*sa); }
-    RGB_XFERMODE(exclusion)  { return s + d - 2.0f*s*d; }
-    RGB_XFERMODE(hardlight) {
+    RGB_XFERMODE_Sk4f(darken)     { return s + d - Sk4f::Max(s*da, d*sa); }
+    RGB_XFERMODE_Sk4f(difference) { return s + d - 2.0f*Sk4f::Min(s*da,d*sa); }
+    RGB_XFERMODE_Sk4f(exclusion)  { return s + d - 2.0f*s*d; }
+    RGB_XFERMODE_Sk4f(hardlight) {
         return s*inv(da) + d*inv(sa)
              + (2.0f*s <= sa).thenElse(2.0f*s*d, sa*da - 2.0f*(da-d)*(sa-s));
     }
-    RGB_XFERMODE(lighten) { return s + d - SkNf::Min(s*da, d*sa); }
-    RGB_XFERMODE(overlay) { return hardlight_kernel(d,da,s,sa); }
-    RGB_XFERMODE(softlight) {
-        SkNf m  = (da > 0.0f).thenElse(d / da, 0.0f),
+    RGB_XFERMODE_Sk4f(lighten) { return s + d - Sk4f::Min(s*da, d*sa); }
+    RGB_XFERMODE_Sk4f(overlay) { return hardlight_kernel(d,da,s,sa); }
+    RGB_XFERMODE_Sk4f(softlight) {
+        Sk4f m  = (da > 0.0f).thenElse(d / da, 0.0f),
              s2 = 2.0f*s,
              m4 = 4.0f*m;
 
@@ -351,7 +343,7 @@ namespace SK_OPTS_NS {
         //    1. dark src?
         //    2. light src, dark dst?
         //    3. light src, light dst?
-        SkNf darkSrc = d*(sa + (s2 - sa)*(1.0f - m)),     // Used in case 1.
+        Sk4f darkSrc = d*(sa + (s2 - sa)*(1.0f - m)),     // Used in case 1.
              darkDst = (m4*m4 + m4)*(m - 1.0f) + 7.0f*m,  // Used in case 2.
              liteDst = m.rsqrt().invert() - m,            // Used in case 3.
              liteSrc = d*sa + da*(s2 - sa) * (4.0f*d <= da).thenElse(darkDst, liteDst);  // 2 or 3?
@@ -359,9 +351,8 @@ namespace SK_OPTS_NS {
     }
 }
 
-#undef SI
-#undef STAGE
-#undef RGBA_XFERMODE
-#undef RGB_XFERMODE
+#undef KERNEL_Sk4f
+#undef RGBA_XFERMODE_Sk4f
+#undef RGB_XFERMODE_Sk4f
 
 #endif//SkRasterPipeline_opts_DEFINED
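
Taken together, a hedged usage sketch of the post-revert API; the StockStage names and ctx conventions come from the hunks above, while the buffers and pixel count are assumptions for illustration:

    uint64_t src[100], dst[100];                  // RGBA half-float pixels (assumed)
    SkRasterPipeline p;
    p.append(SkRasterPipeline::load_d_f16, dst);  // d{r,g,b,a} <- dst
    p.append(SkRasterPipeline::load_s_f16, src);  //  {r,g,b,a} <- src
    p.append(SkRasterPipeline::srcover);          // s + (1-sa)*d
    p.append(SkRasterPipeline::store_f16,  dst);  // clamp and write back
    p.run(100);                                   // 25 four-pixel body passes; no tail

stage_4 runs the four-pixel bodies with tail fixed to 0 at compile time, and stage_1_3 runs the 1-3 pixel remainder, where __builtin_assume(tail > 0) lets Clang drop the tail==0 paths.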