From 834d9e109298ae704043128005f8c1bc622350f4 Mon Sep 17 00:00:00 2001
From: mtklein
Date: Thu, 14 Apr 2016 16:23:15 -0700
Subject: [PATCH] Revert of skcpu: sse4.1 floor, f16c f16<->f32 (patchset #10 id:180001 of https://codereview.chromium.org/1891513002/ )

Reason for revert:
Need to change around my #if guards so that clang-cl is treated like GCC and Clang, rather than MSVC.

Original issue's description:
> skcpu: sse4.1 floor, f16c f16<->f32
>
> - floor with roundps is about 4.5x faster when available
> - f16 srcover_n is similar to but a little faster than the version in https://codereview.chromium.org/1884683002. This new one fuses the dst load/stores into the f16<->f32 conversions:
>
>   +0x180  movups     (%r15), %xmm1
>   +0x184  vcvtph2ps  (%rbx), %xmm2
>   +0x189  movaps     %xmm1, %xmm3
>   +0x18c  shufps     $255, %xmm3, %xmm3
>   +0x190  movaps     %xmm0, %xmm4
>   +0x193  subps      %xmm3, %xmm4
>   +0x196  mulps      %xmm2, %xmm4
>   +0x199  addps      %xmm1, %xmm4
>   +0x19c  vcvtps2ph  $0, %xmm4, (%rbx)
>   +0x1a2  addq       $16, %r15
>   +0x1a6  addq       $8, %rbx
>   +0x1aa  decl       %r14d
>   +0x1ad  jne        +0x180
>
> If we decide to land this it'd be a good idea to convert most or all users of SkFloatToHalf_01 and SkHalfToFloat_01 over to the pointer-based versions.
>
> BUG=skia:
> GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1891513002
> CQ_EXTRA_TRYBOTS=client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-SKNX_NO_SIMD-Trybot
>
> Committed: https://skia.googlesource.com/skia/+/cbe3c1af987d622ea67ef560d855b41bb14a0ce9

TBR=fmalita@chromium.org,herb@google.com,reed@google.com,mtklein@chromium.org
# Skipping CQ checks because original CL landed less than 1 days ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=skia:

Review URL: https://codereview.chromium.org/1891993002
---
 src/core/SkHalf.h          | 30 ------------------------------
 src/core/SkXfermodeF16.cpp | 12 +++++++-----
 src/opts/SkNx_sse.h        | 32 +++++++++++---------------------
 3 files changed, 18 insertions(+), 56 deletions(-)

diff --git a/src/core/SkHalf.h b/src/core/SkHalf.h
index e56c78c..5f5575a 100644
--- a/src/core/SkHalf.h
+++ b/src/core/SkHalf.h
@@ -8,7 +8,6 @@
 #ifndef SkHalf_DEFINED
 #define SkHalf_DEFINED
 
-#include "SkCpu.h"
 #include "SkNx.h"
 #include "SkTypes.h"
 
@@ -123,32 +122,3 @@ static inline uint64_t SkFloatToHalf_01(const Sk4f& fs) {
 }
 
 #endif
-
-static inline Sk4f SkHalfToFloat_01(const uint64_t* hs) {
-#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
-    if (SkCpu::Supports(SkCpu::F16C)) {
-        __m128 fs;
-    #if defined(_MSC_VER)
-        fs = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i*)hs));
-    #else
-        asm("vcvtph2ps %[hs], %[fs]" : [fs]"=x"(fs) : [hs]"m"(*hs));
-    #endif
-        return fs;
-    }
-#endif
-    return SkHalfToFloat_01(*hs);
-}
-
-static inline void SkFloatToHalf_01(const Sk4f& fs, uint64_t* hs) {
-#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
-    if (SkCpu::Supports(SkCpu::F16C)) {
-    #if defined(_MSC_VER)
-        _mm_storel_epi64((__m128i*)hs, _mm_cvtps_ph(fs.fVec, 0));
-    #else
-        asm("vcvtps2ph $0, %[fs], %[hs]" : [hs]"=m"(*hs) : [fs]"x"(fs.fVec));
-    #endif
-        return;
-    }
-#endif
-    *hs = SkFloatToHalf_01(fs);
-}
diff --git a/src/core/SkXfermodeF16.cpp b/src/core/SkXfermodeF16.cpp
index 2c6873f..dfcefa2 100644
--- a/src/core/SkXfermodeF16.cpp
+++ b/src/core/SkXfermodeF16.cpp
@@ -134,13 +134,15 @@ static void srcover_1(const SkXfermode*, uint64_t dst[], const SkPM4f* src, int
 static void srcover_n(const SkXfermode*, uint64_t dst[], const SkPM4f src[], int count,
                       const SkAlpha aa[]) {
     for (int i = 0; i < count; ++i) {
-        Sk4f s = Sk4f::Load(src+i),
-             d = SkHalfToFloat_01(dst+i),
-             r = s + d*(1.0f - SkNx_shuffle<3,3,3,3>(s));
+        const Sk4f s4 = Sk4f::Load(src[i].fVec);
+        const Sk4f dst_scale = Sk4f(1 - get_alpha(s4));
+        const Sk4f d4 = SkHalfToFloat_01(dst[i]);
+        const Sk4f r4 = s4 + d4 * dst_scale;
         if (aa) {
-            r = lerp_by_coverage(r, d, aa[i]);
+            dst[i] = SkFloatToHalf_01(lerp_by_coverage(r4, d4, aa[i]));
+        } else {
+            dst[i] = SkFloatToHalf_01(r4);
         }
-        SkFloatToHalf_01(r, dst+i);
     }
 }
 
diff --git a/src/opts/SkNx_sse.h b/src/opts/SkNx_sse.h
index e824d29..80c7f0e 100644
--- a/src/opts/SkNx_sse.h
+++ b/src/opts/SkNx_sse.h
@@ -8,13 +8,21 @@
 #ifndef SkNx_sse_DEFINED
 #define SkNx_sse_DEFINED
 
-#include "SkCpu.h"
-
 // This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything more recent.
 // If you do, make sure this is in a static inline function... anywhere else risks violating ODR.
 
 #define SKNX_IS_FAST
 
+// SSE 4.1 has _mm_floor_ps to floor 4 floats. We emulate it:
+//   - roundtrip through integers via truncation
+//   - subtract 1 if that's too big (possible for negative values).
+// This restricts the domain of our inputs to a maximum somewhere around 2^31. Seems plenty big.
+static inline __m128 sse2_mm_floor_ps(__m128 v) {
+    __m128 roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(v));
+    __m128 too_big = _mm_cmpgt_ps(roundtrip, v);
+    return _mm_sub_ps(roundtrip, _mm_and_ps(too_big, _mm_set1_ps(1.0f)));
+}
+
 template <>
 class SkNx<2, float> {
 public:
@@ -89,25 +97,7 @@ public:
     static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }
 
     SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); }
-    SkNx floor() const {
-        if (SkCpu::Supports(SkCpu::SSE41)) {
-            __m128 r;
-        #if defined(_MSC_VER)
-            r = _mm_floor_ps(fVec);
-        #else
-            asm("roundps $0x1, %[fVec], %[r]" : [r]"=x"(r) : [fVec]"x"(fVec));
-        #endif
-            return r;
-        }
-        // Emulate _mm_floor_ps() with SSE2:
-        //   - roundtrip through integers via truncation
-        //   - subtract 1 if that's too big (possible for negative values).
-        // This restricts the domain of our inputs to a maximum somewhere around 2^31.
-        // Seems plenty big.
-        __m128 roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(fVec));
-        __m128 too_big = _mm_cmpgt_ps(roundtrip, fVec);
-        return _mm_sub_ps(roundtrip, _mm_and_ps(too_big, _mm_set1_ps(1.0f)));
-    }
+    SkNx floor() const { return sse2_mm_floor_ps(fVec); }
 
     SkNx sqrt() const { return _mm_sqrt_ps (fVec); }
     SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
-- 
2.7.4
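
The stated reason for the revert is that clang-cl defines both __clang__ and _MSC_VER, so guards keyed only on _MSC_VER send it down the MSVC intrinsic path even though it accepts GNU-style inline asm. Below is a minimal sketch of one way the guards could be ordered for a re-land; it is not taken from the reverted or re-landed CL, the helper name load_f16_as_f32 is hypothetical, and the runtime SkCpu::Supports(SkCpu::F16C) check is assumed to happen in the caller.

// Hypothetical sketch: check __clang__/__GNUC__ before _MSC_VER so clang-cl
// uses the GNU-style inline asm path, while plain MSVC keeps the intrinsic.
// Caller is assumed to have verified F16C support at runtime.
#include <immintrin.h>
#include <stdint.h>

static inline __m128 load_f16_as_f32(const uint64_t* hs) {
#if defined(__clang__) || defined(__GNUC__)
    // GCC, Clang, and clang-cl all accept GNU-style inline asm.
    __m128 fs;
    asm("vcvtph2ps %[hs], %[fs]" : [fs]"=x"(fs) : [hs]"m"(*hs));
    return fs;
#else
    // MSVC proper has no GNU inline asm; use the F16C intrinsic instead.
    return _mm_cvtph_ps(_mm_loadl_epi64((const __m128i*)hs));
#endif
}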