From bf90520f63415f539cd5792a18efbd79cb86be0a Mon Sep 17 00:00:00 2001
From: mtklein
Date: Wed, 7 Oct 2015 12:46:43 -0700
Subject: [PATCH] SkPath::fFirstDirection: seq-cst -> relaxed

We landed this originally with lazily-correct sequentially-consistent
memory order.  It turns out that choice has regressed performance, we
think particularly when recording paths.  We also think there's no need
for anything but relaxed memory order here.

We should see this chart go down if all goes well:
https://perf.skia.org/#4329

There are also Chrome performance charts to watch in the linked bug.

BUG=chromium:537700
CQ_EXTRA_TRYBOTS=client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-TSAN-Trybot,Test-Ubuntu-GCC-Golo-GPU-GT610-x86_64-Release-TSAN

No public API changes.
TBR=reed@google.com

Review URL: https://codereview.chromium.org/1393833003
---
 include/core/SkPath.h       | 13 ++++++-------
 include/private/SkAtomics.h | 19 ++++++++++---------
 2 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/include/core/SkPath.h b/include/core/SkPath.h
index d3673b6..2200e03 100644
--- a/include/core/SkPath.h
+++ b/include/core/SkPath.h
@@ -938,13 +938,12 @@ private:
         kCurrent_Version = 1
     };
 
-    SkAutoTUnref<SkPathRef> fPathRef;
-
-    int                 fLastMoveToIndex;
-    uint8_t             fFillType;
-    mutable uint8_t     fConvexity;
-    mutable SkAtomic<uint8_t> fFirstDirection;  // SkPathPriv::FirstDirection
-    mutable SkBool8     fIsVolatile;
+    SkAutoTUnref<SkPathRef> fPathRef;
+    int                     fLastMoveToIndex;
+    uint8_t                 fFillType;
+    mutable uint8_t         fConvexity;
+    mutable SkAtomic<uint8_t, sk_memory_order_relaxed> fFirstDirection;  // SkPathPriv::FirstDirection
+    mutable SkBool8         fIsVolatile;
 
     /** Resets all fields other than fPathRef to their initial 'empty' values.
      *  Assumes the caller has already emptied fPathRef.
diff --git a/include/private/SkAtomics.h b/include/private/SkAtomics.h
index e947d1a..56eace4 100644
--- a/include/private/SkAtomics.h
+++ b/include/private/SkAtomics.h
@@ -42,43 +42,44 @@ T sk_atomic_exchange(T*, T, sk_memory_order = sk_memory_order_seq_cst);
 
 // A little wrapper class for small T (think, builtins: int, float, void*) to
 // ensure they're always used atomically.  This is our stand-in for std::atomic.
-template <typename T>
+// !!! Please _really_ know what you're doing if you change default_memory_order. !!!
+template <typename T, sk_memory_order default_memory_order = sk_memory_order_seq_cst>
 class SkAtomic : SkNoncopyable {
 public:
     SkAtomic() {}
     explicit SkAtomic(const T& val) : fVal(val) {}
 
     // It is essential we return by value rather than by const&.  fVal may change at any time.
-    T load(sk_memory_order mo = sk_memory_order_seq_cst) const {
+    T load(sk_memory_order mo = default_memory_order) const {
         return sk_atomic_load(&fVal, mo);
     }
 
-    void store(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) {
+    void store(const T& val, sk_memory_order mo = default_memory_order) {
         sk_atomic_store(&fVal, val, mo);
     }
 
-    // Alias for .load(sk_memory_order_seq_cst).
+    // Alias for .load(default_memory_order).
     operator T() const { return this->load(); }
 
-    // Alias for .store(v, sk_memory_order_seq_cst).
+    // Alias for .store(v, default_memory_order).
     T operator=(const T& v) { this->store(v); return v; }
 
-    T fetch_add(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) {
+    T fetch_add(const T& val, sk_memory_order mo = default_memory_order) {
         return sk_atomic_fetch_add(&fVal, val, mo);
     }
 
-    T fetch_sub(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) {
+    T fetch_sub(const T& val, sk_memory_order mo = default_memory_order) {
         return sk_atomic_fetch_sub(&fVal, val, mo);
     }
 
     bool compare_exchange(T* expected, const T& desired,
-                          sk_memory_order success = sk_memory_order_seq_cst,
-                          sk_memory_order failure = sk_memory_order_seq_cst) {
+                          sk_memory_order success = default_memory_order,
+                          sk_memory_order failure = default_memory_order) {
         return sk_atomic_compare_exchange(&fVal, expected, desired, success, failure);
     }
 private:
--
2.7.4
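
A note on why relaxed ordering is safe for this particular field:
fFirstDirection is a lazily computed, self-contained byte.  Any thread
that finds it unset recomputes the same deterministic answer, and no
other memory is published through the flag, so the load needs no acquire
semantics and the store needs no release semantics.  Below is a minimal
sketch of that pattern using std::atomic; the names PathLike, Direction,
and computeFirstDirection are hypothetical illustrations, not Skia's
actual API.

    #include <atomic>
    #include <cstdint>

    // kUnknown means "not computed yet"; any thread may compute and cache it.
    enum Direction : uint8_t { kUnknown, kCW, kCCW };

    class PathLike {
    public:
        Direction firstDirection() const {
            // Relaxed is enough: the loaded byte carries all the information,
            // so this load does not need to "acquire" any other writes.
            Direction dir = fFirstDirection.load(std::memory_order_relaxed);
            if (dir == kUnknown) {
                // Racing threads may all reach here, but each computes the
                // same value, so it doesn't matter whose store lands last.
                dir = this->computeFirstDirection();
                fFirstDirection.store(dir, std::memory_order_relaxed);
            }
            return dir;
        }

    private:
        Direction computeFirstDirection() const {
            return kCW;  // placeholder for the real geometry walk
        }

        mutable std::atomic<Direction> fFirstDirection{kUnknown};
    };

This is exactly the situation where seq-cst buys nothing: there is no
second variable whose visibility must be ordered against the cached
byte, so the cheaper relaxed operations suffice.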