M = 2
};
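+// Times an uncontended 32-bit atomic increment in a tight loop.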
+class AtomicInc32 : public Benchmark {
+public:
+ AtomicInc32() : fX(0) {}
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
+ }
+
+protected:
+    virtual const char* onGetName() SK_OVERRIDE {
+ return "atomic_inc_32";
+ }
+
+    virtual void onDraw(const int loops, SkCanvas*) SK_OVERRIDE {
+ for (int i = 0; i < loops; ++i) {
+ sk_atomic_inc(&fX);
+ }
+ }
+
+private:
+ int32_t fX;
+ typedef Benchmark INHERITED;
+};
+
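+// Same measurement for the new 64-bit overload of sk_atomic_inc.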
+class AtomicInc64 : public Benchmark {
+public:
+ AtomicInc64() : fX(0) {}
+
+ virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
+ return backend == kNonRendering_Backend;
+ }
+
+protected:
+    virtual const char* onGetName() SK_OVERRIDE {
+ return "atomic_inc_64";
+ }
+
+    virtual void onDraw(const int loops, SkCanvas*) SK_OVERRIDE {
+ for (int i = 0; i < loops; ++i) {
+ sk_atomic_inc(&fX);
+ }
+ }
+
+private:
+ int64_t fX;
+ typedef Benchmark INHERITED;
+};
+
class RefCntBench_Stack : public Benchmark {
public:
virtual bool isSuitableFor(Backend backend) SK_OVERRIDE {
///////////////////////////////////////////////////////////////////////////////
+DEF_BENCH( return new AtomicInc32(); )
+DEF_BENCH( return new AtomicInc64(); )
+
DEF_BENCH( return new RefCntBench_Stack(); )
DEF_BENCH( return new RefCntBench_Heap(); )
DEF_BENCH( return new RefCntBench_New(); )
* No additional memory barrier is required; this must act as a compiler barrier.
*/
static int32_t sk_atomic_inc(int32_t* addr);
+static int64_t sk_atomic_inc(int64_t* addr);
/** Atomically adds inc to the int referenced by addr and returns the previous value.
* No additional memory barrier is required; this must act as a compiler barrier.
*/
static int32_t sk_atomic_add(int32_t* addr, int32_t inc);
+static int64_t sk_atomic_add(int64_t* addr, int64_t inc);
/** Atomically subtracts one from the int referenced by addr and returns the previous value.
* This must act as a release (SL/S) memory barrier and as a compiler barrier.
*/
static int32_t sk_atomic_dec(int32_t* addr);
+static int64_t sk_atomic_dec(int64_t* addr);
/** Atomic compare and set.
* If *addr == before, set *addr to after and return true, otherwise return false.
* This must act as a release (SL/S) memory barrier and as a compiler barrier.
*/
static bool sk_atomic_cas(int32_t* addr, int32_t before, int32_t after);
+static bool sk_atomic_cas(int64_t* addr, int64_t before, int64_t after);
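+
+/** Illustrative note: the 64-bit variants are plain overloads, so the flavor used
+ *  is chosen by the pointed-to type, e.g.
+ *      int64_t refs = 0;          // hypothetical counter
+ *      sk_atomic_inc(&refs);      // resolves to sk_atomic_inc(int64_t*)
+ */
+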
/** If sk_atomic_dec does not act as an acquire (L/SL) barrier,
 *  this must act as an acquire (L/SL) memory barrier and as a compiler barrier.
 */
static void sk_membar_acquire__after_atomic_dec();

/** Atomically adds one to the int referenced by addr iff the referenced int was not 0
 *  and returns the previous value.
 *  No additional memory barrier is required; this must act as a compiler barrier.
 */
-static inline int32_t sk_atomic_conditional_inc(int32_t* addr) {
- int32_t prev;
+template<typename INT_TYPE> static inline INT_TYPE sk_atomic_conditional_inc(INT_TYPE* addr) {
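+    // Templated so one CAS loop serves both the int32_t and the new int64_t
+    // flavors of sk_atomic_cas.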
+ INT_TYPE prev;
do {
prev = *addr;
if (0 == prev) {
return __sync_fetch_and_add(addr, 1);
}
+static inline __attribute__((always_inline)) int64_t sk_atomic_inc(int64_t* addr) {
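+    // __sync_fetch_and_add returns the value held before the addition, matching
+    // the documented "returns the previous value" contract.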
+ return __sync_fetch_and_add(addr, 1);
+}
+
static inline __attribute__((always_inline)) int32_t sk_atomic_add(int32_t* addr, int32_t inc) {
return __sync_fetch_and_add(addr, inc);
}
+static inline __attribute__((always_inline)) int64_t sk_atomic_add(int64_t* addr, int64_t inc) {
+ return __sync_fetch_and_add(addr, inc);
+}
+
static inline __attribute__((always_inline)) int32_t sk_atomic_dec(int32_t* addr) {
return __sync_fetch_and_add(addr, -1);
}
+static inline __attribute__((always_inline)) int64_t sk_atomic_dec(int64_t* addr) {
+ return __sync_fetch_and_add(addr, -1);
+}
+
static inline __attribute__((always_inline)) void sk_membar_acquire__after_atomic_dec() { }
static inline __attribute__((always_inline)) bool sk_atomic_cas(int32_t* addr,
return __sync_bool_compare_and_swap(addr, before, after);
}
+static inline __attribute__((always_inline)) bool sk_atomic_cas(int64_t* addr,
+ int64_t before,
+ int64_t after) {
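+    // __sync_bool_compare_and_swap updates *addr only if it equals 'before' and
+    // reports whether the swap happened.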
+ return __sync_bool_compare_and_swap(addr, before, after);
+}
+
static inline __attribute__((always_inline)) void* sk_atomic_cas(void** addr,
void* before,
void* after) {
return _InterlockedIncrement(reinterpret_cast<long*>(addr)) - 1;
}
+static inline int64_t sk_atomic_inc(int64_t* addr) {
+    // InterlockedIncrement64 returns the new value; we want to return the old.
+ return InterlockedIncrement64(addr) - 1;
+}
+
static inline int32_t sk_atomic_add(int32_t* addr, int32_t inc) {
return _InterlockedExchangeAdd(reinterpret_cast<long*>(addr), static_cast<long>(inc));
}
+static inline int64_t sk_atomic_add(int64_t* addr, int64_t inc) {
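+    // Unlike the Increment/Decrement variants, InterlockedExchangeAdd64 already
+    // returns the original value, so no adjustment is needed.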
+ return InterlockedExchangeAdd64(addr, inc);
+}
+
static inline int32_t sk_atomic_dec(int32_t* addr) {
// InterlockedDecrement returns the new value, we want to return the old.
return _InterlockedDecrement(reinterpret_cast<long*>(addr)) + 1;
}
+static inline int64_t sk_atomic_dec(int64_t* addr) {
+    // InterlockedDecrement64 returns the new value; we want to return the old.
+ return InterlockedDecrement64(addr) + 1;
+}
+
static inline void sk_membar_acquire__after_atomic_dec() { }
static inline bool sk_atomic_cas(int32_t* addr, int32_t before, int32_t after) {
return _InterlockedCompareExchange(reinterpret_cast<long*>(addr), after, before) == before;
}
+static inline bool sk_atomic_cas(int64_t* addr, int64_t before, int64_t after) {
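+    // _InterlockedCompareExchange64 returns the original value of *addr; equality
+    // with 'before' means the exchange took place.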
+ return _InterlockedCompareExchange64(addr, after, before) == before;
+}
+
static inline void* sk_atomic_cas(void** addr, void* before, void* after) {
return InterlockedCompareExchangePointer(addr, after, before);
}