* found in the LICENSE file.
*/
#include "SkBenchmark.h"
+#include "SkRefCnt.h"
#include "SkThread.h"
+#include "SkWeakRefCnt.h"
#include <memory>
enum {
typedef SkBenchmark INHERITED;
};
+class RefCntBench_New : public SkBenchmark {
+public:
+ RefCntBench_New(void* param) : INHERITED(param) {
+ }
+protected:
+ virtual const char* onGetName() {
+ return "ref_cnt_new";
+ }
+
+ virtual void onDraw(SkCanvas* canvas) {
+ for (int i = 0; i < N; ++i) {
+ SkRefCnt* ref = new SkRefCnt();
+ for (int j = 0; j < M; ++j) {
+ ref->ref();
+ ref->unref();
+ }
+ ref->unref();
+ }
+ }
+
+private:
+ typedef SkBenchmark INHERITED;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+class WeakRefCntBench_Stack : public SkBenchmark {
+public:
+ WeakRefCntBench_Stack(void* param) : INHERITED(param) {
+ }
+protected:
+ virtual const char* onGetName() {
+ return "ref_cnt_stack_weak";
+ }
+
+ virtual void onDraw(SkCanvas* canvas) {
+ for (int i = 0; i < N; ++i) {
+ SkWeakRefCnt ref;
+ for (int j = 0; j < M; ++j) {
+ ref.ref();
+ ref.unref();
+ }
+ }
+ }
+
+private:
+ typedef SkBenchmark INHERITED;
+};
+
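+// A weak-ref-counted object whose operator delete is a no-op, so it can be
+// constructed with placement new into stack memory below and still be
+// "deleted" when its last (weak) reference goes away.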
+class PlacedWeakRefCnt : public SkWeakRefCnt {
+public:
+ PlacedWeakRefCnt() : SkWeakRefCnt() { }
+ void operator delete(void *p) { }
+};
+
+class WeakRefCntBench_Heap : public SkBenchmark {
+public:
+ WeakRefCntBench_Heap(void* param) : INHERITED(param) {
+ }
+protected:
+ virtual const char* onGetName() {
+ return "ref_cnt_heap_weak";
+ }
+
+ virtual void onDraw(SkCanvas* canvas) {
+ char memory[sizeof(PlacedWeakRefCnt)];
+ for (int i = 0; i < N; ++i) {
+ PlacedWeakRefCnt* ref = new (memory) PlacedWeakRefCnt();
+ for (int j = 0; j < M; ++j) {
+ ref->ref();
+ ref->unref();
+ }
+ ref->unref();
+ }
+ }
+
+private:
+ typedef SkBenchmark INHERITED;
+};
+
+class WeakRefCntBench_New : public SkBenchmark {
+public:
+ WeakRefCntBench_New(void* param) : INHERITED(param) {
+ }
+protected:
+ virtual const char* onGetName() {
+ return "ref_cnt_new_weak";
+ }
+
+ virtual void onDraw(SkCanvas* canvas) {
+ for (int i = 0; i < N; ++i) {
+ SkWeakRefCnt* ref = new SkWeakRefCnt();
+ for (int j = 0; j < M; ++j) {
+ ref->ref();
+ ref->unref();
+ }
+ ref->unref();
+ }
+ }
+
+private:
+ typedef SkBenchmark INHERITED;
+};
+
///////////////////////////////////////////////////////////////////////////////
-static SkBenchmark* Fact0(void* p) { return new RefCntBench_Stack(p); }
-static SkBenchmark* Fact1(void* p) { return new RefCntBench_Heap(p); }
+static SkBenchmark* Fact00(void* p) { return new RefCntBench_Stack(p); }
+static SkBenchmark* Fact01(void* p) { return new RefCntBench_Heap(p); }
+static SkBenchmark* Fact02(void* p) { return new RefCntBench_New(p); }
+
+static SkBenchmark* Fact10(void* p) { return new WeakRefCntBench_Stack(p); }
+static SkBenchmark* Fact11(void* p) { return new WeakRefCntBench_Heap(p); }
+static SkBenchmark* Fact12(void* p) { return new WeakRefCntBench_New(p); }
-static BenchRegistry gReg01(Fact0);
-static BenchRegistry gReg02(Fact1);
+static BenchRegistry gReg00(Fact00);
+static BenchRegistry gReg01(Fact01);
+static BenchRegistry gReg02(Fact02);
+static BenchRegistry gReg10(Fact10);
+static BenchRegistry gReg11(Fact11);
+static BenchRegistry gReg12(Fact12);
'../include/core/SkUnPreMultiply.h',
'../include/core/SkUnitMapper.h',
'../include/core/SkUtils.h',
+ '../include/core/SkWeakRefCnt.h',
'../include/core/SkWriter32.h',
'../include/core/SkXfermode.h',
],
{
'target_name': 'ports',
'type': 'static_library',
+ 'dependencies': [
+ 'core.gyp:core',
+ 'sfnt.gyp:sfnt',
+ 'utils.gyp:utils',
+ ],
'include_dirs': [
- '../include/config',
- '../include/core',
'../include/images',
'../include/effects',
'../include/ports',
'../include/xml',
'../src/core',
+ '../src/utils',
],
'sources': [
'../src/ports/SkDebug_stdio.cpp',
/** \class SkRefCnt
SkRefCnt is the base class for objects that may be shared by multiple
- objects. When a new owner wants a reference, it calls ref(). When an owner
- wants to release its reference, it calls unref(). When the shared object's
- reference count goes to zero as the result of an unref() call, its (virtual)
- destructor is called. It is an error for the destructor to be called
- explicitly (or via the object going out of scope on the stack or calling
- delete) if getRefCnt() > 1.
+ objects. When an existing owner wants to share a reference, it calls ref().
+ When an owner wants to release its reference, it calls unref(). When the
+ shared object's reference count goes to zero as the result of an unref()
+ call, its (virtual) destructor is called. It is an error for the
+ destructor to be called explicitly (or via the object going out of scope on
+ the stack or calling delete) if getRefCnt() > 1.
*/
class SK_API SkRefCnt : SkNoncopyable {
public:
*/
SkRefCnt() : fRefCnt(1) {}
- /** Destruct, asserting that the reference count is 1.
+ /** Destruct, asserting that the reference count is 1.
*/
virtual ~SkRefCnt() {
#ifdef SK_DEBUG
*/
void ref() const {
SkASSERT(fRefCnt > 0);
- sk_atomic_inc(&fRefCnt);
+ sk_atomic_inc(&fRefCnt); // No barrier required.
}
/** Decrement the reference count. If the reference count is 1 before the
- decrement, then call delete on the object. Note that if this is the
- case, then the object needs to have been allocated via new, and not on
- the stack.
+ decrement, then delete the object. Note that if this is the case, then
+ the object needs to have been allocated via new, and not on the stack.
*/
void unref() const {
SkASSERT(fRefCnt > 0);
+ // Release barrier (SL/S), if not provided below.
if (sk_atomic_dec(&fRefCnt) == 1) {
- fRefCnt = 1; // so our destructor won't complain
- SkDELETE(this);
+ // Acquire barrier (L/SL), if not provided above.
+ // Prevents code in internal_dispose from running before the decrement.
+ sk_membar_aquire__after_atomic_dec();
+ internal_dispose();
}
}
}
private:
+ /** Called when the ref count goes to 0.
+ */
+ virtual void internal_dispose() const {
+#ifdef SK_DEBUG
+ // so our destructor won't complain
+ fRefCnt = 1;
+#endif
+ SkDELETE(this);
+ }
+ friend class SkWeakRefCnt;
+
mutable int32_t fRefCnt;
};
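+
+/* A minimal usage sketch of the ownership rules above ("MyType" is a
+ hypothetical subclass, not part of this change):
+
+ MyType* obj = new MyType; // refcnt == 1, creator holds the only ref
+ obj->ref(); // a second owner appears: refcnt == 2
+ obj->unref(); // second owner releases: refcnt == 1
+ obj->unref(); // refcnt reaches 0: ~MyType() runs via internal_dispose()
+*/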
int32_t sk_atomic_inc(int32_t*);
int32_t sk_atomic_dec(int32_t*);
+int32_t sk_atomic_conditional_inc(int32_t*);
class SkMutex {
public:
static __attribute__((always_inline)) int32_t sk_atomic_dec(int32_t *addr) {
return __sync_fetch_and_add(addr, -1);
}
+void sk_membar_aquire__after_atomic_dec() { }
+
+static __attribute__((always_inline)) int32_t sk_atomic_conditional_inc(int32_t* addr) {
+ int32_t value = *addr;
+
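+ // Classic CAS loop: never increment once zero has been observed, and
+ // retry whenever another thread changes *addr under us.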
+ while (true) {
+ if (value == 0) {
+ return 0;
+ }
+
+ int32_t before = __sync_val_compare_and_swap(addr, value, value + 1);
+
+ if (before == value) {
+ return value;
+ } else {
+ value = before;
+ }
+ }
+}
+void sk_membar_aquire__after_atomic_conditional_inc() { }
#else // !SK_BUILD_FOR_ANDROID_NDK
#define sk_atomic_inc(addr) android_atomic_inc(addr)
#define sk_atomic_dec(addr) android_atomic_dec(addr)
+void sk_membar_aquire__after_atomic_dec() {
+ // HACK: Android is actually using full memory barriers.
+ // Should this change, uncomment below.
+ //int dummy;
+ //android_atomic_acquire_store(0, &dummy);
+}
+int32_t sk_atomic_conditional_inc(int32_t* addr) {
+ while (true) {
+ int32_t value = *addr;
+ if (value == 0) {
+ return 0;
+ }
+ if (0 == android_atomic_release_cas(value, value + 1, addr)) {
+ return value;
+ }
+ }
+}
+void sk_membar_aquire__after_atomic_conditional_inc() {
+ // HACK: Android is actually using full memory barriers.
+ // Should this change, uncomment below.
+ //int dummy;
+ //android_atomic_acquire_store(0, &dummy);
+}
#endif // !SK_BUILD_FOR_ANDROID_NDK
#else // !SK_BUILD_FOR_ANDROID
-/** Implemented by the porting layer, this function adds 1 to the int specified
- by the address (in a thread-safe manner), and returns the previous value.
+/** Implemented by the porting layer, this function adds one to the int
+ specified by the address (in a thread-safe manner), and returns the
+ previous value.
+ No additional memory barrier is required.
+ This must act as a compiler barrier.
*/
SK_API int32_t sk_atomic_inc(int32_t* addr);
-/** Implemented by the porting layer, this function subtracts 1 to the int
- specified by the address (in a thread-safe manner), and returns the previous
- value.
+
+/** Implemented by the porting layer, this function subtracts one from the int
+ specified by the address (in a thread-safe manner), and returns the
+ previous value.
+ Expected to act as a release (SL/S) memory barrier and a compiler barrier.
*/
SK_API int32_t sk_atomic_dec(int32_t* addr);
+/** If sk_atomic_dec does not act as an acquire (L/SL) barrier, this is expected
+ to act as an acquire (L/SL) memory barrier and as a compiler barrier.
+*/
+SK_API void sk_membar_aquire__after_atomic_dec();
+
+/** Implemented by the porting layer, this function adds one to the int
+ specified by the address iff the int specified by the address is not zero
+ (in a thread-safe manner), and returns the previous value.
+ No additional memory barrier is required.
+ This must act as a compiler barrier.
+*/
+SK_API int32_t sk_atomic_conditional_inc(int32_t*);
+/** If sk_atomic_conditional_inc does not act as an acquire (L/SL) barrier, this
+ is expected to act as an acquire (L/SL) memory barrier and as a compiler
+ barrier.
+*/
+SK_API void sk_membar_aquire__after_atomic_conditional_inc();
#endif // !SK_BUILD_FOR_ANDROID
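+
+/* One possible porting-layer implementation satisfying the contract above,
+ sketched with C++11 <atomic> (illustrative only; the actual ports are in the
+ platform files below, and the cast assumes std::atomic<int32_t> is
+ layout-compatible with int32_t):
+
+ #include <atomic>
+
+ int32_t sk_atomic_inc(int32_t* addr) {
+ // No barrier is required for the increment itself.
+ return reinterpret_cast<std::atomic<int32_t>*>(addr)
+ ->fetch_add(1, std::memory_order_relaxed);
+ }
+
+ int32_t sk_atomic_dec(int32_t* addr) {
+ // Release ordering supplies the required SL/S barrier.
+ return reinterpret_cast<std::atomic<int32_t>*>(addr)
+ ->fetch_sub(1, std::memory_order_release);
+ }
+
+ void sk_membar_aquire__after_atomic_dec() {
+ // sk_atomic_dec above is release-only, so provide the acquire here.
+ std::atomic_thread_fence(std::memory_order_acquire);
+ }
+*/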
#define SkTypeface_DEFINED
#include "SkAdvancedTypefaceMetrics.h"
-#include "SkRefCnt.h"
+#include "SkWeakRefCnt.h"
class SkStream;
class SkAdvancedTypefaceMetrics;
Typeface objects are immutable, and so they can be shared between threads.
*/
-class SK_API SkTypeface : public SkRefCnt {
+class SK_API SkTypeface : public SkWeakRefCnt {
public:
/** Style specifies the intrinsic style attributes of a given typeface
*/
Style fStyle;
bool fIsFixedWidth;
- typedef SkRefCnt INHERITED;
+ typedef SkWeakRefCnt INHERITED;
};
#endif
--- /dev/null
+/*
+ * Copyright 2012 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkWeakRefCnt_DEFINED
+#define SkWeakRefCnt_DEFINED
+
+#include "SkRefCnt.h"
+#include "SkThread.h"
+
+/** \class SkWeakRefCnt
+
+ SkWeakRefCnt is the base class for objects that may be shared by multiple
+ objects. When an existing strong owner wants to share a reference, it calls
+ ref(). When a strong owner wants to release its reference, it calls
+ unref(). When the shared object's strong reference count goes to zero as
+ the result of an unref() call, its (virtual) weak_dispose method is called.
+ It is an error for the destructor to be called explicitly (or via the
+ object going out of scope on the stack or calling delete) if
+ getRefCnt() > 1.
+
+ In addition to strong ownership, an owner may instead obtain a weak
+ reference by calling weak_ref(). A call to weak_ref() must be balanced by a
+ call to weak_unref(). To obtain a strong reference from a weak reference,
+ call try_ref(). If try_ref() returns true, the owner's pointer is now also
+ a strong reference on which unref() must be called. Note that this does not
+ affect the original weak reference, weak_unref() must still be called. When
+ the weak reference count goes to zero, the object is deleted. While the
+ weak reference count is positive and the strong reference count is zero the
+ object still exists, but will be in the disposed state. It is up to the
+ object to define what this means.
+
+ Note that a strong reference implicitly implies a weak reference. As a
+ result, it is allowable for the owner of a strong ref to call try_ref().
+ This will have the same effect as calling ref(), but may be more expensive.
+
+ Example:
+
+ SkWeakRefCnt* myRef = strongRef;
+ myRef->weak_ref();
+ ... // strongRef->unref() may or may not be called
+ if (myRef->try_ref()) {
+ ... // use myRef as a strong reference
+ myRef->unref();
+ } else {
+ // myRef is in the disposed state
+ }
+ myRef->weak_unref();
+*/
+class SK_API SkWeakRefCnt : public SkRefCnt {
+public:
+ /** Default construct, initializing the reference counts to 1.
+ The strong references collectively hold one weak reference. When the
+ strong reference count goes to zero, the collectively held weak
+ reference is released.
+ */
+ SkWeakRefCnt() : SkRefCnt(), fWeakCnt(1) {}
+
+ /** Destruct, asserting that the weak reference count is 1.
+ */
+ virtual ~SkWeakRefCnt() {
+#ifdef SK_DEBUG
+ SkASSERT(fWeakCnt == 1);
+ fWeakCnt = 0;
+#endif
+ }
+
+ /** Return the weak reference count.
+ */
+ int32_t getWeakCnt() const { return fWeakCnt; }
+
+ void validate() const {
+ SkRefCnt::validate();
+ SkASSERT(fWeakCnt > 0);
+ }
+
+ /** Creates a strong reference from a weak reference, if possible. The
+ caller must already be an owner. If try_ref() returns true the owner
+ is in possession of an additional strong reference. Both the original
+ reference and new reference must be properly unreferenced. If try_ref()
+ returns false, no strong reference could be created and the owner's
+ reference is in the same state as before the call.
+ */
+ bool SK_WARN_UNUSED_RESULT try_ref() const {
+ if (sk_atomic_conditional_inc(&fRefCnt) != 0) {
+ // Acquire barrier (L/SL), if not provided above.
+ // Prevents subsequent code from running before the increment.
+ sk_membar_aquire__after_atomic_conditional_inc();
+ return true;
+ }
+ return false;
+ }
+
+ /** Increment the weak reference count. Must be balanced by a call to
+ weak_unref().
+ */
+ void weak_ref() const {
+ SkASSERT(fRefCnt > 0);
+ SkASSERT(fWeakCnt > 0);
+ sk_atomic_inc(&fWeakCnt); // No barrier required.
+ }
+
+ /** Decrement the weak reference count. If the weak reference count is 1
+ before the decrement, then delete the object. Note that if this
+ is the case, then the object needs to have been allocated via new, and
+ not on the stack.
+ */
+ void weak_unref() const {
+ SkASSERT(fWeakCnt > 0);
+ // Release barrier (SL/S), if not provided below.
+ if (sk_atomic_dec(&fWeakCnt) == 1) {
+ // Acquire barrier (L/SL), if not provided above.
+ // Prevents code in the destructor from running before the decrement.
+ sk_membar_aquire__after_atomic_dec();
+#ifdef SK_DEBUG
+ // so our destructor won't complain
+ fWeakCnt = 1;
+#endif
+ SkRefCnt::internal_dispose();
+ }
+ }
+
+ /** Returns true if there are no strong references to the object. When this
+ is the case all future calls to try_ref() will return false.
+ */
+ bool weak_expired() const {
+ return fRefCnt == 0;
+ }
+
+protected:
+ /** Called when the strong reference count goes to zero. This allows the
+ object to free any resources it may be holding. Weak references may
+ still exist and their level of allowed access to the object is defined
+ by the object's class.
+ */
+ virtual void weak_dispose() const {
+ }
+
+private:
+ /** Called when the strong reference count goes to zero. Calls weak_dispose
+ on the object and releases the implicit weak reference held
+ collectively by the strong references.
+ */
+ virtual void internal_dispose() const SK_OVERRIDE {
+ weak_dispose();
+ weak_unref();
+ }
+
+ /* Invariant: fWeakCnt = #weak + (fRefCnt > 0 ? 1 : 0) */
+ mutable int32_t fWeakCnt;
+};
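+
+/* Illustrative trace of the invariant above, for one strong and one weak
+ owner:
+
+ new SkWeakRefCnt() // fRefCnt == 1, fWeakCnt == 1
+ weak_ref() // fRefCnt == 1, fWeakCnt == 2
+ unref() // fRefCnt == 0: weak_dispose() runs, fWeakCnt == 1
+ weak_unref() // fWeakCnt == 0: the object is deleted
+*/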
+
+#endif
#define TYPEFACE_CACHE_LIMIT 128
-void SkTypefaceCache::add(SkTypeface* face, SkTypeface::Style requestedStyle) {
+void SkTypefaceCache::add(SkTypeface* face,
+ SkTypeface::Style requestedStyle,
+ bool strong) {
if (fArray.count() >= TYPEFACE_CACHE_LIMIT) {
this->purge(TYPEFACE_CACHE_LIMIT >> 2);
}
Rec* rec = fArray.append();
rec->fFace = face;
rec->fRequestedStyle = requestedStyle;
- face->ref();
+ rec->fStrong = strong;
+ if (strong) {
+ face->ref();
+ } else {
+ face->weak_ref();
+ }
}
SkTypeface* SkTypefaceCache::findByID(SkFontID fontID) const {
return NULL;
}
-SkTypeface* SkTypefaceCache::findByProc(FindProc proc, void* ctx) const {
+SkTypeface* SkTypefaceCache::findByProcAndRef(FindProc proc, void* ctx) const {
const Rec* curr = fArray.begin();
const Rec* stop = fArray.end();
while (curr < stop) {
- if (proc(curr->fFace, curr->fRequestedStyle, ctx)) {
- return curr->fFace;
+ SkTypeface* currFace = curr->fFace;
+ if (proc(currFace, curr->fRequestedStyle, ctx)) {
+ if (curr->fStrong) {
+ currFace->ref();
+ return currFace;
+ } else if (currFace->try_ref()) {
+ return currFace;
+ } else {
+ // TODO: remove currFace from fArray?
+ }
}
curr += 1;
}
int i = 0;
while (i < count) {
SkTypeface* face = fArray[i].fFace;
- if (1 == face->getRefCnt()) {
- face->unref();
+ bool strong = fArray[i].fStrong;
+ if ((strong && face->getRefCnt() == 1) ||
+ (!strong && face->weak_expired()))
+ {
+ if (strong) {
+ face->unref();
+ } else {
+ face->weak_unref();
+ }
fArray.remove(i);
--count;
if (--numToPurge == 0) {
SK_DECLARE_STATIC_MUTEX(gMutex);
-void SkTypefaceCache::Add(SkTypeface* face, SkTypeface::Style requestedStyle) {
+void SkTypefaceCache::Add(SkTypeface* face,
+ SkTypeface::Style requestedStyle,
+ bool strong) {
SkAutoMutexAcquire ama(gMutex);
- Get().add(face, requestedStyle);
+ Get().add(face, requestedStyle, strong);
}
SkTypeface* SkTypefaceCache::FindByID(SkFontID fontID) {
SkTypeface* SkTypefaceCache::FindByProcAndRef(FindProc proc, void* ctx) {
SkAutoMutexAcquire ama(gMutex);
- SkTypeface* typeface = Get().findByProc(proc, ctx);
- SkSafeRef(typeface);
+ SkTypeface* typeface = Get().findByProcAndRef(proc, ctx);
return typeface;
}
void SkTypefaceCache::Dump() {
#ifdef SK_DEBUG
SkAutoMutexAcquire ama(gMutex);
- (void)Get().findByProc(DumpProc, NULL);
+ (void)Get().findByProcAndRef(DumpProc, NULL);
#endif
}
/**
* Callback for FindByProc. Returns true if the given typeface is a match
* for the given context. The passed typeface is owned by the cache and is
- * not additionally ref()ed.
+ * not additionally ref()ed. The typeface may be in the disposed state.
*/
typedef bool (*FindProc)(SkTypeface*, SkTypeface::Style, void* context);
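+//
+// A hypothetical matcher sketching this contract (find_by_id and its
+// context are illustrative, not part of this change):
+//
+// static bool find_by_id(SkTypeface* face, SkTypeface::Style, void* ctx) {
+// return face->uniqueID() == *static_cast<SkFontID*>(ctx);
+// }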
* whose refcnt is 1 (meaning only the cache is an owner) will be
* unref()ed.
*/
- static void Add(SkTypeface*, SkTypeface::Style requested);
+ static void Add(SkTypeface*,
+ SkTypeface::Style requested,
+ bool strong = true);
/**
* Search the cache for a typeface with the specified fontID (uniqueID).
private:
static SkTypefaceCache& Get();
- void add(SkTypeface*, SkTypeface::Style requested);
+ void add(SkTypeface*, SkTypeface::Style requested, bool strong = true);
SkTypeface* findByID(SkFontID findID) const;
- SkTypeface* findByProc(FindProc proc, void* ctx) const;
+ SkTypeface* findByProcAndRef(FindProc proc, void* ctx) const;
void purge(int count);
void purgeAll();
struct Rec {
SkTypeface* fFace;
+ bool fStrong;
SkTypeface::Style fRequestedStyle;
};
SkTDArray<Rec> fArray;
*addr = value - 1;
return value;
}
+void sk_membar_aquire__after_atomic_dec() { }
+
+int32_t sk_atomic_conditional_inc(int32_t* addr) {
+ int32_t value = *addr;
+ if (value != 0) ++*addr;
+ return value;
+}
+void sk_membar_aquire__after_atomic_conditional_inc() { }
SkMutex::SkMutex() {}
{
return __sync_fetch_and_add(addr, -1);
}
+void sk_membar_aquire__after_atomic_dec() { }
+
+int32_t sk_atomic_conditional_inc(int32_t* addr)
+{
+ int32_t value = *addr;
+
+ while (true) {
+ if (value == 0) {
+ return 0;
+ }
+
+ int32_t before = __sync_val_compare_and_swap(addr, value, value + 1);
+
+ if (before == value) {
+ return value;
+ } else {
+ value = before;
+ }
+ }
+}
+void sk_membar_aquire__after_atomic_conditional_inc() { }
#else
*addr = value - 1;
return value;
}
+void sk_membar_aquire__after_atomic_dec() { }
+
+int32_t sk_atomic_conditional_inc(int32_t* addr)
+{
+ SkAutoMutexAcquire ac(gAtomicMutex);
+
+ int32_t value = *addr;
+ if (value != 0) ++*addr;
+ return value;
+}
+void sk_membar_aquire__after_atomic_conditional_inc() { }
#endif
//directive.
//The pragma appears to be unnecessary, but doesn't hurt.
#pragma intrinsic(_InterlockedIncrement, _InterlockedDecrement)
+#pragma intrinsic(_InterlockedCompareExchange)
int32_t sk_atomic_inc(int32_t* addr) {
// InterlockedIncrement returns the new value, we want to return the old.
int32_t sk_atomic_dec(int32_t* addr) {
return _InterlockedDecrement(reinterpret_cast<LONG*>(addr)) + 1;
}
+void sk_membar_aquire__after_atomic_dec() { }
+
+int32_t sk_atomic_conditional_inc(int32_t* addr) {
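+ // Same CAS loop as the __sync-based ports: increment only while the
+ // observed value is non-zero.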
+ while (true) {
+ LONG value = static_cast<int32_t const volatile&>(*addr);
+ if (value == 0) {
+ return 0;
+ }
+ if (_InterlockedCompareExchange(reinterpret_cast<LONG*>(addr),
+ value + 1,
+ value) == value) {
+ return value;
+ }
+ }
+}
+void sk_membar_aquire__after_atomic_conditional_inc() { }
SkMutex::SkMutex() {
SK_COMPILE_ASSERT(sizeof(fStorage) > sizeof(CRITICAL_SECTION),
#include "SkRefCnt.h"
#include "SkThreadUtils.h"
+#include "SkWeakRefCnt.h"
///////////////////////////////////////////////////////////////////////////////
ref->unref();
}
+static void bounce_weak_ref(void* data) {
+ SkWeakRefCnt* ref = static_cast<SkWeakRefCnt*>(data);
+ for (int i = 0; i < 100000; ++i) {
+ if (ref->try_ref()) {
+ ref->unref();
+ }
+ }
+}
+
+static void bounce_weak_weak_ref(void* data) {
+ SkWeakRefCnt* ref = static_cast<SkWeakRefCnt*>(data);
+ for (int i = 0; i < 100000; ++i) {
+ ref->weak_ref();
+ ref->weak_unref();
+ }
+}
+
+static void test_weakRefCnt(skiatest::Reporter* reporter) {
+ SkWeakRefCnt* ref = new SkWeakRefCnt();
+
+ SkThread thing1(bounce_ref, ref);
+ SkThread thing2(bounce_ref, ref);
+ SkThread thing3(bounce_weak_ref, ref);
+ SkThread thing4(bounce_weak_weak_ref, ref);
+
+ thing1.setProcessorAffinity(0);
+ thing2.setProcessorAffinity(23);
+ thing3.setProcessorAffinity(2);
+ thing4.setProcessorAffinity(17);
+
+ // Not SkASSERT: it compiles away in release builds and the threads
+ // would never be started.
+ REPORTER_ASSERT(reporter, thing1.start());
+ REPORTER_ASSERT(reporter, thing2.start());
+ REPORTER_ASSERT(reporter, thing3.start());
+ REPORTER_ASSERT(reporter, thing4.start());
+
+ thing1.join();
+ thing2.join();
+ thing3.join();
+ thing4.join();
+
+ REPORTER_ASSERT(reporter, ref->getRefCnt() == 1);
+ REPORTER_ASSERT(reporter, ref->getWeakCnt() == 1);
+ ref->unref();
+}
+
+static void test_refCntTests(skiatest::Reporter* reporter) {
+ test_refCnt(reporter);
+ test_weakRefCnt(reporter);
+}
+
#include "TestClassDef.h"
-DEFINE_TESTCLASS("ref_cnt", RefCntTestClass, test_refCnt)
+DEFINE_TESTCLASS("RefCnt", RefCntTestClass, test_refCntTests)