Will be used in future changes.
Reviewed By: melver
Differential Revision: https://reviews.llvm.org/D122905
}
}
+ // Non-blocking write-lock attempt. Returns false immediately if the mutex
+ // is currently read- or write-locked; on success, publishes the writer bit
+ // with acquire ordering and notifies CheckedMutex (deadlock detection).
+ // NOTE(review): the loop only retries on CAS failure caused by a concurrent
+ // state change; a held lock still returns false on the next iteration's
+ // re-check of the (CAS-refreshed) `state`.
+ bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
+ u64 state = atomic_load_relaxed(&state_);
+ for (;;) {
+ if (UNLIKELY(state & (kWriterLock | kReaderLockMask)))
+ return false;
+ // The mutex is not read-/write-locked, try to lock.
+ if (LIKELY(atomic_compare_exchange_weak(
+ &state_, &state, state | kWriterLock, memory_order_acquire))) {
+ CheckedMutex::Lock();
+ return true;
+ }
+ }
+ }
+
void Unlock() SANITIZER_RELEASE() {
CheckedMutex::Unlock();
bool wake_writer;
for (int i = 0; i < kThreads; i++) PTHREAD_JOIN(threads[i], 0);
}
+// Stress test for Mutex::TryLock: spawns kThreads threads all running
+// try_thread<Mutex> against one shared Mutex (wrapped in TestData), then
+// joins them. Presumably try_thread spins on TryLock/Unlock to exercise
+// contention — its body is defined elsewhere in this file (not visible
+// in this chunk); TODO(review) confirm against the full test file.
+TEST(SanitizerCommon, MutexTry) {
+ Mutex mtx;
+ TestData<Mutex> data(&mtx);
+ pthread_t threads[kThreads];
+ for (int i = 0; i < kThreads; i++)
+ PTHREAD_CREATE(&threads[i], 0, try_thread<Mutex>, &data);
+ for (int i = 0; i < kThreads; i++) PTHREAD_JOIN(threads[i], 0);
+}
+
struct SemaphoreData {
Semaphore *sem;
bool done;