This fixes a bootstrap error on AIX.
The lock function currently just spins; that should be changed to use
back-off, and perhaps then _M_val.wait(__current) where supported.
libstdc++-v3/ChangeLog:
PR libstdc++/104101
* include/bits/shared_ptr_atomic.h (_Sp_atomic::_Atomic_count::lock):
Only use __thread_relax if __cpp_lib_atomic_wait is defined.
auto __current = _M_val.load(memory_order_relaxed);
while (__current & _S_lock_bit)
{
+#if __cpp_lib_atomic_wait
__detail::__thread_relax();
+#endif
__current = _M_val.load(memory_order_relaxed);
}
__o,
memory_order_relaxed))
{
+#if __cpp_lib_atomic_wait
__detail::__thread_relax();
+#endif
__current = __current & ~_S_lock_bit;
}
return reinterpret_cast<pointer>(__current);