libstdc++: Implement C++20 atomic<shared_ptr> and atomic<weak_ptr>
author    Jonathan Wakely <jwakely@redhat.com>
          Sun, 16 Jan 2022 20:47:09 +0000
committer Jonathan Wakely <jwakely@redhat.com>
          Mon, 17 Jan 2022 00:06:28 +0000
This adds another piece of C++20, the std::atomic specializations for
std::shared_ptr and std::weak_ptr.
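
For example (a minimal usage sketch of my own, not code from this
patch), threads can now share a shared_ptr through std::atomic
instead of the old std::atomic_load/atomic_store free functions,
which C++20 deprecates:

  #include <memory>

  // Illustration only: names and values are hypothetical.
  std::atomic<std::shared_ptr<int>> ptr = std::make_shared<int>(0);

  void reader()
  {
    // load() returns a stable snapshot that keeps the object
    // alive even if another thread stores a new value.
    std::shared_ptr<int> p = ptr.load();
  }

  void writer()
  {
    // store() atomically replaces the owned object.
    ptr.store(std::make_shared<int>(1));
  }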

The new _Sp_atomic type mimics the structure of shared_ptr<T> and
weak_ptr<T>, holding a T* pointer (the one returned by get() on a
shared_ptr/weak_ptr) and a _Sp_counted_base<>* pointer to the
ref-counted control block. For _Sp_atomic the low bit of the control
block pointer is used as a lock bit, to ensure only one thread will
access the object at a time.  The pointer is actually stored as a
uintptr_t to avoid accidental dereferences of the pointer when unlocked
(which would be a race) or when locked (which would dereference the
wrong pointer value due to the low bit being set). To get a raw pointer
to the control block, the lock must be acquired. Converting between a
_Sp_atomic and a shared_ptr or weak_ptr requires manually adjusting the
T* and _Sp_counted_base<>* members of the shared/weak ptr, instead of
going through the public API. This must be done carefully to ensure that
any change in the number of owners is reflected in a ref-count update.
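
The locking scheme can be shown in isolation. The sketch below is a
simplified model of the technique, written for this description; it
is not the _Atomic_count code in the patch (the real lock() also
calls __detail::__thread_relax() to back off while spinning):

  #include <atomic>
  #include <cstdint>

  struct locked_ptr // hypothetical name, illustration only
  {
    std::atomic<std::uintptr_t> val{0};
    static constexpr std::uintptr_t lock_bit = 1;

    // Spin until the LSB flips from 0 to 1, then return the real
    // pointer with the lock bit masked off.
    void* lock()
    {
      auto cur = val.load(std::memory_order_relaxed);
      for (;;)
        {
          cur &= ~lock_bit; // only try to lock an unlocked value
          if (val.compare_exchange_weak(cur, cur | lock_bit,
                                        std::memory_order_acquire,
                                        std::memory_order_relaxed))
            return reinterpret_cast<void*>(cur);
        }
    }

    // Clearing the LSB unlocks without disturbing the pointer bits.
    void unlock()
    { val.fetch_sub(lock_bit, std::memory_order_release); }
  };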

Co-authored-by: Thomas Rodgers <trodgers@redhat.com>
Signed-off-by: Thomas Rodgers <trodgers@redhat.com>

libstdc++-v3/ChangeLog:

* include/bits/shared_ptr_atomic.h (__cpp_lib_atomic_shared_ptr):
New macro.
(_Sp_atomic): New class template.
(atomic<shared_ptr<T>>, atomic<weak_ptr<T>>): New partial
specializations.
* include/bits/shared_ptr_base.h (__shared_count, __weak_count)
(__shared_ptr, __weak_ptr): Declare _Sp_atomic as a friend.
* include/std/version (__cpp_lib_atomic_shared_ptr): New macro.
* testsuite/20_util/shared_ptr/atomic/atomic_shared_ptr.cc: New
test.
* testsuite/20_util/weak_ptr/atomic_weak_ptr.cc: New test.

libstdc++-v3/include/bits/shared_ptr_atomic.h
libstdc++-v3/include/bits/shared_ptr_base.h
libstdc++-v3/include/std/version
libstdc++-v3/testsuite/20_util/shared_ptr/atomic/atomic_shared_ptr.cc [new file with mode: 0644]
libstdc++-v3/testsuite/20_util/weak_ptr/atomic_weak_ptr.cc [new file with mode: 0644]

diff --git a/libstdc++-v3/include/bits/shared_ptr_atomic.h b/libstdc++-v3/include/bits/shared_ptr_atomic.h
index 0e1c289..900499b 100644
--- a/libstdc++-v3/include/bits/shared_ptr_atomic.h
+++ b/libstdc++-v3/include/bits/shared_ptr_atomic.h
@@ -327,6 +327,461 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     }
   /// @}
 
+#if __cplusplus >= 202002L
+# define __cpp_lib_atomic_shared_ptr 201711L
+  template<typename _Tp>
+    class atomic;
+
+  template<typename _Up>
+    static constexpr bool __is_shared_ptr = false;
+  template<typename _Up>
+    static constexpr bool __is_shared_ptr<shared_ptr<_Up>> = true;
+
+  template<typename _Tp>
+    class _Sp_atomic
+    {
+      using value_type = _Tp;
+
+      friend class atomic<_Tp>;
+
+      // An atomic version of __shared_count<> and __weak_count<>.
+      // Stores a _Sp_counted_base<>* but uses the LSB as a lock.
+      struct _Atomic_count
+      {
+       // Either __shared_count<> or __weak_count<>
+       using __count_type = decltype(_Tp::_M_refcount);
+
+       // _Sp_counted_base<>*
+       using pointer = decltype(__count_type::_M_pi);
+
+       // Ensure we can use the LSB as the lock bit.
+       static_assert(alignof(remove_pointer_t<pointer>) > 1);
+
+       _Atomic_count() : _M_val(0) { }
+
+       explicit
+       _Atomic_count(__count_type&& __c) noexcept
+       : _M_val(reinterpret_cast<uintptr_t>(__c._M_pi))
+       {
+         __c._M_pi = nullptr;
+       }
+
+       ~_Atomic_count()
+       {
+         auto __val = _M_val.load(memory_order_relaxed);
+         __glibcxx_assert(!(__val & _S_lock_bit));
+         if (auto __pi = reinterpret_cast<pointer>(__val))
+           {
+             if constexpr (__is_shared_ptr<_Tp>)
+               __pi->_M_release();
+             else
+               __pi->_M_weak_release();
+           }
+       }
+
+       _Atomic_count(const _Atomic_count&) = delete;
+       _Atomic_count& operator=(const _Atomic_count&) = delete;
+
+       // Precondition: Caller does not hold lock!
+       // Returns the raw pointer value without the lock bit set.
+       pointer
+       lock(memory_order __o) const noexcept
+       {
+         // To acquire the lock we flip the LSB from 0 to 1.
+
+         auto __current = _M_val.load(memory_order_relaxed);
+         while (__current & _S_lock_bit)
+           {
+             __detail::__thread_relax();
+             __current = _M_val.load(memory_order_relaxed);
+           }
+
+         while (!_M_val.compare_exchange_strong(__current,
+                                                __current | _S_lock_bit,
+                                                __o,
+                                                memory_order_relaxed))
+           {
+             __detail::__thread_relax();
+             __current = __current & ~_S_lock_bit;
+           }
+         return reinterpret_cast<pointer>(__current);
+       }
+
+       // Precondition: caller holds lock!
+       void
+       unlock(memory_order __o) const noexcept
+       {
+         _M_val.fetch_sub(1, __o);
+       }
+
+       // Swaps the values of *this and __c, and unlocks *this.
+       // Precondition: caller holds lock!
+       void
+       _M_swap_unlock(__count_type& __c, memory_order __o) noexcept
+       {
+         if (__o != memory_order_seq_cst)
+           __o = memory_order_release;
+         auto __x = reinterpret_cast<uintptr_t>(__c._M_pi);
+         __x = _M_val.exchange(__x, __o);
+         __c._M_pi = reinterpret_cast<pointer>(__x & ~_S_lock_bit);
+       }
+
+#if __cpp_lib_atomic_wait
+       // Precondition: caller holds lock!
+       void
+       _M_wait_unlock(memory_order __o) const noexcept
+       {
+         auto __v = _M_val.fetch_sub(1, memory_order_relaxed);
+         _M_val.wait(__v & ~_S_lock_bit, __o);
+       }
+
+       void
+       notify_one() noexcept
+       {
+         _M_val.notify_one();
+       }
+
+       void
+       notify_all() noexcept
+       {
+         _M_val.notify_all();
+       }
+#endif
+
+      private:
+       mutable __atomic_base<uintptr_t> _M_val{0};
+       static constexpr uintptr_t _S_lock_bit{1};
+      };
+
+      typename _Tp::element_type* _M_ptr = nullptr;
+      _Atomic_count _M_refcount;
+
+      static _Atomic_count::pointer
+      _S_add_ref(_Atomic_count::pointer __p)
+      {
+       if (__p)
+         {
+           if constexpr (__is_shared_ptr<_Tp>)
+             __p->_M_add_ref_copy();
+           else
+             __p->_M_weak_add_ref();
+         }
+       return __p;
+      }
+
+      constexpr _Sp_atomic() noexcept = default;
+
+      explicit
+      _Sp_atomic(value_type __r) noexcept
+      : _M_ptr(__r._M_ptr), _M_refcount(std::move(__r._M_refcount))
+      { }
+
+      ~_Sp_atomic() = default;
+
+      _Sp_atomic(const _Sp_atomic&) = delete;
+      void operator=(const _Sp_atomic&) = delete;
+
+      value_type
+      load(memory_order __o) const noexcept
+      {
+       __glibcxx_assert(__o != memory_order_release
+                          && __o != memory_order_acq_rel);
+       // Ensure that the correct value of _M_ptr is visible after locking,
+       // by upgrading relaxed or consume to acquire.
+       if (__o != memory_order_seq_cst)
+         __o = memory_order_acquire;
+
+       value_type __ret;
+       auto __pi = _M_refcount.lock(__o);
+       __ret._M_ptr = _M_ptr;
+       __ret._M_refcount._M_pi = _S_add_ref(__pi);
+       _M_refcount.unlock(memory_order_relaxed);
+       return __ret;
+      }
+
+      void
+      swap(value_type& __r, memory_order __o) noexcept
+      {
+       _M_refcount.lock(memory_order_acquire);
+       std::swap(_M_ptr, __r._M_ptr);
+       _M_refcount._M_swap_unlock(__r._M_refcount, __o);
+      }
+
+      bool
+      compare_exchange_strong(value_type& __expected, value_type __desired,
+                             memory_order __o, memory_order __o2) noexcept
+      {
+       bool __result = true;
+       auto __pi = _M_refcount.lock(memory_order_acquire);
+       if (_M_ptr == __expected._M_ptr
+             && __pi == __expected._M_refcount._M_pi)
+         {
+           _M_ptr = __desired._M_ptr;
+           _M_refcount._M_swap_unlock(__desired._M_refcount, __o);
+         }
+       else
+         {
+           _Tp __sink = std::move(__expected);
+           __expected._M_ptr = _M_ptr;
+           __expected._M_refcount._M_pi = _S_add_ref(__pi);
+           _M_refcount.unlock(__o2);
+           __result = false;
+         }
+       return __result;
+      }
+
+#if __cpp_lib_atomic_wait
+      void
+      wait(value_type __old, memory_order __o) const noexcept
+      {
+       auto __pi = _M_refcount.lock(memory_order_acquire);
+       if (_M_ptr == __old._M_ptr && __pi == __old._M_refcount._M_pi)
+         _M_refcount._M_wait_unlock(__o);
+       else
+         _M_refcount.unlock(memory_order_relaxed);
+      }
+
+      void
+      notify_one() noexcept
+      {
+       _M_refcount.notify_one();
+      }
+
+      void
+      notify_all() noexcept
+      {
+       _M_refcount.notify_all();
+      }
+#endif
+    };
+
+  template<typename _Tp>
+    class atomic<shared_ptr<_Tp>>
+    {
+    public:
+      using value_type = shared_ptr<_Tp>;
+
+      static constexpr bool is_always_lock_free = false;
+
+      bool
+      is_lock_free() const noexcept
+      { return false; }
+
+      constexpr atomic() noexcept = default;
+
+      atomic(shared_ptr<_Tp> __r) noexcept
+      : _M_impl(std::move(__r))
+      { }
+
+      atomic(const atomic&) = delete;
+      void operator=(const atomic&) = delete;
+
+      shared_ptr<_Tp>
+      load(memory_order __o = memory_order_seq_cst) const noexcept
+      { return _M_impl.load(__o); }
+
+      operator shared_ptr<_Tp>() const noexcept
+      { return _M_impl.load(memory_order_seq_cst); }
+
+      void
+      store(shared_ptr<_Tp> __desired,
+           memory_order __o = memory_order_seq_cst) noexcept
+      { _M_impl.swap(__desired, __o); }
+
+      void
+      operator=(shared_ptr<_Tp> __desired) noexcept
+      { _M_impl.swap(__desired, memory_order_seq_cst); }
+
+      shared_ptr<_Tp>
+      exchange(shared_ptr<_Tp> __desired,
+              memory_order __o = memory_order_seq_cst) noexcept
+      {
+       _M_impl.swap(__desired, __o);
+       return __desired;
+      }
+
+      bool
+      compare_exchange_strong(shared_ptr<_Tp>& __expected,
+                             shared_ptr<_Tp> __desired,
+                             memory_order __o, memory_order __o2) noexcept
+      {
+       return _M_impl.compare_exchange_strong(__expected, __desired, __o, __o2);
+      }
+
+      bool
+      compare_exchange_strong(value_type& __expected, value_type __desired,
+                             memory_order __o = memory_order_seq_cst) noexcept
+      {
+       memory_order __o2;
+       switch (__o)
+       {
+       case memory_order_acq_rel:
+         __o2 = memory_order_acquire;
+         break;
+       case memory_order_release:
+         __o2 = memory_order_relaxed;
+         break;
+       default:
+         __o2 = __o;
+       }
+       return compare_exchange_strong(__expected, std::move(__desired),
+                                      __o, __o2);
+      }
+
+      bool
+      compare_exchange_weak(value_type& __expected, value_type __desired,
+                           memory_order __o, memory_order __o2) noexcept
+      {
+       return compare_exchange_strong(__expected, std::move(__desired),
+                                      __o, __o2);
+      }
+
+      bool
+      compare_exchange_weak(value_type& __expected, value_type __desired,
+                           memory_order __o = memory_order_seq_cst) noexcept
+      {
+       return compare_exchange_strong(__expected, std::move(__desired), __o);
+      }
+
+#if __cpp_lib_atomic_wait
+      void
+      wait(value_type __old,
+          memory_order __o = memory_order_seq_cst) const noexcept
+      {
+       _M_impl.wait(std::move(__old), __o);
+      }
+
+      void
+      notify_one() noexcept
+      {
+       _M_impl.notify_one();
+      }
+
+      void
+      notify_all() noexcept
+      {
+       _M_impl.notify_all();
+      }
+#endif
+
+    private:
+      _Sp_atomic<shared_ptr<_Tp>> _M_impl;
+    };
+
+  template<typename _Tp>
+    class atomic<weak_ptr<_Tp>>
+    {
+    public:
+      using value_type = weak_ptr<_Tp>;
+
+      static constexpr bool is_always_lock_free = false;
+
+      bool
+      is_lock_free() const noexcept
+      { return false; }
+
+      constexpr atomic() noexcept = default;
+
+      atomic(weak_ptr<_Tp> __r) noexcept
+      : _M_impl(std::move(__r))
+      { }
+
+      atomic(const atomic&) = delete;
+      void operator=(const atomic&) = delete;
+
+      weak_ptr<_Tp>
+      load(memory_order __o = memory_order_seq_cst) const noexcept
+      { return _M_impl.load(__o); }
+
+      operator weak_ptr<_Tp>() const noexcept
+      { return _M_impl.load(memory_order_seq_cst); }
+
+      void
+      store(weak_ptr<_Tp> __desired,
+           memory_order __o = memory_order_seq_cst) noexcept
+      { _M_impl.swap(__desired, __o); }
+
+      void
+      operator=(weak_ptr<_Tp> __desired) noexcept
+      { _M_impl.swap(__desired, memory_order_seq_cst); }
+
+      weak_ptr<_Tp>
+      exchange(weak_ptr<_Tp> __desired,
+              memory_order __o = memory_order_seq_cst) noexcept
+      {
+       _M_impl.swap(__desired, __o);
+       return __desired;
+      }
+
+      bool
+      compare_exchange_strong(weak_ptr<_Tp>& __expected,
+                             weak_ptr<_Tp> __desired,
+                             memory_order __o, memory_order __o2) noexcept
+      {
+       return _M_impl.compare_exchange_strong(__expected, __desired, __o, __o2);
+      }
+
+      bool
+      compare_exchange_strong(value_type& __expected, value_type __desired,
+                             memory_order __o = memory_order_seq_cst) noexcept
+      {
+       memory_order __o2;
+       switch (__o)
+       {
+       case memory_order_acq_rel:
+         __o2 = memory_order_acquire;
+         break;
+       case memory_order_release:
+         __o2 = memory_order_relaxed;
+         break;
+       default:
+         __o2 = __o;
+       }
+       return compare_exchange_strong(__expected, std::move(__desired),
+                                      __o, __o2);
+      }
+
+      bool
+      compare_exchange_weak(value_type& __expected, value_type __desired,
+                           memory_order __o, memory_order __o2) noexcept
+      {
+       return compare_exchange_strong(__expected, std::move(__desired),
+                                      __o, __o2);
+      }
+
+      bool
+      compare_exchange_weak(value_type& __expected, value_type __desired,
+                           memory_order __o = memory_order_seq_cst) noexcept
+      {
+       return compare_exchange_strong(__expected, std::move(__desired), __o);
+      }
+
+#if __cpp_lib_atomic_wait
+      void
+      wait(value_type __old,
+          memory_order __o = memory_order_seq_cst) const noexcept
+      {
+       _M_impl.wait(std::move(__old), __o);
+      }
+
+      void
+      notify_one() noexcept
+      {
+       _M_impl.notify_one();
+      }
+
+      void
+      notify_all() noexcept
+      {
+       _M_impl.notify_all();
+      }
+#endif
+
+    private:
+      _Sp_atomic<weak_ptr<_Tp>> _M_impl;
+    };
+#endif // C++20
+
   /// @} relates shared_ptr
   /// @} group pointer_abstractions
 
diff --git a/libstdc++-v3/include/bits/shared_ptr_base.h b/libstdc++-v3/include/bits/shared_ptr_base.h
index 9e80aab..5b8f84b 100644
--- a/libstdc++-v3/include/bits/shared_ptr_base.h
+++ b/libstdc++-v3/include/bits/shared_ptr_base.h
@@ -409,6 +409,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
   template<_Lock_policy _Lp = __default_lock_policy>
     class __shared_count;
 
+#if __cplusplus >= 202002L
+  template<typename>
+    class _Sp_atomic;
+#endif
 
   // Counted ptr with no deleter or allocator support
   template<typename _Ptr, _Lock_policy _Lp>
@@ -1121,6 +1125,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
     private:
       friend class __weak_count<_Lp>;
+#if __cplusplus >= 202002L
+      template<typename> friend class _Sp_atomic;
+#endif
 
       _Sp_counted_base<_Lp>*  _M_pi;
     };
@@ -1218,6 +1225,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
     private:
       friend class __shared_count<_Lp>;
+#if __cplusplus >= 202002L
+      template<typename> friend class _Sp_atomic;
+#endif
 
       _Sp_counted_base<_Lp>*  _M_pi;
     };
@@ -1765,6 +1775,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       template<typename _Del, typename _Tp1>
        friend _Del* get_deleter(const shared_ptr<_Tp1>&) noexcept;
 
+#if __cplusplus >= 202002L
+      friend _Sp_atomic<shared_ptr<_Tp>>;
+#endif
+
       element_type*       _M_ptr;         // Contained pointer.
       __shared_count<_Lp>  _M_refcount;    // Reference counter.
     };
@@ -2097,6 +2111,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       template<typename _Tp1, _Lock_policy _Lp1> friend class __weak_ptr;
       friend class __enable_shared_from_this<_Tp, _Lp>;
       friend class enable_shared_from_this<_Tp>;
+#if __cplusplus >= 202002L
+      friend _Sp_atomic<weak_ptr<_Tp>>;
+#endif
 
       element_type*     _M_ptr;         // Contained pointer.
       __weak_count<_Lp>  _M_refcount;    // Reference counter.
diff --git a/libstdc++-v3/include/std/version b/libstdc++-v3/include/std/version
index a8b792e..7bd32f6 100644
--- a/libstdc++-v3/include/std/version
+++ b/libstdc++-v3/include/std/version
 #if _GLIBCXX_HOSTED
 #define __cpp_lib_array_constexpr 201811L
 #define __cpp_lib_assume_aligned 201811L
+#define __cpp_lib_atomic_shared_ptr 201711L
 #if defined _GLIBCXX_HAS_GTHREADS || defined _GLIBCXX_HAVE_LINUX_FUTEX
 # define __cpp_lib_atomic_wait 201907L
 # if __cpp_aligned_new
diff --git a/libstdc++-v3/testsuite/20_util/shared_ptr/atomic/atomic_shared_ptr.cc b/libstdc++-v3/testsuite/20_util/shared_ptr/atomic/atomic_shared_ptr.cc
new file mode 100644
index 0000000..725e7ba
--- /dev/null
+++ b/libstdc++-v3/testsuite/20_util/shared_ptr/atomic/atomic_shared_ptr.cc
@@ -0,0 +1,150 @@
+// { dg-options "-std=gnu++20" }
+// { dg-do run { target c++20 } }
+// { dg-require-effective-target gthreads }
+// { dg-additional-options "-pthread" { target pthread } }
+// { dg-add-options libatomic }
+
+#include <memory>
+
+#ifndef __cpp_lib_atomic_shared_ptr
+# error "Feature-test macro for atomic<shared_ptr<T>> missing in <memory>"
+#elif __cpp_lib_atomic_shared_ptr != 201711L
+# error "Feature-test macro for atomic<shared_ptr<T>> has wrong value in <memory>"
+#endif
+
+#include <thread>
+
+#include <testsuite_hooks.h>
+
+void
+test_is_lock_free()
+{
+  using test_type = std::atomic<std::shared_ptr<int>>;
+  static_assert( test_type::is_always_lock_free == false );
+
+  test_type p;
+  VERIFY( p.is_lock_free() == false );
+}
+
+void
+test_atomic_shared_ptr()
+{
+  struct A { int a; int b; };
+
+  auto a = std::make_shared<A>( 0, 42 );
+  using ptr_t = std::shared_ptr<A>;
+  {
+    std::atomic<ptr_t> p{ };
+    VERIFY( p.load().get() == nullptr );
+  }
+
+  std::atomic<ptr_t> p{ a };
+  VERIFY( p.load().get() == a.get() );
+  auto b = std::make_shared<A>( 42, 0 );
+  p.store(b);
+  VERIFY( p.load().get() != a.get() );
+  VERIFY( p.load().get() == b.get() );
+  p.exchange(a);
+  VERIFY( p.load().get() != b.get() );
+  VERIFY( p.load().get() == a.get() );
+
+  {
+    ptr_t aa{ a };
+    VERIFY( p.compare_exchange_strong(aa, b,
+                                     std::memory_order_seq_cst,
+                                     std::memory_order_seq_cst) == true );
+    ptr_t bb{ a };
+    VERIFY( p.compare_exchange_strong(bb, b,
+                                     std::memory_order_seq_cst,
+                                     std::memory_order_seq_cst) == false );
+    VERIFY( bb.get() == b.get() );
+  }
+
+  {
+    ptr_t bb{ b };
+    VERIFY( p.compare_exchange_weak(bb, a,
+                                   std::memory_order_seq_cst,
+                                   std::memory_order_seq_cst) == true );
+    ptr_t aa{ b };
+    VERIFY( p.compare_exchange_weak(aa, a,
+                                     std::memory_order_seq_cst,
+                                     std::memory_order_seq_cst) == false );
+    VERIFY( aa.get() == a.get() );
+  }
+}
+
+void
+test_wait_notify()
+{
+  std::atomic<std::shared_ptr<int>> p;
+  std::shared_ptr<int> a = std::make_shared<int>();
+  std::shared_ptr<int> b = std::make_shared<int>();
+
+  p.store(a);
+  p.wait(b);
+  std::thread t([&]
+      {
+       p.store(b);
+       p.notify_one();
+      });
+  p.wait(a);
+  t.join();
+}
+
+int counter = 0;
+
+void
+test_counting()
+{
+  struct X
+  {
+    ~X() { ++counter; }
+  };
+
+  {
+    std::atomic<std::shared_ptr<X>> p{ std::make_shared<X>() };
+    std::shared_ptr<X> a = p.load();
+    VERIFY( a.use_count() == 2 ); // p, a
+    p.store({});
+    VERIFY( a.use_count() == 1 ); // a
+    p.store(a);
+    VERIFY( a.use_count() == 2 ); // p, a
+    std::shared_ptr<X> b = std::make_shared<X>();
+    std::shared_ptr<X> c = p.exchange(b);
+    VERIFY( a.use_count() == 2 ); // a, c
+    VERIFY( c == a );
+    VERIFY( b.use_count() == 2 ); // p, b
+    std::atomic<std::shared_ptr<X>> p2{a};
+    VERIFY( a.use_count() == 3 ); // p2, a, c
+    VERIFY( p2.compare_exchange_strong(a, b) );
+    VERIFY( a.use_count() == 2 ); // a, c
+    VERIFY( b.use_count() == 3 ); // p, p2, b
+    VERIFY( ! p2.compare_exchange_strong(a, b) );
+    VERIFY( a == b );
+    VERIFY( a.use_count() == 4 ); // p, p2, a, b
+    VERIFY( b.use_count() == 4 );
+    VERIFY( c.use_count() == 1 ); // c
+    VERIFY( p.compare_exchange_weak(b, c) );
+    VERIFY( b.use_count() == 3 ); // p2, a, b
+    VERIFY( c.use_count() == 2 ); // p, c
+    VERIFY( ! p.compare_exchange_weak(a, b) );
+    VERIFY( a == c );
+    VERIFY( a.use_count() == 3 ); // p, a, c
+    VERIFY( b.use_count() == 2 ); // p2, b
+    VERIFY( c.use_count() == 3 ); // p, a, c
+    a.reset();
+    b.reset();
+    c.reset();
+    VERIFY( counter == 0 );
+  }
+  VERIFY( counter == 2 );
+}
+
+int
+main()
+{
+  test_is_lock_free();
+  test_atomic_shared_ptr();
+  test_wait_notify();
+  test_counting();
+}
diff --git a/libstdc++-v3/testsuite/20_util/weak_ptr/atomic_weak_ptr.cc b/libstdc++-v3/testsuite/20_util/weak_ptr/atomic_weak_ptr.cc
new file mode 100644
index 0000000..e394e55
--- /dev/null
+++ b/libstdc++-v3/testsuite/20_util/weak_ptr/atomic_weak_ptr.cc
@@ -0,0 +1,95 @@
+// { dg-options "-std=gnu++20" }
+// { dg-do run { target c++20 } }
+// { dg-require-effective-target gthreads }
+// { dg-additional-options "-pthread" { target pthread } }
+// { dg-add-options libatomic }
+
+#include <memory>
+#include <thread>
+#include <testsuite_hooks.h>
+
+void
+test_is_lock_free()
+{
+  using test_type = std::atomic<std::weak_ptr<int>>;
+  static_assert( test_type::is_always_lock_free == false );
+
+  test_type p;
+  VERIFY( p.is_lock_free() == false );
+}
+
+void
+test_atomic_weak_ptr()
+{
+  struct A { int a; int b; };
+
+  auto a = std::make_shared<A>( 0, 42 );
+  using ptr_t = std::weak_ptr<A>;
+  ptr_t wa{ a };
+  {
+    std::atomic<ptr_t> p{ };
+    VERIFY( p.load().lock().get() == nullptr );
+  }
+
+  std::atomic<ptr_t> p{ wa };
+  VERIFY( p.load().lock().get() == a.get() );
+
+  auto b = std::make_shared<A>( 42, 0 );
+  ptr_t wb{ b };
+  p.store(wb);
+  VERIFY( p.load().lock().get() != a.get() );
+  VERIFY( p.load().lock().get() == b.get() );
+  p.exchange(wa);
+  VERIFY( p.load().lock().get() != b.get() );
+  VERIFY( p.load().lock().get() == a.get() );
+
+  {
+    ptr_t aa{ a };
+    VERIFY( p.compare_exchange_strong(aa, b,
+                                     std::memory_order_seq_cst,
+                                     std::memory_order_seq_cst) == true );
+    ptr_t bb{ a };
+    VERIFY( p.compare_exchange_strong(bb, b,
+                                       std::memory_order_seq_cst,
+                                       std::memory_order_seq_cst) == false );
+    VERIFY( bb.lock().get() == b.get() );
+  }
+
+  {
+    ptr_t bb{ b };
+    VERIFY( p.compare_exchange_weak(bb, a,
+                                   std::memory_order_seq_cst,
+                                   std::memory_order_seq_cst) == true );
+    ptr_t aa{ b };
+    VERIFY( p.compare_exchange_weak(aa, a,
+                                   std::memory_order_seq_cst,
+                                   std::memory_order_seq_cst) == false );
+    VERIFY( aa.lock().get() == a.get() );
+  }
+}
+
+void
+test_wait_notify()
+{
+  std::atomic<std::weak_ptr<int>> p;
+  std::weak_ptr<int> a = std::make_shared<int>();
+  std::weak_ptr<int> b = std::make_shared<int>();
+
+  p.store(a);
+  p.wait(b);
+  std::thread t([&]
+      {
+       p.store(b);
+       p.notify_one();
+      });
+  p.wait(a);
+  t.join();
+}
+
+int
+main()
+{
+  test_is_lock_free();
+  test_atomic_weak_ptr();
+  test_wait_notify();
+}