// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is an internal atomic implementation, use atomicops.h instead.
//
// This implementation uses C++11 atomics' member functions. The code base is
// currently written assuming atomicity revolves around accesses instead of
// C++11's memory locations. The burden is on the programmer to ensure that all
// memory locations accessed atomically are never accessed non-atomically (tsan
// should help with this).
//
// TODO(jfb) Modify the atomicops.h API and user code to declare atomic
// locations as truly atomic. See the static_assert below.
// Of note in this implementation:
//  * All NoBarrier variants are implemented as relaxed.
//  * All Barrier variants are implemented as sequentially-consistent.
//  * Compare exchange's failure ordering is always the same as the success one
//    (except for release, which fails as relaxed): using a weaker ordering is
//    only valid under certain uses of compare exchange.
//  * Atomic increment is expected to return the post-incremented value, whereas
//    C11 fetch add returns the previous value. The implementation therefore
//    needs to increment twice (which the compiler should be able to detect and
//    merge) or use a CAS loop.
#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

#include <atomic>
#include <type_traits>

#include "base/numerics/wrapping_math.h"
#include "build/build_config.h"
// This implementation is transitional and maintains the original API for
// atomicops.h. This requires casting memory locations to the atomic types, and
// assumes that the API and the C++11 implementation are layout-compatible,
// which isn't true for all implementations or hardware platforms. The static
// assertion should detect this issue, were it to fire then this header
// shouldn't be used.
//
// TODO(jfb) If this header manages to stay committed then the API should be
// modified, and all call sites updated.
47 typedef volatile std::atomic<Atomic32>* AtomicLocation32;
48 static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
49 "incompatible 32-bit atomic layout");
51 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
54 ((AtomicLocation32)ptr)
55 ->compare_exchange_strong(old_value,
57 std::memory_order_relaxed,
58 std::memory_order_relaxed);
62 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
64 return ((AtomicLocation32)ptr)
65 ->exchange(new_value, std::memory_order_relaxed);
68 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
70 return base::WrappingAdd(
71 ((AtomicLocation32)ptr)->fetch_add(increment, std::memory_order_relaxed),
75 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
77 return base::WrappingAdd(((AtomicLocation32)ptr)->fetch_add(increment),
81 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
84 ((AtomicLocation32)ptr)
85 ->compare_exchange_strong(old_value,
87 std::memory_order_acquire,
88 std::memory_order_acquire);
92 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
95 ((AtomicLocation32)ptr)
96 ->compare_exchange_strong(old_value,
98 std::memory_order_release,
99 std::memory_order_relaxed);
103 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
104 ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
107 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
108 ((AtomicLocation32)ptr)->store(value, std::memory_order_release);
111 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
112 return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
115 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
116 return ((AtomicLocation32)ptr)->load(std::memory_order_acquire);
119 #if defined(ARCH_CPU_64_BITS)
121 using AtomicU64 = std::make_unsigned_t<Atomic64>;
123 typedef volatile std::atomic<Atomic64>* AtomicLocation64;
124 static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),
125 "incompatible 64-bit atomic layout");
127 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
129 Atomic64 new_value) {
130 ((AtomicLocation64)ptr)
131 ->compare_exchange_strong(old_value,
133 std::memory_order_relaxed,
134 std::memory_order_relaxed);
138 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
139 Atomic64 new_value) {
140 return ((AtomicLocation64)ptr)
141 ->exchange(new_value, std::memory_order_relaxed);
144 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
145 Atomic64 increment) {
146 return base::WrappingAdd(
147 ((AtomicLocation64)ptr)->fetch_add(increment, std::memory_order_relaxed),
151 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
152 Atomic64 increment) {
153 return base::WrappingAdd(((AtomicLocation64)ptr)->fetch_add(increment),
157 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
159 Atomic64 new_value) {
160 ((AtomicLocation64)ptr)
161 ->compare_exchange_strong(old_value,
163 std::memory_order_acquire,
164 std::memory_order_acquire);
168 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
170 Atomic64 new_value) {
171 ((AtomicLocation64)ptr)
172 ->compare_exchange_strong(old_value,
174 std::memory_order_release,
175 std::memory_order_relaxed);
179 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
180 ((AtomicLocation64)ptr)->store(value, std::memory_order_release);
183 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
184 return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
187 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
188 return ((AtomicLocation64)ptr)->load(std::memory_order_acquire);
#endif  // defined(ARCH_CPU_64_BITS)

}  // namespace subtle

#endif  // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_