1 /* Copyright (c) 2006, Google Inc.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 * Author: Sanjay Ghemawat
34 // For atomic operations on statistics counters, see atomic_stats_counter.h.
35 // For atomic operations on sequence numbers, see atomic_sequence_num.h.
36 // For atomic operations on reference counts, see atomic_refcount.h.
38 // Some fast atomic operations -- typically with machine-dependent
39 // implementations. This file may need editing as Google code is
40 // ported to different architectures.
42 // The routines exported by this module are subtle. If you use them, even if
43 // you get the code right, it will depend on careful reasoning about atomicity
44 // and memory ordering; it will be less readable, and harder to maintain. If
45 // you plan to use these routines, you should have a good reason, such as solid
46 // evidence that performance would otherwise suffer, or there being no
47 // alternative. You should assume only properties explicitly guaranteed by the
48 // specifications in this file. You are almost certainly _not_ writing code
49 // just for the x86; if you assume x86 semantics, x86 hardware bugs and
50 // implementations on other architectures will cause your code to break.  If you
51 // do not know what you are doing, avoid these routines, and use a Mutex.
53 // It is incorrect to make direct assignments to/from an atomic variable.
54 // You should use one of the Load or Store routines. The NoBarrier
55 // versions are provided when no barriers are needed:
58 // Although there is currently no compiler enforcement, you are encouraged
59 // to use these. Moreover, if you choose to use base::subtle::Atomic64 type,
60 // you MUST use one of the Load or Store routines to get correct behavior
61 // on 32-bit platforms.
63 // The intent is eventually to put all of these routines in namespace
66 #ifndef THREAD_ATOMICOPS_H_
67 #define THREAD_ATOMICOPS_H_
74 // ------------------------------------------------------------------------
75 // Include the platform specific implementations of the types
76 // and operations listed below. Implementations are to provide Atomic32
77 // and Atomic64 operations. If there is a mismatch between intptr_t and
78 // the Atomic32 or Atomic64 types for a platform, the platform-specific header
79 // should define the macro, AtomicWordCastType in a clause similar to the
81 // #if ...pointers are 64 bits...
82 // # define AtomicWordCastType base::subtle::Atomic64
84 // # define AtomicWordCastType Atomic32
86 // TODO(csilvers): figure out ARCH_PIII/ARCH_K8 (perhaps via ./configure?)
87 // ------------------------------------------------------------------------
89 #include "base/arm_instruction_set_select.h"
91 // TODO(csilvers): match piii, not just __i386. Also, match k8
92 #if defined(__MACH__) && defined(__APPLE__)
93 #include "base/atomicops-internals-macosx.h"
94 #elif defined(__GNUC__) && defined(ARMV6)
95 #include "base/atomicops-internals-arm-v6plus.h"
97 #include "base/atomicops-internals-arm-generic.h"
99 #include "base/atomicops-internals-windows.h"
100 #elif defined(__GNUC__) && (defined(__i386) || defined(__x86_64__))
101 #include "base/atomicops-internals-x86.h"
102 #elif defined(__linux__) && defined(__PPC__)
103 #include "base/atomicops-internals-linuxppc.h"
105 // Assume x86 for now. If you need to support a new architecture and
106 // don't know how to implement atomic ops, you can probably get away
107 // with using pthreads, since atomicops is only used by spinlock.h/cc
108 //#error You need to implement atomic operations for this architecture
109 #include "base/atomicops-internals-x86.h"
112 // Signed type that can hold a pointer and supports the atomic ops below, as
113 // well as atomic loads and stores. Instances must be naturally-aligned.
114 typedef intptr_t AtomicWord;
116 #ifdef AtomicWordCastType
117 // ------------------------------------------------------------------------
118 // This section is needed only when explicit type casting is required to
119 // cast AtomicWord to one of the basic atomic types (Atomic64 or Atomic32).
120 // It also serves to document the AtomicWord interface.
121 // ------------------------------------------------------------------------
126 // Atomically execute:
128 // if (*ptr == old_value)
132 // I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
133 // Always return the old value of "*ptr"
135 // This routine implies no memory barriers.
136 inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
137 AtomicWord old_value,
138 AtomicWord new_value) {
139 return NoBarrier_CompareAndSwap(
140 reinterpret_cast<volatile AtomicWordCastType*>(ptr),
141 old_value, new_value);
144 // Atomically store new_value into *ptr, returning the previous value held in
145 // *ptr. This routine implies no memory barriers.
146 inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
147 AtomicWord new_value) {
148 return NoBarrier_AtomicExchange(
149 reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
152 // Atomically increment *ptr by "increment". Returns the new value of
153 // *ptr with the increment applied. This routine implies no memory
155 inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
156 AtomicWord increment) {
157 return NoBarrier_AtomicIncrement(
158 reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
161 inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
162 AtomicWord increment) {
163 return Barrier_AtomicIncrement(
164 reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
167 // ------------------------------------------------------------------------
168 // These following lower-level operations are typically useful only to people
169 // implementing higher-level synchronization operations like spinlocks,
170 // mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
171 // a store with appropriate memory-ordering instructions. "Acquire" operations
172 // ensure that no later memory access can be reordered ahead of the operation.
173 // "Release" operations ensure that no previous memory access can be reordered
174 // after the operation. "Barrier" operations have both "Acquire" and "Release"
175 // semantics.  A MemoryBarrier() has "Barrier" semantics, but does no memory access.
177 // ------------------------------------------------------------------------
178 inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
179 AtomicWord old_value,
180 AtomicWord new_value) {
181 return base::subtle::Acquire_CompareAndSwap(
182 reinterpret_cast<volatile AtomicWordCastType*>(ptr),
183 old_value, new_value);
186 inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
187 AtomicWord old_value,
188 AtomicWord new_value) {
189 return base::subtle::Release_CompareAndSwap(
190 reinterpret_cast<volatile AtomicWordCastType*>(ptr),
191 old_value, new_value);
194 inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
196 reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
199 inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
200 return base::subtle::Acquire_Store(
201 reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
204 inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
205 return base::subtle::Release_Store(
206 reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
209 inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
210 return NoBarrier_Load(
211 reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
214 inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
215 return base::subtle::Acquire_Load(
216 reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
219 inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
220 return base::subtle::Release_Load(
221 reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
224 } // namespace base::subtle
226 #endif // AtomicWordCastType
228 // ------------------------------------------------------------------------
229 // Commented out type definitions and method declarations for documentation
230 // of the interface provided by this module.
231 // ------------------------------------------------------------------------
235 // Signed 32-bit type that supports the atomic ops below, as well as atomic
236 // loads and stores. Instances must be naturally aligned. This type differs
237 // from AtomicWord in 64-bit binaries where AtomicWord is 64-bits.
238 typedef int32_t Atomic32;
240 // Corresponding operations on Atomic32
244 // Signed 64-bit type that supports the atomic ops below, as well as atomic
245 // loads and stores. Instances must be naturally aligned. This type differs
246 // from AtomicWord in 32-bit binaries where AtomicWord is 32-bits.
247 typedef int64_t Atomic64;
249 Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
252 Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
253 Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
254 Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
256 Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
259 Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
262 void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
263 void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
264 void Release_Store(volatile Atomic32* ptr, Atomic32 value);
265 Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
266 Atomic32 Acquire_Load(volatile const Atomic32* ptr);
267 Atomic32 Release_Load(volatile const Atomic32* ptr);
269 // Corresponding operations on Atomic64
270 Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
273 Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
274 Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
275 Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
277 Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
280 Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
283 void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
284 void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
285 void Release_Store(volatile Atomic64* ptr, Atomic64 value);
286 Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
287 Atomic64 Acquire_Load(volatile const Atomic64* ptr);
288 Atomic64 Release_Load(volatile const Atomic64* ptr);
289 } // namespace base::subtle
292 void MemoryBarrier();
297 // ------------------------------------------------------------------------
298 // The following are to be deprecated when all uses have been changed to
299 // use the base::subtle namespace.
300 // ------------------------------------------------------------------------
302 #ifdef AtomicWordCastType
303 // AtomicWord versions to be deprecated
304 inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
305 AtomicWord old_value,
306 AtomicWord new_value) {
307 return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
310 inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
311 AtomicWord old_value,
312 AtomicWord new_value) {
313 return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
316 inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
317 return base::subtle::Acquire_Store(ptr, value);
320 inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
321 return base::subtle::Release_Store(ptr, value);
324 inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
325 return base::subtle::Acquire_Load(ptr);
328 inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
329 return base::subtle::Release_Load(ptr);
331 #endif // AtomicWordCastType
333 // 32-bit Acquire/Release operations to be deprecated.
335 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
337 Atomic32 new_value) {
338 return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
340 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
342 Atomic32 new_value) {
343 return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
345 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
346 base::subtle::Acquire_Store(ptr, value);
348 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
349 return base::subtle::Release_Store(ptr, value);
351 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
352 return base::subtle::Acquire_Load(ptr);
354 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
355 return base::subtle::Release_Load(ptr);
358 #ifdef BASE_HAS_ATOMIC64
360 // 64-bit Acquire/Release operations to be deprecated.
362 inline base::subtle::Atomic64 Acquire_CompareAndSwap(
363 volatile base::subtle::Atomic64* ptr,
364 base::subtle::Atomic64 old_value, base::subtle::Atomic64 new_value) {
365 return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value);
367 inline base::subtle::Atomic64 Release_CompareAndSwap(
368 volatile base::subtle::Atomic64* ptr,
369 base::subtle::Atomic64 old_value, base::subtle::Atomic64 new_value) {
370 return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
372 inline void Acquire_Store(
373 volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 value) {
374 base::subtle::Acquire_Store(ptr, value);
376 inline void Release_Store(
377 volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 value) {
378 return base::subtle::Release_Store(ptr, value);
380 inline base::subtle::Atomic64 Acquire_Load(
381 volatile const base::subtle::Atomic64* ptr) {
382 return base::subtle::Acquire_Load(ptr);
384 inline base::subtle::Atomic64 Release_Load(
385 volatile const base::subtle::Atomic64* ptr) {
386 return base::subtle::Release_Load(ptr);
389 #endif // BASE_HAS_ATOMIC64
391 #endif // THREAD_ATOMICOPS_H_