// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The routines exported by this module are subtle. If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain. If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative. You should assume only properties explicitly guaranteed by the
// specifications in this file. You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break. If
// you do not know what you are doing, avoid these routines, and use a Mutex.

// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines. The NoBarrier
// versions are provided when no barriers are needed:
//   NoBarrier_Store()
//   NoBarrier_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these.
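//
// For example (a sketch; "flag_" is a hypothetical Atomic32 variable):
//
//   flag_ = 1;                                // WRONG: direct assignment.
//   Atomic32 value = flag_;                   // WRONG: direct read.
//   NoBarrier_Store(&flag_, 1);               // Correct.
//   Atomic32 value = NoBarrier_Load(&flag_);  // Correct.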

#ifndef V8_ATOMICOPS_H_
#define V8_ATOMICOPS_H_

#include "../include/v8.h"
#include "globals.h"

namespace v8 {
namespace internal {

typedef int32_t Atomic32;
#ifdef V8_HOST_ARCH_64_BIT
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
#if defined(__APPLE__)
// MacOS is an exception to the implicit conversion rule above,
// because it uses long for intptr_t.
typedef int64_t Atomic64;
#else
typedef intptr_t Atomic64;
#endif
#endif  // V8_HOST_ARCH_64_BIT

// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
typedef intptr_t AtomicWord;
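
// For example (a sketch; "slot_" is a hypothetical AtomicWord field; the
// Atomic32/Atomic64 routines apply to it because AtomicWord shares their
// representation on each architecture):
//
//   Release_Store(&slot_, reinterpret_cast<AtomicWord>(pointer));
//   void* p = reinterpret_cast<void*>(Acquire_Load(&slot_));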

// Atomically execute:
//   result = *ptr;
//   if (*ptr == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr".
//
// This routine implies no memory barriers.
Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                  Atomic32 old_value,
                                  Atomic32 new_value);
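
// An illustrative sketch (NoBarrier_StoreMax is a hypothetical helper, not
// part of this file's API): the canonical compare-and-swap retry loop, here
// keeping a running maximum. A CompareAndSwap whose old and new values are
// equal never changes *ptr, so the first call below is simply an atomic read.
inline Atomic32 NoBarrier_StoreMax(volatile Atomic32* ptr,
                                   Atomic32 new_value) {
  Atomic32 old_value = NoBarrier_CompareAndSwap(ptr, new_value, new_value);
  while (old_value < new_value) {
    Atomic32 observed = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
    if (observed == old_value) return new_value;  // Installed our maximum.
    old_value = observed;  // Raced with another writer; retry.
  }
  return old_value;  // *ptr was already >= new_value.
}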

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
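
// An illustrative sketch (TakeAll is a hypothetical helper, not part of this
// file's API): atomically take and reset a counter, e.g. draining a count of
// pending work items in a single step.
inline Atomic32 TakeAll(volatile Atomic32* pending_count) {
  return NoBarrier_AtomicExchange(pending_count, 0);
}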

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);

Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                 Atomic32 increment);
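
// An illustrative sketch (a hypothetical helper, not part of this file's
// API): the classic reference-count decrement. Barrier semantics ensure this
// thread's writes to the shared object become visible before any thread can
// observe a zero count, and that the caller's subsequent teardown cannot be
// reordered ahead of the decrement.
inline bool DecrementRefcountToZero(volatile Atomic32* refcount) {
  return Barrier_AtomicIncrement(refcount, -1) == 0;
}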

// The following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition variables. They combine CompareAndSwap(), a load, or
// a store with appropriate memory-ordering instructions. "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access. A sketch of the acquire/release pattern follows the declarations
// below.
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);

void MemoryBarrier();
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);

Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);
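
// An illustrative sketch of the acquire/release pattern described above
// (Publish/TryConsume are hypothetical helpers, not part of this file's
// API): a producer fills a payload and then publishes it with Release_Store;
// a consumer that observes the flag via Acquire_Load is guaranteed to also
// observe the payload write.
inline void Publish(volatile Atomic32* payload,
                    volatile Atomic32* ready_flag,
                    Atomic32 value) {
  NoBarrier_Store(payload, value);  // Plain store; ordered by the release.
  Release_Store(ready_flag, 1);     // No earlier access may move below this.
}

inline bool TryConsume(volatile const Atomic32* payload,
                       volatile const Atomic32* ready_flag,
                       Atomic32* value_out) {
  if (Acquire_Load(ready_flag) == 0) return false;  // Not yet published.
  *value_out = NoBarrier_Load(payload);  // Ordered after the acquire load.
  return true;
}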

// 64-bit atomic operations (only available on 64-bit processors).
#ifdef V8_HOST_ARCH_64_BIT
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                  Atomic64 old_value,
                                  Atomic64 new_value);
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);

Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
#endif  // V8_HOST_ARCH_64_BIT

} }  // namespace v8::internal

// Include our platform specific implementation.
#if defined(_MSC_VER) && \
    (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
#include "atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__) && \
    (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
#include "atomicops_internals_x86_macosx.h"
#elif defined(__GNUC__) && \
    (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
#include "atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && defined(V8_HOST_ARCH_ARM)
// We need special handling for QNX as the existing code in
// atomicops_internals_arm_gcc.h is actually Linux-specific: it relies on a
// magic hard-wired function address for LinuxKernelCmpxchgFunc. The QNX
// implementation uses the equivalent system call for that platform but is
// not source compatible.
#if defined(__QNXNTO__)
#include "atomicops_internals_arm_qnx.h"
#else
#include "atomicops_internals_arm_gcc.h"
#endif
#elif defined(__GNUC__) && defined(V8_HOST_ARCH_MIPS)
#include "atomicops_internals_mips_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif

#endif  // V8_ATOMICOPS_H_