1 /* Copyright (c) 2006, Google Inc.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 // Implementation of atomic operations for Mac OS X. This file should not
32 // be included directly. Clients should instead include
33 // "base/atomicops.h".
35 #ifndef BASE_ATOMICOPS_INTERNALS_MACOSX_H_
36 #define BASE_ATOMICOPS_INTERNALS_MACOSX_H_
38 typedef int32_t Atomic32;
40 // MacOS uses long for intptr_t, AtomicWord and Atomic32 are always different
41 // on the Mac, even when they are the same size. Similarly, on __ppc64__,
42 // AtomicWord and Atomic64 are always different. Thus, we need explicit
// casts between AtomicWord and the fixed-width atomic types.
// NOTE(review): the #ifdef __LP64__ / #else / #endif lines guarding the two
// alternative definitions below appear to be missing from this copy -- as
// written, the second #define would redefine the first. TODO: restore the
// guards from the upstream header.
45 #define AtomicWordCastType base::subtle::Atomic64
47 #define AtomicWordCastType Atomic32
// 64-bit atomic ops are advertised only where this header provides them
// for the target (LP64 targets and i386 below); the matching #endif for
// this #if appears to be missing from this copy -- TODO confirm.
50 #if defined(__LP64__) || defined(__i386__)
51 #define BASE_HAS_ATOMIC64 1 // Use only in tests and base/atomic*
54 #include <libkern/OSAtomic.h>
59 #if !defined(__LP64__) && defined(__ppc__)
61 // The Mac 64-bit OSAtomic implementations are not available for 32-bit PowerPC,
62 // while the underlying assembly instructions are available on only some
63 // implementations of PowerPC.
65 // The following inline functions will fail with the error message at compile
66 // time ONLY IF they are called. So it is safe to use this header if user
67 // code only calls AtomicWord and Atomic32 operations.
69 // NOTE(vchen): Implementation notes to implement the atomic ops below may
70 // be found in "PowerPC Virtual Environment Architecture, Book II,
71 // Version 2.02", January 28, 2005, Appendix B, page 46. Unfortunately,
72 // extra care must be taken to ensure data are properly 8-byte aligned, and
73 // that data are returned correctly according to Mac OS X ABI specs.
// Stub: references an undefined symbol so any call fails at build time
// rather than silently doing a non-atomic 64-bit CAS on 32-bit PPC.
// NOTE(review): the "__asm__ __volatile__(" line and the closing "}" of
// this stub appear to be missing from this copy -- TODO restore.
75 inline int64_t OSAtomicCompareAndSwap64(
76 int64_t oldValue, int64_t newValue, int64_t *theValue) {
78 "_OSAtomicCompareAndSwap64_not_supported_for_32_bit_ppc\n\t");
// Stub for 64-bit atomic add; same fail-at-build-time technique as above.
// NOTE(review): asm opener and closing brace missing from this copy.
82 inline int64_t OSAtomicAdd64(int64_t theAmount, int64_t *theValue) {
84 "_OSAtomicAdd64_not_supported_for_32_bit_ppc\n\t");
// Barrier variant: delegates to the (stubbed) non-barrier CAS, so it also
// fails at build time if ever referenced.
// NOTE(review): return statement and closing brace missing from this copy.
88 inline int64_t OSAtomicCompareAndSwap64Barrier(
89 int64_t oldValue, int64_t newValue, int64_t *theValue) {
90 int64_t prev = OSAtomicCompareAndSwap64(oldValue, newValue, theValue);
// Barrier variant of the 64-bit add stub; delegates to OSAtomicAdd64.
// NOTE(review): return statement and closing brace missing from this copy.
95 inline int64_t OSAtomicAdd64Barrier(
96 int64_t theAmount, int64_t *theValue) {
97 int64_t new_val = OSAtomicAdd64(theAmount, theValue);
103 typedef int64_t Atomic64;
// Full (two-way) memory barrier.
// NOTE(review): the body -- presumably a call to OSMemoryBarrier() -- and
// the closing brace appear to be missing from this copy; TODO confirm
// against the upstream header.
105 inline void MemoryBarrier() {
// Atomic compare-and-swap on a 32-bit word with no memory barrier: installs
// new_value only if *ptr still equals old_value, retrying the OS-level CAS
// while *ptr continues to read back as old_value (to distinguish spurious
// failure from a genuine value change).
// NOTE(review): the old_value parameter line, the local prev_value
// declaration, the "do {" opener, both return statements, and the closing
// brace appear to be missing from this copy -- TODO restore from upstream.
111 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
113 Atomic32 new_value) {
// OSAtomicCompareAndSwap32 takes a non-volatile pointer, hence the cast.
116 if (OSAtomicCompareAndSwap32(old_value, new_value,
117 const_cast<Atomic32*>(ptr))) {
121 } while (prev_value == old_value);
// Atomically stores new_value into *ptr (no barrier), returning the value
// that was previously there. Implemented as a CAS retry loop: keep
// re-reading *ptr and attempting the swap until the CAS succeeds.
// NOTE(review): the loop opener that re-reads *ptr into old_value, the
// final return, and the closing brace appear to be missing from this copy.
125 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
126 Atomic32 new_value) {
130 } while (!OSAtomicCompareAndSwap32(old_value, new_value,
131 const_cast<Atomic32*>(ptr)));
// Atomically adds increment to *ptr with no memory barrier, returning the
// updated value (OSAtomicAdd32 returns the post-add result).
// NOTE(review): the closing brace appears to be missing from this copy.
135 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
136 Atomic32 increment) {
137 return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
// Same as NoBarrier_AtomicIncrement, but the OS "Barrier" variant also
// issues a full memory barrier around the add.
// NOTE(review): the closing brace appears to be missing from this copy.
140 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
141 Atomic32 increment) {
142 return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
// Compare-and-swap with acquire semantics: same retry-loop shape as
// NoBarrier_CompareAndSwap, but using the OS "Barrier" CAS variant.
// NOTE(review): the old_value parameter line, prev_value declaration,
// "do {" opener, return statements, and closing brace appear to be missing
// from this copy -- TODO restore from upstream.
145 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
147 Atomic32 new_value) {
150 if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
151 const_cast<Atomic32*>(ptr))) {
155 } while (prev_value == old_value);
// The libkern interface does not distinguish Acquire from Release barriers,
// so the Release CAS simply delegates to the Acquire CAS.
// NOTE(review): the old_value parameter line and closing brace appear to be
// missing from this copy.
159 inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
161 Atomic32 new_value) {
162 return Acquire_CompareAndSwap(ptr, old_value, new_value);
// Plain 32-bit store; aligned 32-bit stores are atomic on this platform.
// NOTE(review): bodies and closing braces of the store/load functions below
// appear to be missing from this copy (presumably "*ptr = value;" plus
// MemoryBarrier() calls on the barrier variants) -- TODO restore from
// upstream.
165 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
// Store that is presumably ordered after all prior memory operations
// (barrier before the store) -- body missing; verify against upstream.
169 inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
// Store presumably followed by a barrier so it is visible before later
// operations -- body missing; verify against upstream.
174 inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
// Plain 32-bit load -- body ("return *ptr;") missing from this copy.
179 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
// Load followed by a barrier (acquire): the raw read is visible here; the
// MemoryBarrier() call and return appear to be missing from this copy.
183 inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
184 Atomic32 value = *ptr;
// Release-flavored load -- body missing from this copy.
189 inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
// 64-bit analogue of the Atomic32 no-barrier CAS: retry the OS-level
// 64-bit CAS while *ptr still reads back as old_value.
// NOTE(review): the old_value parameter line, prev_value declaration,
// "do {" opener, return statements, and closing brace appear to be missing
// from this copy -- TODO restore from upstream.
196 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
198 Atomic64 new_value) {
201 if (OSAtomicCompareAndSwap64(old_value, new_value,
202 const_cast<Atomic64*>(ptr))) {
206 } while (prev_value == old_value);
// Atomically swaps new_value into *ptr (64-bit, no barrier), returning the
// previous value, via a CAS retry loop.
// NOTE(review): the loop opener that reads *ptr into old_value, the final
// return, and the closing brace appear to be missing from this copy.
210 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
211 Atomic64 new_value) {
215 } while (!OSAtomicCompareAndSwap64(old_value, new_value,
216 const_cast<Atomic64*>(ptr)));
// Atomically adds increment to the 64-bit *ptr with no barrier; returns the
// post-add value (OSAtomicAdd64 returns the updated value).
// NOTE(review): the closing brace appears to be missing from this copy.
220 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
221 Atomic64 increment) {
222 return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
// Same as the 64-bit NoBarrier_AtomicIncrement, but the OS "Barrier"
// variant also issues a full memory barrier around the add.
// NOTE(review): the closing brace appears to be missing from this copy.
225 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
226 Atomic64 increment) {
227 return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
// 64-bit compare-and-swap with acquire semantics, using the OS "Barrier"
// CAS in the same retry-loop shape as the no-barrier version.
// NOTE(review): the old_value parameter line, prev_value declaration,
// "do {" opener, return statements, and closing brace appear to be missing
// from this copy -- TODO restore from upstream.
230 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
232 Atomic64 new_value) {
235 if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
236 const_cast<Atomic64*>(ptr))) {
240 } while (prev_value == old_value);
// 64-bit Release CAS; delegates to Acquire_CompareAndSwap (see comment
// below -- libkern offers only one barrier flavor).
// NOTE(review): the old_value parameter line and closing brace appear to be
// missing from this copy.
244 inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
246 Atomic64 new_value) {
247 // The lib kern interface does not distinguish between
248 // Acquire and Release memory barriers; they are equivalent.
249 return Acquire_CompareAndSwap(ptr, old_value, new_value);
254 // 64-bit implementation on 64-bit platform
// On LP64 targets an aligned 64-bit load/store is a single atomic
// instruction, so these are plain accesses (plus barriers where named).
// NOTE(review): the #ifdef guard selecting this branch, the function
// bodies, and the closing braces appear to be missing from this copy --
// TODO restore from the upstream header.
256 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
// Presumably barrier-then-store -- body missing; verify against upstream.
260 inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
// Presumably store-then-barrier -- body missing; verify against upstream.
265 inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
// Plain 64-bit load -- body ("return *ptr;") missing from this copy.
270 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
// Load, then presumably MemoryBarrier(), then return value -- the barrier
// call and return appear to be missing from this copy.
274 inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
275 Atomic64 value = *ptr;
// Release-flavored load -- body missing from this copy.
280 inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
287 // 64-bit implementation on 32-bit platform
// 32-bit PowerPC has no usable 64-bit atomic load/store here, so these
// stubs reference undefined symbols to force a build-time failure if a
// caller ever instantiates them (same technique as the OSAtomic*64 stubs
// above).
// NOTE(review): the #if guard for the PPC branch and the closing braces of
// both functions appear to be missing from this copy.
291 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
292 __asm__ __volatile__(
293 "_NoBarrier_Store_not_supported_for_32_bit_ppc\n\t");
296 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
297 __asm__ __volatile__(
298 "_NoBarrier_Load_not_supported_for_32_bit_ppc\n\t");
302 #elif defined(__i386__)
// On 32-bit x86 a single 64-bit move through an MMX register is atomic,
// so the store is done with movq in/out of %mm0 and emms to restore the
// FP state afterwards.
// NOTE(review): the output/input operand lines of the asm statement and
// the closing brace appear to be missing from this copy -- TODO restore
// from upstream.
304 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
305 __asm__ __volatile__("movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic
306 "movq %%mm0, %0\n\t" // moves (ptr could be read-only)
307 "emms\n\t" // Reset FP registers
310 : // mark the FP stack and mmx registers as clobbered
311 "st", "st(1)", "st(2)", "st(3)", "st(4)",
312 "st(5)", "st(6)", "st(7)", "mm0", "mm1",
313 "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
// 64-bit atomic load on 32-bit x86, mirroring the store above: a single
// movq through %mm0 into a local, followed by emms.
// NOTE(review): the local result variable declaration, the asm operand
// lines, the return statement, and the closing brace appear to be missing
// from this copy -- TODO restore from upstream.
317 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
319 __asm__ __volatile__("movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic
320 "movq %%mm0, %0\n\t" // moves (ptr could be read-only)
321 "emms\n\t" // Reset FP registers
324 : // mark the FP stack and mmx registers as clobbered
325 "st", "st(1)", "st(2)", "st(3)", "st(4)",
326 "st(5)", "st(6)", "st(7)", "mm0", "mm1",
327 "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
// Barrier-flavored 64-bit store/load wrappers shared by the 32-bit
// implementations above: each delegates the raw access to the NoBarrier
// primitive, presumably bracketed by MemoryBarrier() calls.
// NOTE(review): the MemoryBarrier() calls, return statements, and closing
// braces appear to be missing from this copy -- TODO restore from upstream.
334 inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
335 NoBarrier_Store(ptr, value);
// Store presumably preceded by MemoryBarrier() -- body incomplete here.
339 inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
341 NoBarrier_Store(ptr, value);
// Load, then presumably MemoryBarrier(), then return value.
344 inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
345 Atomic64 value = NoBarrier_Load(ptr);
// Release-flavored load: presumably MemoryBarrier() before the raw load.
350 inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
352 return NoBarrier_Load(ptr);
356 } // namespace base::subtle
359 #endif // BASE_ATOMICOPS_INTERNALS_MACOSX_H_