// Copyright (c) 2011, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sasha Levitskiy
// based on atomicops-internals by Sanjay Ghemawat
//
// This file is an internal atomic implementation, use base/atomicops.h instead.
//
// This code implements ARM atomics for architectures V6 and newer.
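//
// A minimal usage sketch (illustrative only: client code should include the
// public base/atomicops.h wrapper rather than this header, and the variable
// names below are made up):
//
//   Atomic32 counter = 0;
//   base::subtle::NoBarrier_AtomicIncrement(&counter, 1);      // counter == 1
//   Atomic32 prev =
//       base::subtle::Acquire_CompareAndSwap(&counter, 1, 2);  // prev == 1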

#ifndef BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_
#define BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_

#include <stdio.h>
#include <stdlib.h>
#include "base/abort.h"
#include "base/basictypes.h"  // For COMPILE_ASSERT

// The LDREXD and STREXD instructions are available in all ARM v7 variants
// and above.  In v6, only some variants support them.  For simplicity, we
// only use exclusive 64-bit load/store in V7 or above.
#if defined(ARMV7)
# define BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
#endif

typedef int32_t Atomic32;

namespace base {
namespace subtle {

typedef int64_t Atomic64;

// 32-bit low-level ops
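//
// All read-modify-write operations below follow the same LDREX/STREX pattern:
// LDREX loads the current value and marks the address for exclusive access,
// and STREX writes the new value only if nothing else has touched the address
// in between, setting its result register to 0 on success and 1 on failure;
// on failure the sequence is retried.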

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 oldval, res;
  do {
    __asm__ __volatile__(
    "ldrex   %1, [%3]\n"
    "mov     %0, #0\n"
    "teq     %1, %4\n"
    // The following IT (if-then) instruction is needed for the subsequent
    // conditional instruction STREXEQ when compiling in THUMB mode.
    // In ARM mode, the compiler/assembler will not generate any code for it.
    "it      eq\n"
    "strexeq %0, %5, [%3]\n"
        : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
        : "r" (ptr), "Ir" (old_value), "r" (new_value)
        : "cc");
  } while (res);
  return oldval;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 tmp, old;
  __asm__ __volatile__(
      "1:\n"
      "ldrex  %1, [%2]\n"
      "strex  %0, %3, [%2]\n"
      "teq    %0, #0\n"
      "bne    1b"
      : "=&r" (tmp), "=&r" (old)
      : "r" (ptr), "r" (new_value)
      : "cc", "memory");
  return old;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 tmp, res;
  __asm__ __volatile__(
      "1:\n"
      "ldrex  %1, [%2]\n"
      "add    %1, %1, %3\n"
      "strex  %0, %1, [%2]\n"
      "teq    %0, #0\n"
      "bne    1b"
      : "=&r" (tmp), "=&r"(res)
      : "r" (ptr), "r"(increment)
      : "cc", "memory");
  return res;
}
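
// DMB (data memory barrier) ensures that all memory accesses issued before
// the barrier are observed before any accesses issued after it; it is the
// full barrier from which the Acquire_/Release_/Barrier_ variants below are
// built.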
inline void MemoryBarrier() {
  __asm__ __volatile__("dmb" : : : "memory");
}
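
// Same increment loop as NoBarrier_AtomicIncrement above, but with a DMB
// inside the LDREX/STREX sequence so the update is published with full
// barrier semantics.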
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  Atomic32 tmp, res;
  __asm__ __volatile__(
      "1:\n"
      "ldrex  %1, [%2]\n"
      "add    %1, %1, %3\n"
      "dmb\n"
      "strex  %0, %1, [%2]\n"
      "teq    %0, #0\n"
      "bne    1b"
      : "=&r" (tmp), "=&r"(res)
      : "r" (ptr), "r"(increment)
      : "cc", "memory");
  return res;
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  MemoryBarrier();
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
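
// Aligned 32-bit loads and stores are naturally atomic on ARM; the Acquire_
// and Release_ variants simply pair the plain access with MemoryBarrier() on
// the appropriate side.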
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

// 64-bit versions are only available if LDREXD and STREXD instructions
// are available.
#ifdef BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD

#define BASE_HAS_ATOMIC64 1
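
// The 64-bit operations mirror the 32-bit ones, using LDREXD/STREXD on a
// register pair.  In the inline assembly below, the %Q and %R operand
// modifiers select the low and high 32-bit halves of a 64-bit operand.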

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 oldval, res;
  do {
    __asm__ __volatile__(
    "ldrexd   %1, [%3]\n"
    "mov      %0, #0\n"
    "teq      %Q1, %Q4\n"
    // The following IT (if-then) instructions are needed for the subsequent
    // conditional instructions when compiling in THUMB mode.
    // In ARM mode, the compiler/assembler will not generate any code for it.
    "it       eq\n"
    "teqeq    %R1, %R4\n"
    "it       eq\n"
    "strexdeq %0, %5, [%3]\n"
        : "=&r" (res), "=&r" (oldval), "+Q" (*ptr)
        : "r" (ptr), "Ir" (old_value), "r" (new_value)
        : "cc");
  } while (res);
  return oldval;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  int store_failed;
  Atomic64 old;
  __asm__ __volatile__(
      "1:\n"
      "ldrexd  %1, [%2]\n"
      "strexd  %0, %3, [%2]\n"
      "teq     %0, #0\n"
      "bne     1b"
      : "=&r" (store_failed), "=&r" (old)
      : "r" (ptr), "r" (new_value)
      : "cc", "memory");
  return old;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  int store_failed;
  Atomic64 res;
  __asm__ __volatile__(
      "1:\n"
      "ldrexd  %1, [%2]\n"
      "adds    %Q1, %Q1, %Q3\n"
      "adc     %R1, %R1, %R3\n"
      "strexd  %0, %1, [%2]\n"
      "teq     %0, #0\n"
      "bne     1b"
      : "=&r" (store_failed), "=&r"(res)
      : "r" (ptr), "r"(increment)
      : "cc", "memory");
  return res;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  int store_failed;
  Atomic64 res;
  __asm__ __volatile__(
      "1:\n"
      "ldrexd  %1, [%2]\n"
      "adds    %Q1, %Q1, %Q3\n"
      "adc     %R1, %R1, %R3\n"
      "dmb\n"
      "strexd  %0, %1, [%2]\n"
      "teq     %0, #0\n"
      "bne     1b"
      : "=&r" (store_failed), "=&r"(res)
      : "r" (ptr), "r"(increment)
      : "cc", "memory");
  return res;
}
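
// A plain 64-bit store is not guaranteed to be single-copy atomic on ARM, so
// even the no-barrier store goes through an LDREXD/STREXD loop.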
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  int store_failed;
  Atomic64 dummy;
  __asm__ __volatile__(
      "1:\n"
      // Dummy load to lock cache line.
      "ldrexd  %1, [%3]\n"
      "strexd  %0, %2, [%3]\n"
      "teq     %0, #0\n"
      "bne     1b"
      : "=&r" (store_failed), "=&r"(dummy)
      : "r"(value), "r" (ptr)
      : "cc", "memory");
}
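
// A single LDREXD yields an atomic 64-bit read; CLREX then clears the
// exclusive monitor, since no matching store follows.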
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  Atomic64 res;
  __asm__ __volatile__(
  "ldrexd   %0, [%1]\n"
  "clrex\n"
      : "=r" (res)
      : "r"(ptr), "Q"(*ptr));
  return res;
}

#else   // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
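
// Without LDREXD/STREXD there is no reasonable way to implement the 64-bit
// operations in this file, so they abort at runtime if ever called.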

inline void NotImplementedFatalError(const char *function_name) {
  fprintf(stderr, "64-bit %s() not implemented on this platform\n",
          function_name);
  tcmalloc::Abort();
}

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  NotImplementedFatalError("NoBarrier_CompareAndSwap");
  return 0;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  NotImplementedFatalError("NoBarrier_AtomicExchange");
  return 0;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  NotImplementedFatalError("NoBarrier_AtomicIncrement");
  return 0;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  NotImplementedFatalError("Barrier_AtomicIncrement");
  return 0;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  NotImplementedFatalError("NoBarrier_Store");
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  NotImplementedFatalError("NoBarrier_Load");
  return 0;
}

#endif  // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
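
// The 64-bit Acquire_/Release_ operations are composed from the no-barrier
// primitives above plus MemoryBarrier(), mirroring the 32-bit versions.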

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  NoBarrier_Store(ptr, value);
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  NoBarrier_Store(ptr, value);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = NoBarrier_Load(ptr);
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return NoBarrier_Load(ptr);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  MemoryBarrier();
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

}  // namespace subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_