// Copyright (c) 2011, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sasha Levitskiy
// based on atomicops-internals by Sanjay Ghemawat
//
// This file is an internal atomic implementation; use base/atomicops.h instead.
//
// This code implements ARM atomics for architectures V6 and newer.
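//
// Illustrative only: a client would include base/atomicops.h (never this
// header directly) and use these ops roughly as in the sketch below. The
// flag/payload names are hypothetical, not part of this file.
//
//   #include "base/atomicops.h"
//
//   static base::subtle::Atomic32 g_flag = 0;
//   static int g_payload = 0;
//
//   // Producer: write the payload, then release-store the flag.
//   void Publish(int v) {
//     g_payload = v;
//     base::subtle::Release_Store(&g_flag, 1);
//   }
//
//   // Consumer: acquire-load the flag before reading the payload.
//   bool TryConsume(int* out) {
//     if (base::subtle::Acquire_Load(&g_flag) == 0) return false;
//     *out = g_payload;
//     return true;
//   }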
#ifndef BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_
#define BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_

#include <stdio.h>   // For fprintf (64-bit stubs below)
#include <stdlib.h>  // For abort (64-bit stubs below)
#include "base/basictypes.h"  // For COMPILE_ASSERT

// The LDREXD and STREXD instructions are available in all ARM v7 variants
// and above.  In v6, only some variants support them.  For simplicity, we
// only use exclusive 64-bit load/store in V7 or above.
#if defined(ARMV7)
# define BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
#endif
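
// Note: ARMV7 is expected to come from the build system.  One plausible way
// to derive it from compiler predefines (an assumption, not part of this
// project's build configuration) would be:
//
//   #if defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__)
//   # define ARMV7
//   #endif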
typedef int32_t Atomic32;

namespace base {
namespace subtle {

typedef int64_t Atomic64;

// 32-bit low-level ops

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 oldval, res;
  do {
    __asm__ __volatile__(
    "ldrex   %1, [%3]\n"      // oldval = *ptr (exclusive load)
    "mov     %0, #0\n"        // res = 0 (assume success)
    "teq     %1, %4\n"        // compare oldval against old_value
    // The following IT (if-then) instruction is needed for the subsequent
    // conditional instruction STREXEQ when compiling in THUMB mode.
    // In ARM mode, the compiler/assembler will not generate any code for it.
    "it      eq\n"
    "strexeq %0, %5, [%3]\n"  // if equal, try *ptr = new_value; res = failure
    : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
    : "r" (ptr), "Ir" (old_value), "r" (new_value)
    : "cc");
  } while (res);
  return oldval;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 tmp, old;
  __asm__ __volatile__(
      "1:\n"
      "ldrex  %1, [%2]\n"      // old = *ptr (exclusive load)
      "strex  %0, %3, [%2]\n"  // try *ptr = new_value; tmp = failure flag
      "teq    %0, #0\n"        // retry if the exclusive store failed
      "bne    1b"
      : "=&r" (tmp), "=&r" (old)
      : "r" (ptr), "r" (new_value)
      : "cc", "memory");
  return old;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 tmp, res;
  __asm__ __volatile__(
      "1:\n"
      "ldrex  %1, [%2]\n"      // res = *ptr (exclusive load)
      "add    %1, %1, %3\n"    // res += increment
      "strex  %0, %1, [%2]\n"  // try *ptr = res; tmp = failure flag
      "teq    %0, #0\n"        // retry if the exclusive store failed
      "bne    1b"
      : "=&r" (tmp), "=&r"(res)
      : "r" (ptr), "r"(increment)
      : "cc", "memory");
  return res;
}

inline void MemoryBarrier() {
  __asm__ __volatile__("dmb" : : : "memory");
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  Atomic32 tmp, res;
  __asm__ __volatile__(
      "1:\n"
      "ldrex  %1, [%2]\n"
      "add    %1, %1, %3\n"
      "dmb\n"                  // full barrier before publishing the new value
      "strex  %0, %1, [%2]\n"
      "teq    %0, #0\n"
      "bne    1b"
      : "=&r" (tmp), "=&r"(res)
      : "r" (ptr), "r"(increment)
      : "cc", "memory");
  return res;
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  MemoryBarrier();
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
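
// Illustrative only: Barrier_AtomicIncrement with a negative increment is the
// usual building block for reference counting.  The type and function names
// below are hypothetical, not part of this file.
//
//   struct RefCounted {
//     Atomic32 refs;  // starts at 1
//   };
//
//   void Unref(RefCounted* obj) {
//     // The barrier makes all prior writes to *obj visible before the
//     // thread that drops the count to zero deletes the object.
//     if (Barrier_AtomicIncrement(&obj->refs, -1) == 0) {
//       delete obj;
//     }
//   }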
// 64-bit versions are only available if LDREXD and STREXD instructions
// are available.
#ifdef BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD

#define BASE_HAS_ATOMIC64 1

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 oldval, res;
  do {
    __asm__ __volatile__(
    "ldrexd   %1, [%3]\n"      // oldval = *ptr (exclusive 64-bit load)
    "mov      %0, #0\n"        // res = 0 (assume success)
    "teq      %Q1, %Q4\n"      // compare low words (%Q = low 32 bits)
    // The following IT (if-then) instructions are needed for the subsequent
    // conditional instructions when compiling in THUMB mode.
    // In ARM mode, the compiler/assembler will not generate any code for it.
    "it       eq\n"
    "teqeq    %R1, %R4\n"      // then compare high words (%R = high 32 bits)
    "it       eq\n"
    "strexdeq %0, %5, [%3]\n"  // if equal, try the exclusive 64-bit store
    : "=&r" (res), "=&r" (oldval), "+Q" (*ptr)
    : "r" (ptr), "Ir" (old_value), "r" (new_value)
    : "cc");
  } while (res);
  return oldval;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  int store_failed;
  Atomic64 old;
  __asm__ __volatile__(
      "1:\n"
      "ldrexd  %1, [%2]\n"
      "strexd  %0, %3, [%2]\n"
      "teq     %0, #0\n"
      "bne     1b"
      : "=&r" (store_failed), "=&r" (old)
      : "r" (ptr), "r" (new_value)
      : "cc", "memory");
  return old;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  int store_failed;
  Atomic64 res;
  __asm__ __volatile__(
      "1:\n"
      "ldrexd  %1, [%2]\n"
      "adds    %Q1, %Q1, %Q3\n"  // add low words, setting the carry flag
      "adc     %R1, %R1, %R3\n"  // add high words plus carry
      "strexd  %0, %1, [%2]\n"
      "teq     %0, #0\n"
      "bne     1b"
      : "=&r" (store_failed), "=&r"(res)
      : "r" (ptr), "r"(increment)
      : "cc", "memory");
  return res;
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  int store_failed;
  Atomic64 res;
  __asm__ __volatile__(
      "1:\n"
      "ldrexd  %1, [%2]\n"
      "adds    %Q1, %Q1, %Q3\n"  // add low words, setting the carry flag
      "adc     %R1, %R1, %R3\n"  // add high words plus carry
      "dmb\n"                    // full barrier before publishing the new value
      "strexd  %0, %1, [%2]\n"
      "teq     %0, #0\n"
      "bne     1b"
      : "=&r" (store_failed), "=&r"(res)
      : "r" (ptr), "r"(increment)
      : "cc", "memory");
  return res;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  int store_failed;
  Atomic64 dummy;
  __asm__ __volatile__(
      "1:\n"
      // Dummy load to lock cache line.
      "ldrexd  %1, [%3]\n"
      "strexd  %0, %2, [%3]\n"
      "teq     %0, #0\n"
      "bne     1b"
      : "=&r" (store_failed), "=&r"(dummy)
      : "r"(value), "r" (ptr)
      : "cc", "memory");
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  Atomic64 res;
  __asm__ __volatile__(
      "ldrexd   %0, [%1]\n"  // single LDREXD is an atomic 64-bit load
      "clrex\n"              // clear the exclusive monitor; no store follows
      : "=r" (res)
      : "r"(ptr), "Q"(*ptr));
  return res;
}
#else // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD

inline void NotImplementedFatalError(const char *function_name) {
  fprintf(stderr, "64-bit %s() not implemented on this platform\n",
          function_name);
  abort();
}

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  NotImplementedFatalError("NoBarrier_CompareAndSwap");
  return 0;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  NotImplementedFatalError("NoBarrier_AtomicExchange");
  return 0;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  NotImplementedFatalError("NoBarrier_AtomicIncrement");
  return 0;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  NotImplementedFatalError("Barrier_AtomicIncrement");
  return 0;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  NotImplementedFatalError("NoBarrier_Store");
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  NotImplementedFatalError("NoBarrier_Load");
  return 0;
}
#endif // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  NoBarrier_Store(ptr, value);
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  NoBarrier_Store(ptr, value);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = NoBarrier_Load(ptr);
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return NoBarrier_Load(ptr);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  MemoryBarrier();
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
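
// Illustrative only: a minimal spinlock sketch built on the 32-bit ops above.
// The SpinLock/SpinUnlock names are hypothetical, not part of this file.
//
//   static void SpinLock(volatile Atomic32* lock) {
//     // Acquire semantics: no later memory access moves above taking the lock.
//     while (Acquire_CompareAndSwap(lock, 0, 1) != 0) {
//       // spin (a real implementation would yield or back off)
//     }
//   }
//
//   static void SpinUnlock(volatile Atomic32* lock) {
//     // Release semantics: all critical-section writes become visible first.
//     Release_Store(lock, 0);
//   }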
}  // namespace subtle ends
}  // namespace base ends

#endif  // BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_