1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 // This file is an internal atomic implementation, use atomicops.h instead.
7 #ifndef V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
8 #define V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
// Full data memory barrier over the inner-shareable domain: all memory
// accesses issued before the barrier complete before any access after it.
// The "memory" clobber also stops the compiler reordering across the asm.
inline void MemoryBarrier() {
  __asm__ __volatile__ ("dmb ish" ::: "memory");  // NOLINT
}
// NoBarrier versions of the operation include "memory" in the clobber list.
// This is not required for direct usage of the NoBarrier versions of the
// operations. However this is required for correctness when they are used as
// part of the Acquire or Release versions, to ensure that nothing from outside
// the call is reordered between the operation and the memory barrier. This does
// not change the code generated, so has no or minimal impact on the
// NoBarrier operations.
25 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
31 __asm__ __volatile__ ( // NOLINT
33 "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
34 "cmp %w[prev], %w[old_value] \n\t"
36 "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
37 "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
42 : [old_value]"IJr" (old_value),
43 [new_value]"r" (new_value)
50 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
55 __asm__ __volatile__ ( // NOLINT
57 "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
58 "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
59 "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
60 : [result]"=&r" (result),
63 : [new_value]"r" (new_value)
70 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
75 __asm__ __volatile__ ( // NOLINT
77 "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
78 "add %w[result], %w[result], %w[increment]\n\t"
79 "stxr %w[temp], %w[result], %[ptr] \n\t" // Try to store the result.
80 "cbnz %w[temp], 0b \n\t" // Retry on failure.
81 : [result]"=&r" (result),
84 : [increment]"IJr" (increment)
91 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
96 result = NoBarrier_AtomicIncrement(ptr, increment);
102 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
104 Atomic32 new_value) {
107 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
113 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
115 Atomic32 new_value) {
119 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
124 inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
128 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
132 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
137 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
138 __asm__ __volatile__ ( // NOLINT
139 "stlr %w[value], %[ptr] \n\t"
146 inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
150 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
154 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
157 __asm__ __volatile__ ( // NOLINT
158 "ldar %w[value], %[ptr] \n\t"
159 : [value]"=r" (value)
167 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
// 64-bit versions of the operations.
// See the 32-bit versions for comments.
175 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
177 Atomic64 new_value) {
181 __asm__ __volatile__ ( // NOLINT
183 "ldxr %[prev], %[ptr] \n\t"
184 "cmp %[prev], %[old_value] \n\t"
186 "stxr %w[temp], %[new_value], %[ptr] \n\t"
187 "cbnz %w[temp], 0b \n\t"
189 : [prev]"=&r" (prev),
192 : [old_value]"IJr" (old_value),
193 [new_value]"r" (new_value)
200 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
201 Atomic64 new_value) {
205 __asm__ __volatile__ ( // NOLINT
207 "ldxr %[result], %[ptr] \n\t"
208 "stxr %w[temp], %[new_value], %[ptr] \n\t"
209 "cbnz %w[temp], 0b \n\t"
210 : [result]"=&r" (result),
213 : [new_value]"r" (new_value)
220 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
221 Atomic64 increment) {
225 __asm__ __volatile__ ( // NOLINT
227 "ldxr %[result], %[ptr] \n\t"
228 "add %[result], %[result], %[increment] \n\t"
229 "stxr %w[temp], %[result], %[ptr] \n\t"
230 "cbnz %w[temp], 0b \n\t"
231 : [result]"=&r" (result),
234 : [increment]"IJr" (increment)
241 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
242 Atomic64 increment) {
246 result = NoBarrier_AtomicIncrement(ptr, increment);
252 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
254 Atomic64 new_value) {
257 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
263 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
265 Atomic64 new_value) {
269 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
274 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
278 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
283 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
284 __asm__ __volatile__ ( // NOLINT
285 "stlr %x[value], %[ptr] \n\t"
292 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
296 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
299 __asm__ __volatile__ ( // NOLINT
300 "ldar %x[value], %[ptr] \n\t"
301 : [value]"=r" (value)
309 inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
317 #endif // V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_