/* Copyright (c) 2008, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
// Implementation of atomic operations for ppc-linux.  This file should not
// be included directly.  Clients should instead include
// "base/atomicops.h".
#ifndef BASE_ATOMICOPS_INTERNALS_LINUXPPC_H_
#define BASE_ATOMICOPS_INTERNALS_LINUXPPC_H_

#include <stdint.h>  // for int32_t / int64_t

typedef int32_t Atomic32;

#ifdef __PPC64__
#define BASE_HAS_ATOMIC64 1
#endif

namespace base {
namespace subtle {
static inline void _sync(void) {
  __asm__ __volatile__("sync": : : "memory");
}
static inline void _lwsync(void) {
  // gcc defines __NO_LWSYNC__ when appropriate; see
  //    http://gcc.gnu.org/ml/gcc-patches/2006-11/msg01238.html
#ifdef __NO_LWSYNC__
  __asm__ __volatile__("msync": : : "memory");
#else
  __asm__ __volatile__("lwsync": : : "memory");
#endif
}
static inline void _isync(void) {
  __asm__ __volatile__("isync": : : "memory");
}
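
// Quick reference for the three helpers above (descriptive note, not
// in the original): sync is a full barrier and orders all four
// load/store combinations, including store->load; lwsync orders
// everything except store->load; isync is not a memory barrier by
// itself, but placed after a conditional branch that depends on an
// atomic update it provides acquire semantics more cheaply than sync.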
static inline Atomic32 OSAtomicAdd32(Atomic32 amount, Atomic32 *value) {
  Atomic32 t;
  __asm__ __volatile__(
      "1:     lwarx   %0,0,%3\n"     // load-reserve the current value
      "       add     %0,%2,%0\n"    // add the amount
      "       stwcx.  %0,0,%3\n"     // store-conditional the result
      "       bne-    1b"            // retry if the reservation was lost
      : "=&r" (t), "+m" (*value)
      : "r" (amount), "r" (value)
      : "cc");
  return t;
}
static inline Atomic32 OSAtomicAdd32Barrier(Atomic32 amount, Atomic32 *value) {
  Atomic32 t;
  _lwsync();
  t = OSAtomicAdd32(amount, value);
  // This is based on the code snippet in the architecture manual (Vol
  // 2, Appendix B).  It's a little tricky: correctness depends on the
  // fact that the code right before this (in OSAtomicAdd32) has a
  // conditional branch with a data dependency on the update.
  // Otherwise, we'd have to use sync.
  _isync();
  return t;
}
static inline bool OSAtomicCompareAndSwap32(Atomic32 old_value,
                                            Atomic32 new_value,
                                            Atomic32 *value) {
  Atomic32 prev;
  __asm__ __volatile__(
      "1:     lwarx   %0,0,%2\n"     // load-reserve *value
      "       cmpw    0,%0,%3\n"     // compare against old_value
      "       bne-    2f\n"          // bail out if it differs
      "       stwcx.  %4,0,%2\n"     // try to store new_value
      "       bne-    1b\n"          // retry if the reservation was lost
      "2:"
      : "=&r" (prev), "+m" (*value)
      : "r" (value), "r" (old_value), "r" (new_value)
      : "cc");
  return prev == old_value;
}
static inline Atomic32 OSAtomicCompareAndSwap32Acquire(Atomic32 old_value,
                                                       Atomic32 new_value,
                                                       Atomic32 *value) {
  Atomic32 t;
  t = OSAtomicCompareAndSwap32(old_value, new_value, value);
  // This is based on the code snippet in the architecture manual (Vol
  // 2, Appendix B).  It's a little tricky: correctness depends on the
  // fact that the code right before this (in
  // OSAtomicCompareAndSwap32) has a conditional branch with a data
  // dependency on the update.  Otherwise, we'd have to use sync.
  _isync();
  return t;
}
static inline Atomic32 OSAtomicCompareAndSwap32Release(Atomic32 old_value,
                                                       Atomic32 new_value,
                                                       Atomic32 *value) {
  _lwsync();
  return OSAtomicCompareAndSwap32(old_value, new_value, value);
}
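
// Summary of the barrier recipe used above (descriptive note, not in
// the original):
//   acquire variant: <ll/sc op ending in a dependent bne->, then _isync()
//   release variant: _lwsync(), then <ll/sc op>
// The isync-based acquire is only correct because the stwcx./bne-
// pair inside the op supplies the dependent branch that the
// architecture manual's recipe requires.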
typedef int64_t Atomic64;

inline void MemoryBarrier() {
  // This can't be _lwsync(); we need to order the immediately
  // preceding stores against any load that may follow, but lwsync
  // doesn't guarantee that.
  _sync();
}
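
// Illustration (hypothetical example, not part of the original) of
// why MemoryBarrier() must be a full sync: in Dekker-style mutual
// exclusion each thread stores its own flag and then loads the other
// thread's flag, and correctness hinges on ordering that store
// against that load:
//
//   NoBarrier_Store(&flag[me], 1);
//   MemoryBarrier();                         // must order the store above
//   if (NoBarrier_Load(&flag[other]) == 0) { // ...against this load
//     /* enter critical section */
//   }
//
// lwsync orders every combination except store->load, so with lwsync
// both threads could read 0 and enter the critical section together.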
// 32-bit Versions.

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32(old_value, new_value,
                                 const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
                                     const_cast<Atomic32*>(ptr)));
  return old_value;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
                                          Atomic32 increment) {
  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
                                        Atomic32 increment) {
  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}
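
// Illustrative sketch (hypothetical, not part of the original header):
// a reference count built on the increment wrappers above.  The names
// ExampleRef/ExampleUnref are assumptions for illustration only.
static inline void ExampleRef(volatile Atomic32 *refcount) {
  // Taking a reference needs no ordering of its own.
  NoBarrier_AtomicIncrement(refcount, 1);
}
static inline bool ExampleUnref(volatile Atomic32 *refcount) {
  // The final decrement must be a barrier so that all prior writes to
  // the object are visible before the caller destroys it.  Returns
  // true when this caller saw the count reach zero.
  return Barrier_AtomicIncrement(refcount, -1) == 0;
}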
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32Acquire(old_value, new_value,
                                        const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32Release(old_value, new_value,
                                        const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}
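
// Illustrative sketch (hypothetical, not part of the original header):
// a minimal spinlock built from the wrappers above, assuming a lock
// word that is 0 when free and 1 when held.  The Example* names are
// assumptions for illustration only.
static inline void ExampleSpinLock(volatile Atomic32 *lock_word) {
  // Acquire semantics keep the critical section from being hoisted
  // above the point where the lock is observed to be taken.
  while (Acquire_CompareAndSwap(lock_word, 0, 1) != 0) {
    // Spin: some other thread holds the lock.
  }
}
static inline void ExampleSpinUnlock(volatile Atomic32 *lock_word) {
  // Release semantics publish the critical section's writes before the
  // lock word is seen as free again.  A release store would do as
  // well; a CAS is used here only because the store wrappers appear
  // later in this header.
  Release_CompareAndSwap(lock_word, 1, 0);
}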
#ifdef __PPC64__

// 64-bit Versions.

static inline Atomic64 OSAtomicAdd64(Atomic64 amount, Atomic64 *value) {
  Atomic64 t;
  __asm__ __volatile__(
      "1:     ldarx   %0,0,%3\n"     // load-reserve the current value
      "       add     %0,%2,%0\n"    // add the amount
      "       stdcx.  %0,0,%3\n"     // store-conditional the result
      "       bne-    1b"            // retry if the reservation was lost
      : "=&r" (t), "+m" (*value)
      : "r" (amount), "r" (value)
      : "cc");
  return t;
}
static inline Atomic64 OSAtomicAdd64Barrier(Atomic64 amount, Atomic64 *value) {
  Atomic64 t;
  _lwsync();
  t = OSAtomicAdd64(amount, value);
  // This is based on the code snippet in the architecture manual (Vol
  // 2, Appendix B).  It's a little tricky: correctness depends on the
  // fact that the code right before this (in OSAtomicAdd64) has a
  // conditional branch with a data dependency on the update.
  // Otherwise, we'd have to use sync.
  _isync();
  return t;
}
static inline bool OSAtomicCompareAndSwap64(Atomic64 old_value,
                                            Atomic64 new_value,
                                            Atomic64 *value) {
  Atomic64 prev;
  __asm__ __volatile__(
      "1:     ldarx   %0,0,%2\n"     // load-reserve *value
      "       cmpd    0,%0,%3\n"     // compare against old_value
      "       bne-    2f\n"          // bail out if it differs
      "       stdcx.  %4,0,%2\n"     // try to store new_value
      "       bne-    1b\n"          // retry if the reservation was lost
      "2:"
      : "=&r" (prev), "+m" (*value)
      : "r" (value), "r" (old_value), "r" (new_value)
      : "cc");
  return prev == old_value;
}
static inline Atomic64 OSAtomicCompareAndSwap64Acquire(Atomic64 old_value,
                                                       Atomic64 new_value,
                                                       Atomic64 *value) {
  Atomic64 t;
  t = OSAtomicCompareAndSwap64(old_value, new_value, value);
  // This is based on the code snippet in the architecture manual (Vol
  // 2, Appendix B).  It's a little tricky: correctness depends on the
  // fact that the code right before this (in
  // OSAtomicCompareAndSwap64) has a conditional branch with a data
  // dependency on the update.  Otherwise, we'd have to use sync.
  _isync();
  return t;
}
static inline Atomic64 OSAtomicCompareAndSwap64Release(Atomic64 old_value,
                                                       Atomic64 new_value,
                                                       Atomic64 *value) {
  _lwsync();
  return OSAtomicCompareAndSwap64(old_value, new_value, value);
}
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64(old_value, new_value,
                                 const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
                                     const_cast<Atomic64*>(ptr)));
  return old_value;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
                                          Atomic64 increment) {
  return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
                                        Atomic64 increment) {
  return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64Acquire(old_value, new_value,
                                        const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64Release(old_value, new_value,
                                        const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

#endif  // __PPC64__
inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
  *ptr = value;
}
inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
  *ptr = value;
  // This can't be _lwsync(); we need to order the immediately
  // preceding stores against any load that may follow, but lwsync
  // doesn't guarantee that.
  _sync();
}
inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
  _lwsync();
  *ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
  return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
  Atomic32 value = *ptr;
  _lwsync();
  return value;
}
inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
  // This can't be _lwsync(); we need to order the immediately
  // preceding stores against any load that may follow, but lwsync
  // doesn't guarantee that.
  _sync();
  return *ptr;
}
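
// Illustrative sketch (hypothetical, not part of the original header):
// release/acquire message passing with the load/store wrappers above.
// The Example* names and the one-word flag protocol are assumptions
// for illustration only.
static inline void ExamplePublish(volatile Atomic32 *payload,
                                  volatile Atomic32 *flag,
                                  Atomic32 value) {
  NoBarrier_Store(payload, value);  // write the data first
  Release_Store(flag, 1);           // lwsync orders the data before the flag
}
static inline bool ExampleConsume(volatile const Atomic32 *payload,
                                  volatile const Atomic32 *flag,
                                  Atomic32 *out) {
  if (Acquire_Load(flag) == 0) return false;  // not yet published
  *out = NoBarrier_Load(payload);  // ordered after the flag load by lwsync
  return true;
}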
#ifdef __PPC64__

// 64-bit Versions.

inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
  *ptr = value;
}
inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
  *ptr = value;
  // This can't be _lwsync(); we need to order the immediately
  // preceding stores against any load that may follow, but lwsync
  // doesn't guarantee that.
  _sync();
}
inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  _lwsync();
  *ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
  return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  Atomic64 value = *ptr;
  _lwsync();
  return value;
}
inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
  // This can't be _lwsync(); we need to order the immediately
  // preceding stores against any load that may follow, but lwsync
  // doesn't guarantee that.
  _sync();
  return *ptr;
}

#endif  // __PPC64__
}   // namespace base::subtle
}   // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_LINUXPPC_H_