// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use base/atomicops.h instead.
//
// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.

#ifndef BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_

namespace base {
namespace subtle {

// Atomically execute:
//      result = *ptr;
//      if (*ptr == old_value)
//        *ptr = new_value;
//      return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr".
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev, tmp;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %0, %5\n"  // prev = *ptr
                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
                       "move %2, %4\n"  // tmp = new_value
                       "sc %2, %1\n"  // *ptr = tmp (with atomic check)
                       "beqz %2, 1b\n"  // start again on atomic error
                       "nop\n"  // delay slot nop
                       "2:\n"
                       ".set pop\n"
                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
                       : "Ir" (old_value), "r" (new_value), "m" (*ptr)
                       : "memory");
  return prev;
}

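// Usage sketch (an illustration, not part of the original file;
// ExampleTryClaimFlag is a hypothetical helper): deciding a race with
// compare-and-swap. CAS returns the old value unconditionally, so a result
// of 0 means this thread is the one that installed the 1. Use the Acquire_/
// Release_ variants below if the flag guards other data.
inline bool ExampleTryClaimFlag(volatile Atomic32* flag) {
  return NoBarrier_CompareAndSwap(flag, 0, 1) == 0;
}
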
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr.  This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 temp, old;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %1, %2\n"  // old = *ptr
                       "move %0, %3\n"  // temp = new_value
                       "sc %0, %2\n"  // *ptr = temp (with atomic check)
                       "beqz %0, 1b\n"  // start again on atomic error
                       "nop\n"  // delay slot nop
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                       : "r" (new_value), "m" (*ptr)
                       : "memory");
  return old;
}

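// Usage sketch (an illustration, not part of the original file; the
// ExampleSpinLock* helpers are hypothetical): a test-and-set spin lock built
// on the exchange above. The exchange itself implies no barrier, so
// MemoryBarrier() (declared in base/atomicops.h before this header is
// included) supplies the acquire/release semantics explicitly.
inline void ExampleSpinLockAcquire(volatile Atomic32* lock) {
  while (NoBarrier_AtomicExchange(lock, 1) != 0) {
    // Spin: keep exchanging until the previous holder has stored 0.
  }
  MemoryBarrier();  // Acquire: critical-section accesses stay below the lock.
}

inline void ExampleSpinLockRelease(volatile Atomic32* lock) {
  MemoryBarrier();  // Release: critical-section accesses stay above the unlock.
  NoBarrier_Store(lock, 0);
}
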
// Atomically increment *ptr by "increment".  Returns the new value of
// *ptr with the increment applied.  This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp, temp2;

  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %0, %2\n"  // temp = *ptr
                       "addu %1, %0, %3\n"  // temp2 = temp + increment
                       "sc %1, %2\n"  // *ptr = temp2 (with atomic check)
                       "beqz %1, 1b\n"  // start again on atomic error
                       "addu %1, %0, %3\n"  // temp2 = temp + increment
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
                       : "Ir" (increment), "m" (*ptr)
                       : "memory");
  // temp2 now holds the final value.
  return temp2;
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  MemoryBarrier();
  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();
  return res;
}

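// Usage sketch (an illustration, not part of the original file; ExampleUnref
// is hypothetical): dropping a reference count. The decrement must be a
// barrier operation so that all earlier writes to the object are visible
// before any thread observes the count reaching zero and destroys it, which
// is why this uses Barrier_AtomicIncrement rather than the NoBarrier_ variant.
inline bool ExampleUnref(volatile Atomic32* refcount) {
  // Returns true when the caller released the last reference.
  return Barrier_AtomicIncrement(refcount, -1) == 0;
}
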
// "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation.  "Barrier" operations have both "Acquire" and "Release"
// semantics.  A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return res;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  MemoryBarrier();
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

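// Usage sketch (an illustration, not part of the original file;
// ExamplePublishOnce is hypothetical): one-time publication of a nonzero
// handle. The release CAS makes every write performed before it visible to
// any thread that later reads the slot with Acquire_Load, and only the first
// caller's handle is installed.
inline void ExamplePublishOnce(volatile Atomic32* slot, Atomic32 handle) {
  Release_CompareAndSwap(slot, 0, handle);
}
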
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void MemoryBarrier() {
  __asm__ __volatile__("sync" : : : "memory");
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

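// Usage sketch (an illustration, not part of the original file; the
// Example* helpers are hypothetical): a release-store/acquire-load pair
// handing a payload from a writer thread to a reader thread.
inline void ExamplePublish(volatile Atomic32* payload,
                           volatile Atomic32* ready) {
  NoBarrier_Store(payload, 42);
  Release_Store(ready, 1);  // Barrier first: payload is visible before flag.
}

inline bool ExampleTryConsume(volatile Atomic32* payload,
                              volatile Atomic32* ready,
                              Atomic32* out) {
  if (Acquire_Load(ready) == 0)  // Load first, then barrier.
    return false;
  *out = NoBarrier_Load(payload);  // Guaranteed to observe the published 42.
  return true;
}
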
}  // namespace base::subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_