// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use base/atomicops.h instead.

#ifndef BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
#define BASE_ATOMICOPS_INTERNALS_X86_GCC_H_

#include "base/base_export.h"

// This struct is not part of the public API of this module; clients may not
// use it.  (However, it's exported via BASE_EXPORT because clients implicitly
// do use it at link time by inlining these functions.)
// Features of this x86.  Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                             // after acquire compare-and-swap.
  bool has_sse2;             // Processor has SSE2.
};
BASE_EXPORT extern struct AtomicOps_x86CPUFeatureStruct
    AtomicOps_Internalx86CPUFeatures;

#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
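// Note: the empty asm statement above emits no machine instruction; the
// "memory" clobber only stops the compiler from reordering or caching memory
// accesses across it.  Ordering at the CPU level comes from the x86 memory
// model and from the explicit fences used below.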

namespace base {
namespace subtle {

// 32-bit low-level operations on any platform.

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  __asm__ __volatile__("lock; cmpxchgl %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  __asm__ __volatile__("xchgl %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now it's the previous value.
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old value of *ptr
  return temp + increment;
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old value of *ptr
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return temp + increment;
}
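
// Illustrative sketch (not part of this header's API): how the two increment
// flavors are typically split in a thread-safe reference count, assuming a
// hypothetical shared Atomic32 "refcount" and "Destroy()" function.
//
//   NoBarrier_AtomicIncrement(&refcount, 1);          // AddRef needs no
//                                                     // ordering guarantee.
//   if (Barrier_AtomicIncrement(&refcount, -1) == 0)  // Release: the barrier
//     Destroy();                                      // makes earlier writes
//                                                     // visible before
//                                                     // destruction.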

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
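
// Illustrative sketch (not part of this header's API): the acquire
// compare-and-swap is what a minimal spinlock needs, assuming a hypothetical
// Atomic32 "lock_word" that is 0 when free and 1 when held.  Release_Store()
// (declared below) publishes the unlock.
//
//   while (Acquire_CompareAndSwap(&lock_word, 0, 1) != 0) {
//     // Spin: another thread holds the lock.
//   }
//   // ... critical section ...
//   Release_Store(&lock_word, 0);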

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

#if defined(__x86_64__)

// 64-bit implementations of memory barrier can be simpler, because the
// "mfence" instruction is guaranteed to exist.
inline void MemoryBarrier() {
  __asm__ __volatile__("mfence" : : : "memory");
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

#else

inline void MemoryBarrier() {
  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
    __asm__ __volatile__("mfence" : : : "memory");
  } else {  // mfence is faster but not present on PIII
    Atomic32 x = 0;
    NoBarrier_AtomicExchange(&x, 0);  // acts as a barrier on PIII
  }
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
    *ptr = value;
    __asm__ __volatile__("mfence" : : : "memory");
  } else {
    NoBarrier_AtomicExchange(ptr, value);  // acts as a barrier on PIII
  }
}
#endif

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ATOMICOPS_COMPILER_BARRIER();
  *ptr = value;  // An x86 store acts as a release barrier.
  // See comments in Atomic64 version of Release_Store(), below.
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;  // An x86 load acts as an acquire barrier.
  // See comments in Atomic64 version of Release_Store(), below.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
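
// Illustrative sketch (not part of this header's API): how Release_Store()
// and Acquire_Load() are meant to be paired, assuming hypothetical shared
// words "data" and "ready".
//
//   // Producer thread:            // Consumer thread:
//   data = 42;                     if (Acquire_Load(&ready) == 1) {
//   Release_Store(&ready, 1);        // "data" is guaranteed to be 42 here.
//                                  }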

#if defined(__x86_64__)

// 64-bit low-level operations on 64-bit platform.

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev;
  __asm__ __volatile__("lock; cmpxchgq %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  __asm__ __volatile__("xchgq %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now it's the previous value.
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 temp = increment;
  __asm__ __volatile__("lock; xaddq %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now contains the previous value of *ptr
  return temp + increment;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  Atomic64 temp = increment;
  __asm__ __volatile__("lock; xaddq %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now contains the previous value of *ptr
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return temp + increment;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  ATOMICOPS_COMPILER_BARRIER();

  *ptr = value;  // An x86 store acts as a release barrier
                 // for current AMD/Intel chips as of Jan 2008.
                 // See also Acquire_Load(), below.

  // When new chips come out, check:
  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //  System Programming Guide, Chapter 7: Multiple-processor management,
  //  Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
  //
  // x86 stores/loads fail to act as barriers for a few instructions (clflush
  // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
  // not generated by the compiler, and are rare.  Users of these instructions
  // need to know about cache behaviour in any case since all of these involve
  // either flushing cache lines or non-temporal cache hints.
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;  // An x86 load acts as an acquire barrier,
                          // for current AMD/Intel chips as of Jan 2008.
                          // See also Release_Store(), above.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

#endif  // defined(__x86_64__)

}  // namespace base::subtle
}  // namespace base

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // BASE_ATOMICOPS_INTERNALS_X86_GCC_H_