// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
#define V8_ATOMICOPS_INTERNALS_X86_MSVC_H_

#include "win32-headers.h"

namespace v8 {
namespace internal {

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  LONG result = InterlockedCompareExchange(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(new_value),
      static_cast<LONG>(old_value));
  return static_cast<Atomic32>(result);
}

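// Illustrative sketch, not part of the original header: the usual
// compare-and-swap retry loop that callers build on top of
// NoBarrier_CompareAndSwap. The helper name Example_CasAdd is hypothetical.
inline Atomic32 Example_CasAdd(volatile Atomic32* ptr, Atomic32 delta) {
  Atomic32 old_value;
  do {
    // Re-read on every attempt; another thread may have won the race.
    old_value = *ptr;
  } while (NoBarrier_CompareAndSwap(ptr, old_value, old_value + delta) !=
           old_value);
  // NoBarrier_CompareAndSwap returns the previous value; equality with
  // old_value means our swap is the one that landed.
  return old_value + delta;
}
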
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  LONG result = InterlockedExchange(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(new_value));
  return static_cast<Atomic32>(result);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return InterlockedExchangeAdd(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(increment)) + increment;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

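// Illustrative sketch, not part of the original header: the post-increment
// return value of Barrier_AtomicIncrement is what a reference-count release
// needs. The helper name Example_RefCountDec is hypothetical; it returns
// true when the caller dropped the last reference.
inline bool Example_RefCountDec(volatile Atomic32* refcount) {
  // The Barrier_ variant maps to an Interlocked* call, which is a full
  // barrier on Windows, so earlier writes to the guarded object are visible
  // to whichever thread observes the count reach zero.
  return Barrier_AtomicIncrement(refcount, -1) == 0;
}
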
#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
#error "We require at least vs2005 for MemoryBarrier"
#endif

inline void MemoryBarrier() {
  // We use MemoryBarrier from WinNT.h
  ::MemoryBarrier();
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  NoBarrier_AtomicExchange(ptr, value);
  // acts as a barrier in this implementation
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
  // See comments in Atomic64 version of Release_Store() below.
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

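// Illustrative sketch, not part of the original header: the publication
// pattern the Release_Store/Acquire_Load pair supports. The helper names
// Example_Publish and Example_TryConsume are hypothetical.
inline void Example_Publish(volatile Atomic32* payload,
                            volatile Atomic32* ready,
                            Atomic32 value) {
  NoBarrier_Store(payload, value);
  // Release semantics: the payload store cannot be reordered after the
  // flag store, so a reader that sees the flag also sees the payload.
  Release_Store(ready, 1);
}

inline bool Example_TryConsume(volatile const Atomic32* payload,
                               volatile const Atomic32* ready,
                               Atomic32* out) {
  // Acquire semantics: once the flag is observed set, the payload read
  // below cannot be reordered before the flag read.
  if (Acquire_Load(ready) == 0) return false;
  *out = NoBarrier_Load(payload);
  return true;
}
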
#if defined(_WIN64)

// 64-bit low-level operations on 64-bit platform.

STATIC_ASSERT(sizeof(Atomic64) == sizeof(PVOID));

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  PVOID result = InterlockedCompareExchangePointer(
      reinterpret_cast<volatile PVOID*>(ptr),
      reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
  return reinterpret_cast<Atomic64>(result);
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  PVOID result = InterlockedExchangePointer(
      reinterpret_cast<volatile PVOID*>(ptr),
      reinterpret_cast<PVOID>(new_value));
  return reinterpret_cast<Atomic64>(result);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return InterlockedExchangeAdd64(
      reinterpret_cast<volatile LONGLONG*>(ptr),
      static_cast<LONGLONG>(increment)) + increment;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  NoBarrier_AtomicExchange(ptr, value);
  // acts as a barrier in this implementation
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005

  // When new chips come out, check:
  //   IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //   System Programming Guide, Chapter 7: Multiple-processor management,
  //   Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

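// Illustrative sketch, not part of the original header: the STATIC_ASSERT
// above guarantees Atomic64 is pointer-sized on _WIN64, so a pointer can be
// published through these 64-bit operations. ExampleNode and the helper
// names are hypothetical.
struct ExampleNode { int data; };

inline void Example_PublishNode(volatile Atomic64* slot, ExampleNode* node) {
  // Release semantics: node's fields become visible before the pointer does.
  Release_Store(slot, reinterpret_cast<Atomic64>(node));
}

inline ExampleNode* Example_AcquireNode(volatile const Atomic64* slot) {
  // May return NULL if nothing has been published into the slot yet.
  return reinterpret_cast<ExampleNode*>(Acquire_Load(slot));
}
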
#endif  // defined(_WIN64)

} }  // namespace v8::internal

#endif  // V8_ATOMICOPS_INTERNALS_X86_MSVC_H_