// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use atomicops.h instead.
//
// This implementation uses C++11 atomics' member functions. The code base is
// currently written assuming atomicity revolves around accesses instead of
// C++11's memory locations. The burden is on the programmer to ensure that all
// memory locations accessed atomically are never accessed non-atomically (tsan
// should help with this).
//
// TODO(jfb) Modify the atomicops.h API and user code to declare atomic
//           locations as truly atomic. See the static_assert below.
//
// Of note in this implementation:
//  * All NoBarrier variants are implemented as relaxed.
//  * All Barrier variants are implemented as sequentially-consistent.
//  * Compare exchange's failure ordering is always the same as the success one
//    (except for release, which fails as relaxed): using a weaker ordering is
//    only valid under certain uses of compare exchange.
//  * Atomic increment is expected to return the post-incremented value, whereas
//    C11 fetch add returns the previous value. The implementation therefore
//    needs to increment twice (which the compiler should be able to detect and
//    optimize).
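//
// For example (illustrative only, using the functions declared below):
//   Atomic32 v = 0;
//   Atomic32 r = Barrier_AtomicIncrement(&v, 1);
// leaves r == 1 (the post-incremented value), whereas std::atomic's
// fetch_add(1) would have returned 0.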

#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

#include <atomic>

#include "base/numerics/wrapping_math.h"
#include "build/build_config.h"

namespace base {
namespace subtle {

// This implementation is transitional and maintains the original API for
// atomicops.h. This requires casting memory locations to the atomic types, and
// assumes that the API and the C++11 implementation are layout-compatible,
// which isn't true for all implementations or hardware platforms. The static
// assertion should detect this issue; were it to fire, this header shouldn't
// be used.
//
// TODO(jfb) If this header manages to stay committed then the API should be
//           modified, and all call sites updated.
typedef volatile std::atomic<Atomic32>* AtomicLocation32;
static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
              "incompatible 32-bit atomic layout");
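
// A minimal sketch of the reinterpretation every function below performs,
// assuming the layout compatibility checked above (the function bodies use
// the equivalent C-style cast):
//   Atomic32 value = 0;
//   AtomicLocation32 loc = reinterpret_cast<AtomicLocation32>(&value);
//   loc->store(1, std::memory_order_relaxed);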

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_relaxed,
                                std::memory_order_relaxed);
  return old_value;
}
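
// Example (hypothetical caller code; `counter` is an illustrative
// volatile Atomic32, not part of this header): a relaxed read-modify-write
// loop built on the CAS above. The CAS succeeded iff the returned value
// equals the expected value that was passed in.
//   Atomic32 old_val, new_val;
//   do {
//     old_val = NoBarrier_Load(&counter);
//     new_val = old_val * 2;
//   } while (NoBarrier_CompareAndSwap(&counter, old_val, new_val) != old_val);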

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return ((AtomicLocation32)ptr)
      ->exchange(new_value, std::memory_order_relaxed);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return base::WrappingAdd(
      ((AtomicLocation32)ptr)->fetch_add(increment, std::memory_order_relaxed),
      increment);
}

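// Note: with no explicit ordering argument, fetch_add below defaults to
// std::memory_order_seq_cst, which provides the sequentially-consistent
// semantics promised by the Barrier variants.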
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return base::WrappingAdd(((AtomicLocation32)ptr)->fetch_add(increment),
                           increment);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_acquire,
                                std::memory_order_acquire);
  return old_value;
}

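// The failure ordering below is relaxed rather than release: a failed compare
// exchange performs no store, and C++11 forbids memory_order_release as a
// failure ordering. This is the "except for release" case noted at the top of
// this file.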
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_release,
                                std::memory_order_relaxed);
  return old_value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return ((AtomicLocation32)ptr)->load(std::memory_order_acquire);
}
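
// Example (hypothetical caller code; `payload` and `ready` are illustrative
// volatile Atomic32 locations, not part of this header): Release_Store and
// Acquire_Load pair up to implement the usual publish pattern.
//   Writer:
//     NoBarrier_Store(&payload, 42);  // plain data, written first
//     Release_Store(&ready, 1);       // publish
//   Reader:
//     if (Acquire_Load(&ready)) {
//       Atomic32 p = NoBarrier_Load(&payload);  // guaranteed to observe 42
//     }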

#if defined(ARCH_CPU_64_BITS)

using AtomicU64 = std::make_unsigned_t<Atomic64>;

typedef volatile std::atomic<Atomic64>* AtomicLocation64;
static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),
              "incompatible 64-bit atomic layout");

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_relaxed,
                                std::memory_order_relaxed);
  return old_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return ((AtomicLocation64)ptr)
      ->exchange(new_value, std::memory_order_relaxed);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return base::WrappingAdd(
      ((AtomicLocation64)ptr)->fetch_add(increment, std::memory_order_relaxed),
      increment);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return base::WrappingAdd(((AtomicLocation64)ptr)->fetch_add(increment),
                           increment);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_acquire,
                                std::memory_order_acquire);
  return old_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_release,
                                std::memory_order_relaxed);
  return old_value;
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return ((AtomicLocation64)ptr)->load(std::memory_order_acquire);
}

#endif  // defined(ARCH_CPU_64_BITS)
}  // namespace subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_