src/third_party/protobuf/src/google/protobuf/stubs/atomicops_internals_tsan.h
// Protocol Buffers - Google's data interchange format
// Copyright 2013 Google Inc.  All rights reserved.
// http://code.google.com/p/protobuf/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer (http://clang.llvm.org/docs/ThreadSanitizer.html).
// Use atomicops.h instead.
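//
// Illustrative sketch (not from the original file): clients normally include
// atomicops.h, which selects this implementation when building under TSan. A
// hypothetical reference-count helper written against that public API might
// look like the following; the helper name and its use are assumptions made
// for illustration only.
//
//   #include <google/protobuf/stubs/atomicops.h>
//
//   // Returns true when the last reference was released.
//   bool Unref(volatile google::protobuf::internal::Atomic32 *refcount) {
//     // Barrier_AtomicIncrement returns the post-increment value with
//     // acquire/release ordering, so the final decrementer observes all
//     // writes made while other references were alive.
//     return google::protobuf::internal::Barrier_AtomicIncrement(refcount, -1) == 0;
//   }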

#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_TSAN_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_TSAN_H_

#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

namespace google {
namespace protobuf {
namespace internal {

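// The declarations below mirror the ThreadSanitizer runtime's public atomic
// interface (tsan_interface_atomic.h in compiler-rt); duplicating them here
// presumably avoids a build-time dependency on the sanitizer headers.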
#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H

#ifdef __cplusplus
extern "C" {
#endif

typedef char  __tsan_atomic8;
typedef short __tsan_atomic16;  // NOLINT
typedef int   __tsan_atomic32;
typedef long  __tsan_atomic64;  // NOLINT

#if defined(__SIZEOF_INT128__) \
    || (__clang_major__ * 100 + __clang_minor__ >= 302)
typedef __int128 __tsan_atomic128;
#define __TSAN_HAS_INT128 1
#else
typedef char     __tsan_atomic128;
#define __TSAN_HAS_INT128 0
#endif

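// Memory orderings accepted by the __tsan_* entry points; they correspond to
// the C++11 std::memory_order enumerators of the same names.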
typedef enum {
  __tsan_memory_order_relaxed,
  __tsan_memory_order_consume,
  __tsan_memory_order_acquire,
  __tsan_memory_order_release,
  __tsan_memory_order_acq_rel,
  __tsan_memory_order_seq_cst,
} __tsan_memory_order;

__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
    __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
    __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
    __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
    __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
    __tsan_memory_order mo);

void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
    __tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
    __tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
    __tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
    __tsan_memory_order mo);
void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
    __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);

int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);

__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
    volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
    volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
    volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
    volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
    volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);

void __tsan_atomic_thread_fence(__tsan_memory_order mo);
void __tsan_atomic_signal_fence(__tsan_memory_order mo);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // #ifndef TSAN_INTERFACE_ATOMIC_H

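// The inline functions below implement the Atomic32/Atomic64 operations
// declared in atomicops.h in terms of the TSan entry points above, so the
// race detector can observe every atomic access. Each CompareAndSwap returns
// the value observed in *ptr, which equals old_value exactly when the swap
// succeeded.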
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
                                         Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_acquire);
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
                                          Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
                                        Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}

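// Note: Acquire_Store below (and Release_Load further down) pair a relaxed
// access with a full seq_cst thread fence rather than giving the access
// itself release/acquire ordering.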
inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

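// 64-bit variants of the operations above, implemented with the
// __tsan_atomic64_* entry points.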
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
                                         Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
                                          Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
                                        Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void MemoryBarrier() {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

}  // namespace internal
}  // namespace protobuf
}  // namespace google

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_TSAN_H_