sysdeps/i386/i486/bits/atomic.h  (platform/upstream/glibc.git)
/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <stdint.h>
#include <tls.h>        /* For tcbhead_t.  */


typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

#ifndef LOCK_PREFIX
# ifdef UP
#  define LOCK_PREFIX   /* nothing */
# else
#  define LOCK_PREFIX "lock;"
# endif
#endif
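
/* LOCK_PREFIX is prepended to the read-modify-write instructions below.
   On uniprocessor (UP) builds it expands to nothing: a single
   instruction is already atomic with respect to other threads on the
   same CPU, so the costly locked bus cycle can be skipped.  On SMP
   builds it expands to "lock;", which makes the following instruction
   atomic across processors.  */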


#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __sync_val_compare_and_swap (mem, oldval, newval)
#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  (! __sync_bool_compare_and_swap (mem, oldval, newval))


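/* For illustration only (mem, newmax and old are placeholder names):
   the val form returns the previous contents of *MEM, and the bool
   form returns whether the exchange FAILED, so a typical retry loop
   looks like

     int old = *mem;
     while (old < newmax)
       {
         int prev = atomic_compare_and_exchange_val_acq (mem, newmax, old);
         if (prev == old)
           break;              (the exchange happened)
         old = prev;           (lost the race; retry against the new value)
       }

   The __arch_c_* macros below are the single-thread-optimized variants
   used by the catomic_* operations: they test the multiple_threads
   field of the TCB at %gs:offsetof (tcbhead_t, multiple_threads) and
   jump over the standalone "lock" prefix byte when the process is
   single-threaded, avoiding the locked bus cycle in that case.  */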
#define __arch_c_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;                                                     \
     __asm __volatile ("cmpl $0, %%gs:%P5\n\t"                                \
                       "je 0f\n\t"                                            \
                       "lock\n"                                               \
                       "0:\tcmpxchgb %b2, %1"                                 \
                       : "=a" (ret), "=m" (*mem)                              \
                       : "q" (newval), "m" (*mem), "0" (oldval),              \
                         "i" (offsetof (tcbhead_t, multiple_threads)));       \
     ret; })

#define __arch_c_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;                                                     \
     __asm __volatile ("cmpl $0, %%gs:%P5\n\t"                                \
                       "je 0f\n\t"                                            \
                       "lock\n"                                               \
                       "0:\tcmpxchgw %w2, %1"                                 \
                       : "=a" (ret), "=m" (*mem)                              \
                       : "r" (newval), "m" (*mem), "0" (oldval),              \
                         "i" (offsetof (tcbhead_t, multiple_threads)));       \
     ret; })

#define __arch_c_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;                                                     \
     __asm __volatile ("cmpl $0, %%gs:%P5\n\t"                                \
                       "je 0f\n\t"                                            \
                       "lock\n"                                               \
                       "0:\tcmpxchgl %2, %1"                                  \
                       : "=a" (ret), "=m" (*mem)                              \
                       : "r" (newval), "m" (*mem), "0" (oldval),              \
                         "i" (offsetof (tcbhead_t, multiple_threads)));       \
     ret; })

/* XXX We do not really need 64-bit compare-and-exchange, at least not
   at the moment.  Using it would cause portability problems, since not
   many other 32-bit architectures support such an operation.  So don't
   define any code for now.  If it is ever really needed, the code below
   can be used on the Intel Pentium and later, but NOT on the i486.  */
#if 1
# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval)          \
  ({ __typeof (*mem) ret = *(mem);                                            \
     abort ();                                                                \
     ret = (newval);                                                          \
     ret = (oldval);                                                          \
     ret; })
# define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval)        \
  ({ __typeof (*mem) ret = *(mem);                                            \
     abort ();                                                                \
     ret = (newval);                                                          \
     ret = (oldval);                                                          \
     ret; })
#else
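/* The code below (currently disabled, see above) implements the 64-bit
   compare-and-exchange with the cmpxchg8b instruction, available on the
   Pentium and later but not on the i486.  cmpxchg8b hard-codes its
   operands: the expected value in %edx:%eax and the new value in
   %ecx:%ebx.  Under -fPIC, %ebx holds the GOT pointer and cannot be
   named as a fixed operand, so the PIC variant receives the low word of
   the new value in %edi or %esi (the "DS" constraint) and swaps it into
   %ebx around the instruction with xchgl.  */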
# ifdef __PIC__
#  define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;                                                     \
     __asm __volatile ("xchgl %2, %%ebx\n\t"                                  \
                       LOCK_PREFIX "cmpxchg8b %1\n\t"                         \
                       "xchgl %2, %%ebx"                                      \
                       : "=A" (ret), "=m" (*mem)                              \
                       : "DS" (((unsigned long long int) (newval))            \
                               & 0xffffffff),                                 \
                         "c" (((unsigned long long int) (newval)) >> 32),     \
                         "m" (*mem), "a" (((unsigned long long int) (oldval)) \
                                          & 0xffffffff),                      \
                         "d" (((unsigned long long int) (oldval)) >> 32));    \
     ret; })

#  define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;                                                     \
     __asm __volatile ("xchgl %2, %%ebx\n\t"                                  \
                       "cmpl $0, %%gs:%P7\n\t"                                \
                       "je 0f\n\t"                                            \
                       "lock\n"                                               \
                       "0:\tcmpxchg8b %1\n\t"                                 \
                       "xchgl %2, %%ebx"                                      \
                       : "=A" (ret), "=m" (*mem)                              \
                       : "DS" (((unsigned long long int) (newval))            \
                               & 0xffffffff),                                 \
                         "c" (((unsigned long long int) (newval)) >> 32),     \
                         "m" (*mem), "a" (((unsigned long long int) (oldval)) \
                                          & 0xffffffff),                      \
                         "d" (((unsigned long long int) (oldval)) >> 32),     \
                         "i" (offsetof (tcbhead_t, multiple_threads)));       \
     ret; })
# else
#  define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;                                                     \
     __asm __volatile (LOCK_PREFIX "cmpxchg8b %1"                             \
                       : "=A" (ret), "=m" (*mem)                              \
                       : "b" (((unsigned long long int) (newval))             \
                              & 0xffffffff),                                  \
                         "c" (((unsigned long long int) (newval)) >> 32),     \
                         "m" (*mem), "a" (((unsigned long long int) (oldval)) \
                                          & 0xffffffff),                      \
                         "d" (((unsigned long long int) (oldval)) >> 32));    \
     ret; })

#  define __arch_c_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;                                                     \
     __asm __volatile ("cmpl $0, %%gs:%P7\n\t"                                \
                       "je 0f\n\t"                                            \
                       "lock\n"                                               \
                       "0:\tcmpxchg8b %1"                                     \
                       : "=A" (ret), "=m" (*mem)                              \
                       : "b" (((unsigned long long int) (newval))             \
                              & 0xffffffff),                                  \
                         "c" (((unsigned long long int) (newval)) >> 32),     \
                         "m" (*mem), "a" (((unsigned long long int) (oldval)) \
                                          & 0xffffffff),                      \
                         "d" (((unsigned long long int) (oldval)) >> 32),     \
                         "i" (offsetof (tcbhead_t, multiple_threads)));       \
     ret; })
# endif
#endif


/* Note that we need no lock prefix: xchg with a memory operand is
   implicitly locked.  */
#define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof (*mem) result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       __asm __volatile ("xchgb %b0, %1"                                      \
                         : "=q" (result), "=m" (*mem)                         \
                         : "0" (newvalue), "m" (*mem));                       \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile ("xchgw %w0, %1"                                      \
                         : "=r" (result), "=m" (*mem)                         \
                         : "0" (newvalue), "m" (*mem));                       \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile ("xchgl %0, %1"                                       \
                         : "=r" (result), "=m" (*mem)                         \
                         : "0" (newvalue), "m" (*mem));                       \
     else                                                                     \
       {                                                                      \
         result = 0;                                                          \
         abort ();                                                            \
       }                                                                      \
     result; })


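/* For illustration only (lock is a placeholder name): the unconditional
   exchange above is the building block of a simple test-and-set spin
   lock,

     while (atomic_exchange_acq (&lock, 1) != 0)
       atomic_delay ();          (atomic_delay is defined further below)

   The exchange-and-add operations that follow return the value the
   memory location held BEFORE the addition (fetch-and-add).  For 1-, 2-
   and 4-byte objects they compile to a single xadd; for any other size
   they fall back to a loop around the 64-bit compare-and-exchange,
   which on this i486 configuration simply aborts.  The trailing
   "i" (offsetof (tcbhead_t, multiple_threads)) input is used only by
   the conditional-lock prefix string (%P4 below) of the catomic_
   variant.  */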
#define __arch_exchange_and_add_body(lock, pfx, mem, value) \
  ({ __typeof (*mem) __result;                                                \
     __typeof (value) __addval = (value);                                     \
     if (sizeof (*mem) == 1)                                                  \
       __asm __volatile (lock "xaddb %b0, %1"                                 \
                         : "=q" (__result), "=m" (*mem)                       \
                         : "0" (__addval), "m" (*mem),                        \
                           "i" (offsetof (tcbhead_t, multiple_threads)));     \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (lock "xaddw %w0, %1"                                 \
                         : "=r" (__result), "=m" (*mem)                       \
                         : "0" (__addval), "m" (*mem),                        \
                           "i" (offsetof (tcbhead_t, multiple_threads)));     \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (lock "xaddl %0, %1"                                  \
                         : "=r" (__result), "=m" (*mem)                       \
                         : "0" (__addval), "m" (*mem),                        \
                           "i" (offsetof (tcbhead_t, multiple_threads)));     \
     else                                                                     \
       {                                                                      \
         __typeof (mem) __memp = (mem);                                       \
         __typeof (*mem) __tmpval;                                            \
         __result = *__memp;                                                  \
         do                                                                   \
           __tmpval = __result;                                               \
         while ((__result = pfx##_compare_and_exchange_val_64_acq             \
                 (__memp, __result + __addval, __result)) == __tmpval);       \
       }                                                                      \
     __result; })

#define atomic_exchange_and_add(mem, value) \
  __sync_fetch_and_add (mem, value)
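
/* Like the compare-and-exchange macros near the top of this file, the
   generic atomic_exchange_and_add maps directly onto the GCC __sync
   builtin, which returns the previous value of *MEM.  The catomic_
   variant below keeps the hand-written body so that the single-thread
   check can be folded into its lock prefix.  */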

#define __arch_exchange_and_add_cprefix \
  "cmpl $0, %%gs:%P4\n\tje 0f\n\tlock\n0:\t"

#define catomic_exchange_and_add(mem, value) \
  __arch_exchange_and_add_body (__arch_exchange_and_add_cprefix, __arch_c,    \
                                mem, value)


#define __arch_add_body(lock, pfx, mem, value) \
  do {                                                                        \
    if (__builtin_constant_p (value) && (value) == 1)                         \
      atomic_increment (mem);                                                 \
    else if (__builtin_constant_p (value) && (value) == -1)                   \
      atomic_decrement (mem);                                                 \
    else if (sizeof (*mem) == 1)                                              \
      __asm __volatile (lock "addb %b1, %0"                                   \
                        : "=m" (*mem)                                         \
                        : "iq" (value), "m" (*mem),                           \
                          "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 2)                                              \
      __asm __volatile (lock "addw %w1, %0"                                   \
                        : "=m" (*mem)                                         \
                        : "ir" (value), "m" (*mem),                           \
                          "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 4)                                              \
      __asm __volatile (lock "addl %1, %0"                                    \
                        : "=m" (*mem)                                         \
                        : "ir" (value), "m" (*mem),                           \
                          "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else                                                                      \
      {                                                                       \
        __typeof (value) __addval = (value);                                  \
        __typeof (mem) __memp = (mem);                                        \
        __typeof (*mem) __oldval = *__memp;                                   \
        __typeof (*mem) __tmpval;                                             \
        do                                                                    \
          __tmpval = __oldval;                                                \
        while ((__oldval = pfx##_compare_and_exchange_val_64_acq              \
                (__memp, __oldval + __addval, __oldval)) == __tmpval);        \
      }                                                                       \
  } while (0)

#define atomic_add(mem, value) \
  __arch_add_body (LOCK_PREFIX, __arch, mem, value)

#define __arch_add_cprefix \
  "cmpl $0, %%gs:%P3\n\tje 0f\n\tlock\n0:\t"

#define catomic_add(mem, value) \
  __arch_add_body (__arch_add_cprefix, __arch_c, mem, value)


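/* The two predicates below perform a locked add and report the result
   through the flags: atomic_add_negative returns nonzero when the sum
   is negative (sets, sign flag) and atomic_add_zero returns nonzero
   when the sum is zero (setz, zero flag).  Only 1-, 2- and 4-byte
   objects are supported; anything else aborts.  */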
#define atomic_add_negative(mem, value) \
  ({ unsigned char __result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1"                  \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "iq" (value), "m" (*mem));                         \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1"                  \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "ir" (value), "m" (*mem));                         \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1"                   \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "ir" (value), "m" (*mem));                         \
     else                                                                     \
       abort ();                                                              \
     __result; })


#define atomic_add_zero(mem, value) \
  ({ unsigned char __result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1"                  \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "iq" (value), "m" (*mem));                         \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1"                  \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "ir" (value), "m" (*mem));                         \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1"                   \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "ir" (value), "m" (*mem));                         \
     else                                                                     \
       abort ();                                                              \
     __result; })


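/* atomic_increment/atomic_decrement and their catomic_ variants use a
   single inc/dec instruction, optionally behind the lock prefix; %P2 in
   the conditional prefix strings is again the TCB offset of
   multiple_threads.  The _and_test forms return nonzero when the new
   value is zero (sete after the inc/dec).  Objects wider than 4 bytes
   fall back to the 64-bit compare-and-exchange, which aborts on this
   configuration.  */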
#define __arch_increment_body(lock,  pfx, mem) \
  do {                                                                        \
    if (sizeof (*mem) == 1)                                                   \
      __asm __volatile (lock "incb %b0"                                       \
                        : "=m" (*mem)                                         \
                        : "m" (*mem),                                         \
                          "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 2)                                              \
      __asm __volatile (lock "incw %w0"                                       \
                        : "=m" (*mem)                                         \
                        : "m" (*mem),                                         \
                          "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 4)                                              \
      __asm __volatile (lock "incl %0"                                        \
                        : "=m" (*mem)                                         \
                        : "m" (*mem),                                         \
                          "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else                                                                      \
      {                                                                       \
        __typeof (mem) __memp = (mem);                                        \
        __typeof (*mem) __oldval = *__memp;                                   \
        __typeof (*mem) __tmpval;                                             \
        do                                                                    \
          __tmpval = __oldval;                                                \
        while ((__oldval = pfx##_compare_and_exchange_val_64_acq              \
                (__memp, __oldval + 1, __oldval)) == __tmpval);               \
      }                                                                       \
  } while (0)

#define atomic_increment(mem) __arch_increment_body (LOCK_PREFIX, __arch, mem)

#define __arch_increment_cprefix \
  "cmpl $0, %%gs:%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_increment(mem) \
  __arch_increment_body (__arch_increment_cprefix, __arch_c, mem)


#define atomic_increment_and_test(mem) \
  ({ unsigned char __result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       __asm __volatile (LOCK_PREFIX "incb %0; sete %b1"                      \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "m" (*mem));                                       \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK_PREFIX "incw %0; sete %1"                       \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "m" (*mem));                                       \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK_PREFIX "incl %0; sete %1"                       \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "m" (*mem));                                       \
     else                                                                     \
       abort ();                                                              \
     __result; })


#define __arch_decrement_body(lock, pfx, mem) \
  do {                                                                        \
    if (sizeof (*mem) == 1)                                                   \
      __asm __volatile (lock "decb %b0"                                       \
                        : "=m" (*mem)                                         \
                        : "m" (*mem),                                         \
                          "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 2)                                              \
      __asm __volatile (lock "decw %w0"                                       \
                        : "=m" (*mem)                                         \
                        : "m" (*mem),                                         \
                          "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 4)                                              \
      __asm __volatile (lock "decl %0"                                        \
                        : "=m" (*mem)                                         \
                        : "m" (*mem),                                         \
                          "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else                                                                      \
      {                                                                       \
        __typeof (mem) __memp = (mem);                                        \
        __typeof (*mem) __oldval = *__memp;                                   \
        __typeof (*mem) __tmpval;                                             \
        do                                                                    \
          __tmpval = __oldval;                                                \
        while ((__oldval = pfx##_compare_and_exchange_val_64_acq              \
                (__memp, __oldval - 1, __oldval)) == __tmpval);               \
      }                                                                       \
  } while (0)

#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, __arch, mem)

#define __arch_decrement_cprefix \
  "cmpl $0, %%gs:%P2\n\tje 0f\n\tlock\n0:\t"

#define catomic_decrement(mem) \
  __arch_decrement_body (__arch_decrement_cprefix, __arch_c, mem)


#define atomic_decrement_and_test(mem) \
  ({ unsigned char __result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       __asm __volatile (LOCK_PREFIX "decb %b0; sete %1"                      \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "m" (*mem));                                       \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK_PREFIX "decw %w0; sete %1"                      \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "m" (*mem));                                       \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK_PREFIX "decl %0; sete %1"                       \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "m" (*mem));                                       \
     else                                                                     \
       abort ();                                                              \
     __result; })


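/* atomic_bit_set ORs a single bit into *MEM without reporting a result;
   atomic_bit_test_set uses bts followed by setc to set the bit and
   return its previous value (nonzero if the bit was already set).  In
   both cases BIT is a bit index, not a mask.  */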
#define atomic_bit_set(mem, bit) \
  do {                                                                        \
    if (sizeof (*mem) == 1)                                                   \
      __asm __volatile (LOCK_PREFIX "orb %b2, %0"                             \
                        : "=m" (*mem)                                         \
                        : "m" (*mem), "iq" (1 << (bit)));                     \
    else if (sizeof (*mem) == 2)                                              \
      __asm __volatile (LOCK_PREFIX "orw %w2, %0"                             \
                        : "=m" (*mem)                                         \
                        : "m" (*mem), "ir" (1 << (bit)));                     \
    else if (sizeof (*mem) == 4)                                              \
      __asm __volatile (LOCK_PREFIX "orl %2, %0"                              \
                        : "=m" (*mem)                                         \
                        : "m" (*mem), "ir" (1 << (bit)));                     \
    else                                                                      \
      abort ();                                                               \
  } while (0)


#define atomic_bit_test_set(mem, bit) \
  ({ unsigned char __result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       __asm __volatile (LOCK_PREFIX "btsb %3, %1; setc %0"                   \
                         : "=q" (__result), "=m" (*mem)                       \
                         : "m" (*mem), "ir" (bit));                           \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0"                   \
                         : "=q" (__result), "=m" (*mem)                       \
                         : "m" (*mem), "ir" (bit));                           \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0"                   \
                         : "=q" (__result), "=m" (*mem)                       \
                         : "m" (*mem), "ir" (bit));                           \
     else                                                                     \
       abort ();                                                              \
     __result; })


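/* "rep; nop" encodes the pause instruction, the spin-wait hint telling
   the CPU that it is executing a busy-wait loop.  */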
#define atomic_delay() asm ("rep; nop")


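/* The and/or bodies mirror the add body above: a single locked
   instruction for 1-, 2- and 4-byte objects, abort () for anything
   else.  __arch_cprefix below is the conditional-lock prefix shared by
   catomic_and and catomic_or (%P3 is the multiple_threads offset passed
   as the last input operand).  */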
#define __arch_and_body(lock, mem, mask) \
  do {                                                                        \
    if (sizeof (*mem) == 1)                                                   \
      __asm __volatile (lock "andb %b1, %0"                                   \
                        : "=m" (*mem)                                         \
                        : "iq" (mask), "m" (*mem),                            \
                          "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 2)                                              \
      __asm __volatile (lock "andw %w1, %0"                                   \
                        : "=m" (*mem)                                         \
                        : "ir" (mask), "m" (*mem),                            \
                          "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 4)                                              \
      __asm __volatile (lock "andl %1, %0"                                    \
                        : "=m" (*mem)                                         \
                        : "ir" (mask), "m" (*mem),                            \
                          "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else                                                                      \
      abort ();                                                               \
  } while (0)

#define __arch_cprefix \
  "cmpl $0, %%gs:%P3\n\tje 0f\n\tlock\n0:\t"

#define atomic_and(mem, mask) __arch_and_body (LOCK_PREFIX, mem, mask)

#define catomic_and(mem, mask) __arch_and_body (__arch_cprefix, mem, mask)


#define __arch_or_body(lock, mem, mask) \
  do {                                                                        \
    if (sizeof (*mem) == 1)                                                   \
      __asm __volatile (lock "orb %b1, %0"                                    \
                        : "=m" (*mem)                                         \
                        : "iq" (mask), "m" (*mem),                            \
                          "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 2)                                              \
      __asm __volatile (lock "orw %w1, %0"                                    \
                        : "=m" (*mem)                                         \
                        : "ir" (mask), "m" (*mem),                            \
                          "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else if (sizeof (*mem) == 4)                                              \
      __asm __volatile (lock "orl %1, %0"                                     \
                        : "=m" (*mem)                                         \
                        : "ir" (mask), "m" (*mem),                            \
                          "i" (offsetof (tcbhead_t, multiple_threads)));      \
    else                                                                      \
      abort ();                                                               \
  } while (0)

#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)

#define catomic_or(mem, mask) __arch_or_body (__arch_cprefix, mem, mask)