Imported Upstream version 4.8.1
[platform/upstream/gcc48.git] / libgcc / config / tilepro / atomic.c
1 /* TILE atomics.
2    Copyright (C) 2011-2013 Free Software Foundation, Inc.
3    Contributed by Walter Lee (walt@tilera.com)
4
5    This file is free software; you can redistribute it and/or modify it
6    under the terms of the GNU General Public License as published by the
7    Free Software Foundation; either version 3, or (at your option) any
8    later version.
9
10    This file is distributed in the hope that it will be useful, but
11    WITHOUT ANY WARRANTY; without even the implied warranty of
12    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13    General Public License for more details.
14
15    Under Section 7 of GPL version 3, you are granted additional
16    permissions described in the GCC Runtime Library Exception, version
17    3.1, as published by the Free Software Foundation.
18
19    You should have received a copy of the GNU General Public License and
20    a copy of the GCC Runtime Library Exception along with this program;
21    see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
22    <http://www.gnu.org/licenses/>.  */
23
24 #include "system.h"
25 #include "coretypes.h"
26 #include "atomic.h"
27
28 /* This code should be inlined by the compiler, but for now support
29    it as out-of-line methods in libgcc.  */
30
31 static void
32 pre_atomic_barrier (int model)
33 {
34   switch ((enum memmodel) model)
35     {
36     case MEMMODEL_RELEASE:
37     case MEMMODEL_ACQ_REL:
38     case MEMMODEL_SEQ_CST:
39       __atomic_thread_fence (model);
40       break;
41     default:
42       break;
43     }
44   return;
45 }
46
47 static void
48 post_atomic_barrier (int model)
49 {
50   switch ((enum memmodel) model)
51     {
52     case MEMMODEL_ACQUIRE:
53     case MEMMODEL_ACQ_REL:
54     case MEMMODEL_SEQ_CST:
55       __atomic_thread_fence (model);
56       break;
57     default:
58       break;
59     }
60   return;
61 }
62
/* Shorthand for marking intentionally-unused parameters.  */
#define __unused __attribute__((unused))

/* Emit __atomic_fetch_<opname>_<size>: atomically apply OPNAME to *P
   with operand I and return the PREVIOUS value of *P.  The underlying
   arch_atomic_<opname> primitive (presumably provided by atomic.h)
   already returns the old value, so no post-computation is needed.
   The pre/post barriers implement the caller-requested memory model
   around the primitive.  */
#define __atomic_fetch_and_do(type, size, opname)               \
type                                                            \
__atomic_fetch_##opname##_##size(type* p, type i, int model)    \
{                                                               \
  pre_atomic_barrier(model);                                    \
  type rv = arch_atomic_##opname(p, i);                         \
  post_atomic_barrier(model);                                   \
  return rv;                                                    \
}

/* Word (4-byte) fetch-and-OP variants.  */
__atomic_fetch_and_do (int, 4, add)
__atomic_fetch_and_do (int, 4, sub)
__atomic_fetch_and_do (int, 4, or)
__atomic_fetch_and_do (int, 4, and)
__atomic_fetch_and_do (int, 4, xor)
__atomic_fetch_and_do (int, 4, nand)
/* Doubleword (8-byte) fetch-and-OP variants.  */
__atomic_fetch_and_do (long long, 8, add)
__atomic_fetch_and_do (long long, 8, sub)
__atomic_fetch_and_do (long long, 8, or)
__atomic_fetch_and_do (long long, 8, and)
__atomic_fetch_and_do (long long, 8, xor)
__atomic_fetch_and_do (long long, 8, nand)
/* Emit __atomic_<opname>_fetch_<size>: atomically apply OPNAME to *P
   with operand I and return the NEW value of *P.  The arch primitive
   returns the OLD value, so the new value is recomputed locally as
   "OP2 (old OP i)".  The OP2 prefix argument is empty for every
   operation except nand, whose result is ~(old & i).

   Bug fix: the previous version instantiated xor with '|' (returning
   the or-fetch result) and nand with plain '&' (missing the bitwise
   negation required by the __atomic_nand_fetch contract).  */
#define __atomic_do_and_fetch(type, size, opname, op, op2)      \
type                                                            \
__atomic_##opname##_fetch_##size(type* p, type i, int model)    \
{                                                               \
  pre_atomic_barrier(model);                                    \
  type rv = op2 (arch_atomic_##opname(p, i) op i);              \
  post_atomic_barrier(model);                                   \
  return rv;                                                    \
}
/* Word (4-byte) OP-and-fetch variants.  */
__atomic_do_and_fetch (int, 4, add, +, )
__atomic_do_and_fetch (int, 4, sub, -, )
__atomic_do_and_fetch (int, 4, or, |, )
__atomic_do_and_fetch (int, 4, and, &, )
__atomic_do_and_fetch (int, 4, xor, ^, )
__atomic_do_and_fetch (int, 4, nand, &, ~)
/* Doubleword (8-byte) OP-and-fetch variants.  */
__atomic_do_and_fetch (long long, 8, add, +, )
__atomic_do_and_fetch (long long, 8, sub, -, )
__atomic_do_and_fetch (long long, 8, or, |, )
__atomic_do_and_fetch (long long, 8, and, &, )
__atomic_do_and_fetch (long long, 8, xor, ^, )
__atomic_do_and_fetch (long long, 8, nand, &, ~)
/* Emit full-word (4-byte) and doubleword (8-byte) exchange methods:

   __atomic_compare_exchange_<size>: if *PTR equals *OLDVALP, store
   NEWVAL and return true; otherwise return false.  In either case
   *OLDVALP is updated to the value actually observed in *PTR.  The
   'weak' flag and the failure memory model are ignored; the success
   model MODELS is used for both barriers.

   __atomic_exchange_<size>: atomically store VAL into *PTR and return
   the previous contents.  */
#define __atomic_exchange_methods(type, size)                           \
bool                                                                    \
__atomic_compare_exchange_##size(volatile type* ptr, type* oldvalp,     \
                                 type newval, bool weak __unused,       \
                                 int models, int modelf __unused)       \
{                                                                       \
  type oldval = *oldvalp;                                               \
  pre_atomic_barrier(models);                                           \
  type retval = arch_atomic_val_compare_and_exchange(ptr, oldval, newval); \
  post_atomic_barrier(models);                                          \
  bool success = (retval == oldval);                                    \
  *oldvalp = retval;                                                    \
  return success;                                                       \
}                                                                       \
                                                                        \
type                                                                    \
__atomic_exchange_##size(volatile type* ptr, type val, int model)       \
{                                                                       \
  pre_atomic_barrier(model);                                            \
  type retval = arch_atomic_exchange(ptr, val);                         \
  post_atomic_barrier(model);                                           \
  return retval;                                                        \
}
__atomic_exchange_methods (int, 4)
__atomic_exchange_methods (long long, 8)
133
134 /* Subword methods require the same approach for both TILEPro and
135    TILE-Gx.  We load the background data for the word, insert the
136    desired subword piece, then compare-and-exchange it into place.  */
137 #define u8 unsigned char
138 #define u16 unsigned short
139 #define __atomic_subword_cmpxchg(type, size)                            \
140                                                                         \
141 bool                                                                    \
142 __atomic_compare_exchange_##size(volatile type* ptr, type* guess,       \
143                                  type val, bool weak __unused, int models, \
144                                  int modelf __unused)                   \
145 {                                                                       \
146   pre_atomic_barrier(models);                                           \
147   unsigned int *p = (unsigned int *)((unsigned long)ptr & ~3UL);        \
148   const int shift = ((unsigned long)ptr & 3UL) * 8;                     \
149   const unsigned int valmask = (1 << (sizeof(type) * 8)) - 1;           \
150   const unsigned int bgmask = ~(valmask << shift);                      \
151   unsigned int oldword = *p;                                            \
152   type oldval = (oldword >> shift) & valmask;                           \
153   if (__builtin_expect((oldval == *guess), 1)) {                        \
154     unsigned int word = (oldword & bgmask) | ((val & valmask) << shift); \
155     oldword = arch_atomic_val_compare_and_exchange(p, oldword, word);   \
156     oldval = (oldword >> shift) & valmask;                              \
157   }                                                                     \
158   post_atomic_barrier(models);                                          \
159   bool success = (oldval == *guess);                                    \
160   *guess = oldval;                                                      \
161   return success;                                                       \
162 }
163 __atomic_subword_cmpxchg (u8, 1)
164 __atomic_subword_cmpxchg (u16, 2)
165 /* For the atomic-update subword methods, we use the same approach as
166    above, but we retry until we succeed if the compare-and-exchange
167    fails.  */
/* Generic subword read-modify-write loop.  PROTO is the complete
   function prototype; TOP runs on entry (the pre barrier); EXPR
   computes the new subword value from 'oldval' (the current subword)
   and the prototype's arguments; BOTTOM runs on exit (the post
   barrier and return statement).  The body splices the new subword
   into the containing aligned 4-byte word and compare-exchanges the
   whole word, retrying until no other writer intervened.  Both
   'oldval' (the previous subword) and 'val' (the newly computed
   subword) are in scope for BOTTOM's return expression.  */
#define __atomic_subword(type, proto, top, expr, bottom)                \
proto                                                                   \
{                                                                       \
  top                                                                   \
  unsigned int *p = (unsigned int *)((unsigned long)ptr & ~3UL);        \
  const int shift = ((unsigned long)ptr & 3UL) * 8;                     \
  const unsigned int valmask = (1 << (sizeof(type) * 8)) - 1;           \
  const unsigned int bgmask = ~(valmask << shift);                      \
  unsigned int oldword, xword = *p;                                     \
  type val, oldval;                                                     \
  do {                                                                  \
    oldword = xword;                                                    \
    oldval = (oldword >> shift) & valmask;                              \
    val = expr;                                                         \
    unsigned int word = (oldword & bgmask) | ((val & valmask) << shift); \
    xword = arch_atomic_val_compare_and_exchange(p, oldword, word);     \
  } while (__builtin_expect(xword != oldword, 0));                      \
  bottom                                                                \
}
/* Emit a subword fetch-and-OP or OP-and-fetch method.  EXPR computes
   the new subword from 'oldval' and operand 'i'; RETVAL selects what
   the caller gets back: 'oldval' (the previous value, for the
   fetch_OP family) or 'val' (the newly stored value, for the OP_fetch
   family).  */
#define __atomic_subword_fetch(type, funcname, expr, retval)            \
  __atomic_subword(type,                                                \
                   type __atomic_ ## funcname(volatile type *ptr, type i, int model), \
                   pre_atomic_barrier(model);,                          \
                   expr,                                                \
                   post_atomic_barrier(model); return retval;)
/* fetch-and-OP: return the OLD subword value.  */
__atomic_subword_fetch (u8, fetch_add_1, oldval + i, oldval)
__atomic_subword_fetch (u8, fetch_sub_1, oldval - i, oldval)
__atomic_subword_fetch (u8, fetch_or_1, oldval | i, oldval)
__atomic_subword_fetch (u8, fetch_and_1, oldval & i, oldval)
__atomic_subword_fetch (u8, fetch_xor_1, oldval ^ i, oldval)
__atomic_subword_fetch (u8, fetch_nand_1, ~(oldval & i), oldval)
__atomic_subword_fetch (u16, fetch_add_2, oldval + i, oldval)
__atomic_subword_fetch (u16, fetch_sub_2, oldval - i, oldval)
__atomic_subword_fetch (u16, fetch_or_2, oldval | i, oldval)
__atomic_subword_fetch (u16, fetch_and_2, oldval & i, oldval)
__atomic_subword_fetch (u16, fetch_xor_2, oldval ^ i, oldval)
__atomic_subword_fetch (u16, fetch_nand_2, ~(oldval & i), oldval)
/* OP-and-fetch: return the NEW subword value.  */
__atomic_subword_fetch (u8, add_fetch_1, oldval + i, val)
__atomic_subword_fetch (u8, sub_fetch_1, oldval - i, val)
__atomic_subword_fetch (u8, or_fetch_1, oldval | i, val)
__atomic_subword_fetch (u8, and_fetch_1, oldval & i, val)
__atomic_subword_fetch (u8, xor_fetch_1, oldval ^ i, val)
__atomic_subword_fetch (u8, nand_fetch_1, ~(oldval & i), val)
__atomic_subword_fetch (u16, add_fetch_2, oldval + i, val)
__atomic_subword_fetch (u16, sub_fetch_2, oldval - i, val)
__atomic_subword_fetch (u16, or_fetch_2, oldval | i, val)
__atomic_subword_fetch (u16, and_fetch_2, oldval & i, val)
__atomic_subword_fetch (u16, xor_fetch_2, oldval ^ i, val)
__atomic_subword_fetch (u16, nand_fetch_2, ~(oldval & i), val)
/* Emit a subword exchange method: unconditionally store NVAL into the
   subword (via the retrying word-wide CAS loop) and return the
   previous subword value.  */
#define __atomic_subword_lock(type, size)                               \
                                                                        \
__atomic_subword(type,                                                  \
                 type __atomic_exchange_##size(volatile type* ptr, type nval, int model), \
                 pre_atomic_barrier(model);,                            \
                 nval,                                                  \
                 post_atomic_barrier(model); return oldval;)
__atomic_subword_lock (u8, 1)
__atomic_subword_lock (u16, 2)