84eab2ce11321943b1e074da5798e019a6e59230
[platform/upstream/v8.git] / src / runtime / runtime-atomics.cc
1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/runtime/runtime-utils.h"
6
7 #include "src/arguments.h"
8 #include "src/base/macros.h"
9 #include "src/base/platform/mutex.h"
10 #include "src/conversions-inl.h"
11 #include "src/factory.h"
12
13 // Implement Atomic accesses to SharedArrayBuffers as defined in the
14 // SharedArrayBuffer draft spec, found here
15 // https://github.com/lars-t-hansen/ecmascript_sharedmem
16
17 namespace v8 {
18 namespace internal {
19
20 namespace {
21
// Returns true when an atomic access of |size| bytes can be performed
// without a lock on all supported platforms (1-, 2- and 4-byte accesses).
inline bool AtomicIsLockFree(uint32_t size) {
  switch (size) {
    case 1:
    case 2:
    case 4:
      return true;
    default:
      return false;
  }
}
25
#if V8_CC_GNU

// GCC/Clang implementations: thin wrappers over the __atomic builtins,
// all using sequentially-consistent ordering as the draft spec requires.

// Atomically compares *p with oldval and, if equal, stores newval.
// Returns the value *p held before the operation.
template <typename T>
inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
  (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST,
                                    __ATOMIC_SEQ_CST);
  // On failure the builtin writes the current contents of *p back into
  // oldval, so returning oldval yields the pre-operation value either way.
  return oldval;
}

// Sequentially-consistent load of *p.
template <typename T>
inline T LoadSeqCst(T* p) {
  T result;
  __atomic_load(p, &result, __ATOMIC_SEQ_CST);
  return result;
}

// Sequentially-consistent store of value into *p.
template <typename T>
inline void StoreSeqCst(T* p, T value) {
  __atomic_store_n(p, value, __ATOMIC_SEQ_CST);
}

// Each fetch-op wrapper below returns the value *p held BEFORE the
// operation was applied, matching the Atomics.* return convention.

template <typename T>
inline T AddSeqCst(T* p, T value) {
  return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T SubSeqCst(T* p, T value) {
  return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T AndSeqCst(T* p, T value) {
  return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T OrSeqCst(T* p, T value) {
  return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T XorSeqCst(T* p, T value) {
  return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST);
}

// Atomically replaces *p with value and returns the previous contents.
template <typename T>
inline T ExchangeSeqCst(T* p, T value) {
  return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
}
76
77 #elif V8_CC_MSVC
78
79 #define InterlockedCompareExchange32 _InterlockedCompareExchange
80 #define InterlockedExchange32 _InterlockedExchange
81 #define InterlockedExchangeAdd32 _InterlockedExchangeAdd
82 #define InterlockedAnd32 _InterlockedAnd
83 #define InterlockedOr32 _InterlockedOr
84 #define InterlockedXor32 _InterlockedXor
85 #define InterlockedExchangeAdd16 _InterlockedExchangeAdd16
86 #define InterlockedCompareExchange8 _InterlockedCompareExchange8
87 #define InterlockedExchangeAdd8 _InterlockedExchangeAdd8
88
89 #define ATOMIC_OPS(type, suffix, vctype)                                    \
90   inline type AddSeqCst(type* p, type value) {                              \
91     return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),     \
92                                           bit_cast<vctype>(value));         \
93   }                                                                         \
94   inline type SubSeqCst(type* p, type value) {                              \
95     return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),     \
96                                           -bit_cast<vctype>(value));        \
97   }                                                                         \
98   inline type AndSeqCst(type* p, type value) {                              \
99     return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p),             \
100                                   bit_cast<vctype>(value));                 \
101   }                                                                         \
102   inline type OrSeqCst(type* p, type value) {                               \
103     return InterlockedOr##suffix(reinterpret_cast<vctype*>(p),              \
104                                  bit_cast<vctype>(value));                  \
105   }                                                                         \
106   inline type XorSeqCst(type* p, type value) {                              \
107     return InterlockedXor##suffix(reinterpret_cast<vctype*>(p),             \
108                                   bit_cast<vctype>(value));                 \
109   }                                                                         \
110   inline type ExchangeSeqCst(type* p, type value) {                         \
111     return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p),        \
112                                        bit_cast<vctype>(value));            \
113   }                                                                         \
114                                                                             \
115   inline type CompareExchangeSeqCst(type* p, type oldval, type newval) {    \
116     return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
117                                               bit_cast<vctype>(newval),     \
118                                               bit_cast<vctype>(oldval));    \
119   }                                                                         \
120   inline type LoadSeqCst(type* p) { return *p; }                            \
121   inline void StoreSeqCst(type* p, type value) {                            \
122     InterlockedExchange##suffix(reinterpret_cast<vctype*>(p),               \
123                                 bit_cast<vctype>(value));                   \
124   }
125
126 ATOMIC_OPS(int8_t, 8, char)
127 ATOMIC_OPS(uint8_t, 8, char)
128 ATOMIC_OPS(int16_t, 16, short)  /* NOLINT(runtime/int) */
129 ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */
130 ATOMIC_OPS(int32_t, 32, long)   /* NOLINT(runtime/int) */
131 ATOMIC_OPS(uint32_t, 32, long)  /* NOLINT(runtime/int) */
132
133 #undef ATOMIC_OPS_INTEGER
134 #undef ATOMIC_OPS
135
136 #undef InterlockedCompareExchange32
137 #undef InterlockedExchange32
138 #undef InterlockedExchangeAdd32
139 #undef InterlockedAnd32
140 #undef InterlockedOr32
141 #undef InterlockedXor32
142 #undef InterlockedExchangeAdd16
143 #undef InterlockedCompareExchange8
144 #undef InterlockedExchangeAdd8
145
146 #else
147
148 #error Unsupported platform!
149
150 #endif
151
// Converts a JS number object to the requested C integer type using the
// ECMAScript ToUint32/ToInt32 conversions (wrapping modulo 2^32). Only the
// 32-bit specializations exist; narrower element types convert via their
// 32-bit convert_type first (see FromObjectTraits).
template <typename T>
T FromObject(Handle<Object> number);

template <>
inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int32_t FromObject<int32_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}
164
// Narrows a converted JS value (F) to the in-memory atomic representation
// (T); a plain static_cast, i.e. truncation modulo the target width.
template <typename T, typename F>
inline T ToAtomic(F from) {
  const T narrowed = static_cast<T>(from);
  return narrowed;
}
169
// Widens a raw atomic memory value (F) back to the type (T) that gets
// boxed by ToObject; a plain static_cast.
template <typename T, typename F>
inline T FromAtomic(F from) {
  const T widened = static_cast<T>(from);
  return widened;
}
174
// Boxes a C integer as a JS number object. Values of 16 bits or fewer
// always fit in a Smi; 32-bit values go through the factory, which may
// allocate a heap number.
template <typename T>
inline Object* ToObject(Isolate* isolate, T t);

template <>
inline Object* ToObject<int8_t>(Isolate* isolate, int8_t t) {
  return Smi::FromInt(t);
}

template <>
inline Object* ToObject<uint8_t>(Isolate* isolate, uint8_t t) {
  return Smi::FromInt(t);
}

template <>
inline Object* ToObject<int16_t>(Isolate* isolate, int16_t t) {
  return Smi::FromInt(t);
}

template <>
inline Object* ToObject<uint16_t>(Isolate* isolate, uint16_t t) {
  return Smi::FromInt(t);
}

template <>
inline Object* ToObject<int32_t>(Isolate* isolate, int32_t t) {
  // May allocate: a 32-bit value does not always fit in a Smi.
  return *isolate->factory()->NewNumber(t);
}

template <>
inline Object* ToObject<uint32_t>(Isolate* isolate, uint32_t t) {
  // May allocate: a 32-bit value does not always fit in a Smi.
  return *isolate->factory()->NewNumber(t);
}
207
// Maps an element type T to:
//  - convert_type: the (always 32-bit) type used when converting the JS
//    argument, so narrow elements still get full ToInt32/ToUint32
//    conversion semantics before truncation, and
//  - atomic_type: the exact in-memory representation the atomic helpers
//    operate on.
template <typename T>
struct FromObjectTraits {};

template <>
struct FromObjectTraits<int8_t> {
  typedef int32_t convert_type;
  typedef int8_t atomic_type;
};

template <>
struct FromObjectTraits<uint8_t> {
  typedef uint32_t convert_type;
  typedef uint8_t atomic_type;
};

template <>
struct FromObjectTraits<int16_t> {
  typedef int32_t convert_type;
  typedef int16_t atomic_type;
};

template <>
struct FromObjectTraits<uint16_t> {
  typedef uint32_t convert_type;
  typedef uint16_t atomic_type;
};

template <>
struct FromObjectTraits<int32_t> {
  typedef int32_t convert_type;
  typedef int32_t atomic_type;
};

template <>
struct FromObjectTraits<uint32_t> {
  typedef uint32_t convert_type;
  typedef uint32_t atomic_type;
};
246
247
// Seq-cst compare-exchange on element |index| of |buffer|: stores |newobj|
// only if the element currently equals |oldobj|. Both operands are first
// converted with ToInt32/ToUint32 semantics, then truncated to the element
// width. Returns the element's pre-operation value boxed as a JS number.
template <typename T>
inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
                                 Handle<Object> oldobj, Handle<Object> newobj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type oldval = ToAtomic<atomic_type>(FromObject<convert_type>(oldobj));
  atomic_type newval = ToAtomic<atomic_type>(FromObject<convert_type>(newobj));
  atomic_type result = CompareExchangeSeqCst(
      static_cast<atomic_type*>(buffer) + index, oldval, newval);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}
259
260
// Seq-cst load of element |index| of |buffer|, boxed as a JS number.
template <typename T>
inline Object* DoLoad(Isolate* isolate, void* buffer, size_t index) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  atomic_type result = LoadSeqCst(static_cast<atomic_type*>(buffer) + index);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}
267
268
// Seq-cst store of |obj| (converted with ToInt32/ToUint32 semantics, then
// truncated) into element |index|. Returns the input object itself, not
// the element's previous value.
template <typename T>
inline Object* DoStore(Isolate* isolate, void* buffer, size_t index,
                       Handle<Object> obj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
  StoreSeqCst(static_cast<atomic_type*>(buffer) + index, value);
  return *obj;
}
278
279
// Atomic fetch-add of |obj| into element |index|; returns the
// pre-operation element value boxed as a JS number.
template <typename T>
inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
  atomic_type result =
      AddSeqCst(static_cast<atomic_type*>(buffer) + index, value);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}
290
291
// Atomic fetch-subtract of |obj| from element |index|; returns the
// pre-operation element value boxed as a JS number.
template <typename T>
inline Object* DoSub(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
  atomic_type result =
      SubSeqCst(static_cast<atomic_type*>(buffer) + index, value);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}
302
303
// Atomic fetch-AND of |obj| into element |index|; returns the
// pre-operation element value boxed as a JS number.
template <typename T>
inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
  atomic_type result =
      AndSeqCst(static_cast<atomic_type*>(buffer) + index, value);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}
314
315
// Atomic fetch-OR of |obj| into element |index|; returns the
// pre-operation element value boxed as a JS number.
template <typename T>
inline Object* DoOr(Isolate* isolate, void* buffer, size_t index,
                    Handle<Object> obj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
  atomic_type result =
      OrSeqCst(static_cast<atomic_type*>(buffer) + index, value);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}
326
327
// Atomic fetch-XOR of |obj| into element |index|; returns the
// pre-operation element value boxed as a JS number.
template <typename T>
inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
  atomic_type result =
      XorSeqCst(static_cast<atomic_type*>(buffer) + index, value);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}
338
339
// Atomically replaces element |index| with |obj|; returns the
// pre-operation element value boxed as a JS number.
template <typename T>
inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index,
                          Handle<Object> obj) {
  typedef typename FromObjectTraits<T>::atomic_type atomic_type;
  typedef typename FromObjectTraits<T>::convert_type convert_type;
  atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
  atomic_type result =
      ExchangeSeqCst(static_cast<atomic_type*>(buffer) + index, value);
  return ToObject<T>(isolate, FromAtomic<T>(result));
}
350
351
352 // Uint8Clamped functions
353
// Clamps a 32-bit value to the Uint8Clamped range [0, 255].
uint8_t ClampToUint8(int32_t value) {
  if (value <= 0) return 0;
  return value >= 255 ? 255 : static_cast<uint8_t>(value);
}
359
360
// Compare-exchange for Uint8ClampedArray elements: both operands are
// clamped to [0, 255] before the seq-cst CAS. Returns the element's
// pre-operation value boxed as a JS number.
inline Object* DoCompareExchangeUint8Clamped(Isolate* isolate, void* buffer,
                                             size_t index,
                                             Handle<Object> oldobj,
                                             Handle<Object> newobj) {
  typedef int32_t convert_type;
  typedef uint8_t atomic_type;
  atomic_type oldval = ClampToUint8(FromObject<convert_type>(oldobj));
  atomic_type newval = ClampToUint8(FromObject<convert_type>(newobj));
  atomic_type result = CompareExchangeSeqCst(
      static_cast<atomic_type*>(buffer) + index, oldval, newval);
  return ToObject<uint8_t>(isolate, FromAtomic<uint8_t>(result));
}
373
374
// Store for Uint8ClampedArray elements: the value is clamped to [0, 255]
// before the seq-cst store. Returns the (unclamped) input object itself.
inline Object* DoStoreUint8Clamped(Isolate* isolate, void* buffer, size_t index,
                                   Handle<Object> obj) {
  typedef int32_t convert_type;
  typedef uint8_t atomic_type;
  atomic_type value = ClampToUint8(FromObject<convert_type>(obj));
  StoreSeqCst(static_cast<atomic_type*>(buffer) + index, value);
  return *obj;
}
383
384
// Read-modify-write ops for Uint8ClampedArray. There is no clamping
// hardware atomic, so each op is emulated with a CAS loop: read the
// current byte, apply |op| in int32 space, clamp the result to [0, 255],
// and retry until no other thread raced the update (i.e. the CAS observed
// the same |expected| we read). Returns the PRE-operation element value.
#define DO_UINT8_CLAMPED_OP(name, op)                                        \
  inline Object* Do##name##Uint8Clamped(Isolate* isolate, void* buffer,      \
                                        size_t index, Handle<Object> obj) {  \
    typedef int32_t convert_type;                                            \
    typedef uint8_t atomic_type;                                             \
    atomic_type* p = static_cast<atomic_type*>(buffer) + index;              \
    convert_type operand = FromObject<convert_type>(obj);                    \
    atomic_type expected;                                                    \
    atomic_type result;                                                      \
    do {                                                                     \
      expected = *p;                                                         \
      result = ClampToUint8(static_cast<convert_type>(expected) op operand); \
    } while (CompareExchangeSeqCst(p, expected, result) != expected);        \
    return ToObject<uint8_t>(isolate, expected);                             \
  }

DO_UINT8_CLAMPED_OP(Add, +)
DO_UINT8_CLAMPED_OP(Sub, -)
DO_UINT8_CLAMPED_OP(And, &)
DO_UINT8_CLAMPED_OP(Or, | )
DO_UINT8_CLAMPED_OP(Xor, ^)

#undef DO_UINT8_CLAMPED_OP
408
409
// Exchange for Uint8ClampedArray elements: atomically replaces element
// |index| with the clamped value of |obj|. Emulated with a CAS loop (a
// plain hardware exchange cannot clamp). Returns the pre-operation value.
inline Object* DoExchangeUint8Clamped(Isolate* isolate, void* buffer,
                                      size_t index, Handle<Object> obj) {
  typedef int32_t convert_type;
  typedef uint8_t atomic_type;
  atomic_type* p = static_cast<atomic_type*>(buffer) + index;
  atomic_type result = ClampToUint8(FromObject<convert_type>(obj));
  atomic_type expected;
  do {
    // Re-read until the CAS observes *p unchanged since our read.
    expected = *p;
  } while (CompareExchangeSeqCst(p, expected, result) != expected);
  return ToObject<uint8_t>(isolate, expected);
}
422
423
424 }  // anonymous namespace
425
// Duplicated from objects.h (kept in sync manually); covers only the
// integer element kinds -- Uint8Clamped and the float kinds are handled
// separately by the callers below.
// V has parameters (Type, type, TYPE, C type, element_size)
#define INTEGER_TYPED_ARRAYS(V)          \
  V(Uint8, uint8, UINT8, uint8_t, 1)     \
  V(Int8, int8, INT8, int8_t, 1)         \
  V(Uint16, uint16, UINT16, uint16_t, 2) \
  V(Int16, int16, INT16, int16_t, 2)     \
  V(Uint32, uint32, UINT32, uint32_t, 4) \
  V(Int32, int32, INT32, int32_t, 4)
435
436
// Atomics.compareExchange(sta, index, oldValue, newValue): atomically
// replaces the element at |index| with |newobj| if it equals |oldobj|;
// returns the value the element held before the call.
RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 4);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(oldobj, 2);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(newobj, 3);
  // Atomics only operate on SharedArrayBuffer-backed arrays, in bounds.
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  // Dispatch on the element kind; one case per integer typed-array type.
  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoCompareExchange<ctype>(isolate, buffer, index, oldobj, newobj);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoCompareExchangeUint8Clamped(isolate, buffer, index, oldobj,
                                           newobj);

    default:
      break;
  }

  // Float element kinds never reach the runtime call.
  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
468
469
// Atomics.load(sta, index): seq-cst read of the element at |index|.
RUNTIME_FUNCTION(Runtime_AtomicsLoad) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 2);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  // Atomics only operate on SharedArrayBuffer-backed arrays, in bounds.
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  // Dispatch on the element kind; one case per integer typed-array type.
  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoLoad<ctype>(isolate, buffer, index);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      // Loads need no clamping; a clamped array reads as plain uint8.
      return DoLoad<uint8_t>(isolate, buffer, index);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
498
499
// Atomics.store(sta, index, value): seq-cst write; returns |value|.
RUNTIME_FUNCTION(Runtime_AtomicsStore) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  // Atomics only operate on SharedArrayBuffer-backed arrays, in bounds.
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  // Dispatch on the element kind; one case per integer typed-array type.
  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoStore<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoStoreUint8Clamped(isolate, buffer, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
529
530
// Atomics.add(sta, index, value): atomic fetch-add; returns the element's
// value from before the addition.
RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  // Atomics only operate on SharedArrayBuffer-backed arrays, in bounds.
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  // Dispatch on the element kind; one case per integer typed-array type.
  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoAdd<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoAddUint8Clamped(isolate, buffer, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
560
561
// Atomics.sub(sta, index, value): atomic fetch-subtract; returns the
// element's value from before the subtraction.
RUNTIME_FUNCTION(Runtime_AtomicsSub) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  // Atomics only operate on SharedArrayBuffer-backed arrays, in bounds.
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  // Dispatch on the element kind; one case per integer typed-array type.
  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoSub<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoSubUint8Clamped(isolate, buffer, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
591
592
// Atomics.and(sta, index, value): atomic fetch-AND; returns the element's
// value from before the operation.
RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  // Atomics only operate on SharedArrayBuffer-backed arrays, in bounds.
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  // Dispatch on the element kind; one case per integer typed-array type.
  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoAnd<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoAndUint8Clamped(isolate, buffer, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
622
623
// Atomics.or(sta, index, value): atomic fetch-OR; returns the element's
// value from before the operation.
RUNTIME_FUNCTION(Runtime_AtomicsOr) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  // Atomics only operate on SharedArrayBuffer-backed arrays, in bounds.
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  // Dispatch on the element kind; one case per integer typed-array type.
  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoOr<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoOrUint8Clamped(isolate, buffer, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
653
654
// Atomics.xor(sta, index, value): atomic fetch-XOR; returns the element's
// value from before the operation.
RUNTIME_FUNCTION(Runtime_AtomicsXor) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  // Atomics only operate on SharedArrayBuffer-backed arrays, in bounds.
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  // Dispatch on the element kind; one case per integer typed-array type.
  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoXor<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoXorUint8Clamped(isolate, buffer, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
684
685
// Atomics.exchange(sta, index, value): atomically replaces the element at
// |index| with |value|; returns the element's previous value.
RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  // Atomics only operate on SharedArrayBuffer-backed arrays, in bounds.
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  void* buffer = sta->GetBuffer()->backing_store();

  // Dispatch on the element kind; one case per integer typed-array type.
  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoExchange<ctype>(isolate, buffer, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoExchangeUint8Clamped(isolate, buffer, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
715
716
// Atomics.isLockFree(size): true iff an atomic access of |size| bytes
// (after ToUint32 conversion) is lock-free on all supported platforms.
RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0);
  uint32_t usize = NumberToUint32(*size);
  return isolate->heap()->ToBoolean(AtomicIsLockFree(usize));
}
724 }
725 }  // namespace v8::internal