1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/runtime/runtime-utils.h"
7 #include "src/arguments.h"
8 #include "src/base/macros.h"
9 #include "src/base/platform/mutex.h"
10 #include "src/conversions-inl.h"
11 #include "src/factory.h"
13 // Implement Atomic accesses to SharedArrayBuffers as defined in the
14 // SharedArrayBuffer draft spec, found here
15 // https://github.com/lars-t-hansen/ecmascript_sharedmem
// Atomics.isLockFree(size): per the SharedArrayBuffer draft spec, the 1-, 2-
// and 4-byte access sizes are the ones reported lock-free here.
inline bool AtomicIsLockFree(uint32_t size) {
  return size == 1 || size == 2 || size == 4;
}
// Sequentially consistent compare-and-swap: if *p == oldval, stores newval
// into *p. Returns the value of *p observed before the operation — the GCC
// builtin writes the observed value back into |oldval| on failure and leaves
// it unchanged (equal to the old *p) on success, so returning oldval covers
// both cases.
template <typename T>
inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
  (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST,
                                    __ATOMIC_SEQ_CST);
  return oldval;
}
// Sequentially consistent load of *p.
template <typename T>
inline T LoadSeqCst(T* p) {
  T result;
  __atomic_load(p, &result, __ATOMIC_SEQ_CST);
  return result;
}
// Sequentially consistent store of |value| into *p.
template <typename T>
inline void StoreSeqCst(T* p, T value) {
  __atomic_store_n(p, value, __ATOMIC_SEQ_CST);
}
// Seq-cst atomic *p += value; returns the value of *p before the addition.
template <typename T>
inline T AddSeqCst(T* p, T value) {
  return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
}
// Seq-cst atomic *p -= value; returns the value of *p before the subtraction.
template <typename T>
inline T SubSeqCst(T* p, T value) {
  return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST);
}
// Seq-cst atomic *p &= value; returns the value of *p before the AND.
template <typename T>
inline T AndSeqCst(T* p, T value) {
  return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST);
}
// Seq-cst atomic *p |= value; returns the value of *p before the OR.
template <typename T>
inline T OrSeqCst(T* p, T value) {
  return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST);
}
// Seq-cst atomic *p ^= value; returns the value of *p before the XOR.
template <typename T>
inline T XorSeqCst(T* p, T value) {
  return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST);
}
// Seq-cst atomic swap: stores |value| into *p and returns the previous *p.
template <typename T>
inline T ExchangeSeqCst(T* p, T value) {
  return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
}
79 #define InterlockedCompareExchange32 _InterlockedCompareExchange
80 #define InterlockedExchange32 _InterlockedExchange
81 #define InterlockedExchangeAdd32 _InterlockedExchangeAdd
82 #define InterlockedAnd32 _InterlockedAnd
83 #define InterlockedOr32 _InterlockedOr
84 #define InterlockedXor32 _InterlockedXor
85 #define InterlockedExchangeAdd16 _InterlockedExchangeAdd16
86 #define InterlockedCompareExchange8 _InterlockedCompareExchange8
87 #define InterlockedExchangeAdd8 _InterlockedExchangeAdd8
89 #define ATOMIC_OPS(type, suffix, vctype) \
90 inline type AddSeqCst(type* p, type value) { \
91 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
92 bit_cast<vctype>(value)); \
94 inline type SubSeqCst(type* p, type value) { \
95 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
96 -bit_cast<vctype>(value)); \
98 inline type AndSeqCst(type* p, type value) { \
99 return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \
100 bit_cast<vctype>(value)); \
102 inline type OrSeqCst(type* p, type value) { \
103 return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \
104 bit_cast<vctype>(value)); \
106 inline type XorSeqCst(type* p, type value) { \
107 return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \
108 bit_cast<vctype>(value)); \
110 inline type ExchangeSeqCst(type* p, type value) { \
111 return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \
112 bit_cast<vctype>(value)); \
115 inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \
116 return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
117 bit_cast<vctype>(newval), \
118 bit_cast<vctype>(oldval)); \
120 inline type LoadSeqCst(type* p) { return *p; } \
121 inline void StoreSeqCst(type* p, type value) { \
122 InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \
123 bit_cast<vctype>(value)); \
126 ATOMIC_OPS(int8_t, 8, char)
127 ATOMIC_OPS(uint8_t, 8, char)
128 ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */
129 ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */
130 ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */
131 ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */
133 #undef ATOMIC_OPS_INTEGER
136 #undef InterlockedCompareExchange32
137 #undef InterlockedExchange32
138 #undef InterlockedExchangeAdd32
139 #undef InterlockedAnd32
140 #undef InterlockedOr32
141 #undef InterlockedXor32
142 #undef InterlockedExchangeAdd16
143 #undef InterlockedCompareExchange8
144 #undef InterlockedExchangeAdd8
148 #error Unsupported platform!
152 template <typename T>
153 T FromObject(Handle<Object> number);
156 inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
157 return NumberToUint32(*number);
161 inline int32_t FromObject<int32_t>(Handle<Object> number) {
162 return NumberToInt32(*number);
// Narrows a boundary value (F) to the type the atomic op runs on (T),
// truncating exactly as C++ static_cast does.
template <typename T, typename F>
inline T ToAtomic(F from) {
  return static_cast<T>(from);
}
// Widens an atomic result (F) back to the element/boundary type (T) via
// static_cast.
template <typename T, typename F>
inline T FromAtomic(F from) {
  return static_cast<T>(from);
}
175 template <typename T>
176 inline Object* ToObject(Isolate* isolate, T t);
179 inline Object* ToObject<int8_t>(Isolate* isolate, int8_t t) {
180 return Smi::FromInt(t);
184 inline Object* ToObject<uint8_t>(Isolate* isolate, uint8_t t) {
185 return Smi::FromInt(t);
189 inline Object* ToObject<int16_t>(Isolate* isolate, int16_t t) {
190 return Smi::FromInt(t);
194 inline Object* ToObject<uint16_t>(Isolate* isolate, uint16_t t) {
195 return Smi::FromInt(t);
199 inline Object* ToObject<int32_t>(Isolate* isolate, int32_t t) {
200 return *isolate->factory()->NewNumber(t);
204 inline Object* ToObject<uint32_t>(Isolate* isolate, uint32_t t) {
205 return *isolate->factory()->NewNumber(t);
// Maps an element type T to the pair of types used by the Do* helpers:
//  - convert_type: the 32-bit type FromObject can actually produce, and
//  - atomic_type:  the exact-width type the atomic operation runs on.
template <typename T>
struct FromObjectTraits {};

template <>
struct FromObjectTraits<int8_t> {
  typedef int32_t convert_type;
  typedef int8_t atomic_type;
};

template <>
struct FromObjectTraits<uint8_t> {
  typedef uint32_t convert_type;
  typedef uint8_t atomic_type;
};

template <>
struct FromObjectTraits<int16_t> {
  typedef int32_t convert_type;
  typedef int16_t atomic_type;
};

template <>
struct FromObjectTraits<uint16_t> {
  typedef uint32_t convert_type;
  typedef uint16_t atomic_type;
};

template <>
struct FromObjectTraits<int32_t> {
  typedef int32_t convert_type;
  typedef int32_t atomic_type;
};

template <>
struct FromObjectTraits<uint32_t> {
  typedef uint32_t convert_type;
  typedef uint32_t atomic_type;
};
248 template <typename T>
249 inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
250 Handle<Object> oldobj, Handle<Object> newobj) {
251 typedef typename FromObjectTraits<T>::atomic_type atomic_type;
252 typedef typename FromObjectTraits<T>::convert_type convert_type;
253 atomic_type oldval = ToAtomic<atomic_type>(FromObject<convert_type>(oldobj));
254 atomic_type newval = ToAtomic<atomic_type>(FromObject<convert_type>(newobj));
255 atomic_type result = CompareExchangeSeqCst(
256 static_cast<atomic_type*>(buffer) + index, oldval, newval);
257 return ToObject<T>(isolate, FromAtomic<T>(result));
261 template <typename T>
262 inline Object* DoLoad(Isolate* isolate, void* buffer, size_t index) {
263 typedef typename FromObjectTraits<T>::atomic_type atomic_type;
264 atomic_type result = LoadSeqCst(static_cast<atomic_type*>(buffer) + index);
265 return ToObject<T>(isolate, FromAtomic<T>(result));
269 template <typename T>
270 inline Object* DoStore(Isolate* isolate, void* buffer, size_t index,
271 Handle<Object> obj) {
272 typedef typename FromObjectTraits<T>::atomic_type atomic_type;
273 typedef typename FromObjectTraits<T>::convert_type convert_type;
274 atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
275 StoreSeqCst(static_cast<atomic_type*>(buffer) + index, value);
280 template <typename T>
281 inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index,
282 Handle<Object> obj) {
283 typedef typename FromObjectTraits<T>::atomic_type atomic_type;
284 typedef typename FromObjectTraits<T>::convert_type convert_type;
285 atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
287 AddSeqCst(static_cast<atomic_type*>(buffer) + index, value);
288 return ToObject<T>(isolate, FromAtomic<T>(result));
292 template <typename T>
293 inline Object* DoSub(Isolate* isolate, void* buffer, size_t index,
294 Handle<Object> obj) {
295 typedef typename FromObjectTraits<T>::atomic_type atomic_type;
296 typedef typename FromObjectTraits<T>::convert_type convert_type;
297 atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
299 SubSeqCst(static_cast<atomic_type*>(buffer) + index, value);
300 return ToObject<T>(isolate, FromAtomic<T>(result));
304 template <typename T>
305 inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index,
306 Handle<Object> obj) {
307 typedef typename FromObjectTraits<T>::atomic_type atomic_type;
308 typedef typename FromObjectTraits<T>::convert_type convert_type;
309 atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
311 AndSeqCst(static_cast<atomic_type*>(buffer) + index, value);
312 return ToObject<T>(isolate, FromAtomic<T>(result));
316 template <typename T>
317 inline Object* DoOr(Isolate* isolate, void* buffer, size_t index,
318 Handle<Object> obj) {
319 typedef typename FromObjectTraits<T>::atomic_type atomic_type;
320 typedef typename FromObjectTraits<T>::convert_type convert_type;
321 atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
323 OrSeqCst(static_cast<atomic_type*>(buffer) + index, value);
324 return ToObject<T>(isolate, FromAtomic<T>(result));
328 template <typename T>
329 inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
330 Handle<Object> obj) {
331 typedef typename FromObjectTraits<T>::atomic_type atomic_type;
332 typedef typename FromObjectTraits<T>::convert_type convert_type;
333 atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
335 XorSeqCst(static_cast<atomic_type*>(buffer) + index, value);
336 return ToObject<T>(isolate, FromAtomic<T>(result));
340 template <typename T>
341 inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index,
342 Handle<Object> obj) {
343 typedef typename FromObjectTraits<T>::atomic_type atomic_type;
344 typedef typename FromObjectTraits<T>::convert_type convert_type;
345 atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
347 ExchangeSeqCst(static_cast<atomic_type*>(buffer) + index, value);
348 return ToObject<T>(isolate, FromAtomic<T>(result));
// Uint8Clamped functions

// Clamps an int32 into the [0, 255] range used by Uint8Clamped elements.
uint8_t ClampToUint8(int32_t value) {
  if (value < 0) return 0;
  if (value > 255) return 255;
  return value;
}
361 inline Object* DoCompareExchangeUint8Clamped(Isolate* isolate, void* buffer,
363 Handle<Object> oldobj,
364 Handle<Object> newobj) {
365 typedef int32_t convert_type;
366 typedef uint8_t atomic_type;
367 atomic_type oldval = ClampToUint8(FromObject<convert_type>(oldobj));
368 atomic_type newval = ClampToUint8(FromObject<convert_type>(newobj));
369 atomic_type result = CompareExchangeSeqCst(
370 static_cast<atomic_type*>(buffer) + index, oldval, newval);
371 return ToObject<uint8_t>(isolate, FromAtomic<uint8_t>(result));
375 inline Object* DoStoreUint8Clamped(Isolate* isolate, void* buffer, size_t index,
376 Handle<Object> obj) {
377 typedef int32_t convert_type;
378 typedef uint8_t atomic_type;
379 atomic_type value = ClampToUint8(FromObject<convert_type>(obj));
380 StoreSeqCst(static_cast<atomic_type*>(buffer) + index, value);
385 #define DO_UINT8_CLAMPED_OP(name, op) \
386 inline Object* Do##name##Uint8Clamped(Isolate* isolate, void* buffer, \
387 size_t index, Handle<Object> obj) { \
388 typedef int32_t convert_type; \
389 typedef uint8_t atomic_type; \
390 atomic_type* p = static_cast<atomic_type*>(buffer) + index; \
391 convert_type operand = FromObject<convert_type>(obj); \
392 atomic_type expected; \
393 atomic_type result; \
396 result = ClampToUint8(static_cast<convert_type>(expected) op operand); \
397 } while (CompareExchangeSeqCst(p, expected, result) != expected); \
398 return ToObject<uint8_t>(isolate, expected); \
401 DO_UINT8_CLAMPED_OP(Add, +)
402 DO_UINT8_CLAMPED_OP(Sub, -)
403 DO_UINT8_CLAMPED_OP(And, &)
404 DO_UINT8_CLAMPED_OP(Or, | )
405 DO_UINT8_CLAMPED_OP(Xor, ^)
407 #undef DO_UINT8_CLAMPED_OP
410 inline Object* DoExchangeUint8Clamped(Isolate* isolate, void* buffer,
411 size_t index, Handle<Object> obj) {
412 typedef int32_t convert_type;
413 typedef uint8_t atomic_type;
414 atomic_type* p = static_cast<atomic_type*>(buffer) + index;
415 atomic_type result = ClampToUint8(FromObject<convert_type>(obj));
416 atomic_type expected;
419 } while (CompareExchangeSeqCst(p, expected, result) != expected);
420 return ToObject<uint8_t>(isolate, expected);
424 } // anonymous namespace
// Duplicated from objects.h
// V has parameters (Type, type, TYPE, C type, element_size)
#define INTEGER_TYPED_ARRAYS(V)          \
  V(Uint8, uint8, UINT8, uint8_t, 1)     \
  V(Int8, int8, INT8, int8_t, 1)         \
  V(Uint16, uint16, UINT16, uint16_t, 2) \
  V(Int16, int16, INT16, int16_t, 2)     \
  V(Uint32, uint32, UINT32, uint32_t, 4) \
  V(Int32, int32, INT32, int32_t, 4)
437 RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
438 HandleScope scope(isolate);
439 DCHECK(args.length() == 4);
440 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
441 CONVERT_SIZE_ARG_CHECKED(index, 1);
442 CONVERT_NUMBER_ARG_HANDLE_CHECKED(oldobj, 2);
443 CONVERT_NUMBER_ARG_HANDLE_CHECKED(newobj, 3);
444 RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
445 RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
447 void* buffer = sta->GetBuffer()->backing_store();
449 switch (sta->type()) {
450 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
451 case kExternal##Type##Array: \
452 return DoCompareExchange<ctype>(isolate, buffer, index, oldobj, newobj);
454 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
455 #undef TYPED_ARRAY_CASE
457 case kExternalUint8ClampedArray:
458 return DoCompareExchangeUint8Clamped(isolate, buffer, index, oldobj,
466 return isolate->heap()->undefined_value();
470 RUNTIME_FUNCTION(Runtime_AtomicsLoad) {
471 HandleScope scope(isolate);
472 DCHECK(args.length() == 2);
473 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
474 CONVERT_SIZE_ARG_CHECKED(index, 1);
475 RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
476 RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
478 void* buffer = sta->GetBuffer()->backing_store();
480 switch (sta->type()) {
481 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
482 case kExternal##Type##Array: \
483 return DoLoad<ctype>(isolate, buffer, index);
485 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
486 #undef TYPED_ARRAY_CASE
488 case kExternalUint8ClampedArray:
489 return DoLoad<uint8_t>(isolate, buffer, index);
496 return isolate->heap()->undefined_value();
500 RUNTIME_FUNCTION(Runtime_AtomicsStore) {
501 HandleScope scope(isolate);
502 DCHECK(args.length() == 3);
503 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
504 CONVERT_SIZE_ARG_CHECKED(index, 1);
505 CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
506 RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
507 RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
509 void* buffer = sta->GetBuffer()->backing_store();
511 switch (sta->type()) {
512 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
513 case kExternal##Type##Array: \
514 return DoStore<ctype>(isolate, buffer, index, value);
516 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
517 #undef TYPED_ARRAY_CASE
519 case kExternalUint8ClampedArray:
520 return DoStoreUint8Clamped(isolate, buffer, index, value);
527 return isolate->heap()->undefined_value();
531 RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
532 HandleScope scope(isolate);
533 DCHECK(args.length() == 3);
534 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
535 CONVERT_SIZE_ARG_CHECKED(index, 1);
536 CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
537 RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
538 RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
540 void* buffer = sta->GetBuffer()->backing_store();
542 switch (sta->type()) {
543 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
544 case kExternal##Type##Array: \
545 return DoAdd<ctype>(isolate, buffer, index, value);
547 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
548 #undef TYPED_ARRAY_CASE
550 case kExternalUint8ClampedArray:
551 return DoAddUint8Clamped(isolate, buffer, index, value);
558 return isolate->heap()->undefined_value();
562 RUNTIME_FUNCTION(Runtime_AtomicsSub) {
563 HandleScope scope(isolate);
564 DCHECK(args.length() == 3);
565 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
566 CONVERT_SIZE_ARG_CHECKED(index, 1);
567 CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
568 RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
569 RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
571 void* buffer = sta->GetBuffer()->backing_store();
573 switch (sta->type()) {
574 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
575 case kExternal##Type##Array: \
576 return DoSub<ctype>(isolate, buffer, index, value);
578 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
579 #undef TYPED_ARRAY_CASE
581 case kExternalUint8ClampedArray:
582 return DoSubUint8Clamped(isolate, buffer, index, value);
589 return isolate->heap()->undefined_value();
593 RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
594 HandleScope scope(isolate);
595 DCHECK(args.length() == 3);
596 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
597 CONVERT_SIZE_ARG_CHECKED(index, 1);
598 CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
599 RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
600 RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
602 void* buffer = sta->GetBuffer()->backing_store();
604 switch (sta->type()) {
605 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
606 case kExternal##Type##Array: \
607 return DoAnd<ctype>(isolate, buffer, index, value);
609 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
610 #undef TYPED_ARRAY_CASE
612 case kExternalUint8ClampedArray:
613 return DoAndUint8Clamped(isolate, buffer, index, value);
620 return isolate->heap()->undefined_value();
624 RUNTIME_FUNCTION(Runtime_AtomicsOr) {
625 HandleScope scope(isolate);
626 DCHECK(args.length() == 3);
627 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
628 CONVERT_SIZE_ARG_CHECKED(index, 1);
629 CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
630 RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
631 RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
633 void* buffer = sta->GetBuffer()->backing_store();
635 switch (sta->type()) {
636 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
637 case kExternal##Type##Array: \
638 return DoOr<ctype>(isolate, buffer, index, value);
640 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
641 #undef TYPED_ARRAY_CASE
643 case kExternalUint8ClampedArray:
644 return DoOrUint8Clamped(isolate, buffer, index, value);
651 return isolate->heap()->undefined_value();
655 RUNTIME_FUNCTION(Runtime_AtomicsXor) {
656 HandleScope scope(isolate);
657 DCHECK(args.length() == 3);
658 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
659 CONVERT_SIZE_ARG_CHECKED(index, 1);
660 CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
661 RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
662 RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
664 void* buffer = sta->GetBuffer()->backing_store();
666 switch (sta->type()) {
667 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
668 case kExternal##Type##Array: \
669 return DoXor<ctype>(isolate, buffer, index, value);
671 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
672 #undef TYPED_ARRAY_CASE
674 case kExternalUint8ClampedArray:
675 return DoXorUint8Clamped(isolate, buffer, index, value);
682 return isolate->heap()->undefined_value();
686 RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
687 HandleScope scope(isolate);
688 DCHECK(args.length() == 3);
689 CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
690 CONVERT_SIZE_ARG_CHECKED(index, 1);
691 CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
692 RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
693 RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
695 void* buffer = sta->GetBuffer()->backing_store();
697 switch (sta->type()) {
698 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
699 case kExternal##Type##Array: \
700 return DoExchange<ctype>(isolate, buffer, index, value);
702 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
703 #undef TYPED_ARRAY_CASE
705 case kExternalUint8ClampedArray:
706 return DoExchangeUint8Clamped(isolate, buffer, index, value);
713 return isolate->heap()->undefined_value();
717 RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) {
718 HandleScope scope(isolate);
719 DCHECK(args.length() == 1);
720 CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0);
721 uint32_t usize = NumberToUint32(*size);
722 return isolate->heap()->ToBoolean(AtomicIsLockFree(usize));
725 } // namespace v8::internal