OLD | NEW |
1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/base/macros.h" | 5 #include "src/base/macros.h" |
6 #include "src/base/platform/mutex.h" | 6 #include "src/base/platform/mutex.h" |
7 #include "src/base/platform/time.h" | 7 #include "src/base/platform/time.h" |
8 #include "src/builtins/builtins-utils.h" | 8 #include "src/builtins/builtins-utils.h" |
9 #include "src/builtins/builtins.h" | 9 #include "src/builtins/builtins.h" |
10 #include "src/code-factory.h" | 10 #include "src/code-factory.h" |
(...skipping 323 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
334 | 334 |
335 a.Bind(&u32); | 335 a.Bind(&u32); |
336 a.Return(a.ChangeUint32ToTagged( | 336 a.Return(a.ChangeUint32ToTagged( |
337 a.AtomicExchange(MachineType::Uint32(), backing_store, | 337 a.AtomicExchange(MachineType::Uint32(), backing_store, |
338 a.WordShl(index_word, 2), value_word32))); | 338 a.WordShl(index_word, 2), value_word32))); |
339 | 339 |
340 // This shouldn't happen, we've already validated the type. | 340 // This shouldn't happen, we've already validated the type. |
341 a.Bind(&other); | 341 a.Bind(&other); |
342 a.Return(a.SmiConstant(0)); | 342 a.Return(a.SmiConstant(0)); |
343 } | 343 } |
| 344 |
| 345 void Builtins::Generate_AtomicsCompareExchange( |
| 346 compiler::CodeAssemblerState* state) { |
| 347 using compiler::Node; |
| 348 CodeStubAssembler a(state); |
| 349 Node* array = a.Parameter(1); |
| 350 Node* index = a.Parameter(2); |
| 351 Node* old_value = a.Parameter(3); |
| 352 Node* new_value = a.Parameter(4); |
| 353 Node* context = a.Parameter(5 + 2); |
| 354 |
| 355 Node* instance_type; |
| 356 Node* backing_store; |
| 357 ValidateSharedTypedArray(&a, array, context, &instance_type, &backing_store); |
| 358 |
| 359 Node* index_word32 = ConvertTaggedAtomicIndexToWord32(&a, index, context); |
| 360 Node* array_length_word32 = a.TruncateTaggedToWord32( |
| 361 context, a.LoadObjectField(array, JSTypedArray::kLengthOffset)); |
| 362 ValidateAtomicIndex(&a, index_word32, array_length_word32, context); |
| 363 Node* index_word = a.ChangeUint32ToWord(index_word32); |
| 364 |
| 365 Node* old_value_integer = a.ToInteger(context, old_value); |
| 366 Node* old_value_word32 = a.TruncateTaggedToWord32(context, old_value_integer); |
| 367 |
| 368 Node* new_value_integer = a.ToInteger(context, new_value); |
| 369 Node* new_value_word32 = a.TruncateTaggedToWord32(context, new_value_integer); |
| 370 |
| 371 CodeStubAssembler::Label i8(&a), u8(&a), i16(&a), u16(&a), i32(&a), u32(&a), |
| 372 other(&a); |
| 373 int32_t case_values[] = { |
| 374 FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE, |
| 375 FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE, |
| 376 }; |
| 377 CodeStubAssembler::Label* case_labels[] = { |
| 378 &i8, &u8, &i16, &u16, &i32, &u32, |
| 379 }; |
| 380 a.Switch(instance_type, &other, case_values, case_labels, |
| 381 arraysize(case_labels)); |
| 382 |
| 383 a.Bind(&i8); |
| 384 a.Return(a.SmiFromWord32( |
| 385 a.AtomicCompareExchange(MachineType::Int8(), backing_store, index_word, |
| 386 old_value_word32, new_value_word32))); |
| 387 |
| 388 a.Bind(&u8); |
| 389 a.Return(a.SmiFromWord32( |
| 390 a.AtomicCompareExchange(MachineType::Uint8(), backing_store, index_word, |
| 391 old_value_word32, new_value_word32))); |
| 392 |
| 393 a.Bind(&i16); |
| 394 a.Return(a.SmiFromWord32(a.AtomicCompareExchange( |
| 395 MachineType::Int16(), backing_store, a.WordShl(index_word, 1), |
| 396 old_value_word32, new_value_word32))); |
| 397 |
| 398 a.Bind(&u16); |
| 399 a.Return(a.SmiFromWord32(a.AtomicCompareExchange( |
| 400 MachineType::Uint16(), backing_store, a.WordShl(index_word, 1), |
| 401 old_value_word32, new_value_word32))); |
| 402 |
| 403 a.Bind(&i32); |
| 404 a.Return(a.ChangeInt32ToTagged(a.AtomicCompareExchange( |
| 405 MachineType::Int32(), backing_store, a.WordShl(index_word, 2), |
| 406 old_value_word32, new_value_word32))); |
| 407 |
| 408 a.Bind(&u32); |
| 409 a.Return(a.ChangeUint32ToTagged(a.AtomicCompareExchange( |
| 410 MachineType::Uint32(), backing_store, a.WordShl(index_word, 2), |
| 411 old_value_word32, new_value_word32))); |
| 412 |
| 413 // This shouldn't happen, we've already validated the type. |
| 414 a.Bind(&other); |
| 415 a.Return(a.SmiConstant(0)); |
| 416 } |
344 #endif | 417 #endif |
345 | 418 |
346 inline bool AtomicIsLockFree(uint32_t size) { | 419 inline bool AtomicIsLockFree(uint32_t size) { |
347 return size == 1 || size == 2 || size == 4; | 420 return size == 1 || size == 2 || size == 4; |
348 } | 421 } |
349 | 422 |
350 // ES #sec-atomics.islockfree | 423 // ES #sec-atomics.islockfree |
351 BUILTIN(AtomicsIsLockFree) { | 424 BUILTIN(AtomicsIsLockFree) { |
352 HandleScope scope(isolate); | 425 HandleScope scope(isolate); |
353 Handle<Object> size = args.atOrUndefined(isolate, 1); | 426 Handle<Object> size = args.atOrUndefined(isolate, 1); |
(...skipping 135 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
489 | 562 |
490 return FutexEmulation::Wait(isolate, array_buffer, addr, value_int32, | 563 return FutexEmulation::Wait(isolate, array_buffer, addr, value_int32, |
491 timeout_number); | 564 timeout_number); |
492 } | 565 } |
493 | 566 |
494 namespace { | 567 namespace { |
495 | 568 |
496 #if V8_CC_GNU | 569 #if V8_CC_GNU |
497 | 570 |
498 template <typename T> | 571 template <typename T> |
499 inline T CompareExchangeSeqCst(T* p, T oldval, T newval) { | |
500 (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST, | |
501 __ATOMIC_SEQ_CST); | |
502 return oldval; | |
503 } | |
504 | |
505 template <typename T> | |
506 inline T AddSeqCst(T* p, T value) { | 572 inline T AddSeqCst(T* p, T value) { |
507 return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST); | 573 return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST); |
508 } | 574 } |
509 | 575 |
510 template <typename T> | 576 template <typename T> |
511 inline T SubSeqCst(T* p, T value) { | 577 inline T SubSeqCst(T* p, T value) { |
512 return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST); | 578 return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST); |
513 } | 579 } |
514 | 580 |
515 template <typename T> | 581 template <typename T> |
516 inline T AndSeqCst(T* p, T value) { | 582 inline T AndSeqCst(T* p, T value) { |
517 return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST); | 583 return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST); |
518 } | 584 } |
519 | 585 |
520 template <typename T> | 586 template <typename T> |
521 inline T OrSeqCst(T* p, T value) { | 587 inline T OrSeqCst(T* p, T value) { |
522 return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST); | 588 return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST); |
523 } | 589 } |
524 | 590 |
525 template <typename T> | 591 template <typename T> |
526 inline T XorSeqCst(T* p, T value) { | 592 inline T XorSeqCst(T* p, T value) { |
527 return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST); | 593 return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST); |
528 } | 594 } |
529 | 595 |
530 | 596 |
531 #elif V8_CC_MSVC | 597 #elif V8_CC_MSVC |
532 | 598 |
533 #define InterlockedCompareExchange32 _InterlockedCompareExchange | |
534 #define InterlockedExchangeAdd32 _InterlockedExchangeAdd | 599 #define InterlockedExchangeAdd32 _InterlockedExchangeAdd |
535 #define InterlockedAnd32 _InterlockedAnd | 600 #define InterlockedAnd32 _InterlockedAnd |
536 #define InterlockedOr32 _InterlockedOr | 601 #define InterlockedOr32 _InterlockedOr |
537 #define InterlockedXor32 _InterlockedXor | 602 #define InterlockedXor32 _InterlockedXor |
538 #define InterlockedExchangeAdd16 _InterlockedExchangeAdd16 | 603 #define InterlockedExchangeAdd16 _InterlockedExchangeAdd16 |
539 #define InterlockedCompareExchange8 _InterlockedCompareExchange8 | |
540 #define InterlockedExchangeAdd8 _InterlockedExchangeAdd8 | 604 #define InterlockedExchangeAdd8 _InterlockedExchangeAdd8 |
541 | 605 |
542 #define ATOMIC_OPS(type, suffix, vctype) \ | 606 #define ATOMIC_OPS(type, suffix, vctype) \ |
543 inline type AddSeqCst(type* p, type value) { \ | 607 inline type AddSeqCst(type* p, type value) { \ |
544 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ | 608 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ |
545 bit_cast<vctype>(value)); \ | 609 bit_cast<vctype>(value)); \ |
546 } \ | 610 } \ |
547 inline type SubSeqCst(type* p, type value) { \ | 611 inline type SubSeqCst(type* p, type value) { \ |
548 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ | 612 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ |
549 -bit_cast<vctype>(value)); \ | 613 -bit_cast<vctype>(value)); \ |
550 } \ | 614 } \ |
551 inline type AndSeqCst(type* p, type value) { \ | 615 inline type AndSeqCst(type* p, type value) { \ |
552 return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \ | 616 return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \ |
553 bit_cast<vctype>(value)); \ | 617 bit_cast<vctype>(value)); \ |
554 } \ | 618 } \ |
555 inline type OrSeqCst(type* p, type value) { \ | 619 inline type OrSeqCst(type* p, type value) { \ |
556 return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \ | 620 return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \ |
557 bit_cast<vctype>(value)); \ | 621 bit_cast<vctype>(value)); \ |
558 } \ | 622 } \ |
559 inline type XorSeqCst(type* p, type value) { \ | 623 inline type XorSeqCst(type* p, type value) { \ |
560 return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \ | 624 return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \ |
561 bit_cast<vctype>(value)); \ | 625 bit_cast<vctype>(value)); \ |
562 } \ | |
563 \ | |
564 inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \ | |
565 return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \ | |
566 bit_cast<vctype>(newval), \ | |
567 bit_cast<vctype>(oldval)); \ | |
568 } | 626 } |
569 | 627 |
570 ATOMIC_OPS(int8_t, 8, char) | 628 ATOMIC_OPS(int8_t, 8, char) |
571 ATOMIC_OPS(uint8_t, 8, char) | 629 ATOMIC_OPS(uint8_t, 8, char) |
572 ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */ | 630 ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */ |
573 ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */ | 631 ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */ |
574 ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */ | 632 ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */ |
575 ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */ | 633 ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */ |
576 | 634 |
577 #undef ATOMIC_OPS_INTEGER | 635 #undef ATOMIC_OPS_INTEGER |
578 #undef ATOMIC_OPS | 636 #undef ATOMIC_OPS |
579 | 637 |
580 #undef InterlockedCompareExchange32 | |
581 #undef InterlockedExchangeAdd32 | 638 #undef InterlockedExchangeAdd32 |
582 #undef InterlockedAnd32 | 639 #undef InterlockedAnd32 |
583 #undef InterlockedOr32 | 640 #undef InterlockedOr32 |
584 #undef InterlockedXor32 | 641 #undef InterlockedXor32 |
585 #undef InterlockedExchangeAdd16 | 642 #undef InterlockedExchangeAdd16 |
586 #undef InterlockedCompareExchange8 | |
587 #undef InterlockedExchangeAdd8 | 643 #undef InterlockedExchangeAdd8 |
588 | 644 |
589 #else | 645 #else |
590 | 646 |
591 #error Unsupported platform! | 647 #error Unsupported platform! |
592 | 648 |
593 #endif | 649 #endif |
594 | 650 |
595 template <typename T> | 651 template <typename T> |
596 T FromObject(Handle<Object> number); | 652 T FromObject(Handle<Object> number); |
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
637 | 693 |
638 inline Object* ToObject(Isolate* isolate, int32_t t) { | 694 inline Object* ToObject(Isolate* isolate, int32_t t) { |
639 return *isolate->factory()->NewNumber(t); | 695 return *isolate->factory()->NewNumber(t); |
640 } | 696 } |
641 | 697 |
642 inline Object* ToObject(Isolate* isolate, uint32_t t) { | 698 inline Object* ToObject(Isolate* isolate, uint32_t t) { |
643 return *isolate->factory()->NewNumber(t); | 699 return *isolate->factory()->NewNumber(t); |
644 } | 700 } |
645 | 701 |
646 template <typename T> | 702 template <typename T> |
647 inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index, | |
648 Handle<Object> oldobj, Handle<Object> newobj) { | |
649 T oldval = FromObject<T>(oldobj); | |
650 T newval = FromObject<T>(newobj); | |
651 T result = | |
652 CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval); | |
653 return ToObject(isolate, result); | |
654 } | |
655 | |
656 template <typename T> | |
657 inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index, | 703 inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index, |
658 Handle<Object> obj) { | 704 Handle<Object> obj) { |
659 T value = FromObject<T>(obj); | 705 T value = FromObject<T>(obj); |
660 T result = AddSeqCst(static_cast<T*>(buffer) + index, value); | 706 T result = AddSeqCst(static_cast<T*>(buffer) + index, value); |
661 return ToObject(isolate, result); | 707 return ToObject(isolate, result); |
662 } | 708 } |
663 | 709 |
664 template <typename T> | 710 template <typename T> |
665 inline Object* DoSub(Isolate* isolate, void* buffer, size_t index, | 711 inline Object* DoSub(Isolate* isolate, void* buffer, size_t index, |
666 Handle<Object> obj) { | 712 Handle<Object> obj) { |
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
699 // Duplicated from objects.h | 745 // Duplicated from objects.h |
700 // V has parameters (Type, type, TYPE, C type, element_size) | 746 // V has parameters (Type, type, TYPE, C type, element_size) |
701 #define INTEGER_TYPED_ARRAYS(V) \ | 747 #define INTEGER_TYPED_ARRAYS(V) \ |
702 V(Uint8, uint8, UINT8, uint8_t, 1) \ | 748 V(Uint8, uint8, UINT8, uint8_t, 1) \ |
703 V(Int8, int8, INT8, int8_t, 1) \ | 749 V(Int8, int8, INT8, int8_t, 1) \ |
704 V(Uint16, uint16, UINT16, uint16_t, 2) \ | 750 V(Uint16, uint16, UINT16, uint16_t, 2) \ |
705 V(Int16, int16, INT16, int16_t, 2) \ | 751 V(Int16, int16, INT16, int16_t, 2) \ |
706 V(Uint32, uint32, UINT32, uint32_t, 4) \ | 752 V(Uint32, uint32, UINT32, uint32_t, 4) \ |
707 V(Int32, int32, INT32, int32_t, 4) | 753 V(Int32, int32, INT32, int32_t, 4) |
708 | 754 |
709 // ES #sec-atomics.wait | |
710 // Atomics.compareExchange( typedArray, index, expectedValue, replacementValue ) | |
711 BUILTIN(AtomicsCompareExchange) { | |
712 HandleScope scope(isolate); | |
713 Handle<Object> array = args.atOrUndefined(isolate, 1); | |
714 Handle<Object> index = args.atOrUndefined(isolate, 2); | |
715 Handle<Object> expected_value = args.atOrUndefined(isolate, 3); | |
716 Handle<Object> replacement_value = args.atOrUndefined(isolate, 4); | |
717 | |
718 Handle<JSTypedArray> sta; | |
719 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( | |
720 isolate, sta, ValidateSharedIntegerTypedArray(isolate, array)); | |
721 | |
722 Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index); | |
723 if (maybe_index.IsNothing()) return isolate->heap()->exception(); | |
724 size_t i = maybe_index.FromJust(); | |
725 | |
726 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( | |
727 isolate, expected_value, Object::ToInteger(isolate, expected_value)); | |
728 | |
729 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( | |
730 isolate, replacement_value, | |
731 Object::ToInteger(isolate, replacement_value)); | |
732 | |
733 uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + | |
734 NumberToSize(sta->byte_offset()); | |
735 | |
736 switch (sta->type()) { | |
737 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | |
738 case kExternal##Type##Array: \ | |
739 return DoCompareExchange<ctype>(isolate, source, i, expected_value, \ | |
740 replacement_value); | |
741 | |
742 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | |
743 #undef TYPED_ARRAY_CASE | |
744 | |
745 default: | |
746 break; | |
747 } | |
748 | |
749 UNREACHABLE(); | |
750 return isolate->heap()->undefined_value(); | |
751 } | |
752 | |
753 // ES #sec-atomics.add | 755 // ES #sec-atomics.add |
754 // Atomics.add( typedArray, index, value ) | 756 // Atomics.add( typedArray, index, value ) |
755 BUILTIN(AtomicsAdd) { | 757 BUILTIN(AtomicsAdd) { |
756 HandleScope scope(isolate); | 758 HandleScope scope(isolate); |
757 Handle<Object> array = args.atOrUndefined(isolate, 1); | 759 Handle<Object> array = args.atOrUndefined(isolate, 1); |
758 Handle<Object> index = args.atOrUndefined(isolate, 2); | 760 Handle<Object> index = args.atOrUndefined(isolate, 2); |
759 Handle<Object> value = args.atOrUndefined(isolate, 3); | 761 Handle<Object> value = args.atOrUndefined(isolate, 3); |
760 | 762 |
761 Handle<JSTypedArray> sta; | 763 Handle<JSTypedArray> sta; |
762 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( | 764 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
(...skipping 221 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
984 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | 986 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
985 #undef TYPED_ARRAY_CASE | 987 #undef TYPED_ARRAY_CASE |
986 | 988 |
987 default: | 989 default: |
988 break; | 990 break; |
989 } | 991 } |
990 | 992 |
991 UNREACHABLE(); | 993 UNREACHABLE(); |
992 return isolate->heap()->undefined_value(); | 994 return isolate->heap()->undefined_value(); |
993 } | 995 } |
| 996 |
| 997 template <typename T> |
| 998 inline T CompareExchangeSeqCst(T* p, T oldval, T newval) { |
| 999 (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST, |
| 1000 __ATOMIC_SEQ_CST); |
| 1001 return oldval; |
| 1002 } |
| 1003 |
| 1004 template <typename T> |
| 1005 inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index, |
| 1006 Handle<Object> oldobj, Handle<Object> newobj) { |
| 1007 T oldval = FromObject<T>(oldobj); |
| 1008 T newval = FromObject<T>(newobj); |
| 1009 T result = |
| 1010 CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval); |
| 1011 return ToObject(isolate, result); |
| 1012 } |
| 1013 |
| 1014 // ES #sec-atomics.compareexchange |
| 1015 // Atomics.compareExchange( typedArray, index, expectedValue, replacementValue ) |
| 1016 BUILTIN(AtomicsCompareExchange) { |
| 1017 HandleScope scope(isolate); |
| 1018 Handle<Object> array = args.atOrUndefined(isolate, 1); |
| 1019 Handle<Object> index = args.atOrUndefined(isolate, 2); |
| 1020 Handle<Object> expected_value = args.atOrUndefined(isolate, 3); |
| 1021 Handle<Object> replacement_value = args.atOrUndefined(isolate, 4); |
| 1022 |
| 1023 Handle<JSTypedArray> sta; |
| 1024 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
| 1025 isolate, sta, ValidateSharedIntegerTypedArray(isolate, array)); |
| 1026 |
| 1027 Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index); |
| 1028 if (maybe_index.IsNothing()) return isolate->heap()->exception(); |
| 1029 size_t i = maybe_index.FromJust(); |
| 1030 |
| 1031 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
| 1032 isolate, expected_value, Object::ToInteger(isolate, expected_value)); |
| 1033 |
| 1034 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
| 1035 isolate, replacement_value, |
| 1036 Object::ToInteger(isolate, replacement_value)); |
| 1037 |
| 1038 uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + |
| 1039 NumberToSize(sta->byte_offset()); |
| 1040 |
| 1041 switch (sta->type()) { |
| 1042 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
| 1043 case kExternal##Type##Array: \ |
| 1044 return DoCompareExchange<ctype>(isolate, source, i, expected_value, \ |
| 1045 replacement_value); |
| 1046 |
| 1047 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| 1048 #undef TYPED_ARRAY_CASE |
| 1049 |
| 1050 default: |
| 1051 break; |
| 1052 } |
| 1053 |
| 1054 UNREACHABLE(); |
| 1055 return isolate->heap()->undefined_value(); |
| 1056 } |
994 #endif | 1057 #endif |
995 | 1058 |
996 } // namespace internal | 1059 } // namespace internal |
997 } // namespace v8 | 1060 } // namespace v8 |
OLD | NEW |