OLD | NEW |
1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/base/macros.h" | 5 #include "src/base/macros.h" |
6 #include "src/base/platform/mutex.h" | 6 #include "src/base/platform/mutex.h" |
7 #include "src/base/platform/time.h" | 7 #include "src/base/platform/time.h" |
8 #include "src/builtins/builtins-utils.h" | 8 #include "src/builtins/builtins-utils.h" |
9 #include "src/builtins/builtins.h" | 9 #include "src/builtins/builtins.h" |
10 #include "src/code-factory.h" | 10 #include "src/code-factory.h" |
(...skipping 255 matching lines...)
266 a.Bind(&u32); | 266 a.Bind(&u32); |
267 a.AtomicStore(MachineRepresentation::kWord32, backing_store, | 267 a.AtomicStore(MachineRepresentation::kWord32, backing_store, |
268 a.WordShl(index_word, 2), value_word32); | 268 a.WordShl(index_word, 2), value_word32); |
269 a.Return(value_integer); | 269 a.Return(value_integer); |
270 | 270 |
271 // This shouldn't happen, we've already validated the type. | 271 // This shouldn't happen, we've already validated the type. |
272 a.Bind(&other); | 272 a.Bind(&other); |
273 a.Return(a.SmiConstant(0)); | 273 a.Return(a.SmiConstant(0)); |
274 } | 274 } |
275 | 275 |
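A note on the addressing in the store case above: the machine-level atomic ops take a byte offset into the backing store, so the element index is scaled by the element width before the access — WordShl by 0, 1, or 2 for 1-, 2-, and 4-byte elements. A minimal sketch of that arithmetic (hypothetical helper, not V8 API):

    #include <cstddef>

    // Byte offset of element `index`: shift by 0 for Int8/Uint8, 1 for
    // Int16/Uint16, and 2 for Int32/Uint32 -- i.e. index * element_size.
    inline size_t ElementByteOffset(size_t index, int log2_element_size) {
      return index << log2_element_size;
    }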
| 276 void Builtins::Generate_AtomicsExchange(compiler::CodeAssemblerState* state) { |
| 277 using compiler::Node; |
| 278 CodeStubAssembler a(state); |
| 279 Node* array = a.Parameter(1); |
| 280 Node* index = a.Parameter(2); |
| 281 Node* value = a.Parameter(3); |
| 282 Node* context = a.Parameter(4 + 2); |
| 283 |
| 284 Node* instance_type; |
| 285 Node* backing_store; |
| 286 ValidateSharedTypedArray(&a, array, context, &instance_type, &backing_store); |
| 287 |
| 288 Node* index_word32 = ConvertTaggedAtomicIndexToWord32(&a, index, context); |
| 289 Node* array_length_word32 = a.TruncateTaggedToWord32( |
| 290 context, a.LoadObjectField(array, JSTypedArray::kLengthOffset)); |
| 291 ValidateAtomicIndex(&a, index_word32, array_length_word32, context); |
| 292 Node* index_word = a.ChangeUint32ToWord(index_word32); |
| 293 |
| 294 Node* value_integer = a.ToInteger(context, value); |
| 295 Node* value_word32 = a.TruncateTaggedToWord32(context, value_integer); |
| 296 |
| 297 CodeStubAssembler::Label i8(&a), u8(&a), i16(&a), u16(&a), i32(&a), u32(&a), |
| 298 other(&a); |
| 299 int32_t case_values[] = { |
| 300 FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE, |
| 301 FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE, |
| 302 }; |
| 303 CodeStubAssembler::Label* case_labels[] = { |
| 304 &i8, &u8, &i16, &u16, &i32, &u32, |
| 305 }; |
| 306 a.Switch(instance_type, &other, case_values, case_labels, |
| 307 arraysize(case_labels)); |
| 308 |
| 309 a.Bind(&i8); |
| 310 a.Return(a.SmiFromWord32(a.AtomicExchange(MachineType::Int8(), backing_store, |
| 311 index_word, value_word32))); |
| 312 |
| 313 a.Bind(&u8); |
| 314 a.Return(a.SmiFromWord32(a.AtomicExchange(MachineType::Uint8(), backing_store, |
| 315 index_word, value_word32))); |
| 316 |
| 317 a.Bind(&i16); |
| 318 a.Return(a.SmiFromWord32(a.AtomicExchange(MachineType::Int16(), backing_store, |
| 319 a.WordShl(index_word, 1), |
| 320 value_word32))); |
| 321 |
| 322 a.Bind(&u16); |
| 323 a.Return(a.SmiFromWord32( |
| 324 a.AtomicExchange(MachineType::Uint16(), backing_store, |
| 325 a.WordShl(index_word, 1), value_word32))); |
| 326 |
| 327 a.Bind(&i32); |
| 328 a.Return(a.ChangeInt32ToTagged( |
| 329 a.AtomicExchange(MachineType::Int32(), backing_store, |
| 330 a.WordShl(index_word, 2), value_word32))); |
| 331 |
| 332 a.Bind(&u32); |
| 333 a.Return(a.ChangeUint32ToTagged( |
| 334 a.AtomicExchange(MachineType::Uint32(), backing_store, |
| 335 a.WordShl(index_word, 2), value_word32))); |
| 336 |
| 337 // This shouldn't happen, we've already validated the type. |
| 338 a.Bind(&other); |
| 339 a.Return(a.SmiConstant(0)); |
| 340 } |
| 341 |
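The per-type cases above all follow the same shape: truncate the integer-converted value to the element width, perform a seq-cst atomic exchange on the backing store, and widen the previous element back to a JS number (SmiFromWord32 for the narrow types; ChangeInt32ToTagged/ChangeUint32ToTagged for the 32-bit ones, since the full 32-bit range does not fit in a Smi on 32-bit targets). Modeled in plain C++ for the Int8 case (a sketch of the generated code's behavior, not V8 API):

    #include <atomic>
    #include <cstdint>

    // Store the low 8 bits of `value` and return the sign-extended previous
    // element -- what AtomicExchange(MachineType::Int8, ...) followed by
    // SmiFromWord32 computes for an Int8Array.
    inline int32_t ExchangeInt8(std::atomic<int8_t>* elem, int32_t value) {
      int8_t prev = elem->exchange(static_cast<int8_t>(value),
                                   std::memory_order_seq_cst);
      return static_cast<int32_t>(prev);  // sign-extend back to word32
    }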
276 inline bool AtomicIsLockFree(uint32_t size) { | 342 inline bool AtomicIsLockFree(uint32_t size) { |
277 return size == 1 || size == 2 || size == 4; | 343 return size == 1 || size == 2 || size == 4; |
278 } | 344 } |
279 | 345 |
280 // ES #sec-atomics.islockfree | 346 // ES #sec-atomics.islockfree |
281 BUILTIN(AtomicsIsLockFree) { | 347 BUILTIN(AtomicsIsLockFree) { |
282 HandleScope scope(isolate); | 348 HandleScope scope(isolate); |
283 Handle<Object> size = args.atOrUndefined(isolate, 1); | 349 Handle<Object> size = args.atOrUndefined(isolate, 1); |
284 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, size, Object::ToNumber(size)); | 350 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, size, Object::ToNumber(size)); |
285 return *isolate->factory()->ToBoolean(AtomicIsLockFree(size->Number())); | 351 return *isolate->factory()->ToBoolean(AtomicIsLockFree(size->Number())); |
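AtomicIsLockFree hard-codes the answer for 1-, 2-, and 4-byte accesses, matching the element widths of the integer TypedArrays handled above (and per ES #sec-atomics.islockfree, size 4 must report true). A rough C++17 analogue of the same property (illustrative only; V8's supported targets guarantee these sizes are lock-free):

    #include <atomic>
    #include <cstdint>

    // std::atomic mirrors AtomicIsLockFree for the three supported widths.
    static_assert(std::atomic<int8_t>::is_always_lock_free, "1-byte ops");
    static_assert(std::atomic<int16_t>::is_always_lock_free, "2-byte ops");
    static_assert(std::atomic<int32_t>::is_always_lock_free, "4-byte ops");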
(...skipping 161 matching lines...)
447 template <typename T> | 513 template <typename T> |
448 inline T OrSeqCst(T* p, T value) { | 514 inline T OrSeqCst(T* p, T value) { |
449 return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST); | 515 return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST); |
450 } | 516 } |
451 | 517 |
452 template <typename T> | 518 template <typename T> |
453 inline T XorSeqCst(T* p, T value) { | 519 inline T XorSeqCst(T* p, T value) { |
454 return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST); | 520 return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST); |
455 } | 521 } |
456 | 522 |
457 template <typename T> | |
458 inline T ExchangeSeqCst(T* p, T value) { | |
459 return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST); | |
460 } | |
461 | 523 |
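Each of these GCC/Clang __atomic_* wrappers returns the value the memory cell held before the update, which is exactly what the JS-visible Atomics operations hand back. A minimal usage sketch (hypothetical function; the intrinsic itself is a standard GCC builtin):

    #include <cstdint>

    int32_t SetFlagBit(int32_t* cell) {
      // __atomic_fetch_or applies the OR atomically and yields the
      // pre-update value of *cell, with sequential consistency.
      return __atomic_fetch_or(cell, 0x4, __ATOMIC_SEQ_CST);
    }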
462 #elif V8_CC_MSVC | 524 #elif V8_CC_MSVC |
463 | 525 |
464 #define InterlockedCompareExchange32 _InterlockedCompareExchange | 526 #define InterlockedCompareExchange32 _InterlockedCompareExchange |
465 #define InterlockedExchange32 _InterlockedExchange | |
466 #define InterlockedExchangeAdd32 _InterlockedExchangeAdd | 527 #define InterlockedExchangeAdd32 _InterlockedExchangeAdd |
467 #define InterlockedAnd32 _InterlockedAnd | 528 #define InterlockedAnd32 _InterlockedAnd |
468 #define InterlockedOr32 _InterlockedOr | 529 #define InterlockedOr32 _InterlockedOr |
469 #define InterlockedXor32 _InterlockedXor | 530 #define InterlockedXor32 _InterlockedXor |
470 #define InterlockedExchangeAdd16 _InterlockedExchangeAdd16 | 531 #define InterlockedExchangeAdd16 _InterlockedExchangeAdd16 |
471 #define InterlockedCompareExchange8 _InterlockedCompareExchange8 | 532 #define InterlockedCompareExchange8 _InterlockedCompareExchange8 |
472 #define InterlockedExchangeAdd8 _InterlockedExchangeAdd8 | 533 #define InterlockedExchangeAdd8 _InterlockedExchangeAdd8 |
473 | 534 |
474 #define ATOMIC_OPS(type, suffix, vctype) \ | 535 #define ATOMIC_OPS(type, suffix, vctype) \ |
475 inline type AddSeqCst(type* p, type value) { \ | 536 inline type AddSeqCst(type* p, type value) { \ |
476 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ | 537 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ |
477 bit_cast<vctype>(value)); \ | 538 bit_cast<vctype>(value)); \ |
478 } \ | 539 } \ |
479 inline type SubSeqCst(type* p, type value) { \ | 540 inline type SubSeqCst(type* p, type value) { \ |
480 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ | 541 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ |
481 -bit_cast<vctype>(value)); \ | 542 -bit_cast<vctype>(value)); \ |
482 } \ | 543 } \ |
483 inline type AndSeqCst(type* p, type value) { \ | 544 inline type AndSeqCst(type* p, type value) { \ |
484 return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \ | 545 return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \ |
485 bit_cast<vctype>(value)); \ | 546 bit_cast<vctype>(value)); \ |
486 } \ | 547 } \ |
487 inline type OrSeqCst(type* p, type value) { \ | 548 inline type OrSeqCst(type* p, type value) { \ |
488 return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \ | 549 return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \ |
489 bit_cast<vctype>(value)); \ | 550 bit_cast<vctype>(value)); \ |
490 } \ | 551 } \ |
491 inline type XorSeqCst(type* p, type value) { \ | 552 inline type XorSeqCst(type* p, type value) { \ |
492 return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \ | 553 return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \ |
493 bit_cast<vctype>(value)); \ | 554 bit_cast<vctype>(value)); \ |
494 } \ | 555 } \ |
495 inline type ExchangeSeqCst(type* p, type value) { \ | |
496 return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \ | |
497 bit_cast<vctype>(value)); \ | |
498 } \ | |
499 \ | 556 \ |
500 inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \ | 557 inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \ |
501 return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \ | 558 return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \ |
502 bit_cast<vctype>(newval), \ | 559 bit_cast<vctype>(newval), \ |
503 bit_cast<vctype>(oldval)); \ | 560 bit_cast<vctype>(oldval)); \ |
504 } | 561 } |
505 | 562 |
506 ATOMIC_OPS(int8_t, 8, char) | 563 ATOMIC_OPS(int8_t, 8, char) |
507 ATOMIC_OPS(uint8_t, 8, char) | 564 ATOMIC_OPS(uint8_t, 8, char) |
508 ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */ | 565 ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */ |
509 ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */ | 566 ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */ |
510 ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */ | 567 ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */ |
511 ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */ | 568 ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */ |
512 | 569 |
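Note how the macro builds SubSeqCst from the same primitive as AddSeqCst: MSVC has no interlocked subtract, so it adds the negated operand. Roughly what ATOMIC_OPS(int32_t, 32, long) expands SubSeqCst to (a sketch; the real expansion uses bit_cast rather than static_cast):

    #include <intrin.h>  // MSVC interlocked intrinsics
    #include <cstdint>

    inline int32_t SubSeqCst32(int32_t* p, int32_t value) {
      // _InterlockedExchangeAdd returns the previous value of *p.
      return _InterlockedExchangeAdd(reinterpret_cast<volatile long*>(p),
                                     -static_cast<long>(value));
    }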
513 #undef ATOMIC_OPS_INTEGER | 570 #undef ATOMIC_OPS_INTEGER |
514 #undef ATOMIC_OPS | 571 #undef ATOMIC_OPS |
515 | 572 |
516 #undef InterlockedCompareExchange32 | 573 #undef InterlockedCompareExchange32 |
517 #undef InterlockedExchange32 | |
518 #undef InterlockedExchangeAdd32 | 574 #undef InterlockedExchangeAdd32 |
519 #undef InterlockedAnd32 | 575 #undef InterlockedAnd32 |
520 #undef InterlockedOr32 | 576 #undef InterlockedOr32 |
521 #undef InterlockedXor32 | 577 #undef InterlockedXor32 |
522 #undef InterlockedExchangeAdd16 | 578 #undef InterlockedExchangeAdd16 |
523 #undef InterlockedCompareExchange8 | 579 #undef InterlockedCompareExchange8 |
524 #undef InterlockedExchangeAdd8 | 580 #undef InterlockedExchangeAdd8 |
525 | 581 |
526 #else | 582 #else |
527 | 583 |
(...skipping 95 matching lines...)
623 } | 679 } |
624 | 680 |
625 template <typename T> | 681 template <typename T> |
626 inline Object* DoXor(Isolate* isolate, void* buffer, size_t index, | 682 inline Object* DoXor(Isolate* isolate, void* buffer, size_t index, |
627 Handle<Object> obj) { | 683 Handle<Object> obj) { |
628 T value = FromObject<T>(obj); | 684 T value = FromObject<T>(obj); |
629 T result = XorSeqCst(static_cast<T*>(buffer) + index, value); | 685 T result = XorSeqCst(static_cast<T*>(buffer) + index, value); |
630 return ToObject(isolate, result); | 686 return ToObject(isolate, result); |
631 } | 687 } |
632 | 688 |
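The surviving Do* helpers (DoAdd, DoSub, DoAnd, DoOr, DoXor) all share this shape: unbox the JS number, run the seq-cst primitive on element `index` of the raw backing store, and box the previous value. Stripped of the Isolate/Handle plumbing, DoXor<uint16_t> boils down to something like (sketch, assuming the GCC path's intrinsics):

    #include <cstddef>
    #include <cstdint>

    uint16_t XorElement(void* backing_store, size_t index, uint16_t value) {
      uint16_t* p = static_cast<uint16_t*>(backing_store) + index;
      return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST);  // previous value
    }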
633 template <typename T> | |
634 inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index, | |
635 Handle<Object> obj) { | |
636 T value = FromObject<T>(obj); | |
637 T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value); | |
638 return ToObject(isolate, result); | |
639 } | |
640 | |
641 // Uint8Clamped functions | 689 // Uint8Clamped functions |
642 | 690 |
643 uint8_t ClampToUint8(int32_t value) { | 691 uint8_t ClampToUint8(int32_t value) { |
644 if (value < 0) return 0; | 692 if (value < 0) return 0; |
645 if (value > 255) return 255; | 693 if (value > 255) return 255; |
646 return value; | 694 return value; |
647 } | 695 } |
648 | 696 |
649 inline Object* DoCompareExchangeUint8Clamped(Isolate* isolate, void* buffer, | 697 inline Object* DoCompareExchangeUint8Clamped(Isolate* isolate, void* buffer, |
650 size_t index, | 698 size_t index, |
(...skipping 23 matching lines...)
674 } | 722 } |
675 | 723 |
676 DO_UINT8_CLAMPED_OP(Add, +) | 724 DO_UINT8_CLAMPED_OP(Add, +) |
677 DO_UINT8_CLAMPED_OP(Sub, -) | 725 DO_UINT8_CLAMPED_OP(Sub, -) |
678 DO_UINT8_CLAMPED_OP(And, &) | 726 DO_UINT8_CLAMPED_OP(And, &) |
679 DO_UINT8_CLAMPED_OP(Or, |) | 727 DO_UINT8_CLAMPED_OP(Or, |) |
680 DO_UINT8_CLAMPED_OP(Xor, ^) | 728 DO_UINT8_CLAMPED_OP(Xor, ^) |
681 | 729 |
682 #undef DO_UINT8_CLAMPED_OP | 730 #undef DO_UINT8_CLAMPED_OP |
683 | 731 |
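Uint8Clamped is handled with a compare-exchange loop rather than a single hardware RMW: for the arithmetic ops the clamped result depends on the old value, so the helpers generated by DO_UINT8_CLAMPED_OP re-read, recompute, and retry until the CAS succeeds. The same pattern in portable C++ (illustrative; std::atomic stands in for the file's CompareExchangeSeqCst):

    #include <atomic>
    #include <cstdint>

    uint8_t AddClampedSeqCst(std::atomic<uint8_t>* p, int32_t operand) {
      uint8_t expected = p->load(std::memory_order_seq_cst);
      uint8_t desired;
      do {
        int32_t sum = static_cast<int32_t>(expected) + operand;
        desired = sum < 0 ? 0 : sum > 255 ? 255 : static_cast<uint8_t>(sum);
        // On failure, compare_exchange_strong reloads `expected`, and the
        // clamped sum is recomputed against the fresh value.
      } while (!p->compare_exchange_strong(expected, desired,
                                           std::memory_order_seq_cst));
      return expected;  // pre-update value, which the Atomics ops return
    }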
684 inline Object* DoExchangeUint8Clamped(Isolate* isolate, void* buffer, | |
685 size_t index, Handle<Object> obj) { | |
686 typedef int32_t convert_type; | |
687 uint8_t* p = static_cast<uint8_t*>(buffer) + index; | |
688 uint8_t result = ClampToUint8(FromObject<convert_type>(obj)); | |
689 uint8_t expected; | |
690 do { | |
691 expected = *p; | |
692 } while (CompareExchangeSeqCst(p, expected, result) != expected); | |
693 return ToObject(isolate, expected); | |
694 } | |
695 | 732 |
696 } // anonymous namespace | 733 } // anonymous namespace |
697 | 734 |
698 // Duplicated from objects.h | 735 // Duplicated from objects.h |
699 // V has parameters (Type, type, TYPE, C type, element_size) | 736 // V has parameters (Type, type, TYPE, C type, element_size) |
700 #define INTEGER_TYPED_ARRAYS(V) \ | 737 #define INTEGER_TYPED_ARRAYS(V) \ |
701 V(Uint8, uint8, UINT8, uint8_t, 1) \ | 738 V(Uint8, uint8, UINT8, uint8_t, 1) \ |
702 V(Int8, int8, INT8, int8_t, 1) \ | 739 V(Int8, int8, INT8, int8_t, 1) \ |
703 V(Uint16, uint16, UINT16, uint16_t, 2) \ | 740 V(Uint16, uint16, UINT16, uint16_t, 2) \ |
704 V(Int16, int16, INT16, int16_t, 2) \ | 741 V(Int16, int16, INT16, int16_t, 2) \ |
(...skipping 245 matching lines...)
950 return DoXorUint8Clamped(isolate, source, i, value); | 987 return DoXorUint8Clamped(isolate, source, i, value); |
951 | 988 |
952 default: | 989 default: |
953 break; | 990 break; |
954 } | 991 } |
955 | 992 |
956 UNREACHABLE(); | 993 UNREACHABLE(); |
957 return isolate->heap()->undefined_value(); | 994 return isolate->heap()->undefined_value(); |
958 } | 995 } |
959 | 996 |
960 // ES #sec-atomics.exchange | |
961 // Atomics.exchange( typedArray, index, value ) | |
962 BUILTIN(AtomicsExchange) { | |
963 HandleScope scope(isolate); | |
964 Handle<Object> array = args.atOrUndefined(isolate, 1); | |
965 Handle<Object> index = args.atOrUndefined(isolate, 2); | |
966 Handle<Object> value = args.atOrUndefined(isolate, 3); | |
967 | |
968 Handle<JSTypedArray> sta; | |
969 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( | |
970 isolate, sta, ValidateSharedIntegerTypedArray(isolate, array)); | |
971 | |
972 Maybe<size_t> maybeIndex = ValidateAtomicAccess(isolate, sta, index); | |
973 if (maybeIndex.IsNothing()) return isolate->heap()->exception(); | |
974 size_t i = maybeIndex.FromJust(); | |
975 | |
976 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, | |
977 Object::ToInteger(isolate, value)); | |
978 | |
979 uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + | |
980 NumberToSize(sta->byte_offset()); | |
981 | |
982 switch (sta->type()) { | |
983 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | |
984 case kExternal##Type##Array: \ | |
985 return DoExchange<ctype>(isolate, source, i, value); | |
986 | |
987 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | |
988 #undef TYPED_ARRAY_CASE | |
989 | |
990 case kExternalUint8ClampedArray: | |
991 return DoExchangeUint8Clamped(isolate, source, i, value); | |
992 | |
993 default: | |
994 break; | |
995 } | |
996 | |
997 UNREACHABLE(); | |
998 return isolate->heap()->undefined_value(); | |
999 } | |
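For reference, the TYPED_ARRAY_CASE invocation in the switch deleted above expanded, via INTEGER_TYPED_ARRAYS, to one case per integer element type; for Int32 the generated case was simply:

    case kExternalInt32Array:
      return DoExchange<int32_t>(isolate, source, i, value);

With the dispatch now performed by the Switch in Generate_AtomicsExchange, this runtime path is no longer needed.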
1000 | 997 |
1001 } // namespace internal | 998 } // namespace internal |
1002 } // namespace v8 | 999 } // namespace v8 |