| OLD | NEW |
| 1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/base/macros.h" | |
| 6 #include "src/base/platform/mutex.h" | |
| 7 #include "src/base/platform/time.h" | |
| 8 #include "src/builtins/builtins-utils.h" | 5 #include "src/builtins/builtins-utils.h" |
| 9 #include "src/builtins/builtins.h" | 6 #include "src/builtins/builtins.h" |
| 10 #include "src/code-factory.h" | 7 #include "src/code-factory.h" |
| 11 #include "src/code-stub-assembler.h" | 8 #include "src/code-stub-assembler.h" |
| 12 #include "src/conversions-inl.h" | |
| 13 #include "src/counters.h" | |
| 14 #include "src/factory.h" | |
| 15 #include "src/futex-emulation.h" | |
| 16 #include "src/globals.h" | |
| 17 #include "src/objects-inl.h" | |
| 18 | 9 |
| 19 namespace v8 { | 10 namespace v8 { |
| 20 namespace internal { | 11 namespace internal { |
| 21 | 12 |
| 22 // ES7 sharedmem 6.3.4.1 get SharedArrayBuffer.prototype.byteLength | 13 // ES7 sharedmem 6.3.4.1 get SharedArrayBuffer.prototype.byteLength |
| 23 BUILTIN(SharedArrayBufferPrototypeGetByteLength) { | 14 BUILTIN(SharedArrayBufferPrototypeGetByteLength) { |
| 24 HandleScope scope(isolate); | 15 HandleScope scope(isolate); |
| 25 CHECK_RECEIVER(JSArrayBuffer, array_buffer, | 16 CHECK_RECEIVER(JSArrayBuffer, array_buffer, |
| 26 "get SharedArrayBuffer.prototype.byteLength"); | 17 "get SharedArrayBuffer.prototype.byteLength"); |
| 27 if (!array_buffer->is_shared()) { | 18 if (!array_buffer->is_shared()) { |
| (...skipping 240 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 268 a.Bind(&u32); | 259 a.Bind(&u32); |
| 269 a.AtomicStore(MachineRepresentation::kWord32, backing_store, | 260 a.AtomicStore(MachineRepresentation::kWord32, backing_store, |
| 270 a.WordShl(index_word, 2), value_word32); | 261 a.WordShl(index_word, 2), value_word32); |
| 271 a.Return(value_integer); | 262 a.Return(value_integer); |
| 272 | 263 |
| 273 // This shouldn't happen, we've already validated the type. | 264 // This shouldn't happen, we've already validated the type. |
| 274 a.Bind(&other); | 265 a.Bind(&other); |
| 275 a.Return(a.SmiConstant(0)); | 266 a.Return(a.SmiConstant(0)); |
| 276 } | 267 } |
| 277 | 268 |
// Returns true when a lock-free Atomics implementation is guaranteed for
// elements of |size| bytes.  |size| is taken as a double (it is the result
// of ToNumber on the JS argument) so that non-integral requests such as 4.5
// compare unequal and return false, instead of being silently truncated to
// a matching integral size by an implicit double->uint32_t conversion.
inline bool AtomicIsLockFree(double size) {
  return size == 1 || size == 2 || size == 4;
}
| 281 | |
| 282 // ES #sec-atomics.islockfree | |
| 283 BUILTIN(AtomicsIsLockFree) { | |
| 284 HandleScope scope(isolate); | |
| 285 Handle<Object> size = args.atOrUndefined(isolate, 1); | |
| 286 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, size, Object::ToNumber(size)); | |
| 287 return *isolate->factory()->ToBoolean(AtomicIsLockFree(size->Number())); | |
| 288 } | |
| 289 | |
| 290 // ES #sec-validatesharedintegertypedarray | |
| 291 MUST_USE_RESULT MaybeHandle<JSTypedArray> ValidateSharedIntegerTypedArray( | |
| 292 Isolate* isolate, Handle<Object> object, bool only_int32 = false) { | |
| 293 if (object->IsJSTypedArray()) { | |
| 294 Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(object); | |
| 295 if (typed_array->GetBuffer()->is_shared()) { | |
| 296 if (only_int32) { | |
| 297 if (typed_array->type() == kExternalInt32Array) return typed_array; | |
| 298 } else { | |
| 299 if (typed_array->type() != kExternalFloat32Array && | |
| 300 typed_array->type() != kExternalFloat64Array && | |
| 301 typed_array->type() != kExternalUint8ClampedArray) | |
| 302 return typed_array; | |
| 303 } | |
| 304 } | |
| 305 } | |
| 306 | |
| 307 THROW_NEW_ERROR( | |
| 308 isolate, | |
| 309 NewTypeError(only_int32 ? MessageTemplate::kNotInt32SharedTypedArray | |
| 310 : MessageTemplate::kNotIntegerSharedTypedArray, | |
| 311 object), | |
| 312 JSTypedArray); | |
| 313 } | |
| 314 | |
// ES #sec-validateatomicaccess
// ValidateAtomicAccess( typedArray, requestIndex )
// Converts |request_index| to an element index into |typed_array|.  Throws
// a RangeError and returns Nothing if the index is not an integral number
// or is out of bounds.
MUST_USE_RESULT Maybe<size_t> ValidateAtomicAccess(
    Isolate* isolate, Handle<JSTypedArray> typed_array,
    Handle<Object> request_index) {
  // TODO(v8:5961): Use ToIndex for indexes
  ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, request_index,
                                   Object::ToNumber(request_index),
                                   Nothing<size_t>());
  Handle<Object> offset;
  ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, offset,
                                   Object::ToInteger(isolate, request_index),
                                   Nothing<size_t>());
  // If ToInteger changed the value, the request was not an integral number
  // (e.g. it had a fractional part); reject it.
  if (!request_index->SameValue(*offset)) {
    isolate->Throw(*isolate->factory()->NewRangeError(
        MessageTemplate::kInvalidAtomicAccessIndex));
    return Nothing<size_t>();
  }
  size_t access_index;
  uint32_t length = typed_array->length_value();
  // Bounds check: the index must convert to size_t and be < length.
  if (!TryNumberToSize(*request_index, &access_index) ||
      access_index >= length) {
    isolate->Throw(*isolate->factory()->NewRangeError(
        MessageTemplate::kInvalidAtomicAccessIndex));
    return Nothing<size_t>();
  }
  return Just<size_t>(access_index);
}
| 343 | |
| 344 // ES #sec-atomics.wake | |
| 345 // Atomics.wake( typedArray, index, count ) | |
| 346 BUILTIN(AtomicsWake) { | |
| 347 HandleScope scope(isolate); | |
| 348 Handle<Object> array = args.atOrUndefined(isolate, 1); | |
| 349 Handle<Object> index = args.atOrUndefined(isolate, 2); | |
| 350 Handle<Object> count = args.atOrUndefined(isolate, 3); | |
| 351 | |
| 352 Handle<JSTypedArray> sta; | |
| 353 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( | |
| 354 isolate, sta, ValidateSharedIntegerTypedArray(isolate, array, true)); | |
| 355 | |
| 356 Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index); | |
| 357 if (maybe_index.IsNothing()) return isolate->heap()->exception(); | |
| 358 size_t i = maybe_index.FromJust(); | |
| 359 | |
| 360 uint32_t c; | |
| 361 if (count->IsUndefined(isolate)) { | |
| 362 c = kMaxUInt32; | |
| 363 } else { | |
| 364 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, count, | |
| 365 Object::ToInteger(isolate, count)); | |
| 366 double count_double = count->Number(); | |
| 367 if (count_double < 0) | |
| 368 count_double = 0; | |
| 369 else if (count_double > kMaxUInt32) | |
| 370 count_double = kMaxUInt32; | |
| 371 c = static_cast<uint32_t>(count_double); | |
| 372 } | |
| 373 | |
| 374 Handle<JSArrayBuffer> array_buffer = sta->GetBuffer(); | |
| 375 size_t addr = (i << 2) + NumberToSize(sta->byte_offset()); | |
| 376 | |
| 377 return FutexEmulation::Wake(isolate, array_buffer, addr, c); | |
| 378 } | |
| 379 | |
// ES #sec-atomics.wait
// Atomics.wait( typedArray, index, value, timeout )
// Blocks the current agent until woken, until the element's value differs
// from |value|, or until the timeout (in ms) expires.
BUILTIN(AtomicsWait) {
  HandleScope scope(isolate);
  Handle<Object> array = args.atOrUndefined(isolate, 1);
  Handle<Object> index = args.atOrUndefined(isolate, 2);
  Handle<Object> value = args.atOrUndefined(isolate, 3);
  Handle<Object> timeout = args.atOrUndefined(isolate, 4);

  // Only shared Int32Arrays may be used with wait.
  Handle<JSTypedArray> sta;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
      isolate, sta, ValidateSharedIntegerTypedArray(isolate, array, true));

  Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index);
  if (maybe_index.IsNothing()) return isolate->heap()->exception();
  size_t i = maybe_index.FromJust();

  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
                                     Object::ToInt32(isolate, value));
  int32_t value_int32 = NumberToInt32(*value);

  // A missing or NaN timeout waits forever; negative timeouts are clamped
  // to zero.
  double timeout_number;
  if (timeout->IsUndefined(isolate)) {
    timeout_number = isolate->heap()->infinity_value()->Number();
  } else {
    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, timeout,
                                       Object::ToNumber(timeout));
    timeout_number = timeout->Number();
    if (std::isnan(timeout_number))
      timeout_number = isolate->heap()->infinity_value()->Number();
    else if (timeout_number < 0)
      timeout_number = 0;
  }

  // The embedder can disallow blocking waits on this thread.  Note this
  // check deliberately happens after the argument conversions above, so
  // their side effects remain observable.
  if (!isolate->allow_atomics_wait()) {
    THROW_NEW_ERROR_RETURN_FAILURE(
        isolate, NewTypeError(MessageTemplate::kAtomicsWaitNotAllowed));
  }

  Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
  size_t addr = (i << 2) + NumberToSize(sta->byte_offset());

  return FutexEmulation::Wait(isolate, array_buffer, addr, value_int32,
                              timeout_number);
}
| 425 | |
| 426 namespace { | |
| 427 | |
#if V8_CC_GNU

// GCC/Clang: implement the sequentially-consistent atomic primitives with
// the __atomic builtins.  Each *SeqCst helper returns the value the element
// held before the operation.

template <typename T>
inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
  // On failure __atomic_compare_exchange_n writes the observed value back
  // into |oldval|, so returning |oldval| yields the previous value in both
  // the success and failure cases.
  (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST,
                                    __ATOMIC_SEQ_CST);
  return oldval;
}

template <typename T>
inline T AddSeqCst(T* p, T value) {
  return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T SubSeqCst(T* p, T value) {
  return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T AndSeqCst(T* p, T value) {
  return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T OrSeqCst(T* p, T value) {
  return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T XorSeqCst(T* p, T value) {
  return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T ExchangeSeqCst(T* p, T value) {
  return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
}

#elif V8_CC_MSVC

// MSVC: map the 8/16/32-bit cases onto the _Interlocked* compiler
// intrinsics via size-suffixed aliases consumed by ATOMIC_OPS below.

#define InterlockedCompareExchange32 _InterlockedCompareExchange
#define InterlockedExchange32 _InterlockedExchange
#define InterlockedExchangeAdd32 _InterlockedExchangeAdd
#define InterlockedAnd32 _InterlockedAnd
#define InterlockedOr32 _InterlockedOr
#define InterlockedXor32 _InterlockedXor
#define InterlockedExchangeAdd16 _InterlockedExchangeAdd16
#define InterlockedCompareExchange8 _InterlockedCompareExchange8
#define InterlockedExchangeAdd8 _InterlockedExchangeAdd8

// Stamps out the full set of *SeqCst helpers for one element type.  Note
// SubSeqCst is implemented as ExchangeAdd of the negated value, and that
// the Interlocked argument order for compare-exchange is (new, old).
#define ATOMIC_OPS(type, suffix, vctype)                                    \
  inline type AddSeqCst(type* p, type value) {                              \
    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),     \
                                          bit_cast<vctype>(value));         \
  }                                                                         \
  inline type SubSeqCst(type* p, type value) {                              \
    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),     \
                                          -bit_cast<vctype>(value));        \
  }                                                                         \
  inline type AndSeqCst(type* p, type value) {                              \
    return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p),             \
                                  bit_cast<vctype>(value));                 \
  }                                                                         \
  inline type OrSeqCst(type* p, type value) {                               \
    return InterlockedOr##suffix(reinterpret_cast<vctype*>(p),              \
                                 bit_cast<vctype>(value));                  \
  }                                                                         \
  inline type XorSeqCst(type* p, type value) {                              \
    return InterlockedXor##suffix(reinterpret_cast<vctype*>(p),             \
                                  bit_cast<vctype>(value));                 \
  }                                                                         \
  inline type ExchangeSeqCst(type* p, type value) {                         \
    return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p),        \
                                       bit_cast<vctype>(value));            \
  }                                                                         \
                                                                            \
  inline type CompareExchangeSeqCst(type* p, type oldval, type newval) {    \
    return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
                                              bit_cast<vctype>(newval),     \
                                              bit_cast<vctype>(oldval));    \
  }

ATOMIC_OPS(int8_t, 8, char)
ATOMIC_OPS(uint8_t, 8, char)
ATOMIC_OPS(int16_t, 16, short)  /* NOLINT(runtime/int) */
ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */
ATOMIC_OPS(int32_t, 32, long)   /* NOLINT(runtime/int) */
ATOMIC_OPS(uint32_t, 32, long)  /* NOLINT(runtime/int) */

// NOTE(review): ATOMIC_OPS_INTEGER appears never to be defined in this
// region; this #undef looks like a harmless leftover -- confirm and remove.
#undef ATOMIC_OPS_INTEGER
#undef ATOMIC_OPS

#undef InterlockedCompareExchange32
#undef InterlockedExchange32
#undef InterlockedExchangeAdd32
#undef InterlockedAnd32
#undef InterlockedOr32
#undef InterlockedXor32
#undef InterlockedExchangeAdd16
#undef InterlockedCompareExchange8
#undef InterlockedExchangeAdd8

#else

#error Unsupported platform!

#endif
| 536 | |
// FromObject<T>() unboxes a JS number as element type T, going through
// NumberToInt32/NumberToUint32 (standard JS 32-bit truncation) with an
// implicit narrowing to the element width for the 8/16-bit types.
template <typename T>
T FromObject(Handle<Object> number);

template <>
inline uint8_t FromObject<uint8_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int8_t FromObject<int8_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}

template <>
inline uint16_t FromObject<uint16_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int16_t FromObject<int16_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}

template <>
inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int32_t FromObject<int32_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}
| 569 | |
// ToObject() boxes an element value as a JS number.  The 8- and 16-bit
// values always fit in a Smi; full 32-bit values may not, so those
// overloads go through factory()->NewNumber (which can allocate).

inline Object* ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); }

inline Object* ToObject(Isolate* isolate, uint8_t t) { return Smi::FromInt(t); }

inline Object* ToObject(Isolate* isolate, int16_t t) { return Smi::FromInt(t); }

inline Object* ToObject(Isolate* isolate, uint16_t t) {
  return Smi::FromInt(t);
}

inline Object* ToObject(Isolate* isolate, int32_t t) {
  return *isolate->factory()->NewNumber(t);
}

inline Object* ToObject(Isolate* isolate, uint32_t t) {
  return *isolate->factory()->NewNumber(t);
}
| 587 | |
| 588 template <typename T> | |
| 589 inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index, | |
| 590 Handle<Object> oldobj, Handle<Object> newobj) { | |
| 591 T oldval = FromObject<T>(oldobj); | |
| 592 T newval = FromObject<T>(newobj); | |
| 593 T result = | |
| 594 CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval); | |
| 595 return ToObject(isolate, result); | |
| 596 } | |
| 597 | |
| 598 template <typename T> | |
| 599 inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index, | |
| 600 Handle<Object> obj) { | |
| 601 T value = FromObject<T>(obj); | |
| 602 T result = AddSeqCst(static_cast<T*>(buffer) + index, value); | |
| 603 return ToObject(isolate, result); | |
| 604 } | |
| 605 | |
| 606 template <typename T> | |
| 607 inline Object* DoSub(Isolate* isolate, void* buffer, size_t index, | |
| 608 Handle<Object> obj) { | |
| 609 T value = FromObject<T>(obj); | |
| 610 T result = SubSeqCst(static_cast<T*>(buffer) + index, value); | |
| 611 return ToObject(isolate, result); | |
| 612 } | |
| 613 | |
| 614 template <typename T> | |
| 615 inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index, | |
| 616 Handle<Object> obj) { | |
| 617 T value = FromObject<T>(obj); | |
| 618 T result = AndSeqCst(static_cast<T*>(buffer) + index, value); | |
| 619 return ToObject(isolate, result); | |
| 620 } | |
| 621 | |
| 622 template <typename T> | |
| 623 inline Object* DoOr(Isolate* isolate, void* buffer, size_t index, | |
| 624 Handle<Object> obj) { | |
| 625 T value = FromObject<T>(obj); | |
| 626 T result = OrSeqCst(static_cast<T*>(buffer) + index, value); | |
| 627 return ToObject(isolate, result); | |
| 628 } | |
| 629 | |
| 630 template <typename T> | |
| 631 inline Object* DoXor(Isolate* isolate, void* buffer, size_t index, | |
| 632 Handle<Object> obj) { | |
| 633 T value = FromObject<T>(obj); | |
| 634 T result = XorSeqCst(static_cast<T*>(buffer) + index, value); | |
| 635 return ToObject(isolate, result); | |
| 636 } | |
| 637 | |
| 638 template <typename T> | |
| 639 inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index, | |
| 640 Handle<Object> obj) { | |
| 641 T value = FromObject<T>(obj); | |
| 642 T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value); | |
| 643 return ToObject(isolate, result); | |
| 644 } | |
| 645 | |
| 646 } // anonymous namespace | |
| 647 | |
// Duplicated from objects.h
// V has parameters (Type, type, TYPE, C type, element_size)
// Enumerates exactly the integer element kinds Atomics operates on;
// Float32, Float64 and Uint8Clamped views are rejected earlier by
// ValidateSharedIntegerTypedArray.
#define INTEGER_TYPED_ARRAYS(V)          \
  V(Uint8, uint8, UINT8, uint8_t, 1)     \
  V(Int8, int8, INT8, int8_t, 1)         \
  V(Uint16, uint16, UINT16, uint16_t, 2) \
  V(Int16, int16, INT16, int16_t, 2)     \
  V(Uint32, uint32, UINT32, uint32_t, 4) \
  V(Int32, int32, INT32, int32_t, 4)
| 657 | |
// ES #sec-atomics.compareexchange
// Atomics.compareExchange( typedArray, index, expectedValue, replacementValue )
BUILTIN(AtomicsCompareExchange) {
  HandleScope scope(isolate);
  Handle<Object> array = args.atOrUndefined(isolate, 1);
  Handle<Object> index = args.atOrUndefined(isolate, 2);
  Handle<Object> expected_value = args.atOrUndefined(isolate, 3);
  Handle<Object> replacement_value = args.atOrUndefined(isolate, 4);

  // The receiver must be an integer typed array backed by a
  // SharedArrayBuffer.
  Handle<JSTypedArray> sta;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
      isolate, sta, ValidateSharedIntegerTypedArray(isolate, array));

  Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index);
  if (maybe_index.IsNothing()) return isolate->heap()->exception();
  size_t i = maybe_index.FromJust();

  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
      isolate, expected_value, Object::ToInteger(isolate, expected_value));

  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
      isolate, replacement_value,
      Object::ToInteger(isolate, replacement_value));

  // Start of the element storage; |i| is an element index, scaled to the
  // element size inside the Do* helper.
  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  // Dispatch on the element type; only integer typed arrays can reach here.
  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size)             \
  case kExternal##Type##Array:                                          \
    return DoCompareExchange<ctype>(isolate, source, i, expected_value, \
                                    replacement_value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      // Float/clamped arrays were already rejected by the validation above.
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
| 701 | |
// ES #sec-atomics.add
// Atomics.add( typedArray, index, value )
BUILTIN(AtomicsAdd) {
  HandleScope scope(isolate);
  Handle<Object> array = args.atOrUndefined(isolate, 1);
  Handle<Object> index = args.atOrUndefined(isolate, 2);
  Handle<Object> value = args.atOrUndefined(isolate, 3);

  // The receiver must be an integer typed array backed by a
  // SharedArrayBuffer.
  Handle<JSTypedArray> sta;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
      isolate, sta, ValidateSharedIntegerTypedArray(isolate, array));

  Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index);
  if (maybe_index.IsNothing()) return isolate->heap()->exception();
  size_t i = maybe_index.FromJust();

  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
                                     Object::ToInteger(isolate, value));

  // Start of the element storage; |i| is an element index, scaled inside
  // DoAdd.
  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoAdd<ctype>(isolate, source, i, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      // Float/clamped arrays were already rejected by the validation above.
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
| 739 | |
// ES #sec-atomics.sub
// Atomics.sub( typedArray, index, value )
BUILTIN(AtomicsSub) {
  HandleScope scope(isolate);
  Handle<Object> array = args.atOrUndefined(isolate, 1);
  Handle<Object> index = args.atOrUndefined(isolate, 2);
  Handle<Object> value = args.atOrUndefined(isolate, 3);

  // The receiver must be an integer typed array backed by a
  // SharedArrayBuffer.
  Handle<JSTypedArray> sta;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
      isolate, sta, ValidateSharedIntegerTypedArray(isolate, array));

  Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index);
  if (maybe_index.IsNothing()) return isolate->heap()->exception();
  size_t i = maybe_index.FromJust();

  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
                                     Object::ToInteger(isolate, value));

  // Start of the element storage; |i| is an element index, scaled inside
  // DoSub.
  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoSub<ctype>(isolate, source, i, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      // Float/clamped arrays were already rejected by the validation above.
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
| 777 | |
// ES #sec-atomics.and
// Atomics.and( typedArray, index, value )
BUILTIN(AtomicsAnd) {
  HandleScope scope(isolate);
  Handle<Object> array = args.atOrUndefined(isolate, 1);
  Handle<Object> index = args.atOrUndefined(isolate, 2);
  Handle<Object> value = args.atOrUndefined(isolate, 3);

  // The receiver must be an integer typed array backed by a
  // SharedArrayBuffer.
  Handle<JSTypedArray> sta;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
      isolate, sta, ValidateSharedIntegerTypedArray(isolate, array));

  Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index);
  if (maybe_index.IsNothing()) return isolate->heap()->exception();
  size_t i = maybe_index.FromJust();

  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
                                     Object::ToInteger(isolate, value));

  // Start of the element storage; |i| is an element index, scaled inside
  // DoAnd.
  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoAnd<ctype>(isolate, source, i, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      // Float/clamped arrays were already rejected by the validation above.
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
| 815 | |
// ES #sec-atomics.or
// Atomics.or( typedArray, index, value )
BUILTIN(AtomicsOr) {
  HandleScope scope(isolate);
  Handle<Object> array = args.atOrUndefined(isolate, 1);
  Handle<Object> index = args.atOrUndefined(isolate, 2);
  Handle<Object> value = args.atOrUndefined(isolate, 3);

  // The receiver must be an integer typed array backed by a
  // SharedArrayBuffer.
  Handle<JSTypedArray> sta;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
      isolate, sta, ValidateSharedIntegerTypedArray(isolate, array));

  Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index);
  if (maybe_index.IsNothing()) return isolate->heap()->exception();
  size_t i = maybe_index.FromJust();

  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
                                     Object::ToInteger(isolate, value));

  // Start of the element storage; |i| is an element index, scaled inside
  // DoOr.
  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoOr<ctype>(isolate, source, i, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      // Float/clamped arrays were already rejected by the validation above.
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
| 853 | |
// ES #sec-atomics.xor
// Atomics.xor( typedArray, index, value )
BUILTIN(AtomicsXor) {
  HandleScope scope(isolate);
  Handle<Object> array = args.atOrUndefined(isolate, 1);
  Handle<Object> index = args.atOrUndefined(isolate, 2);
  Handle<Object> value = args.atOrUndefined(isolate, 3);

  // The receiver must be an integer typed array backed by a
  // SharedArrayBuffer.
  Handle<JSTypedArray> sta;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
      isolate, sta, ValidateSharedIntegerTypedArray(isolate, array));

  Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index);
  if (maybe_index.IsNothing()) return isolate->heap()->exception();
  size_t i = maybe_index.FromJust();

  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
                                     Object::ToInteger(isolate, value));

  // Start of the element storage; |i| is an element index, scaled inside
  // DoXor.
  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoXor<ctype>(isolate, source, i, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      // Float/clamped arrays were already rejected by the validation above.
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
| 891 | |
// ES #sec-atomics.exchange
// Atomics.exchange( typedArray, index, value )
BUILTIN(AtomicsExchange) {
  HandleScope scope(isolate);
  Handle<Object> array = args.atOrUndefined(isolate, 1);
  Handle<Object> index = args.atOrUndefined(isolate, 2);
  Handle<Object> value = args.atOrUndefined(isolate, 3);

  // The receiver must be an integer typed array backed by a
  // SharedArrayBuffer.
  Handle<JSTypedArray> sta;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
      isolate, sta, ValidateSharedIntegerTypedArray(isolate, array));

  Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index);
  if (maybe_index.IsNothing()) return isolate->heap()->exception();
  size_t i = maybe_index.FromJust();

  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
                                     Object::ToInteger(isolate, value));

  // Start of the element storage; |i| is an element index, scaled inside
  // DoExchange.
  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoExchange<ctype>(isolate, source, i, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      // Float/clamped arrays were already rejected by the validation above.
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}
| 929 | |
| 930 } // namespace internal | 269 } // namespace internal |
| 931 } // namespace v8 | 270 } // namespace v8 |
| OLD | NEW |