OLD | NEW |
---|---|
1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/base/macros.h" | |
6 #include "src/base/platform/mutex.h" | |
7 #include "src/base/platform/time.h" | |
5 #include "src/builtins/builtins-utils.h" | 8 #include "src/builtins/builtins-utils.h" |
6 #include "src/builtins/builtins.h" | 9 #include "src/builtins/builtins.h" |
7 #include "src/code-factory.h" | 10 #include "src/code-factory.h" |
8 #include "src/code-stub-assembler.h" | 11 #include "src/code-stub-assembler.h" |
12 #include "src/conversions-inl.h" | |
13 #include "src/factory.h" | |
14 #include "src/futex-emulation.h" | |
15 #include "src/globals.h" | |
9 | 16 |
10 namespace v8 { | 17 namespace v8 { |
11 namespace internal { | 18 namespace internal { |
12 | 19 |
13 // ES7 sharedmem 6.3.4.1 get SharedArrayBuffer.prototype.byteLength | 20 // ES7 sharedmem 6.3.4.1 get SharedArrayBuffer.prototype.byteLength |
14 BUILTIN(SharedArrayBufferPrototypeGetByteLength) { | 21 BUILTIN(SharedArrayBufferPrototypeGetByteLength) { |
15 HandleScope scope(isolate); | 22 HandleScope scope(isolate); |
16 CHECK_RECEIVER(JSArrayBuffer, array_buffer, | 23 CHECK_RECEIVER(JSArrayBuffer, array_buffer, |
17 "get SharedArrayBuffer.prototype.byteLength"); | 24 "get SharedArrayBuffer.prototype.byteLength"); |
18 if (!array_buffer->is_shared()) { | 25 if (!array_buffer->is_shared()) { |
(...skipping 240 matching lines...) | |
259 a.Bind(&u32); | 266 a.Bind(&u32); |
260 a.AtomicStore(MachineRepresentation::kWord32, backing_store, | 267 a.AtomicStore(MachineRepresentation::kWord32, backing_store, |
261 a.WordShl(index_word, 2), value_word32); | 268 a.WordShl(index_word, 2), value_word32); |
262 a.Return(value_integer); | 269 a.Return(value_integer); |
263 | 270 |
264 // This shouldn't happen; we've already validated the type. | 271 // This shouldn't happen; we've already validated the type. |
265 a.Bind(&other); | 272 a.Bind(&other); |
266 a.Return(a.SmiConstant(0)); | 273 a.Return(a.SmiConstant(0)); |
267 } | 274 } |
268 | 275 |
276 inline bool AtomicIsLockFree(uint32_t size) { | |
277 return size == 1 || size == 2 || size == 4; | |
278 } | |
279 | |
280 // ES #sec-atomics.islockfree | |
281 BUILTIN(AtomicsIsLockFree) { | |
282 HandleScope scope(isolate); | |
283 Handle<Object> size = args.atOrUndefined(isolate, 1); | |
284 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, size, Object::ToNumber(size)); | |
285 return *isolate->factory()->ToBoolean(AtomicIsLockFree(size->Number())); | |
286 } | |
287 | |
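For context, AtomicIsLockFree above hard-codes the element sizes (1, 2 and 4 bytes) for which the engine can guarantee lock-free atomics. A minimal C++17 sketch of the same guarantee, using std::atomic (illustration only, not part of this patch):

#include <atomic>
#include <cstdint>
#include <iostream>

int main() {
  // On the platforms V8 targets these all print 1 (lock-free),
  // matching the sizes AtomicIsLockFree accepts.
  std::cout << std::atomic<int8_t>::is_always_lock_free << "\n";   // 1 byte
  std::cout << std::atomic<int16_t>::is_always_lock_free << "\n";  // 2 bytes
  std::cout << std::atomic<int32_t>::is_always_lock_free << "\n";  // 4 bytes
}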
288 // ES #sec-validatesharedintegertypedarray | |
289 MUST_USE_RESULT MaybeHandle<JSTypedArray> ValidateSharedIntegerTypedArray( | |
290 Isolate* isolate, Handle<Object> object, bool onlyInt32 = false) { | |
binji
2017/02/17 19:44:41
nit: hacker_case for variables... or is the idea t
Dan Ehrenberg
2017/02/20 21:34:21
You're right, fixed.
| |
291 if (object->IsJSTypedArray()) { | |
292 Handle<JSTypedArray> typedArray = Handle<JSTypedArray>::cast(object); | |
293 if (typedArray->GetBuffer()->is_shared()) { | |
294 if (onlyInt32) { | |
295 if (typedArray->type() == kExternalInt32Array) return typedArray; | |
296 } else { | |
297 if (typedArray->type() != kExternalFloat32Array && | |
298 typedArray->type() != kExternalFloat64Array && | |
299 typedArray->type() != kExternalUint8ClampedArray) | |
300 return typedArray; | |
301 } | |
302 } | |
303 } | |
304 | |
305 THROW_NEW_ERROR( | |
306 isolate, | |
307 NewTypeError(onlyInt32 ? MessageTemplate::kNotInt32SharedTypedArray | |
308 : MessageTemplate::kNotIntegerSharedTypedArray, | |
309 object), | |
310 JSTypedArray); | |
311 } | |
312 | |
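The acceptance rule above reads more directly as a standalone predicate: shared integer views are allowed, Float32/Float64/Uint8Clamped are rejected, and the wait/wake paths additionally require Int32. A sketch with a hypothetical ElemType enum standing in for V8's ExternalArrayType:

enum class ElemType { kInt8, kUint8, kUint8Clamped, kInt16, kUint16,
                      kInt32, kUint32, kFloat32, kFloat64 };

bool IsAcceptedSharedIntegerType(ElemType t, bool only_int32) {
  if (only_int32) return t == ElemType::kInt32;  // Atomics.wait/wake
  return t != ElemType::kFloat32 && t != ElemType::kFloat64 &&
         t != ElemType::kUint8Clamped;
}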
313 // ES #sec-validateatomicaccess | |
314 MUST_USE_RESULT Maybe<size_t> ValidateAtomicAccess( | |
315 Isolate* isolate, Handle<JSTypedArray> typedArray, | |
316 Handle<Object> requestIndex) { | |
317 // TODO(v8:5961): Use ToIndex for indexes | |
318 ASSIGN_RETURN_ON_EXCEPTION_VALUE( | |
319 isolate, requestIndex, Object::ToNumber(requestIndex), Nothing<size_t>()); | |
320 Handle<Object> offset; | |
321 ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, offset, | |
322 Object::ToInteger(isolate, requestIndex), | |
323 Nothing<size_t>()); | |
324 if (!requestIndex->SameValue(*offset)) { | |
325 isolate->Throw(*isolate->factory()->NewRangeError( | |
326 MessageTemplate::kInvalidAtomicAccessIndex)); | |
327 return Nothing<size_t>(); | |
328 } | |
329 size_t accessIndex; | |
330 uint32_t length = typedArray->length_value(); | |
331 if (!TryNumberToSize(*requestIndex, &accessIndex) || accessIndex >= length) { | |
332 isolate->Throw(*isolate->factory()->NewRangeError( | |
333 MessageTemplate::kInvalidAtomicAccessIndex)); | |
334 return Nothing<size_t>(); | |
335 } | |
336 return Just<size_t>(accessIndex); | |
337 } | |
338 | |
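Numerically, ValidateAtomicAccess accepts an index iff ToInteger leaves it unchanged (so it is a non-NaN number with no fractional part) and it lies in [0, length). A self-contained sketch of that core check (hypothetical helper, not V8 API):

#include <cmath>
#include <cstdint>

bool IsValidAtomicIndex(double request, uint32_t length) {
  // SameValue(request, ToInteger(request)) fails for NaN and fractions.
  if (std::isnan(request) || std::trunc(request) != request) return false;
  return request >= 0.0 && request < static_cast<double>(length);
}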
339 // ES #sec-atomics.wake | |
340 // Atomics.wake( typedArray, index, count ) | |
341 BUILTIN(AtomicsWake) { | |
342 HandleScope scope(isolate); | |
343 Handle<Object> array = args.atOrUndefined(isolate, 1); | |
binji
2017/02/17 19:44:41
Can these just be at, since they're not optional?
Dan Ehrenberg
2017/02/20 21:34:21
It looks like that would make a DCHECK fail if it'
| |
344 Handle<Object> index = args.atOrUndefined(isolate, 2); | |
345 Handle<Object> count = args.atOrUndefined(isolate, 3); | |
346 | |
347 Handle<JSTypedArray> sta; | |
348 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( | |
349 isolate, sta, ValidateSharedIntegerTypedArray(isolate, array, true)); | |
350 | |
351 Maybe<size_t> maybeIndex = ValidateAtomicAccess(isolate, sta, index); | |
352 if (maybeIndex.IsNothing()) return isolate->heap()->exception(); | |
353 size_t i = maybeIndex.FromJust(); | |
354 | |
355 uint32_t c; | |
356 if (count->IsUndefined(isolate)) { | |
357 c = kMaxUInt32; | |
358 } else { | |
359 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, count, | |
360 Object::ToInteger(isolate, count)); | |
361 double countDouble = count->Number(); | |
362 if (countDouble < 0) | |
363 countDouble = 0; | |
364 else if (countDouble > kMaxUInt32) | |
365 countDouble = kMaxUInt32; | |
366 c = static_cast<uint32_t>(countDouble); | |
367 } | |
368 | |
369 Handle<JSArrayBuffer> array_buffer = sta->GetBuffer(); | |
370 size_t addr = (i << 2) + NumberToSize(sta->byte_offset()); | |
371 | |
372 return FutexEmulation::Wake(isolate, array_buffer, addr, c); | |
373 } | |
374 | |
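A note on the address computation above: since ValidateSharedIntegerTypedArray was called with onlyInt32 set, the view's elements are always 4 bytes wide, so shifting the index left by 2 is simply the element-size scaling. As a sketch (hypothetical helper name):

#include <cstddef>

// Byte offset of element i of an Int32Array view into its
// SharedArrayBuffer: i * sizeof(int32_t) + the view's byte offset.
size_t FutexAddr(size_t i, size_t byte_offset) {
  return (i << 2) + byte_offset;
}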
375 // ES #sec-atomics.wait | |
376 // Atomics.wait( typedArray, index, value, timeout ) | |
377 BUILTIN(AtomicsWait) { | |
378 HandleScope scope(isolate); | |
379 Handle<Object> array = args.atOrUndefined(isolate, 1); | |
380 Handle<Object> index = args.atOrUndefined(isolate, 2); | |
381 Handle<Object> value = args.atOrUndefined(isolate, 3); | |
382 Handle<Object> timeout = args.atOrUndefined(isolate, 4); | |
383 | |
384 Handle<JSTypedArray> sta; | |
385 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( | |
386 isolate, sta, ValidateSharedIntegerTypedArray(isolate, array, true)); | |
387 | |
388 Maybe<size_t> maybeIndex = ValidateAtomicAccess(isolate, sta, index); | |
389 if (maybeIndex.IsNothing()) return isolate->heap()->exception(); | |
390 size_t i = maybeIndex.FromJust(); | |
391 | |
392 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, | |
393 Object::ToInt32(isolate, value)); | |
394 int32_t valueInt32 = NumberToInt32(*value); | |
395 | |
396 double timeoutNumber; | |
397 if (timeout->IsUndefined(isolate)) { | |
398 timeoutNumber = isolate->heap()->infinity_value()->Number(); | |
399 } else { | |
400 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, timeout, | |
401 Object::ToNumber(timeout)); | |
402 timeoutNumber = timeout->Number(); | |
403 if (std::isnan(timeoutNumber)) | |
404 timeoutNumber = isolate->heap()->infinity_value()->Number(); | |
405 else if (timeoutNumber < 0) | |
406 timeoutNumber = 0; | |
407 } | |
408 | |
409 if (!isolate->allow_atomics_wait()) { | |
410 THROW_NEW_ERROR_RETURN_FAILURE( | |
411 isolate, NewTypeError(MessageTemplate::kAtomicsWaitNotAllowed)); | |
412 } | |
413 | |
414 Handle<JSArrayBuffer> array_buffer = sta->GetBuffer(); | |
415 size_t addr = (i << 2) + NumberToSize(sta->byte_offset()); | |
416 | |
417 return FutexEmulation::Wait(isolate, array_buffer, addr, valueInt32, | |
418 timeoutNumber); | |
419 } | |
420 | |
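The contract FutexEmulation::Wait implements is the usual futex one: the caller blocks only if the cell still holds the expected value, so a store plus a wake racing with the wait can never be missed. A condensed C++ sketch of that semantics (hypothetical helper; the real implementation also honors the timeout and interrupts):

#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <mutex>

std::mutex mu;
std::condition_variable cv;

const char* Wait(std::atomic<int32_t>* cell, int32_t expected) {
  std::unique_lock<std::mutex> lock(mu);
  // Check under the lock: a concurrent store + notify cannot slip
  // in between the comparison and the sleep.
  if (cell->load() != expected) return "not-equal";
  cv.wait(lock);
  return "ok";
}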
421 namespace { | |
422 | |
423 #if V8_CC_GNU | |
424 | |
425 template <typename T> | |
426 inline T CompareExchangeSeqCst(T* p, T oldval, T newval) { | |
427 (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST, | |
428 __ATOMIC_SEQ_CST); | |
429 return oldval; | |
430 } | |
431 | |
432 template <typename T> | |
433 inline T AddSeqCst(T* p, T value) { | |
434 return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST); | |
435 } | |
436 | |
437 template <typename T> | |
438 inline T SubSeqCst(T* p, T value) { | |
439 return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST); | |
440 } | |
441 | |
442 template <typename T> | |
443 inline T AndSeqCst(T* p, T value) { | |
444 return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST); | |
445 } | |
446 | |
447 template <typename T> | |
448 inline T OrSeqCst(T* p, T value) { | |
449 return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST); | |
450 } | |
451 | |
452 template <typename T> | |
453 inline T XorSeqCst(T* p, T value) { | |
454 return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST); | |
455 } | |
456 | |
457 template <typename T> | |
458 inline T ExchangeSeqCst(T* p, T value) { | |
459 return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST); | |
460 } | |
461 | |
462 #elif V8_CC_MSVC | |
463 | |
464 #define InterlockedCompareExchange32 _InterlockedCompareExchange | |
465 #define InterlockedExchange32 _InterlockedExchange | |
466 #define InterlockedExchangeAdd32 _InterlockedExchangeAdd | |
467 #define InterlockedAnd32 _InterlockedAnd | |
468 #define InterlockedOr32 _InterlockedOr | |
469 #define InterlockedXor32 _InterlockedXor | |
470 #define InterlockedExchangeAdd16 _InterlockedExchangeAdd16 | |
471 #define InterlockedCompareExchange8 _InterlockedCompareExchange8 | |
472 #define InterlockedExchangeAdd8 _InterlockedExchangeAdd8 | |
473 | |
474 #define ATOMIC_OPS(type, suffix, vctype) \ | |
475 inline type AddSeqCst(type* p, type value) { \ | |
476 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ | |
477 bit_cast<vctype>(value)); \ | |
478 } \ | |
479 inline type SubSeqCst(type* p, type value) { \ | |
480 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ | |
481 -bit_cast<vctype>(value)); \ | |
482 } \ | |
483 inline type AndSeqCst(type* p, type value) { \ | |
484 return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \ | |
485 bit_cast<vctype>(value)); \ | |
486 } \ | |
487 inline type OrSeqCst(type* p, type value) { \ | |
488 return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \ | |
489 bit_cast<vctype>(value)); \ | |
490 } \ | |
491 inline type XorSeqCst(type* p, type value) { \ | |
492 return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \ | |
493 bit_cast<vctype>(value)); \ | |
494 } \ | |
495 inline type ExchangeSeqCst(type* p, type value) { \ | |
496 return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \ | |
497 bit_cast<vctype>(value)); \ | |
498 } \ | |
499 \ | |
500 inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \ | |
501 return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \ | |
502 bit_cast<vctype>(newval), \ | |
503 bit_cast<vctype>(oldval)); \ | |
504 } | |
505 | |
506 ATOMIC_OPS(int8_t, 8, char) | |
507 ATOMIC_OPS(uint8_t, 8, char) | |
508 ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */ | |
509 ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */ | |
510 ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */ | |
511 ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */ | |
512 | |
513 #undef ATOMIC_OPS_INTEGER | |
514 #undef ATOMIC_OPS | |
515 | |
516 #undef InterlockedCompareExchange32 | |
517 #undef InterlockedExchange32 | |
518 #undef InterlockedExchangeAdd32 | |
519 #undef InterlockedAnd32 | |
520 #undef InterlockedOr32 | |
521 #undef InterlockedXor32 | |
522 #undef InterlockedExchangeAdd16 | |
523 #undef InterlockedCompareExchange8 | |
524 #undef InterlockedExchangeAdd8 | |
525 | |
526 #else | |
527 | |
528 #error Unsupported platform! | |
529 | |
530 #endif | |
531 | |
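All three backends above expose the same contract; in particular CompareExchangeSeqCst returns the value the cell held before the operation, whether or not the exchange succeeded. The portable C++11 equivalent (a sketch against std::atomic, not the raw-pointer code above):

#include <atomic>

template <typename T>
T CompareExchangeSeqCst(std::atomic<T>* p, T oldval, T newval) {
  // On failure compare_exchange_strong writes the observed value into
  // oldval; on success oldval already holds it. Either way we return
  // the previous contents of *p.
  p->compare_exchange_strong(oldval, newval, std::memory_order_seq_cst);
  return oldval;
}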
532 template <typename T> | |
533 T FromObject(Handle<Object> number); | |
534 | |
535 template <> | |
536 inline uint8_t FromObject<uint8_t>(Handle<Object> number) { | |
537 return NumberToUint32(*number); | |
538 } | |
539 | |
540 template <> | |
541 inline int8_t FromObject<int8_t>(Handle<Object> number) { | |
542 return NumberToInt32(*number); | |
543 } | |
544 | |
545 template <> | |
546 inline uint16_t FromObject<uint16_t>(Handle<Object> number) { | |
547 return NumberToUint32(*number); | |
548 } | |
549 | |
550 template <> | |
551 inline int16_t FromObject<int16_t>(Handle<Object> number) { | |
552 return NumberToInt32(*number); | |
553 } | |
554 | |
555 template <> | |
556 inline uint32_t FromObject<uint32_t>(Handle<Object> number) { | |
557 return NumberToUint32(*number); | |
558 } | |
559 | |
560 template <> | |
561 inline int32_t FromObject<int32_t>(Handle<Object> number) { | |
562 return NumberToInt32(*number); | |
563 } | |
564 | |
565 inline Object* ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); } | |
566 | |
567 inline Object* ToObject(Isolate* isolate, uint8_t t) { return Smi::FromInt(t); } | |
568 | |
569 inline Object* ToObject(Isolate* isolate, int16_t t) { return Smi::FromInt(t); } | |
570 | |
571 inline Object* ToObject(Isolate* isolate, uint16_t t) { | |
572 return Smi::FromInt(t); | |
573 } | |
574 | |
575 inline Object* ToObject(Isolate* isolate, int32_t t) { | |
576 return *isolate->factory()->NewNumber(t); | |
577 } | |
578 | |
579 inline Object* ToObject(Isolate* isolate, uint32_t t) { | |
580 return *isolate->factory()->NewNumber(t); | |
581 } | |
582 | |
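The asymmetry above is deliberate: 8- and 16-bit results always fit in a Smi, while a 32-bit result may not (on 32-bit architectures a Smi holds only 31-bit signed integers), so those overloads go through the factory and may allocate a HeapNumber. A sketch of the 32-bit-platform boundary (constants shown for illustration):

#include <cstdint>

constexpr int32_t kSmiMin = -(1 << 30);      // -2^30 on 32-bit V8
constexpr int32_t kSmiMax = (1 << 30) - 1;   //  2^30 - 1 on 32-bit V8

bool FitsInSmi(int64_t value) {
  return value >= kSmiMin && value <= kSmiMax;
}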
583 template <typename T> | |
584 inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index, | |
585 Handle<Object> oldobj, Handle<Object> newobj) { | |
586 T oldval = FromObject<T>(oldobj); | |
587 T newval = FromObject<T>(newobj); | |
588 T result = | |
589 CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval); | |
590 return ToObject(isolate, result); | |
591 } | |
592 | |
593 template <typename T> | |
594 inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index, | |
595 Handle<Object> obj) { | |
596 T value = FromObject<T>(obj); | |
597 T result = AddSeqCst(static_cast<T*>(buffer) + index, value); | |
598 return ToObject(isolate, result); | |
599 } | |
600 | |
601 template <typename T> | |
602 inline Object* DoSub(Isolate* isolate, void* buffer, size_t index, | |
603 Handle<Object> obj) { | |
604 T value = FromObject<T>(obj); | |
605 T result = SubSeqCst(static_cast<T*>(buffer) + index, value); | |
606 return ToObject(isolate, result); | |
607 } | |
608 | |
609 template <typename T> | |
610 inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index, | |
611 Handle<Object> obj) { | |
612 T value = FromObject<T>(obj); | |
613 T result = AndSeqCst(static_cast<T*>(buffer) + index, value); | |
614 return ToObject(isolate, result); | |
615 } | |
616 | |
617 template <typename T> | |
618 inline Object* DoOr(Isolate* isolate, void* buffer, size_t index, | |
619 Handle<Object> obj) { | |
620 T value = FromObject<T>(obj); | |
621 T result = OrSeqCst(static_cast<T*>(buffer) + index, value); | |
622 return ToObject(isolate, result); | |
623 } | |
624 | |
625 template <typename T> | |
626 inline Object* DoXor(Isolate* isolate, void* buffer, size_t index, | |
627 Handle<Object> obj) { | |
628 T value = FromObject<T>(obj); | |
629 T result = XorSeqCst(static_cast<T*>(buffer) + index, value); | |
630 return ToObject(isolate, result); | |
631 } | |
632 | |
633 template <typename T> | |
634 inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index, | |
635 Handle<Object> obj) { | |
636 T value = FromObject<T>(obj); | |
637 T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value); | |
638 return ToObject(isolate, result); | |
639 } | |
640 | |
641 // Uint8Clamped functions | |
binji
2017/02/17 19:44:41
Oops, the Uint8Clamped versions should have been r
Dan Ehrenberg
2017/02/20 21:34:21
Would you mind if I removed this in a follow-on pa
| |
642 | |
643 uint8_t ClampToUint8(int32_t value) { | |
644 if (value < 0) return 0; | |
645 if (value > 255) return 255; | |
646 return value; | |
647 } | |
648 | |
649 inline Object* DoCompareExchangeUint8Clamped(Isolate* isolate, void* buffer, | |
650 size_t index, | |
651 Handle<Object> oldobj, | |
652 Handle<Object> newobj) { | |
653 typedef int32_t convert_type; | |
654 uint8_t oldval = ClampToUint8(FromObject<convert_type>(oldobj)); | |
655 uint8_t newval = ClampToUint8(FromObject<convert_type>(newobj)); | |
656 uint8_t result = CompareExchangeSeqCst(static_cast<uint8_t*>(buffer) + index, | |
657 oldval, newval); | |
658 return ToObject(isolate, result); | |
659 } | |
660 | |
661 #define DO_UINT8_CLAMPED_OP(name, op) \ | |
662 inline Object* Do##name##Uint8Clamped(Isolate* isolate, void* buffer, \ | |
663 size_t index, Handle<Object> obj) { \ | |
664 typedef int32_t convert_type; \ | |
665 uint8_t* p = static_cast<uint8_t*>(buffer) + index; \ | |
666 convert_type operand = FromObject<convert_type>(obj); \ | |
667 uint8_t expected; \ | |
668 uint8_t result; \ | |
669 do { \ | |
670 expected = *p; \ | |
671 result = ClampToUint8(static_cast<convert_type>(expected) op operand); \ | |
672 } while (CompareExchangeSeqCst(p, expected, result) != expected); \ | |
673 return ToObject(isolate, expected); \ | |
674 } | |
675 | |
676 DO_UINT8_CLAMPED_OP(Add, +) | |
677 DO_UINT8_CLAMPED_OP(Sub, -) | |
678 DO_UINT8_CLAMPED_OP(And, &) | |
679 DO_UINT8_CLAMPED_OP(Or, |) | |
680 DO_UINT8_CLAMPED_OP(Xor, ^) | |
681 | |
682 #undef DO_UINT8_CLAMPED_OP | |
683 | |
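For reference, DO_UINT8_CLAMPED_OP(Add, +) above expands to roughly the following. Clamping makes the fetch-op non-monotonic, so it cannot map onto a single hardware fetch-add; instead it retries a compare-exchange until no other thread has raced the update:

inline Object* DoAddUint8Clamped(Isolate* isolate, void* buffer,
                                 size_t index, Handle<Object> obj) {
  typedef int32_t convert_type;
  uint8_t* p = static_cast<uint8_t*>(buffer) + index;
  convert_type operand = FromObject<convert_type>(obj);
  uint8_t expected;
  uint8_t result;
  do {
    expected = *p;
    result = ClampToUint8(static_cast<convert_type>(expected) + operand);
  } while (CompareExchangeSeqCst(p, expected, result) != expected);
  return ToObject(isolate, expected);  // the value before the add
}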
684 inline Object* DoExchangeUint8Clamped(Isolate* isolate, void* buffer, | |
685 size_t index, Handle<Object> obj) { | |
686 typedef int32_t convert_type; | |
687 uint8_t* p = static_cast<uint8_t*>(buffer) + index; | |
688 uint8_t result = ClampToUint8(FromObject<convert_type>(obj)); | |
689 uint8_t expected; | |
690 do { | |
691 expected = *p; | |
692 } while (CompareExchangeSeqCst(p, expected, result) != expected); | |
693 return ToObject(isolate, expected); | |
694 } | |
695 | |
696 } // anonymous namespace | |
697 | |
698 // Duplicated from objects.h | |
699 // V has parameters (Type, type, TYPE, C type, element_size) | |
700 #define INTEGER_TYPED_ARRAYS(V) \ | |
701 V(Uint8, uint8, UINT8, uint8_t, 1) \ | |
702 V(Int8, int8, INT8, int8_t, 1) \ | |
703 V(Uint16, uint16, UINT16, uint16_t, 2) \ | |
704 V(Int16, int16, INT16, int16_t, 2) \ | |
705 V(Uint32, uint32, UINT32, uint32_t, 4) \ | |
706 V(Int32, int32, INT32, int32_t, 4) | |
707 | |
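Each builtin below stamps this list into a switch via its TYPED_ARRAY_CASE macro; for example, the (Int32, int32, INT32, int32_t, 4) row in AtomicsCompareExchange expands to this case (fragment shown for one type only):

case kExternalInt32Array:
  return DoCompareExchange<int32_t>(isolate, source, i, expectedValue,
                                    replacementValue);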
708 // ES #sec-atomics.compareexchange | |
709 // Atomics.compareExchange( typedArray, index, expectedValue, replacementValue ) | |
710 BUILTIN(AtomicsCompareExchange) { | |
711 HandleScope scope(isolate); | |
712 Handle<Object> array = args.atOrUndefined(isolate, 1); | |
713 Handle<Object> index = args.atOrUndefined(isolate, 2); | |
714 Handle<Object> expectedValue = args.atOrUndefined(isolate, 3); | |
715 Handle<Object> replacementValue = args.atOrUndefined(isolate, 4); | |
716 | |
717 Handle<JSTypedArray> sta; | |
718 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( | |
719 isolate, sta, ValidateSharedIntegerTypedArray(isolate, array)); | |
720 | |
721 Maybe<size_t> maybeIndex = ValidateAtomicAccess(isolate, sta, index); | |
722 if (maybeIndex.IsNothing()) return isolate->heap()->exception(); | |
723 size_t i = maybeIndex.FromJust(); | |
724 | |
725 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, expectedValue, | |
726 Object::ToInteger(isolate, expectedValue)); | |
727 | |
728 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( | |
729 isolate, replacementValue, Object::ToInteger(isolate, replacementValue)); | |
730 | |
731 uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + | |
732 NumberToSize(sta->byte_offset()); | |
733 | |
734 switch (sta->type()) { | |
735 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | |
736 case kExternal##Type##Array: \ | |
737 return DoCompareExchange<ctype>(isolate, source, i, expectedValue, \ | |
738 replacementValue); | |
739 | |
740 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | |
741 #undef TYPED_ARRAY_CASE | |
742 | |
743 case kExternalUint8ClampedArray: | |
744 return DoCompareExchangeUint8Clamped(isolate, source, i, expectedValue, | |
745 replacementValue); | |
746 | |
747 default: | |
748 break; | |
749 } | |
750 | |
751 UNREACHABLE(); | |
752 return isolate->heap()->undefined_value(); | |
753 } | |
754 | |
755 // ES #sec-atomics.add | |
756 // Atomics.add( typedArray, index, value ) | |
757 BUILTIN(AtomicsAdd) { | |
758 HandleScope scope(isolate); | |
759 Handle<Object> array = args.atOrUndefined(isolate, 1); | |
760 Handle<Object> index = args.atOrUndefined(isolate, 2); | |
761 Handle<Object> value = args.atOrUndefined(isolate, 3); | |
762 | |
763 Handle<JSTypedArray> sta; | |
764 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( | |
765 isolate, sta, ValidateSharedIntegerTypedArray(isolate, array)); | |
766 | |
767 Maybe<size_t> maybeIndex = ValidateAtomicAccess(isolate, sta, index); | |
768 if (maybeIndex.IsNothing()) return isolate->heap()->exception(); | |
769 size_t i = maybeIndex.FromJust(); | |
770 | |
771 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, | |
772 Object::ToInteger(isolate, value)); | |
773 | |
774 uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + | |
775 NumberToSize(sta->byte_offset()); | |
776 | |
777 switch (sta->type()) { | |
778 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | |
779 case kExternal##Type##Array: \ | |
780 return DoAdd<ctype>(isolate, source, i, value); | |
781 | |
782 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | |
783 #undef TYPED_ARRAY_CASE | |
784 | |
785 case kExternalUint8ClampedArray: | |
786 return DoAddUint8Clamped(isolate, source, i, value); | |
787 | |
788 default: | |
789 break; | |
790 } | |
791 | |
792 UNREACHABLE(); | |
793 return isolate->heap()->undefined_value(); | |
794 } | |
795 | |
796 // ES #sec-atomics.sub | |
797 // Atomics.sub( typedArray, index, value ) | |
798 BUILTIN(AtomicsSub) { | |
799 HandleScope scope(isolate); | |
800 Handle<Object> array = args.atOrUndefined(isolate, 1); | |
801 Handle<Object> index = args.atOrUndefined(isolate, 2); | |
802 Handle<Object> value = args.atOrUndefined(isolate, 3); | |
803 | |
804 Handle<JSTypedArray> sta; | |
805 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( | |
806 isolate, sta, ValidateSharedIntegerTypedArray(isolate, array)); | |
807 | |
808 Maybe<size_t> maybeIndex = ValidateAtomicAccess(isolate, sta, index); | |
809 if (maybeIndex.IsNothing()) return isolate->heap()->exception(); | |
810 size_t i = maybeIndex.FromJust(); | |
811 | |
812 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, | |
813 Object::ToInteger(isolate, value)); | |
814 | |
815 uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + | |
816 NumberToSize(sta->byte_offset()); | |
817 | |
818 switch (sta->type()) { | |
819 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | |
820 case kExternal##Type##Array: \ | |
821 return DoSub<ctype>(isolate, source, i, value); | |
822 | |
823 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | |
824 #undef TYPED_ARRAY_CASE | |
825 | |
826 case kExternalUint8ClampedArray: | |
827 return DoSubUint8Clamped(isolate, source, i, value); | |
828 | |
829 default: | |
830 break; | |
831 } | |
832 | |
833 UNREACHABLE(); | |
834 return isolate->heap()->undefined_value(); | |
835 } | |
836 | |
837 // ES #sec-atomics.and | |
838 // Atomics.and( typedArray, index, value ) | |
839 BUILTIN(AtomicsAnd) { | |
840 HandleScope scope(isolate); | |
841 Handle<Object> array = args.atOrUndefined(isolate, 1); | |
842 Handle<Object> index = args.atOrUndefined(isolate, 2); | |
843 Handle<Object> value = args.atOrUndefined(isolate, 3); | |
844 | |
845 Handle<JSTypedArray> sta; | |
846 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( | |
847 isolate, sta, ValidateSharedIntegerTypedArray(isolate, array)); | |
848 | |
849 Maybe<size_t> maybeIndex = ValidateAtomicAccess(isolate, sta, index); | |
850 if (maybeIndex.IsNothing()) return isolate->heap()->exception(); | |
851 size_t i = maybeIndex.FromJust(); | |
852 | |
853 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, | |
854 Object::ToInteger(isolate, value)); | |
855 | |
856 uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + | |
857 NumberToSize(sta->byte_offset()); | |
858 | |
859 switch (sta->type()) { | |
860 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | |
861 case kExternal##Type##Array: \ | |
862 return DoAnd<ctype>(isolate, source, i, value); | |
863 | |
864 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | |
865 #undef TYPED_ARRAY_CASE | |
866 | |
867 case kExternalUint8ClampedArray: | |
868 return DoAndUint8Clamped(isolate, source, i, value); | |
869 | |
870 default: | |
871 break; | |
872 } | |
873 | |
874 UNREACHABLE(); | |
875 return isolate->heap()->undefined_value(); | |
876 } | |
877 | |
878 // ES #sec-atomics.or | |
879 // Atomics.or( typedArray, index, value ) | |
880 BUILTIN(AtomicsOr) { | |
881 HandleScope scope(isolate); | |
882 Handle<Object> array = args.atOrUndefined(isolate, 1); | |
883 Handle<Object> index = args.atOrUndefined(isolate, 2); | |
884 Handle<Object> value = args.atOrUndefined(isolate, 3); | |
885 | |
886 Handle<JSTypedArray> sta; | |
887 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( | |
888 isolate, sta, ValidateSharedIntegerTypedArray(isolate, array)); | |
889 | |
890 Maybe<size_t> maybeIndex = ValidateAtomicAccess(isolate, sta, index); | |
891 if (maybeIndex.IsNothing()) return isolate->heap()->exception(); | |
892 size_t i = maybeIndex.FromJust(); | |
893 | |
894 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, | |
895 Object::ToInteger(isolate, value)); | |
896 | |
897 uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + | |
898 NumberToSize(sta->byte_offset()); | |
899 | |
900 switch (sta->type()) { | |
901 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | |
902 case kExternal##Type##Array: \ | |
903 return DoOr<ctype>(isolate, source, i, value); | |
904 | |
905 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | |
906 #undef TYPED_ARRAY_CASE | |
907 | |
908 case kExternalUint8ClampedArray: | |
909 return DoOrUint8Clamped(isolate, source, i, value); | |
910 | |
911 default: | |
912 break; | |
913 } | |
914 | |
915 UNREACHABLE(); | |
916 return isolate->heap()->undefined_value(); | |
917 } | |
918 | |
919 // ES #sec-atomics.xor | |
920 // Atomics.xor( typedArray, index, value ) | |
921 BUILTIN(AtomicsXor) { | |
922 HandleScope scope(isolate); | |
923 Handle<Object> array = args.atOrUndefined(isolate, 1); | |
924 Handle<Object> index = args.atOrUndefined(isolate, 2); | |
925 Handle<Object> value = args.atOrUndefined(isolate, 3); | |
926 | |
927 Handle<JSTypedArray> sta; | |
928 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( | |
929 isolate, sta, ValidateSharedIntegerTypedArray(isolate, array)); | |
930 | |
931 Maybe<size_t> maybeIndex = ValidateAtomicAccess(isolate, sta, index); | |
932 if (maybeIndex.IsNothing()) return isolate->heap()->exception(); | |
933 size_t i = maybeIndex.FromJust(); | |
934 | |
935 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, | |
936 Object::ToInteger(isolate, value)); | |
937 | |
938 uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + | |
939 NumberToSize(sta->byte_offset()); | |
940 | |
941 switch (sta->type()) { | |
942 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | |
943 case kExternal##Type##Array: \ | |
944 return DoXor<ctype>(isolate, source, i, value); | |
945 | |
946 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | |
947 #undef TYPED_ARRAY_CASE | |
948 | |
949 case kExternalUint8ClampedArray: | |
950 return DoXorUint8Clamped(isolate, source, i, value); | |
951 | |
952 default: | |
953 break; | |
954 } | |
955 | |
956 UNREACHABLE(); | |
957 return isolate->heap()->undefined_value(); | |
958 } | |
959 | |
960 // ES #sec-atomics.exchange | |
961 // Atomics.exchange( typedArray, index, value ) | |
962 BUILTIN(AtomicsExchange) { | |
963 HandleScope scope(isolate); | |
964 Handle<Object> array = args.atOrUndefined(isolate, 1); | |
965 Handle<Object> index = args.atOrUndefined(isolate, 2); | |
966 Handle<Object> value = args.atOrUndefined(isolate, 3); | |
967 | |
968 Handle<JSTypedArray> sta; | |
969 ASSIGN_RETURN_FAILURE_ON_EXCEPTION( | |
970 isolate, sta, ValidateSharedIntegerTypedArray(isolate, array)); | |
971 | |
972 Maybe<size_t> maybeIndex = ValidateAtomicAccess(isolate, sta, index); | |
973 if (maybeIndex.IsNothing()) return isolate->heap()->exception(); | |
974 size_t i = maybeIndex.FromJust(); | |
975 | |
976 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, | |
977 Object::ToInteger(isolate, value)); | |
978 | |
979 uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + | |
980 NumberToSize(sta->byte_offset()); | |
981 | |
982 switch (sta->type()) { | |
983 #define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ | |
984 case kExternal##Type##Array: \ | |
985 return DoExchange<ctype>(isolate, source, i, value); | |
986 | |
987 INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) | |
988 #undef TYPED_ARRAY_CASE | |
989 | |
990 case kExternalUint8ClampedArray: | |
991 return DoExchangeUint8Clamped(isolate, source, i, value); | |
992 | |
993 default: | |
994 break; | |
995 } | |
996 | |
997 UNREACHABLE(); | |
998 return isolate->heap()->undefined_value(); | |
999 } | |
1000 | |
269 } // namespace internal | 1001 } // namespace internal |
270 } // namespace v8 | 1002 } // namespace v8 |