Index: src/builtins/builtins-sharedarraybuffer.cc |
diff --git a/src/builtins/builtins-sharedarraybuffer.cc b/src/builtins/builtins-sharedarraybuffer.cc |
index 53caf1fe21640ef1afb948c0f94c0bf8d2a1ee93..c590c6e73b54790bfd3e828bf1b711c5bb7c37b2 100644 |
--- a/src/builtins/builtins-sharedarraybuffer.cc |
+++ b/src/builtins/builtins-sharedarraybuffer.cc |
@@ -2,10 +2,17 @@ |
// Use of this source code is governed by a BSD-style license that can be |
// found in the LICENSE file. |
+#include "src/base/macros.h" |
+#include "src/base/platform/mutex.h" |
+#include "src/base/platform/time.h" |
#include "src/builtins/builtins-utils.h" |
#include "src/builtins/builtins.h" |
#include "src/code-factory.h" |
#include "src/code-stub-assembler.h" |
+#include "src/conversions-inl.h" |
+#include "src/factory.h" |
+#include "src/futex-emulation.h" |
+#include "src/globals.h" |
namespace v8 { |
namespace internal { |
@@ -266,5 +273,730 @@ void Builtins::Generate_AtomicsStore(compiler::CodeAssemblerState* state) { |
a.Return(a.SmiConstant(0)); |
} |
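+// The spec only requires Atomics.isLockFree(4) to be true; other sizes are |
+// implementation-defined. V8 also reports 1- and 2-byte accesses as |
+// lock-free. |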
+inline bool AtomicIsLockFree(uint32_t size) { |
+ return size == 1 || size == 2 || size == 4; |
+} |
+ |
+// ES #sec-atomics.islockfree |
+BUILTIN(AtomicsIsLockFree) { |
+ HandleScope scope(isolate); |
+ Handle<Object> size = args.atOrUndefined(isolate, 1); |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, size, Object::ToNumber(size)); |
+ return *isolate->factory()->ToBoolean(AtomicIsLockFree(size->Number())); |
+} |
+ |
+// ES #sec-validatesharedintegertypedarray |
+MUST_USE_RESULT MaybeHandle<JSTypedArray> ValidateSharedIntegerTypedArray( |
+ Isolate* isolate, Handle<Object> object, bool onlyInt32 = false) { |
binji (2017/02/17 19:44:41): nit: hacker_case for variables... or is the idea t[…]
Dan Ehrenberg (2017/02/20 21:34:21): You're right, fixed.
+ if (object->IsJSTypedArray()) { |
+ Handle<JSTypedArray> typedArray = Handle<JSTypedArray>::cast(object); |
+ if (typedArray->GetBuffer()->is_shared()) { |
+ if (onlyInt32) { |
+ if (typedArray->type() == kExternalInt32Array) return typedArray; |
+ } else { |
+ if (typedArray->type() != kExternalFloat32Array && |
+ typedArray->type() != kExternalFloat64Array && |
+ typedArray->type() != kExternalUint8ClampedArray) |
+ return typedArray; |
+ } |
+ } |
+ } |
+ |
+ THROW_NEW_ERROR( |
+ isolate, |
+ NewTypeError(onlyInt32 ? MessageTemplate::kNotInt32SharedTypedArray |
+ : MessageTemplate::kNotIntegerSharedTypedArray, |
+ object), |
+ JSTypedArray); |
+} |
+ |
+// ES #sec-validateatomicaccess |
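+// The requested index must convert to an integral Number (ToInteger must |
+// leave it unchanged under SameValue) and must be below the array length, |
+// otherwise a RangeError is thrown. |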
+MUST_USE_RESULT Maybe<size_t> ValidateAtomicAccess( |
+ Isolate* isolate, Handle<JSTypedArray> typedArray, |
+ Handle<Object> requestIndex) { |
+ // TODO(v8:5961): Use ToIndex for indexes |
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE( |
+ isolate, requestIndex, Object::ToNumber(requestIndex), Nothing<size_t>()); |
+ Handle<Object> offset; |
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, offset, |
+ Object::ToInteger(isolate, requestIndex), |
+ Nothing<size_t>()); |
+ if (!requestIndex->SameValue(*offset)) { |
+ isolate->Throw(*isolate->factory()->NewRangeError( |
+ MessageTemplate::kInvalidAtomicAccessIndex)); |
+ return Nothing<size_t>(); |
+ } |
+ size_t accessIndex; |
+ uint32_t length = typedArray->length_value(); |
+ if (!TryNumberToSize(*requestIndex, &accessIndex) || accessIndex >= length) { |
+ isolate->Throw(*isolate->factory()->NewRangeError( |
+ MessageTemplate::kInvalidAtomicAccessIndex)); |
+ return Nothing<size_t>(); |
+ } |
+ return Just<size_t>(accessIndex); |
+} |
+ |
+// ES #sec-atomics.wake |
+// Atomics.wake( typedArray, index, count ) |
+BUILTIN(AtomicsWake) { |
+ HandleScope scope(isolate); |
+ Handle<Object> array = args.atOrUndefined(isolate, 1); |
binji (2017/02/17 19:44:41): Can these just be at, since they're not optional?
Dan Ehrenberg (2017/02/20 21:34:21): It looks like that would make a DCHECK fail if it'[…]
+ Handle<Object> index = args.atOrUndefined(isolate, 2); |
+ Handle<Object> count = args.atOrUndefined(isolate, 3); |
+ |
+ Handle<JSTypedArray> sta; |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
+ isolate, sta, ValidateSharedIntegerTypedArray(isolate, array, true)); |
+ |
+ Maybe<size_t> maybeIndex = ValidateAtomicAccess(isolate, sta, index); |
+ if (maybeIndex.IsNothing()) return isolate->heap()->exception(); |
+ size_t i = maybeIndex.FromJust(); |
+ |
+ uint32_t c; |
+ if (count->IsUndefined(isolate)) { |
+ c = kMaxUInt32; |
+ } else { |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, count, |
+ Object::ToInteger(isolate, count)); |
+ double countDouble = count->Number(); |
+ if (countDouble < 0) |
+ countDouble = 0; |
+ else if (countDouble > kMaxUInt32) |
+ countDouble = kMaxUInt32; |
+ c = static_cast<uint32_t>(countDouble); |
+ } |
+ |
+ Handle<JSArrayBuffer> array_buffer = sta->GetBuffer(); |
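+ // The index is an element index into an Int32Array (validated above), so |
+ // each element occupies 4 bytes; convert it into a byte address within the |
+ // shared buffer. |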
+ size_t addr = (i << 2) + NumberToSize(sta->byte_offset()); |
+ |
+ return FutexEmulation::Wake(isolate, array_buffer, addr, c); |
+} |
+ |
+// ES #sec-atomics.wait |
+// Atomics.wait( typedArray, index, value, timeout ) |
+BUILTIN(AtomicsWait) { |
+ HandleScope scope(isolate); |
+ Handle<Object> array = args.atOrUndefined(isolate, 1); |
+ Handle<Object> index = args.atOrUndefined(isolate, 2); |
+ Handle<Object> value = args.atOrUndefined(isolate, 3); |
+ Handle<Object> timeout = args.atOrUndefined(isolate, 4); |
+ |
+ Handle<JSTypedArray> sta; |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
+ isolate, sta, ValidateSharedIntegerTypedArray(isolate, array, true)); |
+ |
+ Maybe<size_t> maybeIndex = ValidateAtomicAccess(isolate, sta, index); |
+ if (maybeIndex.IsNothing()) return isolate->heap()->exception(); |
+ size_t i = maybeIndex.FromJust(); |
+ |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, |
+ Object::ToInt32(isolate, value)); |
+ int32_t valueInt32 = NumberToInt32(*value); |
+ |
+ double timeoutNumber; |
+ if (timeout->IsUndefined(isolate)) { |
+ timeoutNumber = isolate->heap()->infinity_value()->Number(); |
+ } else { |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, timeout, |
+ Object::ToNumber(timeout)); |
+ timeoutNumber = timeout->Number(); |
+ if (std::isnan(timeoutNumber)) |
+ timeoutNumber = isolate->heap()->infinity_value()->Number(); |
+ else if (timeoutNumber < 0) |
+ timeoutNumber = 0; |
+ } |
+ |
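+ // The embedder can forbid blocking waits on this isolate (for example on a |
+ // renderer main thread); in that case Atomics.wait throws a TypeError. |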
+ if (!isolate->allow_atomics_wait()) { |
+ THROW_NEW_ERROR_RETURN_FAILURE( |
+ isolate, NewTypeError(MessageTemplate::kAtomicsWaitNotAllowed)); |
+ } |
+ |
+ Handle<JSArrayBuffer> array_buffer = sta->GetBuffer(); |
+ size_t addr = (i << 2) + NumberToSize(sta->byte_offset()); |
+ |
+ return FutexEmulation::Wait(isolate, array_buffer, addr, valueInt32, |
+ timeoutNumber); |
+} |
+ |
+namespace { |
+ |
+#if V8_CC_GNU |
+ |
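+// On failure, __atomic_compare_exchange_n writes the value found in memory |
+// back into |oldval|, so the wrapper returns the previous contents of *p in |
+// either case. |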
+template <typename T> |
+inline T CompareExchangeSeqCst(T* p, T oldval, T newval) { |
+ (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST, |
+ __ATOMIC_SEQ_CST); |
+ return oldval; |
+} |
+ |
+template <typename T> |
+inline T AddSeqCst(T* p, T value) { |
+ return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST); |
+} |
+ |
+template <typename T> |
+inline T SubSeqCst(T* p, T value) { |
+ return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST); |
+} |
+ |
+template <typename T> |
+inline T AndSeqCst(T* p, T value) { |
+ return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST); |
+} |
+ |
+template <typename T> |
+inline T OrSeqCst(T* p, T value) { |
+ return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST); |
+} |
+ |
+template <typename T> |
+inline T XorSeqCst(T* p, T value) { |
+ return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST); |
+} |
+ |
+template <typename T> |
+inline T ExchangeSeqCst(T* p, T value) { |
+ return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST); |
+} |
+ |
+#elif V8_CC_MSVC |
+ |
+#define InterlockedCompareExchange32 _InterlockedCompareExchange |
+#define InterlockedExchange32 _InterlockedExchange |
+#define InterlockedExchangeAdd32 _InterlockedExchangeAdd |
+#define InterlockedAnd32 _InterlockedAnd |
+#define InterlockedOr32 _InterlockedOr |
+#define InterlockedXor32 _InterlockedXor |
+#define InterlockedExchangeAdd16 _InterlockedExchangeAdd16 |
+#define InterlockedCompareExchange8 _InterlockedCompareExchange8 |
+#define InterlockedExchangeAdd8 _InterlockedExchangeAdd8 |
+ |
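+// The width-suffixed names above alias the MSVC intrinsics so that |
+// ATOMIC_OPS can paste a uniform suffix onto each operation. Like the GCC |
+// __atomic_fetch_* builtins, every Interlocked* intrinsic returns the value |
+// the destination held before the operation. |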
+#define ATOMIC_OPS(type, suffix, vctype) \ |
+ inline type AddSeqCst(type* p, type value) { \ |
+ return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ |
+ bit_cast<vctype>(value)); \ |
+ } \ |
+ inline type SubSeqCst(type* p, type value) { \ |
+ return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \ |
+ -bit_cast<vctype>(value)); \ |
+ } \ |
+ inline type AndSeqCst(type* p, type value) { \ |
+ return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \ |
+ bit_cast<vctype>(value)); \ |
+ } \ |
+ inline type OrSeqCst(type* p, type value) { \ |
+ return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \ |
+ bit_cast<vctype>(value)); \ |
+ } \ |
+ inline type XorSeqCst(type* p, type value) { \ |
+ return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \ |
+ bit_cast<vctype>(value)); \ |
+ } \ |
+ inline type ExchangeSeqCst(type* p, type value) { \ |
+ return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \ |
+ bit_cast<vctype>(value)); \ |
+ } \ |
+ \ |
+ inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \ |
+ return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \ |
+ bit_cast<vctype>(newval), \ |
+ bit_cast<vctype>(oldval)); \ |
+ } |
+ |
+ATOMIC_OPS(int8_t, 8, char) |
+ATOMIC_OPS(uint8_t, 8, char) |
+ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */ |
+ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */ |
+ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */ |
+ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */ |
+ |
+#undef ATOMIC_OPS_INTEGER |
+#undef ATOMIC_OPS |
+ |
+#undef InterlockedCompareExchange32 |
+#undef InterlockedExchange32 |
+#undef InterlockedExchangeAdd32 |
+#undef InterlockedAnd32 |
+#undef InterlockedOr32 |
+#undef InterlockedXor32 |
+#undef InterlockedExchangeAdd16 |
+#undef InterlockedCompareExchange8 |
+#undef InterlockedExchangeAdd8 |
+ |
+#else |
+ |
+#error Unsupported platform! |
+ |
+#endif |
+ |
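+// FromObject/ToObject convert between JS Numbers and raw typed-array element |
+// values. 8- and 16-bit results always fit in a Smi; 32-bit results may |
+// require a heap number. |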
+template <typename T> |
+T FromObject(Handle<Object> number); |
+ |
+template <> |
+inline uint8_t FromObject<uint8_t>(Handle<Object> number) { |
+ return NumberToUint32(*number); |
+} |
+ |
+template <> |
+inline int8_t FromObject<int8_t>(Handle<Object> number) { |
+ return NumberToInt32(*number); |
+} |
+ |
+template <> |
+inline uint16_t FromObject<uint16_t>(Handle<Object> number) { |
+ return NumberToUint32(*number); |
+} |
+ |
+template <> |
+inline int16_t FromObject<int16_t>(Handle<Object> number) { |
+ return NumberToInt32(*number); |
+} |
+ |
+template <> |
+inline uint32_t FromObject<uint32_t>(Handle<Object> number) { |
+ return NumberToUint32(*number); |
+} |
+ |
+template <> |
+inline int32_t FromObject<int32_t>(Handle<Object> number) { |
+ return NumberToInt32(*number); |
+} |
+ |
+inline Object* ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); } |
+ |
+inline Object* ToObject(Isolate* isolate, uint8_t t) { return Smi::FromInt(t); } |
+ |
+inline Object* ToObject(Isolate* isolate, int16_t t) { return Smi::FromInt(t); } |
+ |
+inline Object* ToObject(Isolate* isolate, uint16_t t) { |
+ return Smi::FromInt(t); |
+} |
+ |
+inline Object* ToObject(Isolate* isolate, int32_t t) { |
+ return *isolate->factory()->NewNumber(t); |
+} |
+ |
+inline Object* ToObject(Isolate* isolate, uint32_t t) { |
+ return *isolate->factory()->NewNumber(t); |
+} |
+ |
+template <typename T> |
+inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index, |
+ Handle<Object> oldobj, Handle<Object> newobj) { |
+ T oldval = FromObject<T>(oldobj); |
+ T newval = FromObject<T>(newobj); |
+ T result = |
+ CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval); |
+ return ToObject(isolate, result); |
+} |
+ |
+template <typename T> |
+inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index, |
+ Handle<Object> obj) { |
+ T value = FromObject<T>(obj); |
+ T result = AddSeqCst(static_cast<T*>(buffer) + index, value); |
+ return ToObject(isolate, result); |
+} |
+ |
+template <typename T> |
+inline Object* DoSub(Isolate* isolate, void* buffer, size_t index, |
+ Handle<Object> obj) { |
+ T value = FromObject<T>(obj); |
+ T result = SubSeqCst(static_cast<T*>(buffer) + index, value); |
+ return ToObject(isolate, result); |
+} |
+ |
+template <typename T> |
+inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index, |
+ Handle<Object> obj) { |
+ T value = FromObject<T>(obj); |
+ T result = AndSeqCst(static_cast<T*>(buffer) + index, value); |
+ return ToObject(isolate, result); |
+} |
+ |
+template <typename T> |
+inline Object* DoOr(Isolate* isolate, void* buffer, size_t index, |
+ Handle<Object> obj) { |
+ T value = FromObject<T>(obj); |
+ T result = OrSeqCst(static_cast<T*>(buffer) + index, value); |
+ return ToObject(isolate, result); |
+} |
+ |
+template <typename T> |
+inline Object* DoXor(Isolate* isolate, void* buffer, size_t index, |
+ Handle<Object> obj) { |
+ T value = FromObject<T>(obj); |
+ T result = XorSeqCst(static_cast<T*>(buffer) + index, value); |
+ return ToObject(isolate, result); |
+} |
+ |
+template <typename T> |
+inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index, |
+ Handle<Object> obj) { |
+ T value = FromObject<T>(obj); |
+ T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value); |
+ return ToObject(isolate, result); |
+} |
+ |
+// Uint8Clamped functions |
binji (2017/02/17 19:44:41): Oops, the Uint8Clamped versions should have been r[emoved …]
Dan Ehrenberg (2017/02/20 21:34:21): Would you mind if I removed this in a follow-on pa[tch …]
+ |
+uint8_t ClampToUint8(int32_t value) { |
+ if (value < 0) return 0; |
+ if (value > 255) return 255; |
+ return value; |
+} |
+ |
+inline Object* DoCompareExchangeUint8Clamped(Isolate* isolate, void* buffer, |
+ size_t index, |
+ Handle<Object> oldobj, |
+ Handle<Object> newobj) { |
+ typedef int32_t convert_type; |
+ uint8_t oldval = ClampToUint8(FromObject<convert_type>(oldobj)); |
+ uint8_t newval = ClampToUint8(FromObject<convert_type>(newobj)); |
+ uint8_t result = CompareExchangeSeqCst(static_cast<uint8_t*>(buffer) + index, |
+ oldval, newval); |
+ return ToObject(isolate, result); |
+} |
+ |
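+// There is no native clamping read-modify-write operation, so each |
+// Uint8Clamped op is emulated with a compare-exchange loop: read the current |
+// byte, compute the clamped result, and retry until the CAS succeeds. The |
+// value before the operation is returned. |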
+#define DO_UINT8_CLAMPED_OP(name, op) \ |
+ inline Object* Do##name##Uint8Clamped(Isolate* isolate, void* buffer, \ |
+ size_t index, Handle<Object> obj) { \ |
+ typedef int32_t convert_type; \ |
+ uint8_t* p = static_cast<uint8_t*>(buffer) + index; \ |
+ convert_type operand = FromObject<convert_type>(obj); \ |
+ uint8_t expected; \ |
+ uint8_t result; \ |
+ do { \ |
+ expected = *p; \ |
+ result = ClampToUint8(static_cast<convert_type>(expected) op operand); \ |
+ } while (CompareExchangeSeqCst(p, expected, result) != expected); \ |
+ return ToObject(isolate, expected); \ |
+ } |
+ |
+DO_UINT8_CLAMPED_OP(Add, +) |
+DO_UINT8_CLAMPED_OP(Sub, -) |
+DO_UINT8_CLAMPED_OP(And, &) |
+DO_UINT8_CLAMPED_OP(Or, |) |
+DO_UINT8_CLAMPED_OP(Xor, ^) |
+ |
+#undef DO_UINT8_CLAMPED_OP |
+ |
+inline Object* DoExchangeUint8Clamped(Isolate* isolate, void* buffer, |
+ size_t index, Handle<Object> obj) { |
+ typedef int32_t convert_type; |
+ uint8_t* p = static_cast<uint8_t*>(buffer) + index; |
+ uint8_t result = ClampToUint8(FromObject<convert_type>(obj)); |
+ uint8_t expected; |
+ do { |
+ expected = *p; |
+ } while (CompareExchangeSeqCst(p, expected, result) != expected); |
+ return ToObject(isolate, expected); |
+} |
+ |
+} // anonymous namespace |
+ |
+// Duplicated from objects.h |
+// V has parameters (Type, type, TYPE, C type, element_size) |
+#define INTEGER_TYPED_ARRAYS(V) \ |
+ V(Uint8, uint8, UINT8, uint8_t, 1) \ |
+ V(Int8, int8, INT8, int8_t, 1) \ |
+ V(Uint16, uint16, UINT16, uint16_t, 2) \ |
+ V(Int16, int16, INT16, int16_t, 2) \ |
+ V(Uint32, uint32, UINT32, uint32_t, 4) \ |
+ V(Int32, int32, INT32, int32_t, 4) |
+ |
+// ES #sec-atomics.compareexchange |
+// Atomics.compareExchange( typedArray, index, expectedValue, replacementValue ) |
+BUILTIN(AtomicsCompareExchange) { |
+ HandleScope scope(isolate); |
+ Handle<Object> array = args.atOrUndefined(isolate, 1); |
+ Handle<Object> index = args.atOrUndefined(isolate, 2); |
+ Handle<Object> expectedValue = args.atOrUndefined(isolate, 3); |
+ Handle<Object> replacementValue = args.atOrUndefined(isolate, 4); |
+ |
+ Handle<JSTypedArray> sta; |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
+ isolate, sta, ValidateSharedIntegerTypedArray(isolate, array)); |
+ |
+ Maybe<size_t> maybeIndex = ValidateAtomicAccess(isolate, sta, index); |
+ if (maybeIndex.IsNothing()) return isolate->heap()->exception(); |
+ size_t i = maybeIndex.FromJust(); |
+ |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, expectedValue, |
+ Object::ToInteger(isolate, expectedValue)); |
+ |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
+ isolate, replacementValue, Object::ToInteger(isolate, replacementValue)); |
+ |
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + |
+ NumberToSize(sta->byte_offset()); |
+ |
+ switch (sta->type()) { |
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
+ case kExternal##Type##Array: \ |
+ return DoCompareExchange<ctype>(isolate, source, i, expectedValue, \ |
+ replacementValue); |
+ |
+ INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
+#undef TYPED_ARRAY_CASE |
+ |
+ case kExternalUint8ClampedArray: |
+ return DoCompareExchangeUint8Clamped(isolate, source, i, expectedValue, |
+ replacementValue); |
+ |
+ default: |
+ break; |
+ } |
+ |
+ UNREACHABLE(); |
+ return isolate->heap()->undefined_value(); |
+} |
+ |
+// ES #sec-atomics.add |
+// Atomics.add( typedArray, index, value ) |
+BUILTIN(AtomicsAdd) { |
+ HandleScope scope(isolate); |
+ Handle<Object> array = args.atOrUndefined(isolate, 1); |
+ Handle<Object> index = args.atOrUndefined(isolate, 2); |
+ Handle<Object> value = args.atOrUndefined(isolate, 3); |
+ |
+ Handle<JSTypedArray> sta; |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
+ isolate, sta, ValidateSharedIntegerTypedArray(isolate, array)); |
+ |
+ Maybe<size_t> maybeIndex = ValidateAtomicAccess(isolate, sta, index); |
+ if (maybeIndex.IsNothing()) return isolate->heap()->exception(); |
+ size_t i = maybeIndex.FromJust(); |
+ |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, |
+ Object::ToInteger(isolate, value)); |
+ |
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + |
+ NumberToSize(sta->byte_offset()); |
+ |
+ switch (sta->type()) { |
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
+ case kExternal##Type##Array: \ |
+ return DoAdd<ctype>(isolate, source, i, value); |
+ |
+ INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
+#undef TYPED_ARRAY_CASE |
+ |
+ case kExternalUint8ClampedArray: |
+ return DoAddUint8Clamped(isolate, source, i, value); |
+ |
+ default: |
+ break; |
+ } |
+ |
+ UNREACHABLE(); |
+ return isolate->heap()->undefined_value(); |
+} |
+ |
+// ES #sec-atomics.sub |
+// Atomics.sub( typedArray, index, value ) |
+BUILTIN(AtomicsSub) { |
+ HandleScope scope(isolate); |
+ Handle<Object> array = args.atOrUndefined(isolate, 1); |
+ Handle<Object> index = args.atOrUndefined(isolate, 2); |
+ Handle<Object> value = args.atOrUndefined(isolate, 3); |
+ |
+ Handle<JSTypedArray> sta; |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
+ isolate, sta, ValidateSharedIntegerTypedArray(isolate, array)); |
+ |
+ Maybe<size_t> maybeIndex = ValidateAtomicAccess(isolate, sta, index); |
+ if (maybeIndex.IsNothing()) return isolate->heap()->exception(); |
+ size_t i = maybeIndex.FromJust(); |
+ |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, |
+ Object::ToInteger(isolate, value)); |
+ |
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + |
+ NumberToSize(sta->byte_offset()); |
+ |
+ switch (sta->type()) { |
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
+ case kExternal##Type##Array: \ |
+ return DoSub<ctype>(isolate, source, i, value); |
+ |
+ INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
+#undef TYPED_ARRAY_CASE |
+ |
+ case kExternalUint8ClampedArray: |
+ return DoSubUint8Clamped(isolate, source, i, value); |
+ |
+ default: |
+ break; |
+ } |
+ |
+ UNREACHABLE(); |
+ return isolate->heap()->undefined_value(); |
+} |
+ |
+// ES #sec-atomics.and |
+// Atomics.and( typedArray, index, value ) |
+BUILTIN(AtomicsAnd) { |
+ HandleScope scope(isolate); |
+ Handle<Object> array = args.atOrUndefined(isolate, 1); |
+ Handle<Object> index = args.atOrUndefined(isolate, 2); |
+ Handle<Object> value = args.atOrUndefined(isolate, 3); |
+ |
+ Handle<JSTypedArray> sta; |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
+ isolate, sta, ValidateSharedIntegerTypedArray(isolate, array)); |
+ |
+ Maybe<size_t> maybeIndex = ValidateAtomicAccess(isolate, sta, index); |
+ if (maybeIndex.IsNothing()) return isolate->heap()->exception(); |
+ size_t i = maybeIndex.FromJust(); |
+ |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, |
+ Object::ToInteger(isolate, value)); |
+ |
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + |
+ NumberToSize(sta->byte_offset()); |
+ |
+ switch (sta->type()) { |
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
+ case kExternal##Type##Array: \ |
+ return DoAnd<ctype>(isolate, source, i, value); |
+ |
+ INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
+#undef TYPED_ARRAY_CASE |
+ |
+ case kExternalUint8ClampedArray: |
+ return DoAndUint8Clamped(isolate, source, i, value); |
+ |
+ default: |
+ break; |
+ } |
+ |
+ UNREACHABLE(); |
+ return isolate->heap()->undefined_value(); |
+} |
+ |
+// ES #sec-atomics.or |
+// Atomics.or( typedArray, index, value ) |
+BUILTIN(AtomicsOr) { |
+ HandleScope scope(isolate); |
+ Handle<Object> array = args.atOrUndefined(isolate, 1); |
+ Handle<Object> index = args.atOrUndefined(isolate, 2); |
+ Handle<Object> value = args.atOrUndefined(isolate, 3); |
+ |
+ Handle<JSTypedArray> sta; |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
+ isolate, sta, ValidateSharedIntegerTypedArray(isolate, array)); |
+ |
+ Maybe<size_t> maybeIndex = ValidateAtomicAccess(isolate, sta, index); |
+ if (maybeIndex.IsNothing()) return isolate->heap()->exception(); |
+ size_t i = maybeIndex.FromJust(); |
+ |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, |
+ Object::ToInteger(isolate, value)); |
+ |
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + |
+ NumberToSize(sta->byte_offset()); |
+ |
+ switch (sta->type()) { |
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
+ case kExternal##Type##Array: \ |
+ return DoOr<ctype>(isolate, source, i, value); |
+ |
+ INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
+#undef TYPED_ARRAY_CASE |
+ |
+ case kExternalUint8ClampedArray: |
+ return DoOrUint8Clamped(isolate, source, i, value); |
+ |
+ default: |
+ break; |
+ } |
+ |
+ UNREACHABLE(); |
+ return isolate->heap()->undefined_value(); |
+} |
+ |
+// ES #sec-atomics.xor |
+// Atomics.xor( typedArray, index, value ) |
+BUILTIN(AtomicsXor) { |
+ HandleScope scope(isolate); |
+ Handle<Object> array = args.atOrUndefined(isolate, 1); |
+ Handle<Object> index = args.atOrUndefined(isolate, 2); |
+ Handle<Object> value = args.atOrUndefined(isolate, 3); |
+ |
+ Handle<JSTypedArray> sta; |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
+ isolate, sta, ValidateSharedIntegerTypedArray(isolate, array)); |
+ |
+ Maybe<size_t> maybeIndex = ValidateAtomicAccess(isolate, sta, index); |
+ if (maybeIndex.IsNothing()) return isolate->heap()->exception(); |
+ size_t i = maybeIndex.FromJust(); |
+ |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, |
+ Object::ToInteger(isolate, value)); |
+ |
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + |
+ NumberToSize(sta->byte_offset()); |
+ |
+ switch (sta->type()) { |
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
+ case kExternal##Type##Array: \ |
+ return DoXor<ctype>(isolate, source, i, value); |
+ |
+ INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
+#undef TYPED_ARRAY_CASE |
+ |
+ case kExternalUint8ClampedArray: |
+ return DoXorUint8Clamped(isolate, source, i, value); |
+ |
+ default: |
+ break; |
+ } |
+ |
+ UNREACHABLE(); |
+ return isolate->heap()->undefined_value(); |
+} |
+ |
+// ES #sec-atomics.exchange |
+// Atomics.exchange( typedArray, index, value ) |
+BUILTIN(AtomicsExchange) { |
+ HandleScope scope(isolate); |
+ Handle<Object> array = args.atOrUndefined(isolate, 1); |
+ Handle<Object> index = args.atOrUndefined(isolate, 2); |
+ Handle<Object> value = args.atOrUndefined(isolate, 3); |
+ |
+ Handle<JSTypedArray> sta; |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
+ isolate, sta, ValidateSharedIntegerTypedArray(isolate, array)); |
+ |
+ Maybe<size_t> maybeIndex = ValidateAtomicAccess(isolate, sta, index); |
+ if (maybeIndex.IsNothing()) return isolate->heap()->exception(); |
+ size_t i = maybeIndex.FromJust(); |
+ |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, |
+ Object::ToInteger(isolate, value)); |
+ |
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + |
+ NumberToSize(sta->byte_offset()); |
+ |
+ switch (sta->type()) { |
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
+ case kExternal##Type##Array: \ |
+ return DoExchange<ctype>(isolate, source, i, value); |
+ |
+ INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
+#undef TYPED_ARRAY_CASE |
+ |
+ case kExternalUint8ClampedArray: |
+ return DoExchangeUint8Clamped(isolate, source, i, value); |
+ |
+ default: |
+ break; |
+ } |
+ |
+ UNREACHABLE(); |
+ return isolate->heap()->undefined_value(); |
+} |
+ |
} // namespace internal |
} // namespace v8 |