Index: src/builtins/builtins-sharedarraybuffer.cc |
diff --git a/src/builtins/builtins-sharedarraybuffer.cc b/src/builtins/builtins-sharedarraybuffer.cc |
index 1937b0e4bbb5092f5c7ae5a368bddc539b4216ca..67f530fce932454426bcb392bb1dd1e6aae77b1f 100644 |
--- a/src/builtins/builtins-sharedarraybuffer.cc |
+++ b/src/builtins/builtins-sharedarraybuffer.cc |
@@ -341,6 +341,79 @@ void Builtins::Generate_AtomicsExchange(compiler::CodeAssemblerState* state) { |
a.Bind(&other); |
a.Return(a.SmiConstant(0)); |
} |
+ |
+void Builtins::Generate_AtomicsCompareExchange( |
+ compiler::CodeAssemblerState* state) { |
+ using compiler::Node; |
+ CodeStubAssembler a(state); |
+ Node* array = a.Parameter(1); |
+ Node* index = a.Parameter(2); |
+ Node* old_value = a.Parameter(3); |
+ Node* new_value = a.Parameter(4); |
+ Node* context = a.Parameter(5 + 2); |
+ |
+ Node* instance_type; |
+ Node* backing_store; |
+ ValidateSharedTypedArray(&a, array, context, &instance_type, &backing_store); |
+ |
+ Node* index_word32 = ConvertTaggedAtomicIndexToWord32(&a, index, context); |
+ Node* array_length_word32 = a.TruncateTaggedToWord32( |
+ context, a.LoadObjectField(array, JSTypedArray::kLengthOffset)); |
+ ValidateAtomicIndex(&a, index_word32, array_length_word32, context); |
+ Node* index_word = a.ChangeUint32ToWord(index_word32); |
+ |
+ Node* old_value_integer = a.ToInteger(context, old_value); |
+ Node* old_value_word32 = a.TruncateTaggedToWord32(context, old_value_integer); |
+ |
+ Node* new_value_integer = a.ToInteger(context, new_value); |
+ Node* new_value_word32 = a.TruncateTaggedToWord32(context, new_value_integer); |
+ |
+ CodeStubAssembler::Label i8(&a), u8(&a), i16(&a), u16(&a), i32(&a), u32(&a), |
+ other(&a); |
+ int32_t case_values[] = { |
+ FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE, |
+ FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE, |
+ }; |
+ CodeStubAssembler::Label* case_labels[] = { |
+ &i8, &u8, &i16, &u16, &i32, &u32, |
+ }; |
+ a.Switch(instance_type, &other, case_values, case_labels, |
+ arraysize(case_labels)); |
+ |
+ a.Bind(&i8); |
+ a.Return(a.SmiFromWord32( |
+ a.AtomicCompareExchange(MachineType::Int8(), backing_store, index_word, |
+ old_value_word32, new_value_word32))); |
+ |
+ a.Bind(&u8); |
+ a.Return(a.SmiFromWord32( |
+ a.AtomicCompareExchange(MachineType::Uint8(), backing_store, index_word, |
+ old_value_word32, new_value_word32))); |
+ |
+ a.Bind(&i16); |
+ a.Return(a.SmiFromWord32(a.AtomicCompareExchange( |
+ MachineType::Int16(), backing_store, a.WordShl(index_word, 1), |
+ old_value_word32, new_value_word32))); |
+ |
+ a.Bind(&u16); |
+ a.Return(a.SmiFromWord32(a.AtomicCompareExchange( |
+ MachineType::Uint16(), backing_store, a.WordShl(index_word, 1), |
+ old_value_word32, new_value_word32))); |
+ |
+ a.Bind(&i32); |
+ a.Return(a.ChangeInt32ToTagged(a.AtomicCompareExchange( |
+ MachineType::Int32(), backing_store, a.WordShl(index_word, 2), |
+ old_value_word32, new_value_word32))); |
+ |
+ a.Bind(&u32); |
+ a.Return(a.ChangeUint32ToTagged(a.AtomicCompareExchange( |
+ MachineType::Uint32(), backing_store, a.WordShl(index_word, 2), |
+ old_value_word32, new_value_word32))); |
+ |
+ // This shouldn't happen, we've already validated the type. |
+ a.Bind(&other); |
+ a.Return(a.SmiConstant(0)); |
+} |
#endif |
inline bool AtomicIsLockFree(uint32_t size) { |
@@ -496,13 +569,6 @@ namespace { |
#if V8_CC_GNU |
template <typename T> |
-inline T CompareExchangeSeqCst(T* p, T oldval, T newval) { |
- (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST, |
- __ATOMIC_SEQ_CST); |
- return oldval; |
-} |
- |
-template <typename T> |
inline T AddSeqCst(T* p, T value) { |
return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST); |
} |
@@ -530,13 +596,11 @@ inline T XorSeqCst(T* p, T value) { |
#elif V8_CC_MSVC |
-#define InterlockedCompareExchange32 _InterlockedCompareExchange |
#define InterlockedExchangeAdd32 _InterlockedExchangeAdd |
#define InterlockedAnd32 _InterlockedAnd |
#define InterlockedOr32 _InterlockedOr |
#define InterlockedXor32 _InterlockedXor |
#define InterlockedExchangeAdd16 _InterlockedExchangeAdd16 |
-#define InterlockedCompareExchange8 _InterlockedCompareExchange8 |
#define InterlockedExchangeAdd8 _InterlockedExchangeAdd8 |
#define ATOMIC_OPS(type, suffix, vctype) \ |
@@ -559,12 +623,6 @@ inline T XorSeqCst(T* p, T value) { |
inline type XorSeqCst(type* p, type value) { \ |
return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \ |
bit_cast<vctype>(value)); \ |
- } \ |
- \ |
- inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \ |
- return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \ |
- bit_cast<vctype>(newval), \ |
- bit_cast<vctype>(oldval)); \ |
} |
ATOMIC_OPS(int8_t, 8, char) |
@@ -577,13 +635,11 @@ ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */ |
#undef ATOMIC_OPS_INTEGER |
#undef ATOMIC_OPS |
-#undef InterlockedCompareExchange32 |
#undef InterlockedExchangeAdd32 |
#undef InterlockedAnd32 |
#undef InterlockedOr32 |
#undef InterlockedXor32 |
#undef InterlockedExchangeAdd16 |
-#undef InterlockedCompareExchange8 |
#undef InterlockedExchangeAdd8 |
#else |
@@ -644,16 +700,6 @@ inline Object* ToObject(Isolate* isolate, uint32_t t) { |
} |
template <typename T> |
-inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index, |
- Handle<Object> oldobj, Handle<Object> newobj) { |
- T oldval = FromObject<T>(oldobj); |
- T newval = FromObject<T>(newobj); |
- T result = |
- CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval); |
- return ToObject(isolate, result); |
-} |
- |
-template <typename T> |
inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index, |
Handle<Object> obj) { |
T value = FromObject<T>(obj); |
@@ -706,50 +752,6 @@ inline Object* DoXor(Isolate* isolate, void* buffer, size_t index, |
V(Uint32, uint32, UINT32, uint32_t, 4) \ |
V(Int32, int32, INT32, int32_t, 4) |
-// ES #sec-atomics.wait |
-// Atomics.compareExchange( typedArray, index, expectedValue, replacementValue ) |
-BUILTIN(AtomicsCompareExchange) { |
- HandleScope scope(isolate); |
- Handle<Object> array = args.atOrUndefined(isolate, 1); |
- Handle<Object> index = args.atOrUndefined(isolate, 2); |
- Handle<Object> expected_value = args.atOrUndefined(isolate, 3); |
- Handle<Object> replacement_value = args.atOrUndefined(isolate, 4); |
- |
- Handle<JSTypedArray> sta; |
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
- isolate, sta, ValidateSharedIntegerTypedArray(isolate, array)); |
- |
- Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index); |
- if (maybe_index.IsNothing()) return isolate->heap()->exception(); |
- size_t i = maybe_index.FromJust(); |
- |
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
- isolate, expected_value, Object::ToInteger(isolate, expected_value)); |
- |
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
- isolate, replacement_value, |
- Object::ToInteger(isolate, replacement_value)); |
- |
- uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + |
- NumberToSize(sta->byte_offset()); |
- |
- switch (sta->type()) { |
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
- case kExternal##Type##Array: \ |
- return DoCompareExchange<ctype>(isolate, source, i, expected_value, \ |
- replacement_value); |
- |
- INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
-#undef TYPED_ARRAY_CASE |
- |
- default: |
- break; |
- } |
- |
- UNREACHABLE(); |
- return isolate->heap()->undefined_value(); |
-} |
- |
// ES #sec-atomics.add |
// Atomics.add( typedArray, index, value ) |
BUILTIN(AtomicsAdd) { |
@@ -991,6 +993,67 @@ BUILTIN(AtomicsExchange) { |
UNREACHABLE(); |
return isolate->heap()->undefined_value(); |
} |
+ |
+template <typename T> |
+inline T CompareExchangeSeqCst(T* p, T oldval, T newval) { |
+ (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST, |
+ __ATOMIC_SEQ_CST); |
+ return oldval; |
+} |
+ |
+template <typename T> |
+inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index, |
+ Handle<Object> oldobj, Handle<Object> newobj) { |
+ T oldval = FromObject<T>(oldobj); |
+ T newval = FromObject<T>(newobj); |
+ T result = |
+ CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval); |
+ return ToObject(isolate, result); |
+} |
+ |
+// ES #sec-atomics.compareexchange |
+// Atomics.compareExchange( typedArray, index, expectedValue, replacementValue ) |
+BUILTIN(AtomicsCompareExchange) { |
+ HandleScope scope(isolate); |
+ Handle<Object> array = args.atOrUndefined(isolate, 1); |
+ Handle<Object> index = args.atOrUndefined(isolate, 2); |
+ Handle<Object> expected_value = args.atOrUndefined(isolate, 3); |
+ Handle<Object> replacement_value = args.atOrUndefined(isolate, 4); |
+ |
+ Handle<JSTypedArray> sta; |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
+ isolate, sta, ValidateSharedIntegerTypedArray(isolate, array)); |
+ |
+ Maybe<size_t> maybe_index = ValidateAtomicAccess(isolate, sta, index); |
+ if (maybe_index.IsNothing()) return isolate->heap()->exception(); |
+ size_t i = maybe_index.FromJust(); |
+ |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
+ isolate, expected_value, Object::ToInteger(isolate, expected_value)); |
+ |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
+ isolate, replacement_value, |
+ Object::ToInteger(isolate, replacement_value)); |
+ |
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + |
+ NumberToSize(sta->byte_offset()); |
+ |
+ switch (sta->type()) { |
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
+ case kExternal##Type##Array: \ |
+ return DoCompareExchange<ctype>(isolate, source, i, expected_value, \ |
+ replacement_value); |
+ |
+ INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
+#undef TYPED_ARRAY_CASE |
+ |
+ default: |
+ break; |
+ } |
+ |
+ UNREACHABLE(); |
+ return isolate->heap()->undefined_value(); |
+} |
#endif |
} // namespace internal |