Index: src/builtins/builtins-sharedarraybuffer.cc |
diff --git a/src/builtins/builtins-sharedarraybuffer.cc b/src/builtins/builtins-sharedarraybuffer.cc |
index b91807833f78ebb4a300bb73f4df9e93997283cc..5a7b41bb6bb844cc1d62a67059e4ac7ffa045efe 100644 |
--- a/src/builtins/builtins-sharedarraybuffer.cc |
+++ b/src/builtins/builtins-sharedarraybuffer.cc |
@@ -266,5 +266,233 @@ void Builtins::Generate_AtomicsStore(compiler::CodeAssemblerState* state) { |
a.Return(a.SmiConstant(0)); |
} |
+#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 |
+// The helper functions are duplicated from runtime-atomics.cc |
+template <typename T> |
+T FromObject(Handle<Object> number); |
+ |
+template <> |
+inline uint8_t FromObject<uint8_t>(Handle<Object> number) { |
+ return NumberToUint32(*number); |
+} |
+ |
+template <> |
+inline int8_t FromObject<int8_t>(Handle<Object> number) { |
+ return NumberToInt32(*number); |
+} |
+ |
+template <> |
+inline uint16_t FromObject<uint16_t>(Handle<Object> number) { |
+ return NumberToUint32(*number); |
+} |
+ |
+template <> |
+inline int16_t FromObject<int16_t>(Handle<Object> number) { |
+ return NumberToInt32(*number); |
+} |
+ |
+template <> |
+inline uint32_t FromObject<uint32_t>(Handle<Object> number) { |
+ return NumberToUint32(*number); |
+} |
+ |
+template <> |
+inline int32_t FromObject<int32_t>(Handle<Object> number) { |
+ return NumberToInt32(*number); |
+} |
+ |
+inline Object* ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); } |
+ |
+inline Object* ToObject(Isolate* isolate, uint8_t t) { return Smi::FromInt(t); } |
+ |
+inline Object* ToObject(Isolate* isolate, int16_t t) { return Smi::FromInt(t); } |
+ |
+inline Object* ToObject(Isolate* isolate, uint16_t t) { |
+ return Smi::FromInt(t); |
+} |
+ |
+inline Object* ToObject(Isolate* isolate, int32_t t) { |
+ return *isolate->factory()->NewNumber(t); |
+} |
+ |
+inline Object* ToObject(Isolate* isolate, uint32_t t) { |
+ return *isolate->factory()->NewNumber(t); |
+} |
+ |
+template <typename T> |
+inline T ExchangeSeqCst(T* p, T value) { |
+ return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST); |
+} |
+ |
+template <typename T> |
+inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index, |
+ Handle<Object> obj) { |
+ T value = FromObject<T>(obj); |
+ T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value); |
+ return ToObject(isolate, result); |
+} |
+ |
+// ES #sec-validatesharedintegertypedarray |
+MUST_USE_RESULT MaybeHandle<JSTypedArray> ValidateSharedIntegerTypedArray( |
+ Isolate* isolate, Handle<Object> object, bool only_int32 = false) { |
+ if (object->IsJSTypedArray()) { |
+ Handle<JSTypedArray> typedArray = Handle<JSTypedArray>::cast(object); |
+ if (typedArray->GetBuffer()->is_shared()) { |
+ if (only_int32) { |
+ if (typedArray->type() == kExternalInt32Array) return typedArray; |
+ } else { |
+ if (typedArray->type() != kExternalFloat32Array && |
+ typedArray->type() != kExternalFloat64Array && |
+ typedArray->type() != kExternalUint8ClampedArray) |
+ return typedArray; |
+ } |
+ } |
+ } |
+ |
+ THROW_NEW_ERROR( |
+ isolate, |
+ NewTypeError(only_int32 ? MessageTemplate::kNotInt32SharedTypedArray |
+ : MessageTemplate::kNotIntegerSharedTypedArray, |
+ object), |
+ JSTypedArray); |
+} |
+ |
+// ES #sec-validateatomicaccess |
+MUST_USE_RESULT Maybe<size_t> ValidateAtomicAccess( |
+ Isolate* isolate, Handle<JSTypedArray> typedArray, |
+ Handle<Object> requestIndex) { |
+  // TODO(v8:5961): Use ToIndex for indexes |
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE( |
+ isolate, requestIndex, Object::ToNumber(requestIndex), Nothing<size_t>()); |
+ Handle<Object> offset; |
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, offset, |
+ Object::ToInteger(isolate, requestIndex), |
+ Nothing<size_t>()); |
+ if (!requestIndex->SameValue(*offset)) { |
+ isolate->Throw(*isolate->factory()->NewRangeError( |
+ MessageTemplate::kInvalidAtomicAccessIndex)); |
+ return Nothing<size_t>(); |
+ } |
+ size_t accessIndex; |
+ uint32_t length = typedArray->length_value(); |
+ if (!TryNumberToSize(*requestIndex, &accessIndex) || accessIndex >= length) { |
+ isolate->Throw(*isolate->factory()->NewRangeError( |
+ MessageTemplate::kInvalidAtomicAccessIndex)); |
+ return Nothing<size_t>(); |
+ } |
+ return Just<size_t>(accessIndex); |
+} |
+ |
+#define INTEGER_TYPED_ARRAYS(V) \ |
+ V(Uint8, uint8, UINT8, uint8_t, 1) \ |
+ V(Int8, int8, INT8, int8_t, 1) \ |
+ V(Uint16, uint16, UINT16, uint16_t, 2) \ |
+ V(Int16, int16, INT16, int16_t, 2) \ |
+ V(Uint32, uint32, UINT32, uint32_t, 4) \ |
+ V(Int32, int32, INT32, int32_t, 4) |
+ |
+BUILTIN(AtomicsExchange) { |
+ HandleScope scope(isolate); |
+ Handle<Object> array = args.atOrUndefined(isolate, 1); |
+ Handle<Object> index = args.atOrUndefined(isolate, 2); |
+ Handle<Object> value = args.atOrUndefined(isolate, 3); |
+ |
+ Handle<JSTypedArray> sta; |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( |
+ isolate, sta, ValidateSharedIntegerTypedArray(isolate, array)); |
+ |
+ Maybe<size_t> maybeIndex = ValidateAtomicAccess(isolate, sta, index); |
+ if (maybeIndex.IsNothing()) return isolate->heap()->exception(); |
+ size_t i = maybeIndex.FromJust(); |
+ |
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, |
+ Object::ToInteger(isolate, value)); |
+ |
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) + |
+ NumberToSize(sta->byte_offset()); |
+ |
+ switch (sta->type()) { |
+#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \ |
+ case kExternal##Type##Array: \ |
+ return DoExchange<ctype>(isolate, source, i, value); |
+ |
+ INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
+#undef TYPED_ARRAY_CASE |
+ |
+ default: |
+ break; |
+ } |
+ |
+ UNREACHABLE(); |
+ return isolate->heap()->undefined_value(); |
+} |
+#else |
+void Builtins::Generate_AtomicsExchange(compiler::CodeAssemblerState* state) { |
+ using compiler::Node; |
+ CodeStubAssembler a(state); |
+ Node* array = a.Parameter(1); |
+ Node* index = a.Parameter(2); |
+ Node* value = a.Parameter(3); |
+ Node* context = a.Parameter(4 + 2); |
+ |
+ Node* instance_type; |
+ Node* backing_store; |
+ ValidateSharedTypedArray(&a, array, context, &instance_type, &backing_store); |
+ |
+ Node* index_word32 = ConvertTaggedAtomicIndexToWord32(&a, index, context); |
+ Node* array_length_word32 = a.TruncateTaggedToWord32( |
+ context, a.LoadObjectField(array, JSTypedArray::kLengthOffset)); |
+ ValidateAtomicIndex(&a, index_word32, array_length_word32, context); |
+ Node* index_word = a.ChangeUint32ToWord(index_word32); |
+ |
+ Node* value_integer = a.ToInteger(context, value); |
+ Node* value_word32 = a.TruncateTaggedToWord32(context, value_integer); |
+ |
+ CodeStubAssembler::Label i8(&a), u8(&a), i16(&a), u16(&a), i32(&a), u32(&a), |
+ other(&a); |
+ int32_t case_values[] = { |
+ FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE, |
+ FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE, |
+ }; |
+ CodeStubAssembler::Label* case_labels[] = { |
+ &i8, &u8, &i16, &u16, &i32, &u32, |
+ }; |
+ a.Switch(instance_type, &other, case_values, case_labels, |
+ arraysize(case_labels)); |
+ |
+ a.Bind(&i8); |
+ a.Return(a.SmiFromWord32(a.AtomicExchange(MachineType::Int8(), backing_store, |
+ index_word, value_word32))); |
+ |
+ a.Bind(&u8); |
+ a.Return(a.SmiFromWord32(a.AtomicExchange(MachineType::Uint8(), backing_store, |
+ index_word, value_word32))); |
+ |
+ a.Bind(&i16); |
+ a.Return(a.SmiFromWord32(a.AtomicExchange(MachineType::Int16(), backing_store, |
+ a.WordShl(index_word, 1), |
+ value_word32))); |
+ |
+ a.Bind(&u16); |
+ a.Return(a.SmiFromWord32( |
+ a.AtomicExchange(MachineType::Uint16(), backing_store, |
+ a.WordShl(index_word, 1), value_word32))); |
+ |
+ a.Bind(&i32); |
+ a.Return(a.ChangeInt32ToTagged( |
+ a.AtomicExchange(MachineType::Int32(), backing_store, |
+ a.WordShl(index_word, 2), value_word32))); |
+ |
+ a.Bind(&u32); |
+ a.Return(a.ChangeUint32ToTagged( |
+ a.AtomicExchange(MachineType::Uint32(), backing_store, |
+ a.WordShl(index_word, 2), value_word32))); |
+ |
+ // This shouldn't happen, we've already validated the type. |
+ a.Bind(&other); |
+ a.Return(a.SmiConstant(0)); |
+} |
+#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 |
+ |
} // namespace internal |
} // namespace v8 |