Index: src/arm/code-stubs-arm.cc
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 69229a056bac4226f04a9f642bb62e06e324befb..d6afbf1d5b4d858d9f496b1d905c120f1171edfb 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -5444,6 +5444,206 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
 }
+static void GetTypedArrayBackingStore(MacroAssembler* masm,
Jarin
2016/01/28 08:11:12
Nit: Anonymous namespace for all the static methods?
binji
2016/01/28 16:20:00
Happy to do it, but it doesn't seem to be consistently used in this file.
Jarin
2016/02/01 07:42:43
I see your point, but I still think new code should use anonymous namespaces.

+                                      Register backing_store,
+                                      Register object,
+                                      Register scratch,
+                                      LowDwVfpRegister double_scratch) {
+  Label offset_is_not_smi, done;
+  __ ldr(scratch, FieldMemOperand(object, JSTypedArray::kBufferOffset));
+  __ ldr(backing_store,
+         FieldMemOperand(scratch, JSArrayBuffer::kBackingStoreOffset));
+  __ ldr(scratch,
+         FieldMemOperand(object, JSArrayBufferView::kByteOffsetOffset));
+  __ UntagAndJumpIfNotSmi(scratch, scratch, &offset_is_not_smi);
+  // Offset is smi.
Jarin
2016/01/28 08:11:12
Nit: If a comment is a sentence, it should start with a capital letter and end with a period.
binji
2016/01/28 16:20:00
Done.

+  __ add(backing_store, backing_store, scratch);
+  __ jmp(&done);
+
+  // Offset is a heap number.
+  __ bind(&offset_is_not_smi);
+  __ vldr(double_scratch, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
+  __ vcvt_u32_f64(double_scratch.low(), double_scratch);
+  __ vmov(scratch, double_scratch.low());
+  __ add(backing_store, backing_store, scratch);
+  __ bind(&done);
+}
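
A minimal sketch of the anonymous-namespace form Jarin asks about in the thread above (illustrative only; the uploaded patch still uses plain static functions):

  namespace {

  void GetTypedArrayBackingStore(MacroAssembler* masm, Register backing_store,
                                 Register object, Register scratch,
                                 LowDwVfpRegister double_scratch) {
    // Same body as above; internal linkage now comes from the enclosing
    // namespace, so the static keyword is dropped.
  }

  }  // namespace
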
+
+
+static void TaggedToInteger32(MacroAssembler* masm,
+                              Register value,
+                              LowDwVfpRegister double_scratch) {
+  Label not_smi, done;
+  __ UntagAndJumpIfNotSmi(value, value, &not_smi);
+  __ jmp(&done);
+
+  __ bind(&not_smi);
+  __ vldr(double_scratch, value, HeapNumber::kValueOffset - kHeapObjectTag);
+  // Convert the heap number to an untagged integer; the raw low word of the
+  // double is not the integer value.
+  __ vcvt_s32_f64(double_scratch.low(), double_scratch);
+  __ vmov(value, double_scratch.low());
+  __ bind(&done);
+}
+
+
+static void TypedArrayJumpTable(MacroAssembler* masm,
+                                Register object,
+                                Register scratch,
+                                Register scratch2,
+                                Label* i8,
+                                Label* u8,
+                                Label* i16,
+                                Label* u16,
+                                Label* i32,
+                                Label* u32,
+                                Label* u8c) {
+  STATIC_ASSERT(FIXED_UINT8_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 1);
+  STATIC_ASSERT(FIXED_INT16_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 2);
+  STATIC_ASSERT(FIXED_UINT16_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 3);
+  STATIC_ASSERT(FIXED_INT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 4);
+  STATIC_ASSERT(FIXED_UINT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 5);
+  STATIC_ASSERT(FIXED_FLOAT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 6);
+  STATIC_ASSERT(FIXED_FLOAT64_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 7);
+  STATIC_ASSERT(FIXED_UINT8_CLAMPED_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 8);
Jarin
2016/01/28 08:11:12
I like this. Thanks!
binji
2016/01/28 16:20:00
Yeah, it's a bit paranoid, but doesn't really hurt anything.
Jarin
2016/02/01 07:42:43
I actually like that it explicitly states what you are assuming.

+
+  __ ldr(scratch, FieldMemOperand(object, JSObject::kElementsOffset));
+  __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+  __ mov(scratch2, Operand(static_cast<uint8_t>(FIXED_INT8_ARRAY_TYPE)));
+  __ sub(scratch, scratch, scratch2, SetCC);
+  __ Assert(ge, kOffsetOutOfRange);
+
+  Label abort;
+
+  {
+    Assembler::BlockConstPoolScope scope(masm);
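+    // Reading pc on ARM yields the address of the current instruction plus
+    // 8, so together with the nop below a scratch value of 0 branches to
+    // the first entry of the table.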
+    __ add(pc, pc, Operand(scratch, LSL, 2));
+    __ nop();
+    __ b(i8);      // Int8Array
+    __ b(u8);      // Uint8Array
+    __ b(i16);     // Int16Array
+    __ b(u16);     // Uint16Array
+    __ b(i32);     // Int32Array
+    __ b(u32);     // Uint32Array
+    __ b(&abort);  // Float32Array
+    __ b(&abort);  // Float64Array
+    __ b(u8c);     // Uint8ClampedArray
Jarin
2016/01/28 08:11:12
This should be u8c, no? (Also on all other platforms.)
binji
2016/01/28 16:20:00
oops. In this case it is fine (because AtomicLoad behaves the same for u8 and u8c), but fixed.

+  }
+
+  __ bind(&abort);
+  __ Abort(kNoReason);
+}
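
For readers less used to computed branches: the table above is the hand-rolled form of a switch over the fixed-array instance type. A rough C++ sketch of the dispatch it performs (hypothetical names, not part of the patch):

  // Stand-ins for the FIXED_*_ARRAY_TYPE constants, in the order the
  // STATIC_ASSERTs above pin down.
  enum ElementType { kI8, kU8, kI16, kU16, kI32, kU32, kF32, kF64, kU8C };

  const char* AtomicsLoadCase(ElementType t) {
    switch (t) {
      case kI8:  return "i8";
      case kU8:  return "u8";
      case kI16: return "i16";
      case kU16: return "u16";
      case kI32: return "i32";
      case kU32: return "u32";
      case kU8C: return "u8c";
      default:   return "abort";  // Float32Array/Float64Array are invalid here.
    }
  }
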
+
+
+static void ReturnInteger32(MacroAssembler* masm,
+                            DwVfpRegister dst,
+                            Register value,
+                            SwVfpRegister single_scratch,
+                            Label* use_heap_number) {
+  Label not_smi;
+  __ TrySmiTag(r0, value, &not_smi);
+  __ Ret();
+
+  __ bind(&not_smi);
+  __ vmov(single_scratch, value);
+  __ vcvt_f64_s32(dst, single_scratch);
+  __ jmp(use_heap_number);
+}
+
+
+static void ReturnUnsignedInteger32(MacroAssembler* masm,
+                                    DwVfpRegister dst,
+                                    Register value,
+                                    Register scratch,
+                                    SwVfpRegister single_scratch,
+                                    Label* use_heap_number) {
+  Label not_smi;
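+  // On 32-bit targets a smi has a 31-bit signed payload, so an unsigned
+  // value fits only if it is below 2^30 (0x40000000); anything larger is
+  // boxed in a heap number.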
+  __ mov(scratch, Operand(0x40000000U));
+  __ cmp(value, scratch);
+  __ b(cs, &not_smi);
+  __ SmiTag(r0, value);
+  __ Ret();
+
+  __ bind(&not_smi);
+  __ vmov(single_scratch, value);
+  __ vcvt_f64_u32(dst, single_scratch);
+  __ jmp(use_heap_number);
+}
+
+
+static void ReturnAllocatedHeapNumber(MacroAssembler* masm,
+                                      DwVfpRegister value,
+                                      Register scratch,
+                                      Register scratch2,
+                                      Register scratch3) {
+  Label call_runtime;
+  __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
+  __ AllocateHeapNumber(r0, scratch, scratch2, scratch3, &call_runtime);
+  __ vstr(value, FieldMemOperand(r0, HeapNumber::kValueOffset));
+  __ Ret();
+
+  __ bind(&call_runtime);
+  {
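+    // SaveDoubles preserves the double registers (including |value|)
+    // across the runtime call; the allocated HeapNumber comes back in r0.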
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+    __ vstr(value, FieldMemOperand(r0, HeapNumber::kValueOffset));
+  }
+  __ Ret();
+}
+
+
+void AtomicsLoadStub::Generate(MacroAssembler* masm) {
+  Register object = r1;
+  Register backing_store = r2;
+  Register index = r0;
+  Label i8, u8, i16, u16, i32, u32;
+
+  GetTypedArrayBackingStore(masm, backing_store, object, r3, d0);
+  TaggedToInteger32(masm, index, d0);
+  TypedArrayJumpTable(masm, object, r3, r4, &i8, &u8, &i16, &u16, &i32, &u32,
+                      &u8);
+
+  __ bind(&i8);
+  __ ldrb(r0, MemOperand(backing_store, index));
+  __ dmb(ISH);
+  __ sxtb(r0, r0);
+  __ SmiTag(r0);
+  __ Ret();
+
+  __ bind(&u8);
+  __ ldrb(r0, MemOperand(backing_store, index));
+  __ dmb(ISH);
+  __ SmiTag(r0);
+  __ Ret();
+
+  __ bind(&i16);
+  __ ldrh(r0, MemOperand(backing_store, index, LSL, 1));
+  __ dmb(ISH);
+  __ sxth(r0, r0);
+  __ SmiTag(r0);
+  __ Ret();
+
+  __ bind(&u16);
+  __ ldrh(r0, MemOperand(backing_store, index, LSL, 1));
+  __ dmb(ISH);
+  __ SmiTag(r0);
+  __ Ret();
+
+  Label use_heap_number;
+
+  __ bind(&i32);
+  __ ldr(r0, MemOperand(backing_store, index, LSL, 2));
+  __ dmb(ISH);
+  ReturnInteger32(masm, d0, r0, s2, &use_heap_number);
+
+  __ bind(&u32);
+  __ ldr(r0, MemOperand(backing_store, index, LSL, 2));
+  __ dmb(ISH);
+  ReturnUnsignedInteger32(masm, d0, r0, r1, s2, &use_heap_number);
+
+  __ bind(&use_heap_number);
+  ReturnAllocatedHeapNumber(masm, d0, r1, r2, r3);
+}
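
Aside (not part of the patch): each ldr/dmb ISH pair above is the standard ARMv7 lowering of a sequentially consistent atomic load. In portable C++ terms, the i8 and u32 cases are roughly the following sketch (hypothetical helper names):

  #include <atomic>
  #include <cstdint>

  // Seq_cst load of one element, then widening, as the stub does before
  // smi-tagging or boxing the result.
  int32_t LoadI8(const std::atomic<int8_t>* elements, size_t index) {
    return elements[index].load(std::memory_order_seq_cst);
  }

  uint32_t LoadU32(const std::atomic<uint32_t>* elements, size_t index) {
    return elements[index].load(std::memory_order_seq_cst);
  }
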
+
+
 #undef __
 }  // namespace internal