Chromium Code Reviews

Index: src/arm64/code-stubs-arm64.cc
diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc
index f0265c73cee09edb9d889b803cf853f73edcc0fd..ec017803b4d1f8f751ba46d748b840eff85f36eb 100644
--- a/src/arm64/code-stubs-arm64.cc
+++ b/src/arm64/code-stubs-arm64.cc
@@ -5939,6 +5939,149 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
                                   return_value_operand, NULL);
 }
+namespace {
+
+void GetTypedArrayBackingStore(MacroAssembler* masm, Register backing_store,
+                               Register object, Register scratch,
+                               FPRegister double_scratch) {
+  Label offset_is_not_smi, done;
+  __ ldr(scratch, FieldMemOperand(object, JSTypedArray::kBufferOffset));
|
Rodolph Perfetta
2016/04/07 14:13:50
on arm64 the convention is to use the macro assembler.
binji
2016/04/08 18:21:42
Done.
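
For context: in the arm64 port, lowercase mnemonics such as __ ldr call the raw Assembler, while the capitalised MacroAssembler wrappers (__ Ldr and friends) add extra checking and may expand to more than one instruction. A minimal sketch of the spelling change the reviewer is asking for on the line above:

    // Raw assembler call, as written in this patch set:
    __ ldr(scratch, FieldMemOperand(object, JSTypedArray::kBufferOffset));
    // MacroAssembler spelling that the arm64 convention expects:
    __ Ldr(scratch, FieldMemOperand(object, JSTypedArray::kBufferOffset));
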
+  __ ldr(backing_store,
+         FieldMemOperand(scratch, JSArrayBuffer::kBackingStoreOffset));
+  __ ldr(scratch,
+         FieldMemOperand(object, JSArrayBufferView::kByteOffsetOffset));
+  __ JumpIfNotSmi(scratch, &offset_is_not_smi);
+  // offset is smi
+  __ SmiUntag(scratch);

Rodolph Perfetta
2016/04/07 14:13:50
you can combine this with the line below: __ Ad
binji
2016/04/08 18:21:42
Done.
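
A sketch of the combination being suggested, assuming the arm64 assembler's Operand::UntagSmi helper (which folds the untagging shift into the add's operand):

    // Before: untag the byte offset, then add it in.
    __ SmiUntag(scratch);
    __ Add(backing_store, backing_store, scratch);
    // After: one Add with a shifted operand does both.
    __ Add(backing_store, backing_store, Operand::UntagSmi(scratch));
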
+  __ add(backing_store, backing_store, scratch);
+  __ jmp(&done);
+
+  // offset is a heap number
+  __ bind(&offset_is_not_smi);
+  __ Ldr(double_scratch, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+  __ Fcvtzu(scratch, double_scratch);
+  __ add(backing_store, backing_store, scratch);
+  __ bind(&done);
+}
+
+void TypedArrayJumpTable(MacroAssembler* masm, Register object,
+                         Register scratch, Register scratch2, Label* i8,
+                         Label* u8, Label* i16, Label* u16, Label* i32,
+                         Label* u32, Label* u8c) {
+  STATIC_ASSERT(FIXED_UINT8_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 1);
+  STATIC_ASSERT(FIXED_INT16_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 2);
+  STATIC_ASSERT(FIXED_UINT16_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 3);
+  STATIC_ASSERT(FIXED_INT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 4);
+  STATIC_ASSERT(FIXED_UINT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 5);
+  STATIC_ASSERT(FIXED_FLOAT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 6);
+  STATIC_ASSERT(FIXED_FLOAT64_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 7);
+  STATIC_ASSERT(FIXED_UINT8_CLAMPED_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 8);
+
+  __ ldr(scratch, FieldMemOperand(object, JSObject::kElementsOffset));
+  __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+  __ Mov(scratch2, static_cast<uint8_t>(FIXED_INT8_ARRAY_TYPE));

Rodolph Perfetta
2016/04/07 14:13:50
this can be merged into the sub below
binji
2016/04/08 18:21:42
Done.
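
A sketch of that merge: the MacroAssembler takes immediates directly, so the Mov into scratch2 is unnecessary here:

    // Before: materialise the constant, then subtract.
    __ Mov(scratch2, static_cast<uint8_t>(FIXED_INT8_ARRAY_TYPE));
    __ subs(scratch, scratch, Operand(scratch2));
    // After: subtract the immediate in one step.
    __ Subs(scratch, scratch, Operand(FIXED_INT8_ARRAY_TYPE));
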
+  __ subs(scratch, scratch, Operand(scratch2));
+  __ Assert(ge, kOffsetOutOfRange);
+
+  Label abort;
+  Label table;
+
+  __ Mov(scratch2, scratch);

Rodolph Perfetta
2016/04/07 14:13:50
You don't need this, below simply adr into scratch
binji
2016/04/08 18:21:42
Done.
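
A sketch of the reviewer's suggestion: compute the table address into scratch2 so the index never needs to be copied aside (assuming nothing else holds scratch2 live at this point):

    // Before: save the index, then clobber it with the table address.
    __ Mov(scratch2, scratch);
    __ Adr(scratch, &table);
    __ Add(scratch, scratch, Operand(scratch2, UXTW, 2));
    // After: adr into scratch2 and index with scratch directly.
    __ Adr(scratch2, &table);
    __ Add(scratch, scratch2, Operand(scratch, UXTW, 2));
    __ Br(scratch);

The UXTW #2 scaling works because each slot in the table below is a single 4-byte b instruction.
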
+  __ Adr(scratch, &table);
+  __ Add(scratch, scratch, Operand(scratch2, UXTW, 2));
+  __ Br(scratch);
+
+  __ StartBlockPools();
+  __ Bind(&table);
+  __ b(i8);      // Int8Array
+  __ b(u8);      // Uint8Array
+  __ b(i16);     // Int16Array
+  __ b(u16);     // Uint16Array
+  __ b(i32);     // Int32Array
+  __ b(u32);     // Uint32Array
+  __ b(&abort);  // Float32Array
+  __ b(&abort);  // Float64Array
+  __ b(u8c);     // Uint8ClampedArray
+  __ EndBlockPools();
+
+  __ bind(&abort);
+  __ Abort(kNoReason);
+}
+
+void ReturnUnsignedInteger32(MacroAssembler* masm, FPRegister dst,
+                             Register value, Register scratch,
+                             Register scratch2, Register scratch3) {
+  Label not_smi, call_runtime;
+  __ Mov(scratch, 0x40000000U);

Rodolph Perfetta
2016/04/07 14:13:50
on 64-bit platforms Smis are signed 32-bit integers
binji
2016/04/08 18:21:42
Done.
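
The reviewer's point: with 32-bit Smis (the 64-bit configuration, cf. the SmiValuesAre32Bits() DCHECK later in this stub), every unsigned value up to Smi::kMaxValue = 2^31 - 1 can be tagged directly, so comparing against 0x40000000 (2^30) sends values to heap-number allocation that could have stayed on the fast path. A sketch of the wider check:

    // Only uint32 values with the top bit set need a HeapNumber.
    __ Cmp(value, Operand(Smi::kMaxValue));
    __ B(hi, &not_smi);
    __ SmiTag(x0, value);
    __ Ret();
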
+  __ Cmp(value, scratch);
+  __ B(hs, &not_smi);
+  __ SmiTag(x0, value);
+  __ Ret();
+
+  __ bind(&not_smi);
+  __ ucvtf(dst, value);
+  __ AllocateHeapNumber(x0, &call_runtime, scratch, scratch2, dst);
+  __ Ret();
+
+  __ bind(&call_runtime);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+    // Store the converted double, which CallRuntimeSaveDoubles preserves;
+    // the integer register was clobbered by the runtime call.
+    __ Str(dst, FieldMemOperand(x0, HeapNumber::kValueOffset));
+  }
+  __ Ret();
+}
+
+}  // anonymous namespace
+
+void AtomicsLoadStub::Generate(MacroAssembler* masm) {
+  Register object = x1;
+  Register index = x0;  // Index is an untagged word32.
+  Register backing_store = x2;
+  Label i8, u8, i16, u16, i32, u32;
+
+  GetTypedArrayBackingStore(masm, backing_store, object, x3, d0);
+  TypedArrayJumpTable(masm, object, x3, x4, &i8, &u8, &i16, &u16, &i32, &u32,
+                      &u8);
+
+  __ bind(&i8);
+  __ ldrsb(x0, MemOperand(backing_store, index));
+  __ dmb(InnerShareable, BarrierAll);
+  __ SmiTag(x0);
+  __ Ret();
+
+  __ bind(&u8);
+  __ ldrb(x0, MemOperand(backing_store, index));
+  __ dmb(InnerShareable, BarrierAll);
+  __ SmiTag(x0);
+  __ Ret();
+
+  __ bind(&i16);
+  __ ldrsh(x0, MemOperand(backing_store, index, UXTW, 1));
+  __ dmb(InnerShareable, BarrierAll);
+  __ SmiTag(x0);
+  __ Ret();
+
+  __ bind(&u16);
+  __ ldrh(x0, MemOperand(backing_store, index, UXTW, 1));
+  __ dmb(InnerShareable, BarrierAll);
+  __ SmiTag(x0);
+  __ Ret();
+
+  __ bind(&i32);
+  __ ldrsw(x0, MemOperand(backing_store, index, UXTW, 2));
+  __ dmb(InnerShareable, BarrierAll);
+  DCHECK(SmiValuesAre32Bits());
+  __ SmiTag(x0);
+  __ Ret();
+
+  __ bind(&u32);
+  __ ldr(w0, MemOperand(backing_store, index, UXTW, 2));
+  __ dmb(InnerShareable, BarrierAll);
+  __ uxtw(x0, x0);
+  ReturnUnsignedInteger32(masm, d0, x0, x1, x2, x3);
+}
 #undef __