Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #if V8_TARGET_ARCH_ARM | 5 #if V8_TARGET_ARCH_ARM |
| 6 | 6 |
| 7 #include "src/code-stubs.h" | 7 #include "src/code-stubs.h" |
| 8 #include "src/api-arguments.h" | 8 #include "src/api-arguments.h" |
| 9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
| 10 #include "src/bootstrapper.h" | 10 #include "src/bootstrapper.h" |
| (...skipping 5534 matching lines...) | |
| 5545 ExternalReference thunk_ref = | 5545 ExternalReference thunk_ref = |
| 5546 ExternalReference::invoke_accessor_getter_callback(isolate()); | 5546 ExternalReference::invoke_accessor_getter_callback(isolate()); |
| 5547 | 5547 |
| 5548 // +3 is to skip prolog, return address and name handle. | 5548 // +3 is to skip prolog, return address and name handle. |
| 5549 MemOperand return_value_operand( | 5549 MemOperand return_value_operand( |
| 5550 fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize); | 5550 fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize); |
| 5551 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, | 5551 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, |
| 5552 kStackUnwindSpace, NULL, return_value_operand, NULL); | 5552 kStackUnwindSpace, NULL, return_value_operand, NULL); |
| 5553 } | 5553 } |
| 5554 | 5554 |
| 5555 namespace { | |
| 5556 | |
| 5557 void GetTypedArrayBackingStore(MacroAssembler* masm, Register backing_store, | |
| 5558 Register object, Register scratch, | |
| 5559 LowDwVfpRegister double_scratch) { | |
| 5560 Label offset_is_not_smi, done; | |
| 5561 __ ldr(scratch, FieldMemOperand(object, JSTypedArray::kBufferOffset)); | |
| 5562 __ ldr(backing_store, | |
| 5563 FieldMemOperand(scratch, JSArrayBuffer::kBackingStoreOffset)); | |
| 5564 __ ldr(scratch, | |
| 5565 FieldMemOperand(object, JSArrayBufferView::kByteOffsetOffset)); | |
| 5566 __ UntagAndJumpIfNotSmi(scratch, scratch, &offset_is_not_smi); | |
| 5567 // offset is smi | |
| 5568 __ add(backing_store, backing_store, scratch); | |
| 5569 __ jmp(&done); | |
| 5570 | |
| 5571 // offset is a heap number | |
| 5572 __ bind(&offset_is_not_smi); | |
| 5573 __ vldr(double_scratch, scratch, HeapNumber::kValueOffset - kHeapObjectTag); | |
Rodolph Perfetta, 2016/04/07 14:13:50: When you arrive here, scratch has been untagged (U…
binji, 2016/04/08 18:21:42: Done.
| 5574 __ vcvt_u32_f64(double_scratch.low(), double_scratch); | |
| 5575 __ vmov(scratch, double_scratch.low()); | |
| 5576 __ add(backing_store, backing_store, scratch); | |
| 5577 __ bind(&done); | |
| 5578 } | |
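The truncated comment above is presumably about UntagAndJumpIfNotSmi: it shifts scratch right by one even when it takes the branch, so by the time control reaches offset_is_not_smi the register no longer holds a valid tagged HeapNumber pointer for the vldr. A minimal sketch of one possible restructuring, assuming V8's ARM JumpIfNotSmi/SmiUntag helpers (not necessarily the fix that actually landed):

```cpp
__ ldr(scratch, FieldMemOperand(object, JSArrayBufferView::kByteOffsetOffset));
__ JumpIfNotSmi(scratch, &offset_is_not_smi);
__ SmiUntag(scratch);                           // offset is a smi: untag in place
__ add(backing_store, backing_store, scratch);
__ jmp(&done);

__ bind(&offset_is_not_smi);                    // scratch still holds the tagged HeapNumber
__ vldr(double_scratch, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
__ vcvt_u32_f64(double_scratch.low(), double_scratch);
__ vmov(scratch, double_scratch.low());
__ add(backing_store, backing_store, scratch);
__ bind(&done);
```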
| 5579 | |
| 5580 void TypedArrayJumpTable(MacroAssembler* masm, Register object, | |
| 5581 Register scratch, Register scratch2, Label* i8, | |
| 5582 Label* u8, Label* i16, Label* u16, Label* i32, | |
| 5583 Label* u32, Label* u8c) { | |
| 5584 STATIC_ASSERT(FIXED_UINT8_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 1); | |
| 5585 STATIC_ASSERT(FIXED_INT16_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 2); | |
| 5586 STATIC_ASSERT(FIXED_UINT16_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 3); | |
| 5587 STATIC_ASSERT(FIXED_INT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 4); | |
| 5588 STATIC_ASSERT(FIXED_UINT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 5); | |
| 5589 STATIC_ASSERT(FIXED_FLOAT32_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 6); | |
| 5590 STATIC_ASSERT(FIXED_FLOAT64_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 7); | |
| 5591 STATIC_ASSERT(FIXED_UINT8_CLAMPED_ARRAY_TYPE == FIXED_INT8_ARRAY_TYPE + 8); | |
| 5592 | |
| 5593 __ ldr(scratch, FieldMemOperand(object, JSObject::kElementsOffset)); | |
| 5594 __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); | |
| 5595 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | |
| 5596 __ mov(scratch2, Operand(static_cast<uint8_t>(FIXED_INT8_ARRAY_TYPE))); | |
Rodolph Perfetta, 2016/04/07 14:13:50: add/sub can handle any 8-bit constant directly, so…
binji, 2016/04/08 18:21:42: Done.
| 5597 __ sub(scratch, scratch, scratch2, SetCC); | |
| 5598 __ Assert(ge, kOffsetOutOfRange); | |
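Per the comment above, ARM data-processing instructions accept an 8-bit immediate rotated by an even amount, and FIXED_INT8_ARRAY_TYPE fits, so the mov into scratch2 can likely be folded away. A sketch of that suggestion (assumed, not the exact code that landed):

```cpp
// FIXED_INT8_ARRAY_TYPE fits in an ARM 8-bit rotated immediate, so it can be
// encoded directly in the sub and scratch2 becomes unnecessary.
__ sub(scratch, scratch, Operand(static_cast<int32_t>(FIXED_INT8_ARRAY_TYPE)), SetCC);
__ Assert(ge, kOffsetOutOfRange);
```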
| 5599 | |
| 5600 Label abort; | |
| 5601 | |
| 5602 { | |
| 5603 Assembler::BlockConstPoolScope scope(masm); | |
| 5604 __ add(pc, pc, Operand(scratch, LSL, 2)); | |
| 5605 __ nop(); | |
| 5606 __ b(i8); // Int8Array | |
| 5607 __ b(u8); // Uint8Array | |
| 5608 __ b(i16); // Int16Array | |
| 5609 __ b(u16); // Uint16Array | |
| 5610 __ b(i32); // Int32Array | |
| 5611 __ b(u32); // Uint32Array | |
| 5612 __ b(&abort); // Float32Array | |
| 5613 __ b(&abort); // Float64Array | |
| 5614 __ b(u8c); // Uint8ClampedArray | |
| 5615 } | |
| 5616 | |
| 5617 __ bind(&abort); | |
| 5618 __ Abort(kNoReason); | |
| 5619 } | |
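The add(pc, pc, Operand(scratch, LSL, 2)) above is a computed branch table. In ARM state, reading pc yields the address of the current instruction plus 8, so the add already targets two instruction slots ahead; the single nop pads the gap so that scratch == 0 lands exactly on the first branch, and BlockConstPoolScope keeps the assembler from splitting the table with a constant pool. Roughly, the emitted layout is:

```cpp
// add pc, pc, scratch, LSL #2   ; pc reads as (&add + 8), i.e. the b(i8) slot
// nop                           ; padding: the table starts 8 bytes after the add
// b i8                          ; scratch == 0 (FIXED_INT8_ARRAY_TYPE)
// b u8                          ; scratch == 1
// ...                           ; one branch per element kind, in STATIC_ASSERT order
```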
| 5620 | |
| 5621 void ReturnInteger32(MacroAssembler* masm, DwVfpRegister dst, Register value, | |
| 5622 SwVfpRegister single_scratch, Label* use_heap_number) { | |
| 5623 Label not_smi; | |
| 5624 __ TrySmiTag(r0, value, ¬_smi); | |
| 5625 __ Ret(); | |
| 5626 | |
| 5627 __ bind(¬_smi); | |
| 5628 __ vmov(single_scratch, value); | |
| 5629 __ vcvt_f64_s32(dst, single_scratch); | |
| 5630 __ jmp(use_heap_number); | |
| 5631 } | |
| 5632 | |
| 5633 void ReturnUnsignedInteger32(MacroAssembler* masm, DwVfpRegister dst, | |
| 5634 Register value, Register scratch, | |
| 5635 SwVfpRegister single_scratch, | |
| 5636 Label* use_heap_number) { | |
| 5637 Label not_smi; | |
| 5638 __ mov(scratch, Operand(0x40000000U)); | |
Rodolph Perfetta, 2016/04/07 14:13:49: cmp can handle any 8-bit value with an even shift,…
binji, 2016/04/08 18:21:42: Done.
| 5639 __ cmp(value, scratch); | |
| 5640 __ b(cs, ¬_smi); | |
| 5641 __ SmiTag(r0, value); | |
| 5642 __ Ret(); | |
| 5643 | |
| 5644 __ bind(¬_smi); | |
| 5645 __ vmov(single_scratch, value); | |
| 5646 __ vcvt_f64_u32(dst, single_scratch); | |
| 5647 __ jmp(use_heap_number); | |
| 5648 } | |
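Two things are going on in ReturnInteger32/ReturnUnsignedInteger32. First, on 32-bit targets a smi is a 31-bit signed integer, so an unsigned value of 0x40000000 (2^30) or above cannot be smi-tagged and has to take the heap-number path. Second, per the comment above, 0x40000000 is itself an encodable ARM rotated immediate, so the comparison does not need a scratch register. A sketch of that suggestion (assumed):

```cpp
// value >= 2^30 does not fit in a 31-bit signed smi; 0x40000000 encodes
// directly as an ARM immediate, so no scratch register is needed.
__ cmp(value, Operand(0x40000000));
__ b(cs, &not_smi);        // unsigned >=, otherwise fall through and tag
__ SmiTag(r0, value);
__ Ret();
```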
| 5649 | |
| 5650 void ReturnAllocatedHeapNumber(MacroAssembler* masm, DwVfpRegister value, | |
| 5651 Register scratch, Register scratch2, | |
| 5652 Register scratch3) { | |
| 5653 Label call_runtime; | |
| 5654 __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex); | |
| 5655 __ AllocateHeapNumber(r0, scratch, scratch2, scratch3, &call_runtime); | |
| 5656 __ vstr(value, FieldMemOperand(r0, HeapNumber::kValueOffset)); | |
| 5657 __ Ret(); | |
| 5658 | |
| 5659 __ bind(&call_runtime); | |
| 5660 { | |
| 5661 FrameScope scope(masm, StackFrame::INTERNAL); | |
| 5662 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); | |
| 5663 __ vstr(value, FieldMemOperand(r0, HeapNumber::kValueOffset)); | |
| 5664 } | |
| 5665 __ Ret(); | |
| 5666 } | |
| 5667 | |
| 5668 } // anonymous namespace | |
| 5669 | |
| 5670 void AtomicsLoadStub::Generate(MacroAssembler* masm) { | |
| 5671 Register object = r1; | |
| 5672 Register index = r0; // Index is an untagged word32. | |
| 5673 Register backing_store = r2; | |
| 5674 Label i8, u8, i16, u16, i32, u32; | |
| 5675 | |
| 5676 GetTypedArrayBackingStore(masm, backing_store, object, r3, d0); | |
| 5677 TypedArrayJumpTable(masm, object, r3, r4, &i8, &u8, &i16, &u16, &i32, &u32, | |
| 5678 &u8); | |
| 5679 | |
| 5680 __ bind(&i8); | |
| 5681 __ ldrb(r0, MemOperand(backing_store, index)); | |
Rodolph Perfetta, 2016/04/07 14:13:50: if you use ldrsb, then no need to use sxtb below
binji, 2016/04/08 18:21:42: Done.
| 5682 __ dmb(ISH); | |
| 5683 __ sxtb(r0, r0); | |
| 5684 __ SmiTag(r0); | |
| 5685 __ Ret(); | |
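Per the review comment, a sign-extending load removes the need for the separate sxtb, and ldrsh likewise replaces the sxth in the Int16Array case below. A sketch of the Int8Array case with that change (assumed):

```cpp
__ bind(&i8);
__ ldrsb(r0, MemOperand(backing_store, index));  // sign-extending byte load
__ dmb(ISH);                                     // barrier for the seq-cst load
__ SmiTag(r0);
__ Ret();
```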
| 5686 | |
| 5687 __ bind(&u8); | |
| 5688 __ ldrb(r0, MemOperand(backing_store, index)); | |
| 5689 __ dmb(ISH); | |
| 5690 __ SmiTag(r0); | |
| 5691 __ Ret(); | |
| 5692 | |
| 5693 __ bind(&i16); | |
| 5694 __ ldrh(r0, MemOperand(backing_store, index, LSL, 1)); | |
Rodolph Perfetta, 2016/04/07 14:13:49: use ldrsh to avoid the sxth below
binji, 2016/04/08 18:21:42: Done.
| 5695 __ dmb(ISH); | |
| 5696 __ sxth(r0, r0); | |
| 5697 __ SmiTag(r0); | |
| 5698 __ Ret(); | |
| 5699 | |
| 5700 __ bind(&u16); | |
| 5701 __ ldrh(r0, MemOperand(backing_store, index, LSL, 1)); | |
| 5702 __ dmb(ISH); | |
| 5703 __ SmiTag(r0); | |
| 5704 __ Ret(); | |
| 5705 | |
| 5706 Label use_heap_number; | |
| 5707 | |
| 5708 __ bind(&i32); | |
| 5709 __ ldr(r0, MemOperand(backing_store, index, LSL, 2)); | |
| 5710 __ dmb(ISH); | |
| 5711 ReturnInteger32(masm, d0, r0, s2, &use_heap_number); | |
| 5712 | |
| 5713 __ bind(&u32); | |
| 5714 __ ldr(r0, MemOperand(backing_store, index, LSL, 2)); | |
| 5715 __ dmb(ISH); | |
| 5716 ReturnUnsignedInteger32(masm, d0, r0, r1, s2, &use_heap_number); | |
| 5717 | |
| 5718 __ bind(&use_heap_number); | |
| 5719 ReturnAllocatedHeapNumber(masm, d0, r1, r2, r3); | |
| 5720 } | |
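For reference, the load-then-dmb(ISH) pattern emitted for every element size is the usual ARMv7 lowering of a sequentially consistent atomic load, which is what Atomics.load requires. A rough C++ analogue (illustrative only, not V8 code):

```cpp
#include <atomic>
#include <cstdint>

// On ARMv7 this typically compiles to: ldr r0, [r0, r1, lsl #2]; dmb ish; bx lr
int32_t AtomicsLoadInt32(const std::atomic<int32_t>* backing_store, size_t index) {
  return backing_store[index].load(std::memory_order_seq_cst);
}
```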
| 5555 | 5721 |
| 5556 #undef __ | 5722 #undef __ |
| 5557 | 5723 |
| 5558 } // namespace internal | 5724 } // namespace internal |
| 5559 } // namespace v8 | 5725 } // namespace v8 |
| 5560 | 5726 |
| 5561 #endif // V8_TARGET_ARCH_ARM | 5727 #endif // V8_TARGET_ARCH_ARM |