Index: src/mips/macro-assembler-mips.cc |
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc |
index 02f7bee24811f9280148137a2df53782e9508855..a188cab3021a953a9b80870d4a9120a3c45dd504 100644 |
--- a/src/mips/macro-assembler-mips.cc |
+++ b/src/mips/macro-assembler-mips.cc |
@@ -6057,28 +6057,39 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg, |
bind(&done); |
} |
- |
-void MacroAssembler::TestJSArrayForAllocationMemento( |
- Register receiver_reg, |
- Register scratch_reg, |
- Label* no_memento_found, |
- Condition cond, |
- Label* allocation_memento_present) { |
- ExternalReference new_space_start = |
- ExternalReference::new_space_start(isolate()); |
+void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg, |
+ Register scratch_reg, |
+ Label* no_memento_found) { |
+ Label map_check; |
ExternalReference new_space_allocation_top = |
ExternalReference::new_space_allocation_top_address(isolate()); |
- Addu(scratch_reg, receiver_reg, |
- Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag)); |
- Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start)); |
+ const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag; |
+ const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize; |
+ |
+ // Bail out if the object sits on the page boundary as no memento can follow |
+ // and we cannot touch the memory following it. |
+ Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset)); |
+ Xor(scratch_reg, scratch_reg, Operand(receiver_reg)); |
+ Branch(no_memento_found, gt, scratch_reg, Operand(Page::kPageSize)); |
+ // Bail out if the object is not in new space. |
+ Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset)); |
+ JumpIfNotInNewSpace(scratch_reg, scratch_reg, no_memento_found); |
+ // If the object is in new space, we need to check whether it is on the same |
+ // page as the current top. |
+ Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset)); |
+ Xor(scratch_reg, scratch_reg, Operand(new_space_allocation_top)); |
+ And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask)); |
+ Branch(&map_check, ne, scratch_reg, Operand(zero_reg)); |
+ // Otherwise, we have to check whether we are still below top, to ensure that |
+ // we are not checking against a stale memento. |
+ Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset)); |
li(at, Operand(new_space_allocation_top)); |
lw(at, MemOperand(at)); |
Branch(no_memento_found, gt, scratch_reg, Operand(at)); |
- lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize)); |
- if (allocation_memento_present) { |
[Review-tool comment interleaved into this hunk by the code-review system — not part of the patch itself. Michael Lippautz, 2016/03/30 15:22:22: "The way this was implemented was slightly better (…" — remainder of the comment truncated in extraction.] |
- Branch(allocation_memento_present, cond, scratch_reg, |
- Operand(isolate()->factory()->allocation_memento_map())); |
- } |
+ bind(&map_check); |
+ lw(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset)); |
+ Branch(no_memento_found, ne, scratch_reg, |
+ Operand(isolate()->factory()->allocation_memento_map())); |
} |