| Index: src/x64/macro-assembler-x64.cc
|
| ===================================================================
|
| --- src/x64/macro-assembler-x64.cc (revision 4703)
|
| +++ src/x64/macro-assembler-x64.cc (working copy)
|
| @@ -90,21 +90,58 @@
|
| bind(&not_in_new_space);
|
| }
|
|
|
| + Label fast;
|
| +
|
| // Compute the page start address from the heap object pointer, and reuse
|
| // the 'object' register for it.
|
| - and_(object, Immediate(~Page::kPageAlignmentMask));
|
| + ASSERT(is_int32(~Page::kPageAlignmentMask));
|
| + and_(object,
|
| + Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
|
| + Register page_start = object;
|
|
|
| - // Compute number of region covering addr. See Page::GetRegionNumberForAddress
|
| - // method for more details.
|
| - and_(addr, Immediate(Page::kPageAlignmentMask));
|
| - shrl(addr, Immediate(Page::kRegionSizeLog2));
|
| + // Compute the bit addr in the remembered set/index of the pointer in the
|
| + // page. Reuse 'addr' as pointer_offset.
|
| + subq(addr, page_start);
|
| + shr(addr, Immediate(kPointerSizeLog2));
|
| + Register pointer_offset = addr;
|
|
|
| - // Set dirty mark for region.
|
| - bts(Operand(object, Page::kDirtyFlagOffset), addr);
|
| + // If the bit offset lies beyond the normal remembered set range, it is in
|
| + // the extra remembered set area of a large object.
|
| + cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize));
|
| + j(below, &fast);
|
| +
|
| + // We have a large object containing pointers. It must be a FixedArray.
|
| +
|
| + // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
|
| + // extra remembered set after the large object.
|
| +
|
| + // Load the array length into 'scratch'.
|
| + movl(scratch,
|
| + Operand(page_start,
|
| + Page::kObjectStartOffset + FixedArray::kLengthOffset));
|
| + Register array_length = scratch;
|
| +
|
| + // Extra remembered set starts right after the large object (a FixedArray), at
|
| + // page_start + kObjectStartOffset + objectSize
|
| + // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
|
| + // Add the delta between the end of the normal RSet and the start of the
|
| + // extra RSet to 'page_start', so that addressing the bit using
|
| + // 'pointer_offset' hits the extra RSet words.
|
| + lea(page_start,
|
| + Operand(page_start, array_length, times_pointer_size,
|
| + Page::kObjectStartOffset + FixedArray::kHeaderSize
|
| + - Page::kRSetEndOffset));
|
| +
|
| + // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
|
| + // to limit code size. We should probably evaluate this decision by
|
| + // measuring the performance of an equivalent implementation using
|
| + // "simpler" instructions
|
| + bind(&fast);
|
| + bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
|
| }
|
|
|
|
|
| -// For page containing |object| mark region covering [object+offset] dirty.
|
| +// Set the remembered set bit for [object+offset].
|
| // object is the object being stored into, value is the object being stored.
|
| // If offset is zero, then the smi_index register contains the array index into
|
| // the elements array represented as a smi. Otherwise it can be used as a
|
| @@ -119,8 +156,9 @@
|
| // registers are rsi.
|
| ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi));
|
|
|
| - // First, check if a write barrier is even needed. The tests below
|
| - // catch stores of Smis and stores into young gen.
|
| + // First, check if a remembered set write is even needed. The tests below
|
| + // catch stores of Smis and stores into young gen (which does not have space
|
| + // for the remembered set bits).
|
| Label done;
|
| JumpIfSmi(value, &done);
|
|
|
| @@ -153,8 +191,8 @@
|
| bind(&okay);
|
| }
|
|
|
| - // Test that the object address is not in the new space. We cannot
|
| - // update page dirty marks for new space pages.
|
| + // Test that the object address is not in the new space. We cannot
|
| + // set remembered set bits in the new space.
|
| InNewSpace(object, scratch, equal, &done);
|
|
|
| // The offset is relative to a tagged or untagged HeapObject pointer,
|
| @@ -163,19 +201,48 @@
|
| ASSERT(IsAligned(offset, kPointerSize) ||
|
| IsAligned(offset + kHeapObjectTag, kPointerSize));
|
|
|
| - Register dst = smi_index;
|
| - if (offset != 0) {
|
| - lea(dst, Operand(object, offset));
|
| + // We use optimized write barrier code if the word being written to is not in
|
| + // a large object page, or is in the first "page" of a large object page.
|
| + // We make sure that an offset is inside the right limits whether it is
|
| + // tagged or untagged.
|
| + if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) {
|
| + // Compute the bit offset in the remembered set, leave it in 'scratch'.
|
| + lea(scratch, Operand(object, offset));
|
| + ASSERT(is_int32(Page::kPageAlignmentMask));
|
| + and_(scratch, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
|
| + shr(scratch, Immediate(kPointerSizeLog2));
|
| +
|
| + // Compute the page address from the heap object pointer, leave it in
|
| + // 'object' (immediate value is sign extended).
|
| + and_(object, Immediate(~Page::kPageAlignmentMask));
|
| +
|
| + // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
|
| + // to limit code size. We should probably evaluate this decision by
|
| + // measuring the performance of an equivalent implementation using
|
| + // "simpler" instructions
|
| + bts(Operand(object, Page::kRSetOffset), scratch);
|
| } else {
|
| - // array access: calculate the destination address in the same manner as
|
| - // KeyedStoreIC::GenerateGeneric.
|
| - SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
|
| - lea(dst, FieldOperand(object,
|
| - index.reg,
|
| - index.scale,
|
| - FixedArray::kHeaderSize));
|
| + Register dst = smi_index;
|
| + if (offset != 0) {
|
| + lea(dst, Operand(object, offset));
|
| + } else {
|
| + // array access: calculate the destination address in the same manner as
|
| + // KeyedStoreIC::GenerateGeneric.
|
| + SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
|
| + lea(dst, FieldOperand(object,
|
| + index.reg,
|
| + index.scale,
|
| + FixedArray::kHeaderSize));
|
| + }
|
| + // If we are already generating a shared stub, not inlining the
|
| + // record write code isn't going to save us any memory.
|
| + if (generating_stub()) {
|
| + RecordWriteHelper(object, dst, scratch);
|
| + } else {
|
| + RecordWriteStub stub(object, dst, scratch);
|
| + CallStub(&stub);
|
| + }
|
| }
|
| - RecordWriteHelper(object, dst, scratch);
|
|
|
| bind(&done);
|
|
|
| @@ -577,18 +644,6 @@
|
| }
|
|
|
|
|
| -void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
|
| - Register src,
|
| - int power) {
|
| - ASSERT((0 <= power) && (power < 32));
|
| - if (dst.is(src)) {
|
| - shr(dst, Immediate(power + kSmiShift));
|
| - } else {
|
| - UNIMPLEMENTED(); // Not used.
|
| - }
|
| -}
|
| -
|
| -
|
| Condition MacroAssembler::CheckSmi(Register src) {
|
| ASSERT_EQ(0, kSmiTag);
|
| testb(src, Immediate(kSmiTagMask));
|
| @@ -2552,7 +2607,7 @@
|
| movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
|
| Integer32ToSmi(scratch1, length);
|
| movq(FieldOperand(result, String::kLengthOffset), scratch1);
|
| - movq(FieldOperand(result, String::kHashFieldOffset),
|
| + movl(FieldOperand(result, String::kHashFieldOffset),
|
| Immediate(String::kEmptyHashField));
|
| }
|
|
|
| @@ -2590,7 +2645,7 @@
|
| movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
|
| Integer32ToSmi(scratch1, length);
|
| movq(FieldOperand(result, String::kLengthOffset), scratch1);
|
| - movq(FieldOperand(result, String::kHashFieldOffset),
|
| + movl(FieldOperand(result, String::kHashFieldOffset),
|
| Immediate(String::kEmptyHashField));
|
| }
|
|
|
|
|