| Index: src/x64/macro-assembler-x64.cc
|
| diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
|
| index 065b61693f20c22360a4b98915b2ed15f454913b..4e0e01e0fd7bdaf1a5f037e62b108aeaaf3bc096 100644
|
| --- a/src/x64/macro-assembler-x64.cc
|
| +++ b/src/x64/macro-assembler-x64.cc
|
| @@ -90,58 +90,21 @@ void MacroAssembler::RecordWriteHelper(Register object,
|
|   bind(&not_in_new_space);
|
| }
|
|
|
| - Label fast;
|
| -
|
| // Compute the page start address from the heap object pointer, and reuse
|
| // the 'object' register for it.
|
| - ASSERT(is_int32(~Page::kPageAlignmentMask));
|
| - and_(object,
|
| - Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
|
| - Register page_start = object;
|
| -
|
| - // Compute the bit addr in the remembered set/index of the pointer in the
|
| - // page. Reuse 'addr' as pointer_offset.
|
| - subq(addr, page_start);
|
| - shr(addr, Immediate(kPointerSizeLog2));
|
| - Register pointer_offset = addr;
|
| -
|
| - // If the bit offset lies beyond the normal remembered set range, it is in
|
| - // the extra remembered set area of a large object.
|
| - cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize));
|
| - j(below, &fast);
|
| -
|
| - // We have a large object containing pointers. It must be a FixedArray.
|
| -
|
| - // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
|
| - // extra remembered set after the large object.
|
| -
|
| - // Load the array length into 'scratch'.
|
| - movl(scratch,
|
| - Operand(page_start,
|
| - Page::kObjectStartOffset + FixedArray::kLengthOffset));
|
| - Register array_length = scratch;
|
| -
|
| - // Extra remembered set starts right after the large object (a FixedArray), at
|
| - // page_start + kObjectStartOffset + objectSize
|
| - // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
|
| - // Add the delta between the end of the normal RSet and the start of the
|
| - // extra RSet to 'page_start', so that addressing the bit using
|
| - // 'pointer_offset' hits the extra RSet words.
|
| - lea(page_start,
|
| - Operand(page_start, array_length, times_pointer_size,
|
| - Page::kObjectStartOffset + FixedArray::kHeaderSize
|
| - - Page::kRSetEndOffset));
|
| -
|
| - // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
|
| - // to limit code size. We should probably evaluate this decision by
|
| - // measuring the performance of an equivalent implementation using
|
| - // "simpler" instructions
|
| - bind(&fast);
|
| - bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
|
| -}
|
| -
|
| -
|
| -// Set the remembered set bit for [object+offset].
|
| + and_(object, Immediate(~Page::kPageAlignmentMask));
|
| +
|
| +  // Compute the number of the region covering addr; see
|
| +  // Page::GetRegionNumberForAddress for details.
|
| + and_(addr, Immediate(Page::kPageAlignmentMask));
|
| + shrl(addr, Immediate(Page::kRegionSizeLog2));
|
| +
|
| + // Set dirty mark for region.
|
| + bts(Operand(object, Page::kDirtyFlagOffset), addr);
|
| +}
|
| +
|
| +
|
| +// For the page containing |object|, mark the region covering [object+offset] dirty.
|
| // object is the object being stored into, value is the object being stored.
|
| // If offset is zero, then the smi_index register contains the array index into
|
| // the elements array represented as a smi. Otherwise it can be used as a
|
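| The hunk above collapses the old remembered-set bookkeeping into three
| instructions: mask the object down to its page start, turn the slot address
| into a region number, and bts the matching bit in the page's dirty-marks
| word. A minimal C++ sketch of that arithmetic follows; the page size, region
| size, and one-word dirty-marks layout are assumptions chosen to match the
| 32-bit shrl/bts pair, not values taken from this patch.
|
| #include <cstdint>
|
| // Assumed: 8KB pages split into 32 regions of 256 bytes, so the dirty
| // marks for one page fit in a single 32-bit word.
| constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << 13) - 1;
| constexpr int kRegionSizeLog2 = 8;
|
| // and_(object, Immediate(~Page::kPageAlignmentMask))
| inline uintptr_t PageStart(uintptr_t object) {
|   return object & ~kPageAlignmentMask;
| }
|
| // and_(addr, Immediate(Page::kPageAlignmentMask));
| // shrl(addr, Immediate(Page::kRegionSizeLog2));
| inline uint32_t RegionNumber(uintptr_t addr) {
|   return static_cast<uint32_t>((addr & kPageAlignmentMask) >> kRegionSizeLog2);
| }
|
| // bts(Operand(object, Page::kDirtyFlagOffset), addr): one bit per region.
| inline void MarkRegionDirty(uint32_t* dirty_marks, uintptr_t addr) {
|   *dirty_marks |= uint32_t{1} << RegionNumber(addr);
| }
|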
| @@ -156,9 +119,8 @@ void MacroAssembler::RecordWrite(Register object,
|
| // registers are rsi.
|
| ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi));
|
|
|
| - // First, check if a remembered set write is even needed. The tests below
|
| - // catch stores of Smis and stores into young gen (which does not have space
|
| - // for the remembered set bits).
|
| +  // First, check if a write barrier is even needed. The tests below
|
| +  // catch stores of Smis and stores into the young generation.
|
| Label done;
|
| JumpIfSmi(value, &done);
|
|
|
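| JumpIfSmi relies on the tag scheme this file asserts further down in
| CheckSmi (ASSERT_EQ(0, kSmiTag)): a value is a smi iff its low tag bit is
| clear. A minimal sketch of that predicate:
|
| #include <cstdint>
|
| constexpr intptr_t kSmiTag = 0;
| constexpr intptr_t kSmiTagMask = 1;
|
| // Model of testb(src, Immediate(kSmiTagMask)): smi iff the tag bit is 0.
| inline bool IsSmi(intptr_t value) {
|   return (value & kSmiTagMask) == kSmiTag;
| }
|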
| @@ -191,8 +153,8 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
|
| bind(&okay);
|
| }
|
|
|
| - // Test that the object address is not in the new space. We cannot
|
| - // set remembered set bits in the new space.
|
| + // Test that the object address is not in the new space. We cannot
|
| + // update page dirty marks for new space pages.
|
| InNewSpace(object, scratch, equal, &done);
|
|
|
| // The offset is relative to a tagged or untagged HeapObject pointer,
|
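| InNewSpace itself is not part of this patch; conventionally it masks the
| address and compares against the start of new space. A sketch under that
| assumption, with hypothetical constants standing in for the
| ExternalReferences the real code would load:
|
| #include <cstdint>
|
| constexpr uintptr_t kNewSpaceMask  = ~((uintptr_t{1} << 24) - 1);  // hypothetical
| constexpr uintptr_t kNewSpaceStart = uintptr_t{1} << 32;           // hypothetical
|
| // Model of InNewSpace(object, scratch, equal, &done): the branch to &done
| // is taken when the masked address equals the new-space start.
| inline bool InNewSpace(uintptr_t addr) {
|   return (addr & kNewSpaceMask) == kNewSpaceStart;
| }
|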
| @@ -201,48 +163,19 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
|
| ASSERT(IsAligned(offset, kPointerSize) ||
|
| IsAligned(offset + kHeapObjectTag, kPointerSize));
|
|
|
| - // We use optimized write barrier code if the word being written to is not in
|
| - // a large object page, or is in the first "page" of a large object page.
|
| - // We make sure that an offset is inside the right limits whether it is
|
| - // tagged or untagged.
|
| - if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) {
|
| - // Compute the bit offset in the remembered set, leave it in 'scratch'.
|
| - lea(scratch, Operand(object, offset));
|
| - ASSERT(is_int32(Page::kPageAlignmentMask));
|
| - and_(scratch, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
|
| - shr(scratch, Immediate(kPointerSizeLog2));
|
| -
|
| - // Compute the page address from the heap object pointer, leave it in
|
| - // 'object' (immediate value is sign extended).
|
| - and_(object, Immediate(~Page::kPageAlignmentMask));
|
| -
|
| - // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
|
| - // to limit code size. We should probably evaluate this decision by
|
| - // measuring the performance of an equivalent implementation using
|
| - // "simpler" instructions
|
| - bts(Operand(object, Page::kRSetOffset), scratch);
|
| + Register dst = smi_index;
|
| + if (offset != 0) {
|
| + lea(dst, Operand(object, offset));
|
| } else {
|
| - Register dst = smi_index;
|
| - if (offset != 0) {
|
| - lea(dst, Operand(object, offset));
|
| - } else {
|
| - // array access: calculate the destination address in the same manner as
|
| - // KeyedStoreIC::GenerateGeneric.
|
| - SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
|
| - lea(dst, FieldOperand(object,
|
| - index.reg,
|
| - index.scale,
|
| - FixedArray::kHeaderSize));
|
| - }
|
| - // If we are already generating a shared stub, not inlining the
|
| - // record write code isn't going to save us any memory.
|
| - if (generating_stub()) {
|
| - RecordWriteHelper(object, dst, scratch);
|
| - } else {
|
| - RecordWriteStub stub(object, dst, scratch);
|
| - CallStub(&stub);
|
| - }
|
| +  // Array access: calculate the destination address in the same manner as
|
| + // KeyedStoreIC::GenerateGeneric.
|
| + SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
|
| + lea(dst, FieldOperand(object,
|
| + index.reg,
|
| + index.scale,
|
| + FixedArray::kHeaderSize));
|
| }
|
| + RecordWriteHelper(object, dst, scratch);
|
|
|
| bind(&done);
|
|
|
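| In the smi_index path above, the destination slot is computed the way
| KeyedStoreIC::GenerateGeneric computes it: untag the smi, scale by the
| pointer size, add the FixedArray header, and compensate for the heap
| object tag. A sketch assuming the x64 smi encoding of this era (payload in
| the upper 32 bits) and an assumed 16-byte FixedArray header:
|
| #include <cstdint>
|
| constexpr int kHeapObjectTag = 1;
| constexpr int kSmiShift = 32;              // assumed x64 smi encoding
| constexpr int kPointerSizeLog2 = 3;
| constexpr int kFixedArrayHeaderSize = 16;  // assumed: map word + length word
|
| // Model of SmiToIndex followed by
| // lea(dst, FieldOperand(object, index.reg, index.scale, FixedArray::kHeaderSize)).
| inline uintptr_t ElementSlot(uintptr_t object, int64_t smi_index) {
|   int64_t index = smi_index >> kSmiShift;  // untag
|   return object - kHeapObjectTag + kFixedArrayHeaderSize +
|          (static_cast<uintptr_t>(index) << kPointerSizeLog2);
| }
|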
| @@ -644,6 +577,18 @@ void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
|
| }
|
|
|
|
|
| +void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
|
| + Register src,
|
| + int power) {
|
| + ASSERT((0 <= power) && (power < 32));
|
| + if (dst.is(src)) {
|
| + shr(dst, Immediate(power + kSmiShift));
|
| + } else {
|
| + UNIMPLEMENTED(); // Not used.
|
| + }
|
| +}
|
| +
|
| +
|
| Condition MacroAssembler::CheckSmi(Register src) {
|
| ASSERT_EQ(0, kSmiTag);
|
| testb(src, Immediate(kSmiTagMask));
|
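| The new PositiveSmiDivPowerOfTwoToInteger32 works because a positive smi
| is just its payload shifted left by kSmiShift, so one logical right shift
| by power + kSmiShift untags and divides at the same time:
|
| #include <cstdint>
|
| constexpr int kSmiShift = 32;  // assumed, as in the sketches above
|
| // shr(dst, Immediate(power + kSmiShift)): for a non-negative payload,
| // (payload << kSmiShift) >> (kSmiShift + power) == payload >> power.
| inline uint32_t PositiveSmiDivPowerOfTwo(uint64_t smi, int power) {
|   return static_cast<uint32_t>(smi >> (kSmiShift + power));
| }
|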
| @@ -2607,7 +2552,7 @@ void MacroAssembler::AllocateTwoByteString(Register result,
|
| movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
|
| Integer32ToSmi(scratch1, length);
|
| movq(FieldOperand(result, String::kLengthOffset), scratch1);
|
| - movl(FieldOperand(result, String::kHashFieldOffset),
|
| + movq(FieldOperand(result, String::kHashFieldOffset),
|
| Immediate(String::kEmptyHashField));
|
| }
|
|
|
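| The movl -> movq change below, and the identical one in the
| AllocateAsciiString hunk that follows, widens the kEmptyHashField store
| from 4 to 8 bytes; movq with a 32-bit immediate sign-extends it, so a
| single store can also initialize the bytes after the hash field. That
| motivation is an inference, as is the layout in this sketch (a 4-byte
| hash field followed by 4 bytes of padding):
|
| #include <cstdint>
| #include <cstring>
|
| // movl: writes only the 4-byte hash field; padding stays uninitialized.
| inline void StoreHashMovl(uint8_t* field, uint32_t value) {
|   std::memcpy(field, &value, sizeof(value));
| }
|
| // movq: the 32-bit immediate is sign-extended to 64 bits and written in
| // one 8-byte store, covering the hash field and the padding after it.
| inline void StoreHashMovq(uint8_t* field, int32_t immediate) {
|   int64_t wide = immediate;
|   std::memcpy(field, &wide, sizeof(wide));
| }
|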
| @@ -2645,7 +2590,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
|
| movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
|
| Integer32ToSmi(scratch1, length);
|
| movq(FieldOperand(result, String::kLengthOffset), scratch1);
|
| - movl(FieldOperand(result, String::kHashFieldOffset),
|
| + movq(FieldOperand(result, String::kHashFieldOffset),
|
| Immediate(String::kEmptyHashField));
|
| }
|
|
|
|
|