Chromium Code Reviews

Unified Diff: src/x64/macro-assembler-x64.cc

Issue 8139027: Version 3.6.5 (Closed) Base URL: http://v8.googlecode.com/svn/trunk/
Patch Set: '' Created 9 years, 2 months ago
Index: src/x64/macro-assembler-x64.cc
===================================================================
--- src/x64/macro-assembler-x64.cc (revision 9531)
+++ src/x64/macro-assembler-x64.cc (working copy)
@@ -44,6 +44,7 @@
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
allow_stub_calls_(true),
+ has_frame_(false),
root_array_available_(true) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
@@ -196,28 +197,47 @@
}
-void MacroAssembler::RecordWriteHelper(Register object,
- Register addr,
- Register scratch) {
- if (emit_debug_code()) {
- // Check that the object is not in new space.
- Label not_in_new_space;
- InNewSpace(object, scratch, not_equal, &not_in_new_space, Label::kNear);
- Abort("new-space object passed to RecordWriteHelper");
- bind(&not_in_new_space);
+void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
+ Register addr,
+ Register scratch,
+ SaveFPRegsMode save_fp,
+ RememberedSetFinalAction and_then) {
+ if (FLAG_debug_code) {
+ Label ok;
+ JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
+ int3();
+ bind(&ok);
}
-
- // Compute the page start address from the heap object pointer, and reuse
- // the 'object' register for it.
- and_(object, Immediate(~Page::kPageAlignmentMask));
-
- // Compute number of region covering addr. See Page::GetRegionNumberForAddress
- // method for more details.
- shrl(addr, Immediate(Page::kRegionSizeLog2));
- andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2));
-
- // Set dirty mark for region.
- bts(Operand(object, Page::kDirtyFlagOffset), addr);
+ // Load store buffer top.
+ LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
+ // Store pointer to buffer.
+ movq(Operand(scratch, 0), addr);
+ // Increment buffer top.
+ addq(scratch, Immediate(kPointerSize));
+ // Write back new top of buffer.
+ StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
+ // Call stub on end of buffer.
+ Label done;
+ // Check for end of buffer.
+ testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+ if (and_then == kReturnAtEnd) {
+ Label buffer_overflowed;
+ j(not_equal, &buffer_overflowed, Label::kNear);
+ ret(0);
+ bind(&buffer_overflowed);
+ } else {
+ ASSERT(and_then == kFallThroughAtEnd);
+ j(equal, &done, Label::kNear);
+ }
+ StoreBufferOverflowStub store_buffer_overflow =
+ StoreBufferOverflowStub(save_fp);
+ CallStub(&store_buffer_overflow);
+ if (and_then == kReturnAtEnd) {
+ ret(0);
+ } else {
+ ASSERT(and_then == kFallThroughAtEnd);
+ bind(&done);
+ }
}
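
Note: the fast path above appends the slot address to the store buffer and only calls the overflow stub when a position bit in the new top is set. A minimal C++ model of that logic, assuming a hypothetical buffer layout and overflow-bit placement (not actual V8 API):

    #include <cstdint>

    struct StoreBuffer {
      uintptr_t* top;  // Next free slot; plays the role of kStoreBufferTopRootIndex.
      // Assumed flag: crossing the buffer end sets this bit in the top
      // pointer itself, mirroring the testq against kStoreBufferOverflowBit.
      static const uintptr_t kOverflowBit = uintptr_t{1} << 14;

      // Record one slot address; returns true when the stub must run.
      bool Record(uintptr_t slot_address) {
        *top++ = slot_address;  // Store pointer, then bump the top.
        return (reinterpret_cast<uintptr_t>(top) & kOverflowBit) != 0;
      }
    };
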
@@ -225,7 +245,7 @@
Register scratch,
Condition cc,
Label* branch,
- Label::Distance near_jump) {
+ Label::Distance distance) {
if (Serializer::enabled()) {
// Can't do arithmetic on external references if it might get serialized.
// The mask isn't really an address. We load it as an external reference in
@@ -240,7 +260,7 @@
}
movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
cmpq(scratch, kScratchRegister);
- j(cc, branch, near_jump);
+ j(cc, branch, distance);
} else {
ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
intptr_t new_space_start =
@@ -252,127 +272,128 @@
lea(scratch, Operand(object, kScratchRegister, times_1, 0));
}
and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
- j(cc, branch, near_jump);
+ j(cc, branch, distance);
}
}
-void MacroAssembler::RecordWrite(Register object,
- int offset,
- Register value,
- Register index) {
+void MacroAssembler::RecordWriteField(
+ Register object,
+ int offset,
+ Register value,
+ Register dst,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are rsi.
- ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
+ ASSERT(!value.is(rsi) && !dst.is(rsi));
// First, check if a write barrier is even needed. The tests below
- // catch stores of smis and stores into the young generation.
+ // catch stores of Smis.
Label done;
- JumpIfSmi(value, &done);
- RecordWriteNonSmi(object, offset, value, index);
+ // Skip barrier if writing a smi.
+ if (smi_check == INLINE_SMI_CHECK) {
+ JumpIfSmi(value, &done);
+ }
+
+ // Although the object register is tagged, the offset is relative to the start
+ // of the object, so the offset must be a multiple of kPointerSize.
+ ASSERT(IsAligned(offset, kPointerSize));
+
+ lea(dst, FieldOperand(object, offset));
+ if (emit_debug_code()) {
+ Label ok;
+ testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
+ j(zero, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
+ RecordWrite(
+ object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+
bind(&done);
- // Clobber all input registers when running with the debug-code flag
- // turned on to provoke errors. This clobbering repeats the
- // clobbering done inside RecordWriteNonSmi but it's necessary to
- // avoid having the fast case for smis leave the registers
- // unchanged.
+ // Clobber clobbered input registers when running with the debug-code flag
+ // turned on to provoke errors.
if (emit_debug_code()) {
- movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
}
}
void MacroAssembler::RecordWrite(Register object,
Register address,
- Register value) {
+ Register value,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are rsi.
- ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
+ ASSERT(!value.is(rsi) && !address.is(rsi));
- // First, check if a write barrier is even needed. The tests below
- // catch stores of smis and stores into the young generation.
- Label done;
- JumpIfSmi(value, &done);
+ ASSERT(!object.is(value));
+ ASSERT(!object.is(address));
+ ASSERT(!value.is(address));
+ if (emit_debug_code()) {
+ AbortIfSmi(object);
+ }
- InNewSpace(object, value, equal, &done);
+ if (remembered_set_action == OMIT_REMEMBERED_SET &&
+ !FLAG_incremental_marking) {
+ return;
+ }
- RecordWriteHelper(object, address, value);
-
- bind(&done);
-
- // Clobber all input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ if (FLAG_debug_code) {
+ Label ok;
+ cmpq(value, Operand(address, 0));
+ j(equal, &ok, Label::kNear);
+ int3();
+ bind(&ok);
}
-}
-
-void MacroAssembler::RecordWriteNonSmi(Register object,
- int offset,
- Register scratch,
- Register index) {
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
Label done;
- if (emit_debug_code()) {
- Label okay;
- JumpIfNotSmi(object, &okay, Label::kNear);
- Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
- bind(&okay);
-
- if (offset == 0) {
- // index must be int32.
- Register tmp = index.is(rax) ? rbx : rax;
- push(tmp);
- movl(tmp, index);
- cmpq(tmp, index);
- Check(equal, "Index register for RecordWrite must be untagged int32.");
- pop(tmp);
- }
+ if (smi_check == INLINE_SMI_CHECK) {
+ // Skip barrier if writing a smi.
+ JumpIfSmi(value, &done);
}
- // Test that the object address is not in the new space. We cannot
- // update page dirty marks for new space pages.
- InNewSpace(object, scratch, equal, &done);
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
- // The offset is relative to a tagged or untagged HeapObject pointer,
- // so either offset or offset + kHeapObjectTag must be a
- // multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize) ||
- IsAligned(offset + kHeapObjectTag, kPointerSize));
+ CheckPageFlag(object,
+ value, // Used as scratch.
+ MemoryChunk::kPointersFromHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
- Register dst = index;
- if (offset != 0) {
- lea(dst, Operand(object, offset));
- } else {
- // array access: calculate the destination address in the same manner as
- // KeyedStoreIC::GenerateGeneric.
- lea(dst, FieldOperand(object,
- index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- }
- RecordWriteHelper(object, dst, scratch);
+ RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ CallStub(&stub);
bind(&done);
- // Clobber all input registers when running with the debug-code flag
+ // Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
}
}
+
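
Note: the rewritten barrier filters in three stages before calling RecordWriteStub: an optional smi check, then the two CheckPageFlag tests. A C++ sketch of the filtering, with the page size and flag-word offset as illustrative assumptions:

    #include <cstdint>

    const uintptr_t kPageAlignmentMask = (uintptr_t{1} << 20) - 1;  // Assumed 1 MB pages.
    const uintptr_t kFlagsOffset = 0;  // Assumed; the real MemoryChunk offset differs.

    static uint32_t PageFlags(uintptr_t addr) {
      uintptr_t page = addr & ~kPageAlignmentMask;  // Containing page start.
      return *reinterpret_cast<uint32_t*>(page + kFlagsOffset);
    }

    // True only when the emitted code would reach the CallStub.
    bool BarrierNeeded(uintptr_t object, uintptr_t value,
                       uint32_t to_here_mask, uint32_t from_here_mask) {
      if ((value & 1) == 0) return false;  // Smi: no heap pointer stored.
      if ((PageFlags(value) & to_here_mask) == 0) return false;
      if ((PageFlags(object) & from_here_mask) == 0) return false;
      return true;
    }
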
void MacroAssembler::Assert(Condition cc, const char* msg) {
if (emit_debug_code()) Check(cc, msg);
}
@@ -400,7 +421,7 @@
Label L;
j(cc, &L, Label::kNear);
Abort(msg);
- // will not return here
+ // Control will not return here.
bind(&L);
}
@@ -448,9 +469,6 @@
RecordComment(msg);
}
#endif
- // Disable stub call restrictions to always allow calls to abort.
- AllowStubCallsScope allow_scope(this, true);
-
push(rax);
movq(kScratchRegister, p0, RelocInfo::NONE);
push(kScratchRegister);
@@ -458,20 +476,28 @@
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
RelocInfo::NONE);
push(kScratchRegister);
- CallRuntime(Runtime::kAbort, 2);
- // will not return here
+
+ if (!has_frame_) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ CallRuntime(Runtime::kAbort, 2);
+ } else {
+ CallRuntime(Runtime::kAbort, 2);
+ }
+ // Control will not return here.
int3();
}
void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
- ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
+ ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
+ ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
MaybeObject* result = stub->TryGetCode();
if (!result->IsFailure()) {
call(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
@@ -482,13 +508,12 @@
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
+ ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
MaybeObject* result = stub->TryGetCode();
if (!result->IsFailure()) {
jmp(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
@@ -504,6 +529,12 @@
}
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+ if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
+ return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+}
+
+
void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
addq(rsp, Immediate(num_arguments * kPointerSize));
@@ -540,8 +571,7 @@
const Runtime::Function* function = Runtime::FunctionForId(id);
Set(rax, function->nargs);
LoadAddress(rbx, ExternalReference(function, isolate()));
- CEntryStub ces(1);
- ces.SaveDoubles();
+ CEntryStub ces(1, kSaveFPRegs);
CallStub(&ces);
}
@@ -795,8 +825,8 @@
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
- // Calls are not allowed in some stubs.
- ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
+ // You can't call a builtin without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
// Rely on the assertion to check that the number of provided
// arguments match the expected number of arguments. Fake a
@@ -825,6 +855,57 @@
}
+static const Register saved_regs[] =
+ { rax, rcx, rdx, rbx, rbp, rsi, rdi, r8, r9, r10, r11 };
+static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
+
+
+void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1,
+ Register exclusion2,
+ Register exclusion3) {
+ // We don't allow a GC during a store buffer overflow, so there is no need to
+ // store the registers in any particular way, but we do have to store and
+ // restore them.
+ for (int i = 0; i < kNumberOfSavedRegs; i++) {
+ Register reg = saved_regs[i];
+ if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+ push(reg);
+ }
+ }
+ // r12 to r15 are callee-saved on all platforms, so they are not saved here.
+ if (fp_mode == kSaveFPRegs) {
+ CpuFeatures::Scope scope(SSE2);
+ subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ XMMRegister reg = XMMRegister::from_code(i);
+ movsd(Operand(rsp, i * kDoubleSize), reg);
+ }
+ }
+}
+
+
+void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1,
+ Register exclusion2,
+ Register exclusion3) {
+ if (fp_mode == kSaveFPRegs) {
+ CpuFeatures::Scope scope(SSE2);
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ XMMRegister reg = XMMRegister::from_code(i);
+ movsd(reg, Operand(rsp, i * kDoubleSize));
+ }
+ addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+ }
+ for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
+ Register reg = saved_regs[i];
+ if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+ pop(reg);
+ }
+ }
+}
+
+
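
Note: a typical use of the new pair brackets a C call inside a stub, excluding any registers that must stay live. An illustrative fragment (the generator function is hypothetical, and the single-exclusion call assumes the header defaults the other exclusions to no_reg):

    void GenerateOverflowHandler(MacroAssembler* masm) {
      // Save every caller-saved GP register except rdi, plus all XMM
      // registers because kSaveFPRegs requests it.
      masm->PushCallerSaved(kSaveFPRegs, rdi);
      // ... set up arguments and make the C call ...
      masm->PopCallerSaved(kSaveFPRegs, rdi);  // Restore in reverse order.
    }
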
void MacroAssembler::Set(Register dst, int64_t x) {
if (x == 0) {
xorl(dst, dst);
@@ -2567,13 +2648,91 @@
void MacroAssembler::CheckFastElements(Register map,
Label* fail,
Label::Distance distance) {
- STATIC_ASSERT(FAST_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
cmpb(FieldOperand(map, Map::kBitField2Offset),
Immediate(Map::kMaximumBitField2FastElementValue));
j(above, fail, distance);
}
+void MacroAssembler::CheckFastObjectElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 1);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ j(below_equal, fail, distance);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Immediate(Map::kMaximumBitField2FastElementValue));
+ j(above, fail, distance);
+}
+
+
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
+ j(above, fail, distance);
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(
+ Register maybe_number,
+ Register elements,
+ Register key,
+ XMMRegister xmm_scratch,
+ Label* fail) {
+ Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
+
+ JumpIfSmi(maybe_number, &smi_value, Label::kNear);
+
+ CheckMap(maybe_number,
+ isolate()->factory()->heap_number_map(),
+ fail,
+ DONT_DO_SMI_CHECK);
+
+ // Double value, canonicalize NaN.
+ uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
+ cmpl(FieldOperand(maybe_number, offset),
+ Immediate(kNaNOrInfinityLowerBoundUpper32));
+ j(greater_equal, &maybe_nan, Label::kNear);
+
+ bind(&not_nan);
+ movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+ bind(&have_double_value);
+ movsd(FieldOperand(elements, key, times_8, FixedDoubleArray::kHeaderSize),
+ xmm_scratch);
+ jmp(&done);
+
+ bind(&maybe_nan);
+ // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+ // it's an Infinity, and the non-NaN code path applies.
+ j(greater, &is_nan, Label::kNear);
+ cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
+ j(zero, &not_nan);
+ bind(&is_nan);
+ // Convert all NaNs to the same canonical NaN value when they are stored in
+ // the double array.
+ Set(kScratchRegister, BitCast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+ movq(xmm_scratch, kScratchRegister);
+ jmp(&have_double_value, Label::kNear);
+
+ bind(&smi_value);
+ // Value is a smi. Convert it to a double and store.
+ // Preserve original value.
+ SmiToInteger32(kScratchRegister, maybe_number);
+ cvtlsi2sd(xmm_scratch, kScratchRegister);
+ movsd(FieldOperand(elements, key, times_8, FixedDoubleArray::kHeaderSize),
+ xmm_scratch);
+ bind(&done);
+}
+
+
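
Note: the NaN handling above guarantees that every NaN stored into a FixedDoubleArray carries a single canonical bit pattern, so later loads can compare bits directly. The stored value in plain C++, with the concrete pattern as an illustrative assumption:

    #include <cstdint>
    #include <cstring>
    #include <cmath>

    double CanonicalizeForDoubleArray(double value) {
      if (std::isnan(value)) {
        const uint64_t kCanonicalNaN = 0x7FF8000000000000ULL;  // Assumed pattern.
        double canonical;
        std::memcpy(&canonical, &kCanonicalNaN, sizeof(canonical));
        return canonical;
      }
      return value;  // Infinities and ordinary doubles are stored unchanged.
    }
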
void MacroAssembler::CheckMap(Register obj,
Handle<Map> map,
Label* fail,
@@ -2787,10 +2946,10 @@
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
- ASSERT(allow_stub_calls());
Set(rax, 0); // No arguments.
LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
CEntryStub ces(1);
+ ASSERT(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
#endif // ENABLE_DEBUGGER_SUPPORT
@@ -2816,6 +2975,9 @@
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
Label done;
InvokePrologue(expected,
actual,
@@ -2847,6 +3009,9 @@
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
Label done;
Register dummy = rax;
InvokePrologue(expected,
@@ -2877,6 +3042,9 @@
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
ASSERT(function.is(rdi));
movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
@@ -2896,6 +3064,9 @@
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
+ // You can't call a function without a valid frame.
+ ASSERT(flag == JUMP_FUNCTION || has_frame());
+
ASSERT(function->is_compiled());
// Get the function and setup the context.
Move(rdi, Handle<JSFunction>(function));
@@ -3759,6 +3930,20 @@
}
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+ Register end_offset,
+ Register filler) {
+ Label loop, entry;
+ jmp(&entry);
+ bind(&loop);
+ movq(Operand(start_offset, 0), filler);
+ addq(start_offset, Immediate(kPointerSize));
+ bind(&entry);
+ cmpq(start_offset, end_offset);
+ j(less, &loop);
+}
+
+
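
Note: the jmp-to-entry shape is the usual bottom-tested loop; its C++ equivalent is simply:

    #include <cstdint>

    // Fill [start, end) with a filler word, one pointer-sized store per field.
    void InitializeFields(uintptr_t* start, uintptr_t* end, uintptr_t filler) {
      for (uintptr_t* p = start; p < end; ++p) {
        *p = filler;
      }
    }
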
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
@@ -3858,6 +4043,7 @@
void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+ ASSERT(has_frame());
// Check stack alignment.
if (emit_debug_code()) {
CheckStackAlignment();
@@ -3872,6 +4058,17 @@
}
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+ if (r1.is(r2)) return true;
+ if (r1.is(r3)) return true;
+ if (r1.is(r4)) return true;
+ if (r2.is(r3)) return true;
+ if (r2.is(r4)) return true;
+ if (r3.is(r4)) return true;
+ return false;
+}
+
+
CodePatcher::CodePatcher(byte* address, int size)
: address_(address),
size_(size),
@@ -3892,6 +4089,195 @@
ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
+
+void MacroAssembler::CheckPageFlag(
+ Register object,
+ Register scratch,
+ int mask,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance) {
+ ASSERT(cc == zero || cc == not_zero);
+ if (scratch.is(object)) {
+ and_(scratch, Immediate(~Page::kPageAlignmentMask));
+ } else {
+ movq(scratch, Immediate(~Page::kPageAlignmentMask));
+ and_(scratch, object);
+ }
+ if (mask < (1 << kBitsPerByte)) {
+ testb(Operand(scratch, MemoryChunk::kFlagsOffset),
+ Immediate(static_cast<uint8_t>(mask)));
+ } else {
+ testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+ }
+ j(cc, condition_met, condition_met_distance);
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Label* on_black,
+ Label::Distance on_black_distance) {
+ ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
+ GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ // The mask_scratch register contains a 1 at the position of the first bit
+ // and a 0 at all other positions, including the position of the second bit.
+ movq(rcx, mask_scratch);
+ // Make rcx into a mask that covers both marking bits using the operation
+ // rcx = mask | (mask << 1).
+ lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
+ // Note that we are using a 4-byte aligned 8-byte load.
+ and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ cmpq(mask_scratch, rcx);
+ j(equal, on_black, on_black_distance);
+}
+
+
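
Note: the lea trick above builds a mask covering both mark bits so that one load answers the color test. In C++ terms (bit layout as asserted against Marking::kBlackBitPattern):

    #include <cstdint>

    // A cell is "black" at bit position `bit` when the two-bit pattern is
    // 10: first mark bit set, second mark bit clear.
    bool IsBlack(uint32_t cell, int bit) {
      uint32_t mask = uint32_t{1} << bit;  // First mark bit.
      uint32_t both = mask | (mask << 1);  // Same as the lea with times_2.
      return (cell & both) == mask;        // Exactly the 10 pattern.
    }
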
+// Detect some, but not all, common pointer-free objects. This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(
+ Register value,
+ Register scratch,
+ Label* not_data_object,
+ Label::Distance not_data_object_distance) {
+ Label is_data_object;
+ movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
+ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+ j(equal, &is_data_object, Label::kNear);
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
+ Immediate(kIsIndirectStringMask | kIsNotStringMask));
+ j(not_zero, not_data_object, not_data_object_distance);
+ bind(&is_data_object);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg) {
+ ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
+ movq(bitmap_reg, addr_reg);
+ // Sign extended 32 bit immediate.
+ and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+ movq(rcx, addr_reg);
+ int shift =
+ Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
+ shrl(rcx, Immediate(shift));
+ and_(rcx,
+ Immediate((Page::kPageAlignmentMask >> shift) &
+ ~(Bitmap::kBytesPerCell - 1)));
+
+ addq(bitmap_reg, rcx);
+ movq(rcx, addr_reg);
+ shrl(rcx, Immediate(kPointerSizeLog2));
+ and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
+ movl(mask_reg, Immediate(1));
+ shl_cl(mask_reg);
+}
+
+
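
Note: GetMarkBits maps an address to its bitmap cell and bit with pure shift/mask arithmetic. The same computation in C++, with the constants as assumed x64 values:

    #include <cstdint>

    const int kPointerSizeLog2 = 3;   // 8-byte pointers on x64.
    const int kBitsPerCellLog2 = 5;   // Assumed: 32 mark bits per cell.
    const int kBytesPerCellLog2 = 2;  // Assumed: 4-byte cells.
    const uintptr_t kPageAlignmentMask = (uintptr_t{1} << 20) - 1;  // Assumed.

    // Cell address goes to *cell, single-bit mask to *mask. The bitmap's
    // header displacement (MemoryChunk::kHeaderSize) is added at use sites.
    void GetMarkBits(uintptr_t addr, uintptr_t* cell, uint32_t* mask) {
      uintptr_t page = addr & ~kPageAlignmentMask;
      int shift = kBitsPerCellLog2 + kPointerSizeLog2 - kBytesPerCellLog2;
      uintptr_t cell_offset = ((addr & kPageAlignmentMask) >> shift) &
                              ~((uintptr_t{1} << kBytesPerCellLog2) - 1);
      *cell = page + cell_offset;
      uint32_t bit = (addr >> kPointerSizeLog2) & ((1u << kBitsPerCellLog2) - 1);
      *mask = uint32_t{1} << bit;
    }
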
+void MacroAssembler::EnsureNotWhite(
+ Register value,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Label* value_is_white_and_not_data,
+ Label::Distance distance) {
+ ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
+ GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+ // If the value is black or grey we don't need to do anything.
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ Label done;
+
+ // Since both black and grey have a 1 in the first position and white does
+ // not have a 1 there we only need to check one bit.
+ testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ j(not_zero, &done, Label::kNear);
+
+ if (FLAG_debug_code) {
+ // Check for impossible bit pattern.
+ Label ok;
+ push(mask_scratch);
+ // Shift the mask left by one (via addq). May overflow, making the check
+ // conservative.
+ addq(mask_scratch, mask_scratch);
+ testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ j(zero, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ pop(mask_scratch);
+ }
+
+ // Value is white. We check whether it is data that doesn't need scanning.
+ // Currently only checks for HeapNumber and non-cons strings.
+ Register map = rcx; // Holds map while checking type.
+ Register length = rcx; // Holds length of object after checking type.
+ Label not_heap_number;
+ Label is_data_object;
+
+ // Check for heap-number
+ movq(map, FieldOperand(value, HeapObject::kMapOffset));
+ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ j(not_equal, &not_heap_number, Label::kNear);
+ movq(length, Immediate(HeapNumber::kSize));
+ jmp(&is_data_object, Label::kNear);
+
+ bind(&not_heap_number);
+ // Check for strings.
+ ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ Register instance_type = rcx;
+ movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+ testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
+ j(not_zero, value_is_white_and_not_data);
+ // It's a non-indirect (non-cons and non-slice) string.
+ // If it's external, the length is just ExternalString::kSize.
+ // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+ Label not_external;
+ // External strings are the only ones with the kExternalStringTag bit
+ // set.
+ ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+ ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ testb(instance_type, Immediate(kExternalStringTag));
+ j(zero, &not_external, Label::kNear);
+ movq(length, Immediate(ExternalString::kSize));
+ jmp(&is_data_object, Label::kNear);
+
+ bind(&not_external);
+ // Sequential string, either ASCII or UC16.
+ ASSERT(kAsciiStringTag == 0x04);
+ and_(length, Immediate(kStringEncodingMask));
+ xor_(length, Immediate(kStringEncodingMask));
+ addq(length, Immediate(0x04));
+ // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
+ imul(length, FieldOperand(value, String::kLengthOffset));
+ shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
+ addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+ and_(length, Immediate(~kObjectAlignmentMask));
+
+ bind(&is_data_object);
+ // Value is a data object, and it is white. Mark it black. Since we know
+ // that the object is white we can make it black by flipping one bit.
+ or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+
+ and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+ addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
+
+ bind(&done);
+}
+
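
Note: the sequential-string branch computes the object size with bit tricks: the encoding bit becomes a char size scaled by four, is multiplied by the smi-encoded length, and the result is shifted back down and rounded up to object alignment. The same computation in straightforward C++ (header size and alignment as assumed x64 values):

    // bytes = header + length * char_size, rounded up to object alignment.
    int SeqStringSize(int length, bool is_ascii) {
      const int kHeaderSize = 16;        // Assumed SeqString::kHeaderSize.
      const int kAlignMask = 7;          // 8-byte object alignment.
      int char_size = is_ascii ? 1 : 2;  // The 4-or-8 value, divided by 4.
      int size = kHeaderSize + length * char_size;
      return (size + kAlignMask) & ~kAlignMask;
    }
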
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64