Index: src/mips/code-stubs-mips.cc
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 1fbbcfa66296d7a2771bb83e2d7422bbba548277..83200827d10a8b07c070af1f7d0764784f879b8c 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -886,6 +886,35 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
 }
+bool WriteInt32ToHeapNumberStub::CompilingCallsToThisStubIsGCSafe() {
+  // These variants are compiled ahead of time. See next method.
+  if (the_int_.is(a1) &&
+      the_heap_number_.is(v0) &&
+      scratch_.is(a2) &&
+      sign_.is(a3)) {
+    return true;
+  }
+  if (the_int_.is(a2) &&
+      the_heap_number_.is(v0) &&
+      scratch_.is(a3) &&
+      sign_.is(a0)) {
+    return true;
+  }
+  // Other register combinations are generated as and when they are needed,
+  // so it is unsafe to call them from stubs (we can't generate a stub while
+  // we are generating a stub).
+  return false;
+}
+
+
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
+  WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
+  WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
+  stub1.GetCode();
+  stub2.GetCode();
+}
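+// The two register combinations above are exactly the ones that
+// CompilingCallsToThisStubIsGCSafe() accepts; the two lists presumably need
+// to stay in sync (an inference from the "compiled ahead of time" comment
+// above, not an invariant asserted anywhere in this patch).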
+
+
 // See comment for class, this does NOT work for int32's that are in Smi range.
 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
   Label max_negative_int;
@@ -1801,6 +1830,35 @@ void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
 }
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+  // We don't allow a GC during a store buffer overflow, so there is no need
+  // to store the registers in any particular way, but we do have to store
+  // and restore them.
+  __ MultiPush(kJSCallerSaved | ra.bit());
+  if (save_doubles_ == kSaveFPRegs) {
+    CpuFeatures::Scope scope(FPU);
+    __ MultiPushFPU(kCallerSavedFPU);
+  }
+  const int argument_count = 1;
+  const int fp_argument_count = 0;
+  const Register scratch = a1;
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
+  __ li(a0, Operand(ExternalReference::isolate_address()));
+  __ CallCFunction(
+      ExternalReference::store_buffer_overflow_function(masm->isolate()),
+      argument_count);
+  if (save_doubles_ == kSaveFPRegs) {
+    CpuFeatures::Scope scope(FPU);
+    __ MultiPopFPU(kCallerSavedFPU);
+  }
+
+  __ MultiPop(kJSCallerSaved | ra.bit());
+  __ Ret();
+}
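+// The single integer argument prepared above is the isolate address in a0;
+// fp_argument_count is 0 because nothing is passed in FPU registers. The
+// C-side handler is assumed here to take just that argument, along the lines
+// of "void StoreBufferOverflow(Isolate* isolate)" -- an assumption, since
+// the C signature is not visible in this file.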
+
+
 void UnaryOpStub::PrintName(StringStream* stream) {
   const char* op_name = Token::Name(op_);
   const char* overwrite_name = NULL;  // Make g++ happy.
@@ -3484,12 +3542,12 @@ bool CEntryStub::IsPregenerated() {
 void CodeStub::GenerateStubsAheadOfTime() {
+  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
 }
 void CodeStub::GenerateFPStubs() {
-  CEntryStub save_doubles(1);
-  save_doubles.SaveDoubles();
+  CEntryStub save_doubles(1, kSaveFPRegs);
   Handle<Code> code = save_doubles.GetCode();
   code->GetIsolate()->set_fp_stubs_generated(true);
 }
@@ -4812,16 +4870,25 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ sw(a2, FieldMemOperand(last_match_info_elements,
                             RegExpImpl::kLastCaptureCountOffset));
   // Store last subject and last input.
-  __ mov(a3, last_match_info_elements);  // Moved up to reduce latency.
   __ sw(subject,
         FieldMemOperand(last_match_info_elements,
                         RegExpImpl::kLastSubjectOffset));
-  __ RecordWrite(a3, Operand(RegExpImpl::kLastSubjectOffset), a2, t0);
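+  // The value is copied into a2 first because RecordWriteField can clobber
+  // its value register, and subject is still needed for the kLastInputOffset
+  // store below (a rationale inferred from this code, not stated in it).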
+  __ mov(a2, subject);
+  __ RecordWriteField(last_match_info_elements,
+                      RegExpImpl::kLastSubjectOffset,
+                      a2,
+                      t3,
+                      kRAHasNotBeenSaved,
+                      kDontSaveFPRegs);
   __ sw(subject,
         FieldMemOperand(last_match_info_elements,
                         RegExpImpl::kLastInputOffset));
-  __ mov(a3, last_match_info_elements);
-  __ RecordWrite(a3, Operand(RegExpImpl::kLastInputOffset), a2, t0);
+  __ RecordWriteField(last_match_info_elements,
+                      RegExpImpl::kLastInputOffset,
+                      subject,
+                      t3,
+                      kRAHasNotBeenSaved,
+                      kDontSaveFPRegs);
   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =
@@ -7021,6 +7088,238 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
 }
+struct AheadOfTimeWriteBarrierStubList {
+  Register object, value, address;
+  RememberedSetAction action;
+};
+
+
+struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+  // TODO(1696): Fill this in for MIPS.
+  // Null termination.
+  { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET }
+};
+
+
+bool RecordWriteStub::CompilingCallsToThisStubIsGCSafe() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    if (object_.is(entry->object) &&
+        value_.is(entry->value) &&
+        address_.is(entry->address) &&
+        remembered_set_action_ == entry->action &&
+        save_fp_regs_mode_ == kDontSaveFPRegs) {
+      return true;
+    }
+  }
+  return true;  // TODO(1696): Should be false.
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+  stub1.GetCode();
+  StoreBufferOverflowStub stub2(kSaveFPRegs);
+  stub2.GetCode();
+}
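+// Presumably both FP-save modes are pregenerated so that callers such as
+// MacroAssembler::RememberedSetHelper can reach this stub in either mode
+// without triggering stub compilation at an unsafe point (an assumption
+// based on the pattern used for WriteInt32ToHeapNumberStub above).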
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    RecordWriteStub stub(entry->object,
+                         entry->value,
+                         entry->address,
+                         entry->action,
+                         kDontSaveFPRegs);
+    stub.GetCode();
+  }
+}
+
+
+// Takes the input in 3 registers: address_, value_, and object_. A pointer
+// to the value has just been written into the object, and this stub now
+// makes sure we keep the GC informed. The word in the object where the
+// value has been written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  Label skip_to_incremental_noncompacting;
+  Label skip_to_incremental_compacting;
+
+  // The first two branch+nop instructions are generated with labels so as
+  // to get the offset fixed up correctly by the bind(Label*) call. We patch
+  // them back and forth between a "bne zero_reg, zero_reg, ..." (a nop in
+  // this position) and the "beq zero_reg, zero_reg, ..." when we start and
+  // stop incremental heap marking.
+  // See RecordWriteStub::Patch for details.
+  __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
+  __ nop();
+  __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
+  __ nop();
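+  // Sketch of the three patchable states (derived from the comment above
+  // and from the PatchBranchIntoNop calls below; see RecordWriteStub::Patch):
+  //   STORE_BUFFER_ONLY (initial): both branches patched to
+  //     "bne zero_reg, zero_reg" (never taken), so we fall through to the
+  //     remembered-set code below.
+  //   INCREMENTAL: the first branch restored to "beq zero_reg, zero_reg"
+  //     (always taken), jumping to skip_to_incremental_noncompacting.
+  //   INCREMENTAL_COMPACTION: the second branch restored, jumping to
+  //     skip_to_incremental_compacting.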
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(
+        address_, value_, save_fp_regs_mode_, MacroAssembler::kReturnAtEnd);
+  }
+  __ Ret();
+
+  __ bind(&skip_to_incremental_noncompacting);
+  GenerateIncremental(masm, INCREMENTAL);
+
+  __ bind(&skip_to_incremental_compacting);
+  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+
+  PatchBranchIntoNop(masm, 0);
+  PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
+}
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+  regs_.Save(masm);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    Label dont_need_remembered_set;
+
+    __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
+    __ JumpIfNotInNewSpace(regs_.scratch0(),
+                           regs_.scratch0(),
+                           &dont_need_remembered_set);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch0(),
+                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                     ne,
+                     &dont_need_remembered_set);
+
+    // First notify the incremental marker if necessary, then update the
+    // remembered set.
+    CheckNeedsToInformIncrementalMarker(
+        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+    InformIncrementalMarker(masm, mode);
+    regs_.Restore(masm);
+    __ RememberedSetHelper(
+        address_, value_, save_fp_regs_mode_, MacroAssembler::kReturnAtEnd);
+
+    __ bind(&dont_need_remembered_set);
+  }
+
+  CheckNeedsToInformIncrementalMarker(
+      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+  InformIncrementalMarker(masm, mode);
+  regs_.Restore(masm);
+  __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+  int argument_count = 3;
+  __ PrepareCallCFunction(argument_count, regs_.scratch0());
+  Register address =
+      a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
+  ASSERT(!address.is(regs_.object()));
+  ASSERT(!address.is(a0));
+  __ Move(address, regs_.address());
+  __ Move(a0, regs_.object());
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ Move(a1, address);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ lw(a1, MemOperand(address, 0));
+  }
+  __ li(a2, Operand(ExternalReference::isolate_address()));
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ CallCFunction(
+        ExternalReference::incremental_evacuation_record_write_function(
+            masm->isolate()),
+        argument_count);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ CallCFunction(
+        ExternalReference::incremental_marking_record_write_function(
+            masm->isolate()),
+        argument_count);
+  }
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
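+// The three C arguments set up above are a0 = the object, a1 = the slot
+// address (INCREMENTAL_COMPACTION) or the value loaded from the slot
+// (INCREMENTAL), and a2 = the isolate. The slot address is moved out of a0
+// first, when needed, so that writing the object into a0 cannot clobber it.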
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+    MacroAssembler* masm,
+    OnNoNeedToInformIncrementalMarker on_no_need,
+    Mode mode) {
+  Label on_black;
+  Label need_incremental;
+  Label need_incremental_pop_scratch;
+
+  // Let's look at the color of the object: if it is not black we don't have
+  // to inform the incremental marker.
+  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(
+        address_, value_, save_fp_regs_mode_, MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&on_black);
+
+  // Get the value from the slot.
+  __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
+
+  if (mode == INCREMENTAL_COMPACTION) {
+    Label ensure_not_white;
+
+    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kEvacuationCandidateMask,
+                     eq,
+                     &ensure_not_white);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+                     eq,
+                     &need_incremental);
+
+    __ bind(&ensure_not_white);
+  }
+
+  // We need extra registers for this, so we push the object and the address
+  // register temporarily.
+  __ Push(regs_.object(), regs_.address());
+  __ EnsureNotWhite(regs_.scratch0(),  // The value.
+                    regs_.scratch1(),  // Scratch.
+                    regs_.object(),  // Scratch.
+                    regs_.address(),  // Scratch.
+                    &need_incremental_pop_scratch);
+  __ Pop(regs_.object(), regs_.address());
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(
+        address_, value_, save_fp_regs_mode_, MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&need_incremental_pop_scratch);
+  __ Pop(regs_.object(), regs_.address());
+
+  __ bind(&need_incremental);
+
+  // Fall through when we need to inform the incremental marker.
+}
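+// Summary of the exits above: if the object is not black, or EnsureNotWhite
+// finds the value already marked, the stub returns (updating the remembered
+// set first when on_no_need asks for it). Only the need_incremental paths
+// fall through, back to the InformIncrementalMarker call in
+// GenerateIncremental.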
+
+
 #undef __
 } }  // namespace v8::internal