Index: src/arm/code-stubs-arm.cc
===================================================================
--- src/arm/code-stubs-arm.cc	(revision 9531)
+++ src/arm/code-stubs-arm.cc	(working copy)
@@ -838,9 +838,11 @@
     __ vmov(d0, r0, r1);
     __ vmov(d1, r2, r3);
   }
-  // Call C routine that may not cause GC or other trouble.
-  __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
-                   0, 2);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm);
+    __ CallCFunction(
+        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
+  }
   // Store answer in the overwritable heap number.  Double returned in
   // registers r0 and r1 or in d0.
   if (masm->use_eabi_hardfloat()) {
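
A note on the pattern this hunk introduces (it recurs throughout the patch): AllowExternalCallThatCantCauseGC is an RAII guard that marks the enclosed CallCFunction as one that has been audited not to trigger a GC, so debug builds can assert on unaudited external calls. A minimal standalone sketch of that kind of guard, with hypothetical names (the real class lives in V8's macro-assembler headers and piggybacks on its frame tracking):

    #include <cassert>

    // Hypothetical stand-in for the real MacroAssembler state; only the
    // flag needed to illustrate the guard is modeled here.
    class MacroAssemblerSketch {
     public:
      bool allow_external_call() const { return allow_external_call_; }
      void set_allow_external_call(bool allow) { allow_external_call_ = allow; }
      void CallCFunction() {
        // The call site asserts that a scope has whitelisted this call.
        assert(allow_external_call_);
        // ... emit the actual call sequence ...
      }
     private:
      bool allow_external_call_ = false;
    };

    // RAII guard: whitelists external calls for its lexical extent and
    // restores the previous state on destruction, so nesting is safe.
    class AllowExternalCallScopeSketch {
     public:
      explicit AllowExternalCallScopeSketch(MacroAssemblerSketch* masm)
          : masm_(masm), previous_(masm->allow_external_call()) {
        masm_->set_allow_external_call(true);
      }
      ~AllowExternalCallScopeSketch() {
        masm_->set_allow_external_call(previous_);
      }
     private:
      MacroAssemblerSketch* masm_;
      bool previous_;
    };
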
@@ -857,6 +859,29 @@
 }
 
 
+bool WriteInt32ToHeapNumberStub::IsPregenerated() {
+  // These variants are compiled ahead of time.  See next method.
+  if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
+    return true;
+  }
+  if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
+    return true;
+  }
+  // Other register combinations are generated as and when they are needed,
+  // so it is unsafe to call them from stubs (we can't generate a stub while
+  // we are generating a stub).
+  return false;
+}
+
+
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
+  WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
+  WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
+  stub1.GetCode()->set_is_pregenerated(true);
+  stub2.GetCode()->set_is_pregenerated(true);
+}
+
+
 // See comment for class.
 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
   Label max_negative_int;
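
The two methods added above work as a pair: GenerateFixedRegStubsAheadOfTime bakes out exactly the register combinations that IsPregenerated whitelists, so code running without a frame can call those two variants and no others. A self-contained sketch of that pairing, using hypothetical register and stub types:

    #include <cassert>

    // Hypothetical register type standing in for V8's Register.
    struct Reg { int code; };
    inline bool Is(Reg a, Reg b) { return a.code == b.code; }

    const Reg r0{0}, r1{1}, r2{2}, r3{3};

    struct WriteInt32StubSketch {
      Reg the_int, the_heap_number, scratch;

      // Mirrors the whitelist in the patch: only the two fixed-register
      // variants compiled ahead of time are safe to call from other stubs.
      bool IsPregenerated() const {
        if (Is(the_int, r1) && Is(the_heap_number, r0) && Is(scratch, r2))
          return true;
        if (Is(the_int, r2) && Is(the_heap_number, r0) && Is(scratch, r3))
          return true;
        return false;
      }
    };

    int main() {
      // The two ahead-of-time combinations pass; any other combination is
      // rejected, since we can't generate a stub while generating a stub.
      assert((WriteInt32StubSketch{r1, r0, r2}.IsPregenerated()));
      assert((WriteInt32StubSketch{r2, r0, r3}.IsPregenerated()));
      assert((!WriteInt32StubSketch{r3, r0, r1}.IsPregenerated()));
      return 0;
    }
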
@@ -1197,6 +1222,8 @@
       __ vmov(d0, r0, r1);
       __ vmov(d1, r2, r3);
     }
+
+    AllowExternalCallThatCantCauseGC scope(masm);
     __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
                      0, 2);
     __ pop(pc);  // Return.
@@ -1214,7 +1241,7 @@
     // If either operand is a JS object or an oddball value, then they are
     // not equal since their pointers are different.
     // There is no test for undetectability in strict equality.
-    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+    STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
     Label first_non_object;
     // Get the type of the first operand into r2 and compare it with
     // FIRST_SPEC_OBJECT_TYPE.
@@ -1606,6 +1633,8 @@
 // The stub expects its argument in the tos_ register and returns its result in
 // it, too: zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   // This stub uses VFP3 instructions.
   CpuFeatures::Scope scope(VFP3);
 
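
The comment added to ToBooleanStub (and to StringDictionaryLookupStub further down) refers to a virtual hook on the stub class: frameless stubs report that they never build a frame, which is what lets the assembler reject GC-triggering calls inside them. A hypothetical sketch of the override (the hook name, SometimesSetsUpAFrame, is taken from the comment itself):

    // Hypothetical base class; the real hook lives on V8's CodeStub.
    class CodeStubSketch {
     public:
      virtual ~CodeStubSketch() {}
      // Conservative default: most stubs may set up a frame.
      virtual bool SometimesSetsUpAFrame() { return true; }
    };

    class ToBooleanStubSketch : public CodeStubSketch {
     public:
      // Never builds a frame, so nothing that could cause a GC may be
      // called while this stub is on the stack.
      virtual bool SometimesSetsUpAFrame() { return false; }
    };
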
@@ -1713,6 +1742,41 @@
 }
 
 
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+  // We don't allow a GC during a store buffer overflow so there is no need to
+  // store the registers in any particular way, but we do have to store and
+  // restore them.
+  __ stm(db_w, sp, kCallerSaved | lr.bit());
+  if (save_doubles_ == kSaveFPRegs) {
+    CpuFeatures::Scope scope(VFP3);
+    __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
+    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+      DwVfpRegister reg = DwVfpRegister::from_code(i);
+      __ vstr(reg, MemOperand(sp, i * kDoubleSize));
+    }
+  }
+  const int argument_count = 1;
+  const int fp_argument_count = 0;
+  const Register scratch = r1;
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
+  __ mov(r0, Operand(ExternalReference::isolate_address()));
+  __ CallCFunction(
+      ExternalReference::store_buffer_overflow_function(masm->isolate()),
+      argument_count);
+  if (save_doubles_ == kSaveFPRegs) {
+    CpuFeatures::Scope scope(VFP3);
+    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+      DwVfpRegister reg = DwVfpRegister::from_code(i);
+      __ vldr(reg, MemOperand(sp, i * kDoubleSize));
+    }
+    __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
+  }
+  __ ldm(ia_w, sp, kCallerSaved | pc.bit());  // Also pop pc to get Ret(0).
+}
+
+
 void UnaryOpStub::PrintName(StringStream* stream) {
   const char* op_name = Token::Name(op_);
   const char* overwrite_name = NULL;  // Make g++ happy.
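
The double-register spill area in StoreBufferOverflowStub::Generate is plain offset arithmetic: sp is dropped by kDoubleSize * kNumRegisters bytes and register i is stored at offset i * kDoubleSize; the final ldm then pops the saved pc directly into pc, which is the return. A small self-contained check of the layout, assuming the usual ARM/VFP values (8-byte doubles, d0..d15):

    #include <cassert>

    int main() {
      // Assumed ARM/VFP values; the real constants are kDoubleSize and
      // DwVfpRegister::kNumRegisters.
      const int kDoubleSize = 8;
      const int kNumRegisters = 16;

      // The stub reserves one contiguous block below sp...
      const int reserved = kDoubleSize * kNumRegisters;
      assert(reserved == 128);

      // ...and stores register i at byte offset i * kDoubleSize, so the
      // slots tile the block exactly, with no gaps and no overlap.
      for (int i = 0; i < kNumRegisters; i++) {
        int offset = i * kDoubleSize;
        assert(offset + kDoubleSize <= reserved);
      }
      return 0;
    }
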
@@ -1866,12 +1930,13 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    __ push(r0);
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ mov(r1, Operand(r0));
-    __ pop(r0);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(r0);
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      __ mov(r1, Operand(r0));
+      __ pop(r0);
+    }
 
     __ bind(&heapnumber_allocated);
     __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
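
This hunk and the ones below replace matched EnterInternalFrame/LeaveInternalFrame calls with a block-scoped FrameScope; pairing enter and leave in a constructor and destructor means no exit path from the block can forget the epilogue, and the extra braces make the frame's extent visible in the source. A minimal sketch of the idea with hypothetical names (the real FrameScope also records a frame type, e.g. StackFrame::INTERNAL or StackFrame::MANUAL):

    // Hypothetical frame-tracking assembler.
    class AssemblerSketch {
     public:
      void EnterInternalFrame() { ++frame_depth_; /* emit prologue */ }
      void LeaveInternalFrame() { --frame_depth_; /* emit epilogue */ }
      int frame_depth() const { return frame_depth_; }
     private:
      int frame_depth_ = 0;
    };

    class FrameScopeSketch {
     public:
      explicit FrameScopeSketch(AssemblerSketch* masm) : masm_(masm) {
        masm_->EnterInternalFrame();
      }
      // The destructor guarantees the matching leave on every path out of
      // the C++ block that generated the frame's body.
      ~FrameScopeSketch() { masm_->LeaveInternalFrame(); }
     private:
      AssemblerSketch* masm_;
    };
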
@@ -1912,13 +1977,14 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    __ push(r0);  // Push the heap number, not the untagged int32.
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ mov(r2, r0);  // Move the new heap number into r2.
-    // Get the heap number into r0, now that the new heap number is in r2.
-    __ pop(r0);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(r0);  // Push the heap number, not the untagged int32.
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      __ mov(r2, r0);  // Move the new heap number into r2.
+      // Get the heap number into r0, now that the new heap number is in r2.
+      __ pop(r0);
+    }
 
     // Convert the heap number in r0 to an untagged integer in r1.
     // This can't go slow-case because it's the same number we already
@@ -2028,6 +2094,10 @@
 
 
 void BinaryOpStub::Generate(MacroAssembler* masm) {
+  // Explicitly allow generation of nested stubs. It is safe here because
+  // generation code does not use any raw pointers.
+  AllowStubCallsScope allow_stub_calls(masm, true);
+
   switch (operands_type_) {
     case BinaryOpIC::UNINITIALIZED:
       GenerateTypeTransition(masm);
@@ -3133,10 +3203,11 @@
     __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
     __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
     __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
-    __ EnterInternalFrame();
-    __ push(r0);
-    __ CallRuntime(RuntimeFunction(), 1);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(r0);
+      __ CallRuntime(RuntimeFunction(), 1);
+    }
     __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
     __ Ret();
 
@@ -3149,14 +3220,15 @@
 
     // We return the value in d2 without adding it to the cache, but
     // we cause a scavenging GC so that future allocations will succeed.
-    __ EnterInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
 
-    // Allocate an aligned object larger than a HeapNumber.
-    ASSERT(4 * kPointerSize >= HeapNumber::kSize);
-    __ mov(scratch0, Operand(4 * kPointerSize));
-    __ push(scratch0);
-    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
-    __ LeaveInternalFrame();
+      // Allocate an aligned object larger than a HeapNumber.
+      ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+      __ mov(scratch0, Operand(4 * kPointerSize));
+      __ push(scratch0);
+      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    }
     __ Ret();
   }
 }
@@ -3173,6 +3245,7 @@
   } else {
     __ vmov(r0, r1, d2);
   }
+  AllowExternalCallThatCantCauseGC scope(masm);
   switch (type_) {
     case TranscendentalCache::SIN:
       __ CallCFunction(ExternalReference::math_sin_double_function(isolate),
@@ -3268,11 +3341,14 @@
     __ push(lr);
     __ PrepareCallCFunction(1, 1, scratch);
     __ SetCallCDoubleArguments(double_base, exponent);
-    __ CallCFunction(
-        ExternalReference::power_double_int_function(masm->isolate()),
-        1, 1);
-    __ pop(lr);
-    __ GetCFunctionDoubleResult(double_result);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ CallCFunction(
+          ExternalReference::power_double_int_function(masm->isolate()),
+          1, 1);
+      __ pop(lr);
+      __ GetCFunctionDoubleResult(double_result);
+    }
     __ vstr(double_result,
             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
     __ mov(r0, heapnumber);
@@ -3298,11 +3374,14 @@
     __ push(lr);
     __ PrepareCallCFunction(0, 2, scratch);
     __ SetCallCDoubleArguments(double_base, double_exponent);
-    __ CallCFunction(
-        ExternalReference::power_double_double_function(masm->isolate()),
-        0, 2);
-    __ pop(lr);
-    __ GetCFunctionDoubleResult(double_result);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(masm->isolate()),
+          0, 2);
+      __ pop(lr);
+      __ GetCFunctionDoubleResult(double_result);
+    }
     __ vstr(double_result,
             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
     __ mov(r0, heapnumber);
@@ -3319,6 +3398,37 @@
 }
 
 
+bool CEntryStub::IsPregenerated() {
+  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+          result_size_ == 1;
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime() {
+  CEntryStub::GenerateAheadOfTime();
+  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
+  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+}
+
+
+void CodeStub::GenerateFPStubs() {
+  CEntryStub save_doubles(1, kSaveFPRegs);
+  Handle<Code> code = save_doubles.GetCode();
+  code->set_is_pregenerated(true);
+  StoreBufferOverflowStub stub(kSaveFPRegs);
+  stub.GetCode()->set_is_pregenerated(true);
+  code->GetIsolate()->set_fp_stubs_generated(true);
+}
+
+
+void CEntryStub::GenerateAheadOfTime() {
+  CEntryStub stub(1, kDontSaveFPRegs);
+  Handle<Code> code = stub.GetCode();
+  code->set_is_pregenerated(true);
+}
+
+
 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
   __ Throw(r0);
 }
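
The generators above are tied together by one bit of isolate state: GenerateStubsAheadOfTime pregenerates the no-FP CEntry variant plus the fixed-register stubs, while GenerateFPStubs compiles the kSaveFPRegs variants and then records that fact via set_fp_stubs_generated, which is exactly what CEntryStub::IsPregenerated consults for the save-doubles case. A sketch of that dependency, with hypothetical isolate plumbing:

    // Hypothetical flag mirroring Isolate::fp_stubs_generated().
    struct IsolateSketch {
      bool fp_stubs_generated = false;
    };

    struct CEntryStubSketch {
      bool save_doubles;
      int result_size;

      // Mirrors the predicate in the patch: the no-FP, single-result stub
      // is always pregenerated; the FP variant only after GenerateFPStubs.
      bool IsPregenerated(const IsolateSketch& isolate) const {
        return (!save_doubles || isolate.fp_stubs_generated) &&
               result_size == 1;
      }
    };

    void GenerateFPStubs(IsolateSketch* isolate) {
      // ... compile the kSaveFPRegs CEntry and StoreBufferOverflow stubs ...
      isolate->fp_stubs_generated = true;
    }

    int main() {
      IsolateSketch isolate;
      CEntryStubSketch fp_stub{true, 1};
      bool before = fp_stub.IsPregenerated(isolate);  // false: not yet safe.
      GenerateFPStubs(&isolate);
      bool after = fp_stub.IsPregenerated(isolate);   // true: safe now.
      return (!before && after) ? 0 : 1;
    }
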
@@ -3430,8 +3540,7 @@
   __ b(eq, throw_out_of_memory_exception);
 
   // Retrieve the pending exception and clear the variable.
-  __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
-  __ ldr(r3, MemOperand(ip));
+  __ mov(r3, Operand(isolate->factory()->the_hole_value()));
   __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
   __ ldr(r0, MemOperand(ip));
@@ -3469,6 +3578,7 @@
   __ sub(r6, r6, Operand(kPointerSize));
 
   // Enter the exit frame that transitions from JavaScript to C++.
+  FrameScope scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(save_doubles_);
 
   // Set up argc and the builtin function in callee-saved registers.
@@ -3613,8 +3723,7 @@
   // saved values before returning a failure to C.
 
   // Clear any pending exceptions.
-  __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
-  __ ldr(r5, MemOperand(ip));
+  __ mov(r5, Operand(isolate->factory()->the_hole_value()));
   __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
   __ str(r5, MemOperand(ip));
@@ -3851,10 +3960,11 @@
     }
     __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
   } else {
-    __ EnterInternalFrame();
-    __ Push(r0, r1);
-    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(r0, r1);
+      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+    }
     __ cmp(r0, Operand::Zero());
     __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
     __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
@@ -4480,8 +4590,7 @@
 
   // For arguments 4 and 3, get the string length, calculate the start of the
   // string data, and calculate the shift of the index (0 for ASCII and 1 for
   // two-byte).
-  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
-  __ add(r8, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
   __ eor(r3, r3, Operand(1));
   // Load the length from the original subject string from the previous stack
   // frame. Therefore we have to use fp, which points exactly to two pointer
@@ -4532,8 +4641,7 @@
   // stack overflow (on the backtrack stack) was detected in RegExp code but
   // haven't created the exception yet. Handle that in the runtime system.
   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
-  __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate)));
-  __ ldr(r1, MemOperand(r1, 0));
+  __ mov(r1, Operand(isolate->factory()->the_hole_value()));
   __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
   __ ldr(r0, MemOperand(r2, 0));
@@ -4575,16 +4683,25 @@
   __ str(r2, FieldMemOperand(last_match_info_elements,
                              RegExpImpl::kLastCaptureCountOffset));
   // Store last subject and last input.
-  __ mov(r3, last_match_info_elements);  // Moved up to reduce latency.
   __ str(subject,
          FieldMemOperand(last_match_info_elements,
                          RegExpImpl::kLastSubjectOffset));
-  __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
+  __ mov(r2, subject);
+  __ RecordWriteField(last_match_info_elements,
+                      RegExpImpl::kLastSubjectOffset,
+                      r2,
+                      r7,
+                      kLRHasNotBeenSaved,
+                      kDontSaveFPRegs);
   __ str(subject,
          FieldMemOperand(last_match_info_elements,
                          RegExpImpl::kLastInputOffset));
-  __ mov(r3, last_match_info_elements);
-  __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
+  __ RecordWriteField(last_match_info_elements,
+                      RegExpImpl::kLastInputOffset,
+                      subject,
+                      r7,
+                      kLRHasNotBeenSaved,
+                      kDontSaveFPRegs);
 
   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =
@@ -4712,6 +4829,22 @@
 }
 
 
+void CallFunctionStub::FinishCode(Code* code) {
+  code->set_has_function_cache(false);
+}
+
+
+void CallFunctionStub::Clear(Heap* heap, Address address) {
+  UNREACHABLE();
+}
+
+
+Object* CallFunctionStub::GetCachedValue(Address address) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
 void CallFunctionStub::Generate(MacroAssembler* masm) {
   Label slow, non_function;
 
@@ -6425,12 +6558,13 @@
   // Call the runtime system in a fresh internal frame.
   ExternalReference miss =
       ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
-  __ EnterInternalFrame();
-  __ Push(r1, r0);
-  __ mov(ip, Operand(Smi::FromInt(op_)));
-  __ push(ip);
-  __ CallExternalReference(miss, 3);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r1, r0);
+    __ mov(ip, Operand(Smi::FromInt(op_)));
+    __ push(ip);
+    __ CallExternalReference(miss, 3);
+  }
   // Compute the entry point of the rewritten stub.
   __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
   // Restore registers.
@@ -6613,6 +6747,8 @@
 
 
 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   // Registers:
   //  result: StringDictionary to probe
   //  r1: key
@@ -6702,6 +6838,267 @@
 }
 
 
+struct AheadOfTimeWriteBarrierStubList {
+  Register object, value, address;
+  RememberedSetAction action;
+};
+
+
+struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+  // Used in RegExpExecStub.
+  { r6, r4, r7, EMIT_REMEMBERED_SET },
+  { r6, r2, r7, EMIT_REMEMBERED_SET },
+  // Used in CompileArrayPushCall.
+  // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
+  // Also used in KeyedStoreIC::GenerateGeneric.
+  { r3, r4, r5, EMIT_REMEMBERED_SET },
+  // Used in CompileStoreGlobal.
+  { r4, r1, r2, OMIT_REMEMBERED_SET },
+  // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
+  { r1, r2, r3, EMIT_REMEMBERED_SET },
+  { r3, r2, r1, EMIT_REMEMBERED_SET },
+  // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
+  { r2, r1, r3, EMIT_REMEMBERED_SET },
+  { r3, r1, r2, EMIT_REMEMBERED_SET },
+  // KeyedStoreStubCompiler::GenerateStoreFastElement.
+  { r4, r2, r3, EMIT_REMEMBERED_SET },
+  // Null termination.
+  { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+};
+
+
+bool RecordWriteStub::IsPregenerated() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    if (object_.is(entry->object) &&
+        value_.is(entry->value) &&
+        address_.is(entry->address) &&
+        remembered_set_action_ == entry->action &&
+        save_fp_regs_mode_ == kDontSaveFPRegs) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+bool StoreBufferOverflowStub::IsPregenerated() {
+  return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+  stub1.GetCode()->set_is_pregenerated(true);
+}
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    RecordWriteStub stub(entry->object,
+                         entry->value,
+                         entry->address,
+                         entry->action,
+                         kDontSaveFPRegs);
+    stub.GetCode()->set_is_pregenerated(true);
+  }
+}
+
+
+// Takes the input in 3 registers: address_, value_, and object_.  A pointer
+// to the value has just been written into the object, and now this stub makes
+// sure we keep the GC informed.  The word in the object where the value has
+// been written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  Label skip_to_incremental_noncompacting;
+  Label skip_to_incremental_compacting;
+
+  // The first two instructions are generated with labels so as to get the
+  // offset fixed up correctly by the bind(Label*) call.  We patch it back and
+  // forth between a compare instruction (a nop in this position) and the
+  // real branch when we start and stop incremental heap marking.
+  // See RecordWriteStub::Patch for details.
+  __ b(&skip_to_incremental_noncompacting);
+  __ b(&skip_to_incremental_compacting);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  }
+  __ Ret();
+
+  __ bind(&skip_to_incremental_noncompacting);
+  GenerateIncremental(masm, INCREMENTAL);
+
+  __ bind(&skip_to_incremental_compacting);
+  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+  ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
+  ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
+  PatchBranchIntoNop(masm, 0);
+  PatchBranchIntoNop(masm, Assembler::kInstrSize);
+}
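
The two leading branches form a tiny patchable state machine in the instruction stream: in the initial STORE_BUFFER_ONLY mode both slots hold nop-like compares, and activating a marking mode patches exactly one slot back into a real branch (the ASSERTs above check the branch offsets stay encodable by the patcher). A sketch of the mode-to-slot mapping, with hypothetical encodings:

    #include <cassert>

    // Modes as in the stub; the names match the patch, the modeling is ours.
    enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

    // Instruction slots 0 and 1 hold either a nop (compare) or a branch.
    struct StubCodeSketch {
      bool slot_is_branch[2] = {false, false};  // Initial mode: both nops.

      void Patch(Mode mode) {
        // At most one slot is a live branch; none in STORE_BUFFER_ONLY.
        slot_is_branch[0] = (mode == INCREMENTAL);
        slot_is_branch[1] = (mode == INCREMENTAL_COMPACTION);
      }
    };

    int main() {
      StubCodeSketch code;
      // Initial state, as the ASSERTs at the end of Generate expect.
      assert(!code.slot_is_branch[0] && !code.slot_is_branch[1]);
      code.Patch(INCREMENTAL);
      assert(code.slot_is_branch[0] && !code.slot_is_branch[1]);
      code.Patch(STORE_BUFFER_ONLY);
      assert(!code.slot_is_branch[0] && !code.slot_is_branch[1]);
      return 0;
    }
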
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+  regs_.Save(masm);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    Label dont_need_remembered_set;
+
+    __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
+    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
+                           regs_.scratch0(),
+                           &dont_need_remembered_set);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch0(),
+                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                     ne,
+                     &dont_need_remembered_set);
+
+    // First notify the incremental marker if necessary, then update the
+    // remembered set.
+    CheckNeedsToInformIncrementalMarker(
+        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+    InformIncrementalMarker(masm, mode);
+    regs_.Restore(masm);
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+
+    __ bind(&dont_need_remembered_set);
+  }
+
+  CheckNeedsToInformIncrementalMarker(
+      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+  InformIncrementalMarker(masm, mode);
+  regs_.Restore(masm);
+  __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+  int argument_count = 3;
+  __ PrepareCallCFunction(argument_count, regs_.scratch0());
+  Register address =
+      r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
+  ASSERT(!address.is(regs_.object()));
+  ASSERT(!address.is(r0));
+  __ Move(address, regs_.address());
+  __ Move(r0, regs_.object());
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ Move(r1, address);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ ldr(r1, MemOperand(address, 0));
+  }
+  __ mov(r2, Operand(ExternalReference::isolate_address()));
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ CallCFunction(
+        ExternalReference::incremental_evacuation_record_write_function(
+            masm->isolate()),
+        argument_count);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ CallCFunction(
+        ExternalReference::incremental_marking_record_write_function(
+            masm->isolate()),
+        argument_count);
+  }
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+    MacroAssembler* masm,
+    OnNoNeedToInformIncrementalMarker on_no_need,
+    Mode mode) {
+  Label on_black;
+  Label need_incremental;
+  Label need_incremental_pop_scratch;
+
+  // Let's look at the color of the object:  If it is not black, we don't
+  // have to inform the incremental marker.
+  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&on_black);
+
+  // Get the value from the slot.
+  __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
+
+  if (mode == INCREMENTAL_COMPACTION) {
+    Label ensure_not_white;
+
+    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kEvacuationCandidateMask,
+                     eq,
+                     &ensure_not_white);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+                     eq,
+                     &need_incremental);
+
+    __ bind(&ensure_not_white);
+  }
+
+  // We need extra registers for this, so we push the object and the address
+  // register temporarily.
+  __ Push(regs_.object(), regs_.address());
+  __ EnsureNotWhite(regs_.scratch0(),  // The value.
+                    regs_.scratch1(),  // Scratch.
+                    regs_.object(),  // Scratch.
+                    regs_.address(),  // Scratch.
+                    &need_incremental_pop_scratch);
+  __ Pop(regs_.object(), regs_.address());
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&need_incremental_pop_scratch);
+  __ Pop(regs_.object(), regs_.address());
+
+  __ bind(&need_incremental);
+
+  // Fall through when we need to inform the incremental marker.
+}
+
+
 #undef __
 
 } }  // namespace v8::internal