Index: src/arm/codegen-arm.cc
===================================================================
--- src/arm/codegen-arm.cc (revision 5874)
+++ src/arm/codegen-arm.cc (working copy)
@@ -6537,16 +6537,29 @@
 class DeferredReferenceGetNamedValue: public DeferredCode {
  public:
   explicit DeferredReferenceGetNamedValue(Register receiver,
-                                          Handle<String> name)
-      : receiver_(receiver), name_(name) {
-    set_comment("[ DeferredReferenceGetNamedValue");
+                                          Handle<String> name,
+                                          bool is_contextual)
+      : receiver_(receiver),
+        name_(name),
+        is_contextual_(is_contextual),
+        is_dont_delete_(false) {
+    set_comment(is_contextual
+                ? "[ DeferredReferenceGetNamedValue (contextual)"
+                : "[ DeferredReferenceGetNamedValue");
   }

   virtual void Generate();

+  void set_is_dont_delete(bool value) {
+    ASSERT(is_contextual_);
+    is_dont_delete_ = value;
+  }
+
  private:
   Register receiver_;
   Handle<String> name_;
+  bool is_contextual_;
+  bool is_dont_delete_;
 };


@@ -6573,10 +6586,20 @@
   // The rest of the instructions in the deferred code must be together.
   { Assembler::BlockConstPoolScope block_const_pool(masm_);
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
-    // The call must be followed by a nop(1) instruction to indicate that the
-    // in-object has been inlined.
-    __ nop(PROPERTY_ACCESS_INLINED);
+    RelocInfo::Mode mode = is_contextual_
+        ? RelocInfo::CODE_TARGET_CONTEXT
+        : RelocInfo::CODE_TARGET;
+    __ Call(ic, mode);
+    // We must mark the code just after the call with the correct marker.
+    MacroAssembler::NopMarkerTypes code_marker;
+    if (is_contextual_) {
+      code_marker = is_dont_delete_
+          ? MacroAssembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE
+          : MacroAssembler::PROPERTY_ACCESS_INLINED_CONTEXT;
+    } else {
+      code_marker = MacroAssembler::PROPERTY_ACCESS_INLINED;
+    }
+    __ MarkCode(code_marker);

     // At this point the answer is in r0. We move it to the expected register
     // if necessary.
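
Note on the marker scheme above: the instruction emitted right after the IC call does not affect execution; it only lets the inline-cache patching code classify the call site later (plain inlined property access, inlined contextual load, or contextual load whose cell is known DONT_DELETE and therefore carries no inline hole check). The snippet below is a minimal self-contained sketch of that pattern in plain C++; the one-byte encoding, kCall and the helper names are invented for illustration and are not V8 or ARM encodings.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Illustrative marker values mirroring MacroAssembler::NopMarkerTypes.
    enum Marker : uint8_t {
      PROPERTY_ACCESS_INLINED = 1,
      PROPERTY_ACCESS_INLINED_CONTEXT = 2,
      PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE = 3
    };

    const uint8_t kCall = 0xE8;  // stand-in "call" byte, not a real opcode

    // Emit a call followed by its marker, the same shape as
    // "__ Call(ic, mode); ... __ MarkCode(code_marker);" above.
    void EmitMarkedCall(std::vector<uint8_t>* code, Marker marker) {
      code->push_back(kCall);
      code->push_back(static_cast<uint8_t>(marker));
    }

    // A later patching pass reads the byte after the call to learn how the
    // preceding inlined access was laid out.
    Marker MarkerAfterCall(const std::vector<uint8_t>& code,
                           std::size_t call_index) {
      assert(code[call_index] == kCall);
      return static_cast<Marker>(code[call_index + 1]);
    }

    int main() {
      std::vector<uint8_t> code;
      EmitMarkedCall(&code, PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE);
      assert(MarkerAfterCall(code, 0) ==
             PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE);
      return 0;
    }
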
@@ -6640,7 +6663,7 @@
     __ Call(ic, RelocInfo::CODE_TARGET);
     // The call must be followed by a nop instruction to indicate that the
     // keyed load has been inlined.
-    __ nop(PROPERTY_ACCESS_INLINED);
+    __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);

     // Now go back to the frame that we entered with. This will not overwrite
     // the receiver or key registers since they were not in use when we came
@@ -6697,7 +6720,7 @@
     __ Call(ic, RelocInfo::CODE_TARGET);
     // The call must be followed by a nop instruction to indicate that the
     // keyed store has been inlined.
-    __ nop(PROPERTY_ACCESS_INLINED);
+    __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);

     // Block the constant pool for one more instruction after leaving this
     // constant pool block scope to include the branch instruction ending the
@@ -6745,7 +6768,7 @@
     __ Call(ic, RelocInfo::CODE_TARGET);
     // The call must be followed by a nop instruction to indicate that the
     // named store has been inlined.
-    __ nop(PROPERTY_ACCESS_INLINED);
+    __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);

     // Go back to the frame we entered with. The instructions
     // generated by this merge are skipped over by the inline store
@@ -6763,7 +6786,14 @@

 // Consumes the top of stack (the receiver) and pushes the result instead.
 void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
-  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
+  bool contextual_load_in_builtin =
+      is_contextual &&
+      (Bootstrapper::IsActive() ||
+       (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
+
+  if (scope()->is_global_scope() ||
+      loop_nesting() == 0 ||
+      contextual_load_in_builtin) {
     Comment cmnt(masm(), "[ Load from named Property");
     // Setup the name register and call load IC.
     frame_->CallLoadIC(name,
@@ -6773,12 +6803,19 @@
     frame_->EmitPush(r0);  // Push answer.
   } else {
     // Inline the in-object property case.
-    Comment cmnt(masm(), "[ Inlined named property load");
+    Comment cmnt(masm(), is_contextual
+                             ? "[ Inlined contextual property load"
+                             : "[ Inlined named property load");

     // Counter will be decremented in the deferred code. Placed here to avoid
     // having it in the instruction stream below where patching will occur.
-    __ IncrementCounter(&Counters::named_load_inline, 1,
-                        frame_->scratch0(), frame_->scratch1());
+    if (is_contextual) {
+      __ IncrementCounter(&Counters::named_load_global_inline, 1,
+                          frame_->scratch0(), frame_->scratch1());
+    } else {
+      __ IncrementCounter(&Counters::named_load_inline, 1,
+                          frame_->scratch0(), frame_->scratch1());
+    }

     // The following instructions are the inlined load of an in-object property.
     // Parts of this code is patched, so the exact instructions generated needs
@@ -6789,19 +6826,57 @@
     Register receiver = frame_->PopToRegister();

     DeferredReferenceGetNamedValue* deferred =
-        new DeferredReferenceGetNamedValue(receiver, name);
+        new DeferredReferenceGetNamedValue(receiver, name, is_contextual);

+    bool is_dont_delete = false;
+    if (is_contextual) {
+      if (!info_->closure().is_null()) {
+        // When doing lazy compilation we can check if the global cell
+        // already exists and use its "don't delete" status as a hint.
+        AssertNoAllocation no_gc;
+        v8::internal::GlobalObject* global_object =
+            info_->closure()->context()->global();
+        LookupResult lookup;
+        global_object->LocalLookupRealNamedProperty(*name, &lookup);
+        if (lookup.IsProperty() && lookup.type() == NORMAL) {
+          ASSERT(lookup.holder() == global_object);
+          ASSERT(global_object->property_dictionary()->ValueAt(
+              lookup.GetDictionaryEntry())->IsJSGlobalPropertyCell());
+          is_dont_delete = lookup.IsDontDelete();
+        }
+      }
+      if (is_dont_delete) {
+        __ IncrementCounter(&Counters::dont_delete_hint_hit, 1,
+                            frame_->scratch0(), frame_->scratch1());
+      }
+    }
+
+    { Assembler::BlockConstPoolScope block_const_pool(masm_);
+      if (!is_contextual) {
+        // Check that the receiver is a heap object.
+        __ tst(receiver, Operand(kSmiTagMask));
+        deferred->Branch(eq);
+      }
+
+      // Check for the_hole_value if necessary.
+      // Below we rely on the number of instructions generated, and we can't
+      // cope with the Check macro which does not generate a fixed number of
+      // instructions.
+      Label skip, check_the_hole, cont;
+      if (FLAG_debug_code && is_contextual && is_dont_delete) {
+        __ b(&skip);
+        __ bind(&check_the_hole);
+        __ Check(ne, "DontDelete cells can't contain the hole");
+        __ b(&cont);
+        __ bind(&skip);
+      }
+
 #ifdef DEBUG
-    int kInlinedNamedLoadInstructions = 7;
-    Label check_inlined_codesize;
-    masm_->bind(&check_inlined_codesize);
+      int InlinedNamedLoadInstructions = 5;
+      Label check_inlined_codesize;
+      masm_->bind(&check_inlined_codesize);
 #endif

-    { Assembler::BlockConstPoolScope block_const_pool(masm_);
-      // Check that the receiver is a heap object.
-      __ tst(receiver, Operand(kSmiTagMask));
-      deferred->Branch(eq);
-
       Register scratch = VirtualFrame::scratch0();
       Register scratch2 = VirtualFrame::scratch1();

@@ -6812,12 +6887,42 @@
       __ cmp(scratch, scratch2);
       deferred->Branch(ne);

-      // Initially use an invalid index. The index will be patched by the
-      // inline cache code.
-      __ ldr(receiver, MemOperand(receiver, 0));
+      if (is_contextual) {
+#ifdef DEBUG
+        InlinedNamedLoadInstructions += 1;
+#endif
+        // Load the (initially invalid) cell and get its value.
+        masm()->mov(receiver, Operand(Factory::null_value()));
+        __ ldr(receiver,
+               FieldMemOperand(receiver, JSGlobalPropertyCell::kValueOffset));

+        deferred->set_is_dont_delete(is_dont_delete);
+
+        if (!is_dont_delete) {
+#ifdef DEBUG
+          InlinedNamedLoadInstructions += 3;
+#endif
+          __ cmp(receiver, Operand(Factory::the_hole_value()));
+          deferred->Branch(eq);
+        } else if (FLAG_debug_code) {
+#ifdef DEBUG
+          InlinedNamedLoadInstructions += 3;
+#endif
+          __ cmp(receiver, Operand(Factory::the_hole_value()));
+          __ b(&check_the_hole, eq);
+          __ bind(&cont);
+        }
+      } else {
+        // Initially use an invalid index. The index will be patched by the
+        // inline cache code.
+        __ ldr(receiver, MemOperand(receiver, 0));
+      }
+
       // Make sure that the expected number of instructions are generated.
-      ASSERT_EQ(kInlinedNamedLoadInstructions,
+      // If the code before is updated, the offsets in ic-arm.cc
+      // LoadIC::PatchInlinedContextualLoad and PatchInlinedLoad need
+      // to be updated.
+      ASSERT_EQ(InlinedNamedLoadInstructions,
                 masm_->InstructionsGeneratedSince(&check_inlined_codesize));
     }

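For context, the fast path inlined by the patch for a contextual (global) load amounts to: load a pointer to the global property cell (the mov of a placeholder constant that the IC code later patches), read the cell's current value, and fall back to the deferred LoadIC call if that value is the hole sentinel; the hole check is omitted entirely when the "don't delete" hint guarantees the cell can never hold the hole. The following is a rough, self-contained C++ model of that logic, not V8 code; Cell, kTheHole and LookupSlow are illustrative stand-ins.

    #include <cassert>
    #include <string>

    // Stand-in for a JSGlobalPropertyCell: holds the current value, or a
    // sentinel "hole" once the property has been deleted from the global.
    struct Cell { const char* value; };

    const char* const kTheHole = nullptr;  // stand-in for the_hole_value()

    // Stand-in slow path (in the patch this is the deferred LoadIC call).
    const char* LookupSlow() { return "slow-path value"; }

    // Fast path: one load from the cell, plus a hole check that is skipped
    // when the property is DONT_DELETE -- the point of the hint above.
    const char* ContextualLoad(const Cell* cell, bool is_dont_delete) {
      const char* value = cell->value;             // ldr from kValueOffset
      if (!is_dont_delete && value == kTheHole) {  // cmp the_hole, branch to
        return LookupSlow();                       // the deferred code
      }
      return value;
    }

    int main() {
      Cell cell = { "fast-path value" };
      assert(std::string(ContextualLoad(&cell, true)) == "fast-path value");
      cell.value = kTheHole;  // deleted: non-DONT_DELETE loads must miss
      assert(std::string(ContextualLoad(&cell, false)) == "slow-path value");
      return 0;
    }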