Chromium Code Reviews

Unified Diff: src/arm/codegen-arm.cc

Issue 2249002: Fix jump targets on ARM to merge virtual frames (really this time).... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 6 months ago
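
The patch below switches ARM deferred code from the old fixed register save/restore (FrameRegisterState::Save/Restore) to working on a spilled copy of the virtual frame that is merged back before rejoining the inline code. The following is an editorial sketch of that pattern, distilled from the DeferredReferenceGetNamedValue::Generate hunk further down; it is not code from the patch, and DoDeferredWork() is a hypothetical placeholder:

void SomeDeferredCode::Generate() {
  // Take a copy of the virtual frame and spill it, so the deferred code can
  // clobber registers without disturbing the frame layout the inline code
  // expects.
  VirtualFrame copied_frame(*frame_state()->frame());
  copied_frame.SpillAll();

  DoDeferredWork();  // hypothetical: call a stub or IC, move results around

  // Merge the copy back into the frame we entered with, so the jump back to
  // the inline code sees the virtual frame in its expected state.
  copied_frame.MergeTo(frame_state()->frame());
}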
 // Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 48 matching lines...)
                                     bool strict);
 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm);
 static void MultiplyByKnownInt(MacroAssembler* masm,
                                Register source,
                                Register destination,
                                int known_int);
 static bool IsEasyToMultiplyBy(int x);


-#define __ ACCESS_MASM(masm)
-
-// -------------------------------------------------------------------------
-// Platform-specific FrameRegisterState functions.
-
-void FrameRegisterState::Save(MacroAssembler* masm) const {
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    int action = registers_[i];
-    if (action == kPush) {
-      __ push(RegisterAllocator::ToRegister(i));
-    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
-      __ str(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
-    }
-  }
-}
-
-
-void FrameRegisterState::Restore(MacroAssembler* masm) const {
-  // Restore registers in reverse order due to the stack.
-  for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
-    int action = registers_[i];
-    if (action == kPush) {
-      __ pop(RegisterAllocator::ToRegister(i));
-    } else if (action != kIgnore) {
-      action &= ~kSyncedFlag;
-      __ ldr(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
-    }
-  }
-}
-
-
-#undef __
 #define __ ACCESS_MASM(masm_)

 // -------------------------------------------------------------------------
 // Platform-specific DeferredCode functions.

 void DeferredCode::SaveRegisters() {
-  frame_state_.Save(masm_);
 }


 void DeferredCode::RestoreRegisters() {
-  frame_state_.Restore(masm_);
 }


 // -------------------------------------------------------------------------
 // Platform-specific RuntimeCallHelper functions.

 void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  frame_state_->Save(masm);
+  frame_state_.frame()->AssertIsSpilled();
 }


 void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  frame_state_->Restore(masm);
 }


 void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+  frame_state_.frame()->AssertIsSpilled();
   masm->EnterInternalFrame();
 }


 void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
   masm->LeaveInternalFrame();
 }


 // -------------------------------------------------------------------------
(...skipping 3310 matching lines...)
     // For a compound assignment the right-hand side is a binary operation
     // between the current property value and the actual right-hand side.
     if (is_trivial_receiver) {
       Load(prop->obj());
     } else if (var != NULL) {
       LoadGlobal();
     } else {
       frame_->Dup();
     }
     EmitNamedLoad(name, var != NULL);
-    frame_->EmitPush(r0);

     // Perform the binary operation.
     Literal* literal = node->value()->AsLiteral();
     bool overwrite_value =
         (node->value()->AsBinaryOperation() != NULL &&
          node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
     if (literal != NULL && literal->handle()->IsSmi()) {
       SmiOperation(node->binary_op(),
                    literal->handle(),
                    false,
(...skipping 2136 matching lines...)
   }

   virtual void Generate();

  private:
   Register receiver_;
   Handle<String> name_;
 };


+// Convention for this is that on entry the receiver is in a register that
+// is not used by the stack. On exit the answer is found in that same
+// register and the stack has the same height.
 void DeferredReferenceGetNamedValue::Generate() {
-  ASSERT(receiver_.is(r0) || receiver_.is(r1));
+#ifdef DEBUG
+  int expected_height = frame_state()->frame()->height();
+#endif
+  VirtualFrame copied_frame(*frame_state()->frame());
+  copied_frame.SpillAll();

   Register scratch1 = VirtualFrame::scratch0();
   Register scratch2 = VirtualFrame::scratch1();
+  ASSERT(!receiver_.is(scratch1) && !receiver_.is(scratch2));
   __ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2);
   __ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2);

   // Ensure receiver in r0 and name in r2 to match load ic calling convention.
   __ Move(r0, receiver_);
   __ mov(r2, Operand(name_));

   // The rest of the instructions in the deferred code must be together.
   { Assembler::BlockConstPoolScope block_const_pool(masm_);
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET);
     // The call must be followed by a nop(1) instruction to indicate that the
     // in-object has been inlined.
     __ nop(PROPERTY_ACCESS_INLINED);

+    // At this point the answer is in r0. We move it to the expected register
+    // if necessary.
+    __ Move(receiver_, r0);
+
+    // Now go back to the frame that we entered with. This will not overwrite
+    // the receiver register since that register was not in use when we came
+    // in. The instructions emitted by this merge are skipped over by the
+    // inline load patching mechanism when looking for the branch instruction
+    // that tells it where the code to patch is.
+    copied_frame.MergeTo(frame_state()->frame());
+
     // Block the constant pool for one more instruction after leaving this
     // constant pool block scope to include the branch instruction ending the
     // deferred code.
     __ BlockConstPoolFor(1);
   }
+  ASSERT_EQ(expected_height, frame_state()->frame()->height());
 }


 class DeferredReferenceGetKeyedValue: public DeferredCode {
  public:
   DeferredReferenceGetKeyedValue(Register key, Register receiver)
       : key_(key), receiver_(receiver) {
     set_comment("[ DeferredReferenceGetKeyedValue");
   }

(...skipping 80 matching lines...)
     __ nop(PROPERTY_ACCESS_INLINED);

     // Block the constant pool for one more instruction after leaving this
     // constant pool block scope to include the branch instruction ending the
     // deferred code.
     __ BlockConstPoolFor(1);
   }
 }


+// Consumes the top of stack (the receiver) and pushes the result instead.
 void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
   if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
     Comment cmnt(masm(), "[ Load from named Property");
     // Setup the name register and call load IC.
     frame_->CallLoadIC(name,
                        is_contextual
                        ? RelocInfo::CODE_TARGET_CONTEXT
                        : RelocInfo::CODE_TARGET);
+    frame_->EmitPush(r0);  // Push answer.
   } else {
     // Inline the in-object property case.
     Comment cmnt(masm(), "[ Inlined named property load");

     // Counter will be decremented in the deferred code. Placed here to avoid
     // having it in the instruction stream below where patching will occur.
     __ IncrementCounter(&Counters::named_load_inline, 1,
                         frame_->scratch0(), frame_->scratch1());

     // The following instructions are the inlined load of an in-object property.
     // Parts of this code is patched, so the exact instructions generated needs
     // to be fixed. Therefore the instruction pool is blocked when generating
     // this code

     // Load the receiver from the stack.
     Register receiver = frame_->PopToRegister();
-    VirtualFrame::SpilledScope spilled(frame_);

     DeferredReferenceGetNamedValue* deferred =
         new DeferredReferenceGetNamedValue(receiver, name);

 #ifdef DEBUG
     int kInlinedNamedLoadInstructions = 7;
     Label check_inlined_codesize;
     masm_->bind(&check_inlined_codesize);
 #endif

     { Assembler::BlockConstPoolScope block_const_pool(masm_);
       // Check that the receiver is a heap object.
       __ tst(receiver, Operand(kSmiTagMask));
       deferred->Branch(eq);

+      Register scratch = VirtualFrame::scratch0();
+      Register scratch2 = VirtualFrame::scratch1();
+
       // Check the map. The null map used below is patched by the inline cache
-      // code.
-      __ ldr(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-      __ mov(r3, Operand(Factory::null_value()));
-      __ cmp(r2, r3);
+      // code. Therefore we can't use a LoadRoot call.
+      __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
+      __ mov(scratch2, Operand(Factory::null_value()));
+      __ cmp(scratch, scratch2);
       deferred->Branch(ne);

       // Initially use an invalid index. The index will be patched by the
       // inline cache code.
-      __ ldr(r0, MemOperand(receiver, 0));
+      __ ldr(receiver, MemOperand(receiver, 0));

       // Make sure that the expected number of instructions are generated.
       ASSERT_EQ(kInlinedNamedLoadInstructions,
                 masm_->InstructionsGeneratedSince(&check_inlined_codesize));
     }

     deferred->BindExit();
+    // At this point the receiver register has the result, either from the
+    // deferred code or from the inlined code.
+    frame_->EmitPush(receiver);
   }
 }


 void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
 #ifdef DEBUG
   int expected_height = frame_->height() - (is_contextual ? 1 : 2);
 #endif
   frame_->CallStoreIC(name, is_contextual);

(...skipping 195 matching lines...)
     ASSERT(proxy->AsVariable()->is_global());
     return proxy->name();
   } else {
     Literal* raw_name = property->key()->AsLiteral();
     ASSERT(raw_name != NULL);
     return Handle<String>(String::cast(*raw_name->handle()));
   }
 }


+void Reference::DupIfPersist() {
+  if (persist_after_get_) {
+    switch (type_) {
+      case KEYED:
+        cgen_->frame()->Dup2();
+        break;
+      case NAMED:
+        cgen_->frame()->Dup();
+        // Fall through.
+      case UNLOADED:
+      case ILLEGAL:
+      case SLOT:
+        // Do nothing.
+        ;
+    }
+  } else {
+    set_unloaded();
+  }
+}
+
+
 void Reference::GetValue() {
   ASSERT(cgen_->HasValidEntryRegisters());
   ASSERT(!is_illegal());
   ASSERT(!cgen_->has_cc());
   MacroAssembler* masm = cgen_->masm();
   Property* property = expression_->AsProperty();
   if (property != NULL) {
     cgen_->CodeForSourcePosition(property->position());
   }

   switch (type_) {
     case SLOT: {
       Comment cmnt(masm, "[ Load from Slot");
       Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
       ASSERT(slot != NULL);
+      DupIfPersist();
       cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-      if (!persist_after_get_) {
-        cgen_->UnloadReference(this);
-      }
       break;
     }

     case NAMED: {
       Variable* var = expression_->AsVariableProxy()->AsVariable();
       bool is_global = var != NULL;
       ASSERT(!is_global || var->is_global());
-      if (persist_after_get_) {
-        cgen_->frame()->Dup();
-      }
-      cgen_->EmitNamedLoad(GetName(), is_global);
-      cgen_->frame()->EmitPush(r0);
-      if (!persist_after_get_) set_unloaded();
+      Handle<String> name = GetName();
+      DupIfPersist();
+      cgen_->EmitNamedLoad(name, is_global);
       break;
     }

     case KEYED: {
       ASSERT(property != NULL);
-      if (persist_after_get_) {
-        cgen_->frame()->Dup2();
-      }
+      DupIfPersist();
       cgen_->EmitKeyedLoad();
       cgen_->frame()->EmitPush(r0);
-      if (!persist_after_get_) set_unloaded();
       break;
     }

     default:
       UNREACHABLE();
   }
 }


 void Reference::SetValue(InitState init_state) {
(...skipping 4197 matching lines...)
   __ bind(&string_add_runtime);
   __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
 }


 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_ARM
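
A note on the new Reference::DupIfPersist helper in the hunks above: a KEYED reference keeps both the receiver and the key on the frame, so a persisting reference duplicates the top two elements (Dup2); a NAMED reference only keeps the receiver and duplicates one (Dup); the remaining reference types keep nothing on the frame, and a non-persisting reference is simply marked unloaded. The sketch below is an editorial annotation, not part of the patch, and the frame layouts in the comments are assumptions read off the old code in Reference::GetValue:

// NAMED reference with persist_after_get_ == true:
//   frame: ... receiver
//   DupIfPersist();                    // frame: ... receiver receiver
//   cgen_->EmitNamedLoad(name, ...);   // consumes one receiver and pushes
//                                      // the result: ... receiver result
//
// KEYED reference with persist_after_get_ == true:
//   frame: ... receiver key
//   DupIfPersist();                    // frame: ... receiver key receiver key
//   cgen_->EmitKeyedLoad();            // consumes the top receiver/key pair,
//                                      // leaving the result in r0
//   cgen_->frame()->EmitPush(r0);      // frame: ... receiver key result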