Chromium Code Reviews

Unified Diff: src/arm/codegen-arm.cc

Issue 5140002: Generate inline code for contextual loads on ARM.... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 10 years, 1 month ago
 // Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 6504 matching lines...)
   }

   cc_reg_ = eq;
   ASSERT(has_cc() && frame_->height() == original_height);
 }


 class DeferredReferenceGetNamedValue: public DeferredCode {
  public:
   explicit DeferredReferenceGetNamedValue(Register receiver,
-                                          Handle<String> name)
-      : receiver_(receiver), name_(name) {
-    set_comment("[ DeferredReferenceGetNamedValue");
+                                          Handle<String> name,
+                                          bool is_contextual)
+      : receiver_(receiver),
+        name_(name),
+        is_contextual_(is_contextual),
+        is_dont_delete_(false) {
+    set_comment(is_contextual
+                ? "[ DeferredReferenceGetNamedValue (contextual)"
+                : "[ DeferredReferenceGetNamedValue");
   }

   virtual void Generate();

+  void set_is_dont_delete(bool value) {
+    ASSERT(is_contextual_);
+    is_dont_delete_ = value;
+  }
+
  private:
   Register receiver_;
   Handle<String> name_;
+  bool is_contextual_;
+  bool is_dont_delete_;
 };


 // Convention for this is that on entry the receiver is in a register that
 // is not used by the stack. On exit the answer is found in that same
 // register and the stack has the same height.
 void DeferredReferenceGetNamedValue::Generate() {
 #ifdef DEBUG
   int expected_height = frame_state()->frame()->height();
 #endif
   VirtualFrame copied_frame(*frame_state()->frame());
   copied_frame.SpillAll();

   Register scratch1 = VirtualFrame::scratch0();
   Register scratch2 = VirtualFrame::scratch1();
   ASSERT(!receiver_.is(scratch1) && !receiver_.is(scratch2));
   __ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2);
   __ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2);

   // Ensure receiver in r0 and name in r2 to match load ic calling convention.
   __ Move(r0, receiver_);
   __ mov(r2, Operand(name_));

   // The rest of the instructions in the deferred code must be together.
   { Assembler::BlockConstPoolScope block_const_pool(masm_);
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
-    // The call must be followed by a nop(1) instruction to indicate that the
-    // in-object has been inlined.
-    __ nop(PROPERTY_ACCESS_INLINED);
+    RelocInfo::Mode mode = is_contextual_
+        ? RelocInfo::CODE_TARGET_CONTEXT
+        : RelocInfo::CODE_TARGET;
+    __ Call(ic, mode);
+    // We must mark the code just after the call with the correct marker.
+    __ MarkCode(is_contextual_
+                ? Assembler::PROPERTY_ACCESS_INLINED_CONTEXT
+                : Assembler::PROPERTY_ACCESS_INLINED);

     // At this point the answer is in r0. We move it to the expected register
     // if necessary.
     __ Move(receiver_, r0);

     // Now go back to the frame that we entered with. This will not overwrite
     // the receiver register since that register was not in use when we came
     // in. The instructions emitted by this merge are skipped over by the
     // inline load patching mechanism when looking for the branch instruction
     // that tells it where the code to patch is.
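Background on the marker change above: the inline-cache patcher later scans compiled code for call sites that have an inlined fast path, using the instruction immediately after the IC call as a tag. The old code emitted nop(PROPERTY_ACCESS_INLINED); the patch routes this through MarkCode and adds a contextual variant, and it calls the IC with RelocInfo::CODE_TARGET_CONTEXT so the runtime knows the access goes through the global context. On ARM such a tag can be an architecturally inert mov rN, rN whose register fields encode the marker, which is presumably what MarkCode emits. A minimal standalone sketch of the idea, with made-up marker values and helper names (the real constants live in the ARM Assembler):

#include <cassert>
#include <cstdint>

// Hypothetical marker values standing in for PROPERTY_ACCESS_INLINED and
// PROPERTY_ACCESS_INLINED_CONTEXT.
enum Marker { kInlined = 1, kInlinedContext = 2 };

// mov rN, rN has no architectural effect; cond=AL and opcode=MOV give the
// 0xE1A00000 base word, with Rd in bits 12..15 and Rm in bits 0..3.
uint32_t MarkerNop(Marker m) {
  uint32_t n = static_cast<uint32_t>(m);
  return 0xE1A00000u | (n << 12) | n;
}

// The patcher reads the word just after the call and recovers the tag.
bool HasMarker(uint32_t instr_after_call, Marker m) {
  return instr_after_call == MarkerNop(m);
}

int main() {
  assert(HasMarker(MarkerNop(kInlinedContext), kInlinedContext));
  assert(!HasMarker(MarkerNop(kInlined), kInlinedContext));
  return 0;
}

Because the tag is a real instruction in the code stream, the patcher can find it at a fixed offset from the call without any side tables.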
(...skipping 43 matching lines...)
     __ Swap(r0, r1, ip);
   }

   // The rest of the instructions in the deferred code must be together.
   { Assembler::BlockConstPoolScope block_const_pool(masm_);
     // Call keyed load IC. It has the arguments key and receiver in r0 and r1.
     Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET);
     // The call must be followed by a nop instruction to indicate that the
     // keyed load has been inlined.
-    __ nop(PROPERTY_ACCESS_INLINED);
+    __ MarkCode(Assembler::PROPERTY_ACCESS_INLINED);

     // Now go back to the frame that we entered with. This will not overwrite
     // the receiver or key registers since they were not in use when we came
     // in. The instructions emitted by this merge are skipped over by the
     // inline load patching mechanism when looking for the branch instruction
     // that tells it where the code to patch is.
     copied_frame.MergeTo(frame_state()->frame());

     // Block the constant pool for one more instruction after leaving this
     // constant pool block scope to include the branch instruction ending the
(...skipping 36 matching lines...)
   ASSERT(receiver_.is(r2));

   // The rest of the instructions in the deferred code must be together.
   { Assembler::BlockConstPoolScope block_const_pool(masm_);
     // Call keyed store IC. It has the arguments value, key and receiver in r0,
     // r1 and r2.
     Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET);
     // The call must be followed by a nop instruction to indicate that the
     // keyed store has been inlined.
-    __ nop(PROPERTY_ACCESS_INLINED);
+    __ MarkCode(Assembler::PROPERTY_ACCESS_INLINED);

     // Block the constant pool for one more instruction after leaving this
     // constant pool block scope to include the branch instruction ending the
     // deferred code.
     __ BlockConstPoolFor(1);
   }
 }
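A note on the BlockConstPoolScope / BlockConstPoolFor(1) pattern recurring in these deferred blocks: the ARM assembler may dump its pending constant pool into the instruction stream between almost any two instructions, and a pool landing inside one of these patchable sequences would shift the fixed offsets that the patching code depends on. The scope suppresses pool emission across the sequence, and BlockConstPoolFor(1) extends the suppression over the branch that ends the deferred code just after the scope closes. A toy model of the blocking logic, under invented names (the real logic lives in V8's ARM Assembler):

#include <cassert>
#include <cstdint>
#include <vector>

// Toy assembler: a pending constant pool is emitted right after the next
// instruction unless pool emission is currently blocked.
class ToyAssembler {
 public:
  void Emit(uint32_t instr) {
    buffer_.push_back(instr);
    if (blocked_ > 0) {
      --blocked_;
    } else if (pool_pending_) {
      buffer_.push_back(0xFFFFFFFFu);  // the pool lands here
      pool_pending_ = false;
    }
  }
  // Keep the pool out of the next n instruction slots (BlockConstPoolFor).
  void BlockConstPoolFor(int n) { blocked_ += n; }
  void RequestPool() { pool_pending_ = true; }
  const std::vector<uint32_t>& buffer() const { return buffer_; }

 private:
  std::vector<uint32_t> buffer_;
  int blocked_ = 0;
  bool pool_pending_ = false;
};

int main() {
  ToyAssembler masm;
  masm.RequestPool();
  masm.BlockConstPoolFor(2);  // protect a two-instruction patchable sequence
  masm.Emit(1);               // no pool may be dumped after this...
  masm.Emit(2);               // ...nor after this
  masm.Emit(3);               // the pool is finally emitted here
  assert(masm.buffer().size() == 4 && masm.buffer()[3] == 0xFFFFFFFFu);
  return 0;
}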


 class DeferredReferenceSetNamedValue: public DeferredCode {
(...skipping 27 matching lines...)
   __ mov(r2, Operand(name_));

   // The rest of the instructions in the deferred code must be together.
   { Assembler::BlockConstPoolScope block_const_pool(masm_);
     // Call keyed store IC. It has the arguments value, key and receiver in r0,
     // r1 and r2.
     Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET);
     // The call must be followed by a nop instruction to indicate that the
     // named store has been inlined.
-    __ nop(PROPERTY_ACCESS_INLINED);
+    __ MarkCode(Assembler::PROPERTY_ACCESS_INLINED);

     // Go back to the frame we entered with. The instructions
     // generated by this merge are skipped over by the inline store
     // patching mechanism when looking for the branch instruction that
     // tells it where the code to patch is.
     copied_frame.MergeTo(frame_state()->frame());

     // Block the constant pool for one more instruction after leaving this
     // constant pool block scope to include the branch instruction ending the
     // deferred code.
     __ BlockConstPoolFor(1);
   }
 }


 // Consumes the top of stack (the receiver) and pushes the result instead.
 void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
-  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
+  bool contextual_load_in_builtin =
+      is_contextual &&
+      (Bootstrapper::IsActive() ||
+       (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
+
+  if (scope()->is_global_scope() ||
+      loop_nesting() == 0 ||
+      contextual_load_in_builtin) {
     Comment cmnt(masm(), "[ Load from named Property");
     // Setup the name register and call load IC.
     frame_->CallLoadIC(name,
                        is_contextual
                            ? RelocInfo::CODE_TARGET_CONTEXT
                            : RelocInfo::CODE_TARGET);
     frame_->EmitPush(r0);  // Push answer.
   } else {
     // Inline the in-object property case.
-    Comment cmnt(masm(), "[ Inlined named property load");
+    Comment cmnt(masm(), is_contextual
+                             ? "[ Inlined contextual property load"
+                             : "[ Inlined named property load");

     // Counter will be decremented in the deferred code. Placed here to avoid
     // having it in the instruction stream below where patching will occur.
-    __ IncrementCounter(&Counters::named_load_inline, 1,
-                        frame_->scratch0(), frame_->scratch1());
+    if (is_contextual) {
+      __ IncrementCounter(&Counters::named_load_global_inline, 1,
+                          frame_->scratch0(), frame_->scratch1());
+    } else {
+      __ IncrementCounter(&Counters::named_load_inline, 1,
+                          frame_->scratch0(), frame_->scratch1());
+    }

     // The following instructions are the inlined load of an in-object property.
     // Parts of this code is patched, so the exact instructions generated needs
     // to be fixed. Therefore the instruction pool is blocked when generating
     // this code

     // Load the receiver from the stack.
     Register receiver = frame_->PopToRegister();

     DeferredReferenceGetNamedValue* deferred =
-        new DeferredReferenceGetNamedValue(receiver, name);
+        new DeferredReferenceGetNamedValue(receiver, name, is_contextual);
+
+    bool is_dont_delete = false;
+    if (is_contextual) {
+      if (!info_->closure().is_null()) {
+        // When doing lazy compilation we can check if the global cell
+        // already exists and use its "don't delete" status as a hint.
+        AssertNoAllocation no_gc;
+        v8::internal::GlobalObject* global_object =
+            info_->closure()->context()->global();
+        LookupResult lookup;
+        global_object->LocalLookupRealNamedProperty(*name, &lookup);
+        if (lookup.IsProperty() && lookup.type() == NORMAL) {
+          ASSERT(lookup.holder() == global_object);
+          ASSERT(global_object->property_dictionary()->ValueAt(
+              lookup.GetDictionaryEntry())->IsJSGlobalPropertyCell());
+          is_dont_delete = lookup.IsDontDelete();
+        }
+      }
+      if (is_dont_delete) {
+        __ IncrementCounter(&Counters::dont_delete_hint_hit, 1,
+                            frame_->scratch0(), frame_->scratch1());
+      }
+    }
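The hint computed just above rests on an invariant worth spelling out: a dictionary-mode global property lives in a JSGlobalPropertyCell, and deleting the property stores the_hole into that cell. A property marked DONT_DELETE can never be deleted, so its cell can never hold the hole once it exists, and the inlined load can omit the hole check. A toy model of the decision, with invented types standing in for the LookupResult machinery used here:

#include <string>
#include <unordered_map>

// Invented stand-ins for the global object's property dictionary and cells.
struct PropertyCell {
  int value;         // stand-in for the cell's tagged value
  bool dont_delete;  // the DONT_DELETE attribute bit
};
using GlobalProperties = std::unordered_map<std::string, PropertyCell>;

// Mirrors the hint in EmitNamedLoad: the property must already exist and be
// DONT_DELETE for the generated inline load to omit the hole check.
bool CanOmitHoleCheck(const GlobalProperties& global, const std::string& name) {
  auto it = global.find(name);
  return it != global.end() && it->second.dont_delete;
}

If the property does not exist yet, the hint simply stays false and the generated code keeps the hole check, so a miss in this lookup is safe.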
+
+    { Assembler::BlockConstPoolScope block_const_pool(masm_);
+      if (!is_contextual) {
+        // Check that the receiver is a heap object.
+        __ tst(receiver, Operand(kSmiTagMask));
+        deferred->Branch(eq);
+      }
+
+      // This code checks for the_hole_value when we DontDelete cell.
Søren Thygesen Gjesse 2010/11/19 09:08:47: Some words are missing in "when we DontDelete cell".
Alexandre 2010/11/23 11:23:21: Changed the comment.
+      // Below we rely on the number of instructions generated, and we can't
+      // cope with the Check macro which does not generate a fixed number of
+      // instructions.
+      Label skip, check_the_hole, cont;
+      if (is_contextual && is_dont_delete && FLAG_debug_code) {
Søren Thygesen Gjesse 2010/11/19 09:08:47: Please move the FLAG_debug_code to the beginning of the condition.
Alexandre 2010/11/23 11:23:21: Done.
+        __ b(&skip);
+        __ bind(&check_the_hole);
+        __ Check(ne, "DontDelete cells can't contain the hole");
+        __ b(&cont);
+        __ bind(&skip);
+      }

 #ifdef DEBUG
-      int kInlinedNamedLoadInstructions = 7;
+      int InlinedNamedLoadInstructions = 5;
       Label check_inlined_codesize;
       masm_->bind(&check_inlined_codesize);
 #endif

-    { Assembler::BlockConstPoolScope block_const_pool(masm_);
-      // Check that the receiver is a heap object.
-      __ tst(receiver, Operand(kSmiTagMask));
-      deferred->Branch(eq);
-
       Register scratch = VirtualFrame::scratch0();
       Register scratch2 = VirtualFrame::scratch1();

       // Check the map. The null map used below is patched by the inline cache
       // code. Therefore we can't use a LoadRoot call.
       __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
       __ mov(scratch2, Operand(Factory::null_value()));
       __ cmp(scratch, scratch2);
       deferred->Branch(ne);

-      // Initially use an invalid index. The index will be patched by the
-      // inline cache code.
-      __ ldr(receiver, MemOperand(receiver, 0));
+      if (is_contextual) {
+#ifdef DEBUG
+        InlinedNamedLoadInstructions += 1;
+#endif
+        // Load the (initially invalid) cell and get its value.
+        masm()->mov(receiver, Operand(Factory::null_value()));
+        __ ldr(receiver,
+               FieldMemOperand(receiver, JSGlobalPropertyCell::kValueOffset));
+
+        deferred->set_is_dont_delete(is_dont_delete);
+
+        if (!is_dont_delete) {
+#ifdef DEBUG
+          InlinedNamedLoadInstructions += 3;
+#endif
+          __ cmp(receiver, Operand(Factory::the_hole_value()));
+          deferred->Branch(eq);
+        } else if (FLAG_debug_code) {
+#ifdef DEBUG
+          InlinedNamedLoadInstructions += 3;
+#endif
+          __ cmp(receiver, Operand(Factory::the_hole_value()));
+          __ b(&check_the_hole, eq);
+          __ bind(&cont);
+        }
+      } else {
+        // Initially use an invalid index. The index will be patched by the
+        // inline cache code.
+        __ ldr(receiver, MemOperand(receiver, 0));
+      }

       // Make sure that the expected number of instructions are generated.
-      ASSERT_EQ(kInlinedNamedLoadInstructions,
+      // If the code before is updated, the offsets in ic-arm.cc
+      // LoadIC::PatchInlinedContextualLoad and PatchInlinedLoad need
+      // to be updated.
+      ASSERT_EQ(InlinedNamedLoadInstructions,
                 masm_->InstructionsGeneratedSince(&check_inlined_codesize));
     }

     deferred->BindExit();
     // At this point the receiver register has the result, either from the
     // deferred code or from the inlined code.
     frame_->EmitPush(receiver);
   }
 }
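Why the InlinedNamedLoadInstructions bookkeeping above is so strict: LoadIC::PatchInlinedContextualLoad and LoadIC::PatchInlinedLoad in ic-arm.cc (named in the new comment) locate the words to rewrite, the null map and the null cell emitted above, at fixed offsets from a known anchor in the instruction stream. If codegen emitted a variable number of instructions, the patcher would rewrite the wrong words. A toy model of offset-based patching; the offsets and values here are illustrative only, not the real ones:

#include <cassert>
#include <cstdint>
#include <vector>

// Toy instruction buffer; each element models one 4-byte ARM instruction.
using CodeBuffer = std::vector<uint32_t>;

// Illustrative fixed offsets, counted back from the anchor instruction, in
// the spirit of the constants in ic-arm.cc (values are made up).
const int kMapConstantOffset = 4;
const int kCellConstantOffset = 2;

// Patch the inlined sequence: anchor is the index of the marker instruction.
// This only works because codegen guarantees the sequence length is fixed.
void PatchInlinedLoad(CodeBuffer& code, size_t anchor,
                      uint32_t real_map, uint32_t real_cell) {
  code[anchor - kMapConstantOffset] = real_map;    // was the null map
  code[anchor - kCellConstantOffset] = real_cell;  // was the null cell
}

int main() {
  CodeBuffer code(8, 0xE1A00000u);  // eight no-ops
  PatchInlinedLoad(code, 6, 0xDEADBEEFu, 0xCAFEF00Du);
  assert(code[2] == 0xDEADBEEFu && code[4] == 0xCAFEF00Du);
  return 0;
}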
(...skipping 449 matching lines...)
                BinaryOpIC::GetName(runtime_operands_type_));
   return name_;
 }


 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_ARM
