| OLD | NEW |
| 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 23 matching lines...) |
| 34 #include "register-allocator-inl.h" | 34 #include "register-allocator-inl.h" |
| 35 #include "runtime.h" | 35 #include "runtime.h" |
| 36 #include "scopes.h" | 36 #include "scopes.h" |
| 37 | 37 |
| 38 | 38 |
| 39 namespace v8 { | 39 namespace v8 { |
| 40 namespace internal { | 40 namespace internal { |
| 41 | 41 |
| 42 #define __ ACCESS_MASM(masm_) | 42 #define __ ACCESS_MASM(masm_) |
| 43 | 43 |
| 44 // ------------------------------------------------------------------------- |
| 45 // Platform-specific DeferredCode functions. |
| 46 |
| 47 void DeferredCode::SaveRegisters() { |
| 48 for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { |
| 49 int action = registers_[i]; |
| 50 if (action == kPush) { |
| 51 __ push(RegisterAllocator::ToRegister(i)); |
| 52 } else if (action != kIgnore && (action & kSyncedFlag) == 0) { |
| 53 __ str(RegisterAllocator::ToRegister(i), MemOperand(fp, action)); |
| 54 } |
| 55 } |
| 56 } |
| 57 |
| 58 |
| 59 void DeferredCode::RestoreRegisters() { |
| 60 // Restore registers in reverse order, so pops match the earlier pushes. |
| 61 for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) { |
| 62 int action = registers_[i]; |
| 63 if (action == kPush) { |
| 64 __ pop(RegisterAllocator::ToRegister(i)); |
| 65 } else if (action != kIgnore) { |
| 66 action &= ~kSyncedFlag; |
| 67 __ ldr(RegisterAllocator::ToRegister(i), MemOperand(fp, action)); |
| 68 } |
| 69 } |
| 70 } |
| 71 |
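The new SaveRegisters/RestoreRegisters helpers above interpret each registers_[i] entry as an action code: kPush means the value travels on the stack, kIgnore means the register is not live, and any other value is a byte offset from the frame pointer whose low bit doubles as kSyncedFlag, marking slots that already hold the value and need no store (word-aligned offsets leave that bit free). A minimal self-contained sketch of the encoding; the concrete constant values here are assumptions for illustration, not V8's definitions:

    #include <cassert>

    const int kIgnore = -1;     // Assumed marker: register is not live.
    const int kPush = -2;       // Assumed marker: save/restore via push/pop.
    const int kSyncedFlag = 1;  // Low bit: frame slot already up to date.

    // Any other action is a word-aligned fp-relative offset, so the low bit
    // is free to carry kSyncedFlag.
    bool IsFrameOffset(int action) {
      return action != kIgnore && action != kPush;
    }

    int FrameOffset(int action) {
      assert(IsFrameOffset(action));
      return action & ~kSyncedFlag;  // What RestoreRegisters strips before ldr.
    }

    int main() {
      int action = 8 | kSyncedFlag;      // Slot fp+8, already synced:
      assert(FrameOffset(action) == 8);  // SaveRegisters skips the str,
      return 0;                          // RestoreRegisters still reloads.
    }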
| 44 | 72 |
| 45 // ------------------------------------------------------------------------- | 73 // ------------------------------------------------------------------------- |
| 46 // CodeGenState implementation. | 74 // CodeGenState implementation. |
| 47 | 75 |
| 48 CodeGenState::CodeGenState(CodeGenerator* owner) | 76 CodeGenState::CodeGenState(CodeGenerator* owner) |
| 49 : owner_(owner), | 77 : owner_(owner), |
| 50 typeof_state_(NOT_INSIDE_TYPEOF), | 78 typeof_state_(NOT_INSIDE_TYPEOF), |
| 51 true_target_(NULL), | 79 true_target_(NULL), |
| 52 false_target_(NULL), | 80 false_target_(NULL), |
| 53 previous_(NULL) { | 81 previous_(NULL) { |
| (...skipping 715 matching lines...) |
| 769 virtual void Generate(); | 797 virtual void Generate(); |
| 770 | 798 |
| 771 private: | 799 private: |
| 772 Token::Value op_; | 800 Token::Value op_; |
| 773 int value_; | 801 int value_; |
| 774 bool reversed_; | 802 bool reversed_; |
| 775 OverwriteMode overwrite_mode_; | 803 OverwriteMode overwrite_mode_; |
| 776 }; | 804 }; |
| 777 | 805 |
| 778 | 806 |
| 779 #undef __ | |
| 780 #define __ ACCESS_MASM(masm) | |
| 781 | |
| 782 | |
| 783 void DeferredInlineSmiOperation::Generate() { | 807 void DeferredInlineSmiOperation::Generate() { |
| 784 MacroAssembler* masm = cgen()->masm(); | |
| 785 enter()->Bind(); | |
| 786 VirtualFrame::SpilledScope spilled_scope; | |
| 787 | |
| 788 switch (op_) { | 808 switch (op_) { |
| 789 case Token::ADD: { | 809 case Token::ADD: { |
| 810 // Revert optimistic add. |
| 790 if (reversed_) { | 811 if (reversed_) { |
| 791 // revert optimistic add | |
| 792 __ sub(r0, r0, Operand(Smi::FromInt(value_))); | 812 __ sub(r0, r0, Operand(Smi::FromInt(value_))); |
| 793 __ mov(r1, Operand(Smi::FromInt(value_))); | 813 __ mov(r1, Operand(Smi::FromInt(value_))); |
| 794 } else { | 814 } else { |
| 795 // revert optimistic add | |
| 796 __ sub(r1, r0, Operand(Smi::FromInt(value_))); | 815 __ sub(r1, r0, Operand(Smi::FromInt(value_))); |
| 797 __ mov(r0, Operand(Smi::FromInt(value_))); | 816 __ mov(r0, Operand(Smi::FromInt(value_))); |
| 798 } | 817 } |
| 799 break; | 818 break; |
| 800 } | 819 } |
| 801 | 820 |
| 802 case Token::SUB: { | 821 case Token::SUB: { |
| 822 // Revert optimistic sub. |
| 803 if (reversed_) { | 823 if (reversed_) { |
| 804 // revert optimistic sub | |
| 805 __ rsb(r0, r0, Operand(Smi::FromInt(value_))); | 824 __ rsb(r0, r0, Operand(Smi::FromInt(value_))); |
| 806 __ mov(r1, Operand(Smi::FromInt(value_))); | 825 __ mov(r1, Operand(Smi::FromInt(value_))); |
| 807 } else { | 826 } else { |
| 808 __ add(r1, r0, Operand(Smi::FromInt(value_))); | 827 __ add(r1, r0, Operand(Smi::FromInt(value_))); |
| 809 __ mov(r0, Operand(Smi::FromInt(value_))); | 828 __ mov(r0, Operand(Smi::FromInt(value_))); |
| 810 } | 829 } |
| 811 break; | 830 break; |
| 812 } | 831 } |
| 813 | 832 |
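The ADD and SUB cases undo work the fast path performed optimistically: SmiOperation below emits the tagged add or subtract first and only afterwards tests the overflow flag and the smi tag, so this deferred path must reverse the operation to recover the original operands in r1/r0 before calling the generic stub. A runnable sketch of why subtracting the tagged literal restores the left operand exactly, assuming the usual 32-bit smi representation value << 1 with a zero tag bit:

    #include <cassert>
    #include <cstdint>

    int32_t Tag(int32_t v)   { return v << 1; }   // Smi::FromInt analogue.
    int32_t Untag(int32_t t) { return t >> 1; }

    int main() {
      int32_t x = 7, k = 5;
      int32_t r0 = Tag(x) + Tag(k);  // Optimistic add: tag(x)+tag(k) == tag(x+k).
      assert(Untag(r0) == x + k);
      int32_t r1 = r0 - Tag(k);      // The deferred "sub r1, r0, #Smi(k)".
      assert(r1 == Tag(x));          // Left operand recovered exactly.
      return 0;
    }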
| 814 case Token::BIT_OR: | 833 case Token::BIT_OR: |
| 815 case Token::BIT_XOR: | 834 case Token::BIT_XOR: |
| 816 case Token::BIT_AND: { | 835 case Token::BIT_AND: { |
| 817 if (reversed_) { | 836 if (reversed_) { |
| 818 __ mov(r1, Operand(Smi::FromInt(value_))); | 837 __ mov(r1, Operand(Smi::FromInt(value_))); |
| 819 } else { | 838 } else { |
| 820 __ mov(r1, Operand(r0)); | 839 __ mov(r1, Operand(r0)); |
| 821 __ mov(r0, Operand(Smi::FromInt(value_))); | 840 __ mov(r0, Operand(Smi::FromInt(value_))); |
| 822 } | 841 } |
| 823 break; | 842 break; |
| 824 } | 843 } |
| 825 | 844 |
| 826 case Token::SHL: | 845 case Token::SHL: |
| 827 case Token::SHR: | 846 case Token::SHR: |
| 828 case Token::SAR: { | 847 case Token::SAR: { |
| 829 if (!reversed_) { | 848 if (!reversed_) { |
| 830 __ mov(r1, Operand(r0)); | 849 __ mov(r1, Operand(r0)); |
| 831 __ mov(r0, Operand(Smi::FromInt(value_))); | 850 __ mov(r0, Operand(Smi::FromInt(value_))); |
| 832 } else { | 851 } else { |
| 833 UNREACHABLE(); // should have been handled in SmiOperation | 852 UNREACHABLE(); // Should have been handled in SmiOperation. |
| 834 } | 853 } |
| 835 break; | 854 break; |
| 836 } | 855 } |
| 837 | 856 |
| 838 default: | 857 default: |
| 839 // other cases should have been handled before this point. | 858 // Other cases should have been handled before this point. |
| 840 UNREACHABLE(); | 859 UNREACHABLE(); |
| 841 break; | 860 break; |
| 842 } | 861 } |
| 843 | 862 |
| 844 GenericBinaryOpStub igostub(op_, overwrite_mode_); | 863 GenericBinaryOpStub stub(op_, overwrite_mode_); |
| 845 Result arg0 = cgen()->allocator()->Allocate(r1); | 864 __ CallStub(&stub); |
| 846 ASSERT(arg0.is_valid()); | |
| 847 Result arg1 = cgen()->allocator()->Allocate(r0); | |
| 848 ASSERT(arg1.is_valid()); | |
| 849 cgen()->frame()->CallStub(&igostub, &arg0, &arg1); | |
| 850 exit_.Jump(); | |
| 851 } | 865 } |
| 852 | 866 |
| 853 | 867 |
| 854 #undef __ | |
| 855 #define __ ACCESS_MASM(masm_) | |
| 856 | |
| 857 | |
| 858 void CodeGenerator::SmiOperation(Token::Value op, | 868 void CodeGenerator::SmiOperation(Token::Value op, |
| 859 Handle<Object> value, | 869 Handle<Object> value, |
| 860 bool reversed, | 870 bool reversed, |
| 861 OverwriteMode mode) { | 871 OverwriteMode mode) { |
| 862 VirtualFrame::SpilledScope spilled_scope; | 872 VirtualFrame::SpilledScope spilled_scope; |
| 863 // NOTE: This is an attempt to inline (a bit) more of the code for | 873 // NOTE: This is an attempt to inline (a bit) more of the code for |
| 864 // some possible smi operations (like + and -) when (at least) one | 874 // some possible smi operations (like + and -) when (at least) one |
| 865 // of the operands is a literal smi. With this optimization, the | 875 // of the operands is a literal smi. With this optimization, the |
| 866 // performance of the system is increased by ~15%, and the generated | 876 // performance of the system is increased by ~15%, and the generated |
| 867 // code size is increased by ~1% (measured on a combination of | 877 // code size is increased by ~1% (measured on a combination of |
| 868 // different benchmarks). | 878 // different benchmarks). |
| 869 | 879 |
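The inlined fast paths below all rest on the one-instruction smi check "tst r0, Operand(kSmiTagMask)": with a one-bit tag of zero, a smi has a clear low bit, so the test sets the Z flag exactly for smis and Branch(ne) escapes to deferred code for heap objects. The equivalent predicate as a sketch, assuming kSmiTagMask == 1:

    #include <cstdint>

    const uint32_t kSmiTagMask = 1;  // Assumed, matching kSmiTagSize == 1.

    bool IsSmi(uint32_t r0) {
      return (r0 & kSmiTagMask) == 0;  // Z set by tst; Branch(ne) = not a smi.
    }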
| 870 // sp[0] : operand | 880 // sp[0] : operand |
| 871 | 881 |
| 872 int int_value = Smi::cast(*value)->value(); | 882 int int_value = Smi::cast(*value)->value(); |
| 873 | 883 |
| 874 JumpTarget exit; | 884 JumpTarget exit; |
| 875 frame_->EmitPop(r0); | 885 frame_->EmitPop(r0); |
| 876 | 886 |
| 877 switch (op) { | 887 switch (op) { |
| 878 case Token::ADD: { | 888 case Token::ADD: { |
| 879 DeferredCode* deferred = | 889 DeferredCode* deferred = |
| 880 new DeferredInlineSmiOperation(op, int_value, reversed, mode); | 890 new DeferredInlineSmiOperation(op, int_value, reversed, mode); |
| 881 | 891 |
| 882 __ add(r0, r0, Operand(value), SetCC); | 892 __ add(r0, r0, Operand(value), SetCC); |
| 883 deferred->enter()->Branch(vs); | 893 deferred->Branch(vs); |
| 884 __ tst(r0, Operand(kSmiTagMask)); | 894 __ tst(r0, Operand(kSmiTagMask)); |
| 885 deferred->enter()->Branch(ne); | 895 deferred->Branch(ne); |
| 886 deferred->BindExit(); | 896 deferred->BindExit(); |
| 887 break; | 897 break; |
| 888 } | 898 } |
| 889 | 899 |
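Note the control-flow shape shared by these cases under the new API: the fast path conditionally enters the out-of-line stub with deferred->Branch(cc), and deferred->BindExit() marks where the slow path rejoins; the explicit enter()->Bind()/exit_.Jump() pairs and virtual-frame bookkeeping of the old code are gone, with SaveRegisters/RestoreRegisters (top of this file) evidently bracketing Generate(). A toy model of that bracketing, purely illustrative:

    #include <cstdio>

    struct ToyDeferred {                  // Stand-in for DeferredCode.
      void SaveRegisters()    { std::printf("spill live registers\n"); }
      void Generate()         { std::printf("slow-path body\n"); }
      void RestoreRegisters() { std::printf("reload in reverse order\n"); }
      void Emit() {                       // Emitted out of line; reached via
        SaveRegisters();                  // Branch(cc), falls back to the
        Generate();                       // instruction after BindExit().
        RestoreRegisters();
      }
    };

    int main() {
      ToyDeferred d;
      d.Emit();
      return 0;
    }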
| 890 case Token::SUB: { | 900 case Token::SUB: { |
| 891 DeferredCode* deferred = | 901 DeferredCode* deferred = |
| 892 new DeferredInlineSmiOperation(op, int_value, reversed, mode); | 902 new DeferredInlineSmiOperation(op, int_value, reversed, mode); |
| 893 | 903 |
| 894 if (!reversed) { | 904 if (reversed) { |
| 905 __ rsb(r0, r0, Operand(value), SetCC); |
| 906 } else { |
| 895 __ sub(r0, r0, Operand(value), SetCC); | 907 __ sub(r0, r0, Operand(value), SetCC); |
| 896 } else { | |
| 897 __ rsb(r0, r0, Operand(value), SetCC); | |
| 898 } | 908 } |
| 899 deferred->enter()->Branch(vs); | 909 deferred->Branch(vs); |
| 900 __ tst(r0, Operand(kSmiTagMask)); | 910 __ tst(r0, Operand(kSmiTagMask)); |
| 901 deferred->enter()->Branch(ne); | 911 deferred->Branch(ne); |
| 902 deferred->BindExit(); | 912 deferred->BindExit(); |
| 903 break; | 913 break; |
| 904 } | 914 } |
| 905 | 915 |
| 906 case Token::BIT_OR: | 916 case Token::BIT_OR: |
| 907 case Token::BIT_XOR: | 917 case Token::BIT_XOR: |
| 908 case Token::BIT_AND: { | 918 case Token::BIT_AND: { |
| 909 DeferredCode* deferred = | 919 DeferredCode* deferred = |
| 910 new DeferredInlineSmiOperation(op, int_value, reversed, mode); | 920 new DeferredInlineSmiOperation(op, int_value, reversed, mode); |
| 911 __ tst(r0, Operand(kSmiTagMask)); | 921 __ tst(r0, Operand(kSmiTagMask)); |
| 912 deferred->enter()->Branch(ne); | 922 deferred->Branch(ne); |
| 913 switch (op) { | 923 switch (op) { |
| 914 case Token::BIT_OR: __ orr(r0, r0, Operand(value)); break; | 924 case Token::BIT_OR: __ orr(r0, r0, Operand(value)); break; |
| 915 case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break; | 925 case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break; |
| 916 case Token::BIT_AND: __ and_(r0, r0, Operand(value)); break; | 926 case Token::BIT_AND: __ and_(r0, r0, Operand(value)); break; |
| 917 default: UNREACHABLE(); | 927 default: UNREACHABLE(); |
| 918 } | 928 } |
| 919 deferred->BindExit(); | 929 deferred->BindExit(); |
| 920 break; | 930 break; |
| 921 } | 931 } |
| 922 | 932 |
| 923 case Token::SHL: | 933 case Token::SHL: |
| 924 case Token::SHR: | 934 case Token::SHR: |
| 925 case Token::SAR: { | 935 case Token::SAR: { |
| 926 if (reversed) { | 936 if (reversed) { |
| 927 __ mov(ip, Operand(value)); | 937 __ mov(ip, Operand(value)); |
| 928 frame_->EmitPush(ip); | 938 frame_->EmitPush(ip); |
| 929 frame_->EmitPush(r0); | 939 frame_->EmitPush(r0); |
| 930 GenericBinaryOperation(op, mode); | 940 GenericBinaryOperation(op, mode); |
| 931 | 941 |
| 932 } else { | 942 } else { |
| 933 int shift_value = int_value & 0x1f; // least significant 5 bits | 943 int shift_value = int_value & 0x1f; // least significant 5 bits |
| 934 DeferredCode* deferred = | 944 DeferredCode* deferred = |
| 935 new DeferredInlineSmiOperation(op, shift_value, false, mode); | 945 new DeferredInlineSmiOperation(op, shift_value, false, mode); |
| 936 __ tst(r0, Operand(kSmiTagMask)); | 946 __ tst(r0, Operand(kSmiTagMask)); |
| 937 deferred->enter()->Branch(ne); | 947 deferred->Branch(ne); |
| 938 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags | 948 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags |
| 939 switch (op) { | 949 switch (op) { |
| 940 case Token::SHL: { | 950 case Token::SHL: { |
| 941 __ mov(r2, Operand(r2, LSL, shift_value)); | 951 __ mov(r2, Operand(r2, LSL, shift_value)); |
| 942 // check that the *unsigned* result fits in a smi | 952 // check that the *unsigned* result fits in a smi |
| 943 __ add(r3, r2, Operand(0x40000000), SetCC); | 953 __ add(r3, r2, Operand(0x40000000), SetCC); |
| 944 deferred->enter()->Branch(mi); | 954 deferred->Branch(mi); |
| 945 break; | 955 break; |
| 946 } | 956 } |
| 947 case Token::SHR: { | 957 case Token::SHR: { |
| 948 // LSR by immediate 0 means shifting 32 bits. | 958 // LSR by immediate 0 means shifting 32 bits. |
| 949 if (shift_value != 0) { | 959 if (shift_value != 0) { |
| 950 __ mov(r2, Operand(r2, LSR, shift_value)); | 960 __ mov(r2, Operand(r2, LSR, shift_value)); |
| 951 } | 961 } |
| 952 // check that the *unsigned* result fits in a smi | 962 // check that the *unsigned* result fits in a smi |
| 953 // neither of the two high-order bits can be set: | 963 // neither of the two high-order bits can be set: |
| 954 // - 0x80000000: high bit would be lost when smi tagging | 964 // - 0x80000000: high bit would be lost when smi tagging |
| 955 // - 0x40000000: this number would convert to negative when | 965 // - 0x40000000: this number would convert to negative when |
| 956 // smi tagging. These two cases can only happen with shifts | 966 // smi tagging. These two cases can only happen with shifts |
| 957 // by 0 or 1 when handed a valid smi. | 967 // by 0 or 1 when handed a valid smi. |
| 958 __ and_(r3, r2, Operand(0xc0000000), SetCC); | 968 __ and_(r3, r2, Operand(0xc0000000), SetCC); |
| 959 deferred->enter()->Branch(ne); | 969 deferred->Branch(ne); |
| 960 break; | 970 break; |
| 961 } | 971 } |
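Both shift fast paths must confirm that the untagged 32-bit result still fits in the 31 payload bits of a smi before re-tagging. SHL does it by adding 0x40000000 and branching on the sign flag, which fires exactly when the value lies outside [-0x40000000, 0x3fffffff]; SHR masks with 0xc0000000 and rejects any result with either high-order bit set, for the reasons given in the comment above. A runnable check that the two instruction idioms implement the intended range tests:

    #include <cassert>
    #include <cstdint>

    // Range a 31-bit smi payload can hold.
    bool InSmiRange(int32_t v) { return v >= -0x40000000 && v <= 0x3fffffff; }

    // SHL idiom: "add r3, r2, #0x40000000, SetCC" then Branch(mi) on failure.
    bool ShlCheckPasses(int32_t v) {
      return (int32_t)((uint32_t)v + 0x40000000u) >= 0;  // mi not taken.
    }

    // SHR idiom: "and r3, r2, #0xc0000000, SetCC" then Branch(ne) on failure.
    bool ShrCheckPasses(uint32_t v) { return (v & 0xc0000000u) == 0; }

    int main() {
      for (int64_t v = -0x80000000LL; v <= 0x7fffffffLL; v += 0x1000001) {
        int32_t x = (int32_t)v;
        assert(ShlCheckPasses(x) == InSmiRange(x));
      }
      assert(ShrCheckPasses(0x3fffffffu) && !ShrCheckPasses(0x40000000u));
      return 0;
    }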
| 962 case Token::SAR: { | 972 case Token::SAR: { |
| 963 if (shift_value != 0) { | 973 if (shift_value != 0) { |
| 964 // ASR by immediate 0 means shifting 32 bits. | 974 // ASR by immediate 0 means shifting 32 bits. |
| 965 __ mov(r2, Operand(r2, ASR, shift_value)); | 975 __ mov(r2, Operand(r2, ASR, shift_value)); |
| 966 } | 976 } |
| 967 break; | 977 break; |
| 968 } | 978 } |
| 969 default: UNREACHABLE(); | 979 default: UNREACHABLE(); |
| (...skipping 1693 matching lines...) |
| 2663 set_comment("[ DeferredObjectLiteral"); | 2673 set_comment("[ DeferredObjectLiteral"); |
| 2664 } | 2674 } |
| 2665 | 2675 |
| 2666 virtual void Generate(); | 2676 virtual void Generate(); |
| 2667 | 2677 |
| 2668 private: | 2678 private: |
| 2669 ObjectLiteral* node_; | 2679 ObjectLiteral* node_; |
| 2670 }; | 2680 }; |
| 2671 | 2681 |
| 2672 | 2682 |
| 2673 #undef __ | |
| 2674 #define __ ACCESS_MASM(masm) | |
| 2675 | |
| 2676 | |
| 2677 void DeferredObjectLiteral::Generate() { | 2683 void DeferredObjectLiteral::Generate() { |
| 2678 MacroAssembler* masm = cgen()->masm(); | |
| 2679 // Argument is passed in r1. | 2684 // Argument is passed in r1. |
| 2680 enter()->Bind(); | |
| 2681 VirtualFrame::SpilledScope spilled_scope; | |
| 2682 | 2685 |
| 2683 // If the entry is undefined we call the runtime system to compute | 2686 // If the entry is undefined we call the runtime system to compute |
| 2684 // the literal. | 2687 // the literal. |
| 2685 | |
| 2686 VirtualFrame* frame = cgen()->frame(); | |
| 2687 // Literal array (0). | 2688 // Literal array (0). |
| 2688 frame->EmitPush(r1); | 2689 __ push(r1); |
| 2689 // Literal index (1). | 2690 // Literal index (1). |
| 2690 __ mov(r0, Operand(Smi::FromInt(node_->literal_index()))); | 2691 __ mov(r0, Operand(Smi::FromInt(node_->literal_index()))); |
| 2691 frame->EmitPush(r0); | 2692 __ push(r0); |
| 2692 // Constant properties (2). | 2693 // Constant properties (2). |
| 2693 __ mov(r0, Operand(node_->constant_properties())); | 2694 __ mov(r0, Operand(node_->constant_properties())); |
| 2694 frame->EmitPush(r0); | 2695 __ push(r0); |
| 2695 Result boilerplate = | 2696 __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3); |
| 2696 frame->CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3); | 2697 __ mov(r2, Operand(r0)); |
| 2697 __ mov(r2, Operand(boilerplate.reg())); | |
| 2698 // Result is returned in r2. | 2698 // Result is returned in r2. |
| 2699 exit_.Jump(); | |
| 2700 } | 2699 } |
| 2701 | 2700 |
| 2702 | 2701 |
| 2703 #undef __ | |
| 2704 #define __ ACCESS_MASM(masm_) | |
| 2705 | |
| 2706 | |
| 2707 void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) { | 2702 void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) { |
| 2708 #ifdef DEBUG | 2703 #ifdef DEBUG |
| 2709 int original_height = frame_->height(); | 2704 int original_height = frame_->height(); |
| 2710 #endif | 2705 #endif |
| 2711 VirtualFrame::SpilledScope spilled_scope; | 2706 VirtualFrame::SpilledScope spilled_scope; |
| 2712 Comment cmnt(masm_, "[ ObjectLiteral"); | 2707 Comment cmnt(masm_, "[ ObjectLiteral"); |
| 2713 | 2708 |
| 2714 DeferredObjectLiteral* deferred = new DeferredObjectLiteral(node); | 2709 DeferredObjectLiteral* deferred = new DeferredObjectLiteral(node); |
| 2715 | 2710 |
| 2716 // Retrieve the literal array and check the allocated entry. | 2711 // Retrieve the literal array and check the allocated entry. |
| 2717 | 2712 |
| 2718 // Load the function of this activation. | 2713 // Load the function of this activation. |
| 2719 __ ldr(r1, frame_->Function()); | 2714 __ ldr(r1, frame_->Function()); |
| 2720 | 2715 |
| 2721 // Load the literals array of the function. | 2716 // Load the literals array of the function. |
| 2722 __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset)); | 2717 __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset)); |
| 2723 | 2718 |
| 2724 // Load the literal at the ast saved index. | 2719 // Load the literal at the ast saved index. |
| 2725 int literal_offset = | 2720 int literal_offset = |
| 2726 FixedArray::kHeaderSize + node->literal_index() * kPointerSize; | 2721 FixedArray::kHeaderSize + node->literal_index() * kPointerSize; |
| 2727 __ ldr(r2, FieldMemOperand(r1, literal_offset)); | 2722 __ ldr(r2, FieldMemOperand(r1, literal_offset)); |
| 2728 | 2723 |
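The offset arithmetic above picks literal number literal_index out of the function's literals FixedArray: slots begin after the array header and are one pointer wide, and FieldMemOperand additionally folds away the tag bit carried in every heap-object pointer. A worked instance; the concrete sizes are assumptions for a 32-bit target (two-word header, one-bit heap tag), not quoted from V8's headers:

    #include <cassert>

    int main() {
      const int kHeaderSize = 8;     // Assumed: FixedArray map + length words.
      const int kPointerSize = 4;    // Assumed: 32-bit pointers.
      const int kHeapObjectTag = 1;  // Assumed: heap pointers carry tag 1.

      int literal_index = 3;
      int literal_offset = kHeaderSize + literal_index * kPointerSize;
      assert(literal_offset == 20);

      // FieldMemOperand(r1, literal_offset) subtracts the pointer tag, so the
      // displacement actually encoded in the ldr is one byte less:
      assert(literal_offset - kHeapObjectTag == 19);
      return 0;
    }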
| 2729 // Check whether we need to materialize the object literal boilerplate. | 2724 // Check whether we need to materialize the object literal boilerplate. |
| 2730 // If so, jump to the deferred code. | 2725 // If so, jump to the deferred code. |
| 2731 __ cmp(r2, Operand(Factory::undefined_value())); | 2726 __ cmp(r2, Operand(Factory::undefined_value())); |
| 2732 deferred->enter()->Branch(eq); | 2727 deferred->Branch(eq); |
| 2733 deferred->BindExit(); | 2728 deferred->BindExit(); |
| 2734 | 2729 |
| 2735 // Push the object literal boilerplate. | 2730 // Push the object literal boilerplate. |
| 2736 frame_->EmitPush(r2); | 2731 frame_->EmitPush(r2); |
| 2737 | 2732 |
| 2738 // Clone the boilerplate object. | 2733 // Clone the boilerplate object. |
| 2739 Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate; | 2734 Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate; |
| 2740 if (node->depth() == 1) { | 2735 if (node->depth() == 1) { |
| 2741 clone_function_id = Runtime::kCloneShallowLiteralBoilerplate; | 2736 clone_function_id = Runtime::kCloneShallowLiteralBoilerplate; |
| 2742 } | 2737 } |
| (...skipping 57 matching lines...) |
| 2800 set_comment("[ DeferredArrayLiteral"); | 2795 set_comment("[ DeferredArrayLiteral"); |
| 2801 } | 2796 } |
| 2802 | 2797 |
| 2803 virtual void Generate(); | 2798 virtual void Generate(); |
| 2804 | 2799 |
| 2805 private: | 2800 private: |
| 2806 ArrayLiteral* node_; | 2801 ArrayLiteral* node_; |
| 2807 }; | 2802 }; |
| 2808 | 2803 |
| 2809 | 2804 |
| 2810 #undef __ | |
| 2811 #define __ ACCESS_MASM(masm) | |
| 2812 | |
| 2813 | |
| 2814 void DeferredArrayLiteral::Generate() { | 2805 void DeferredArrayLiteral::Generate() { |
| 2815 MacroAssembler* masm = cgen()->masm(); | |
| 2816 // Argument is passed in r1. | 2806 // Argument is passed in r1. |
| 2817 enter()->Bind(); | |
| 2818 VirtualFrame::SpilledScope spilled_scope; | |
| 2819 | 2807 |
| 2820 // If the entry is undefined we call the runtime system to compute | 2808 // If the entry is undefined we call the runtime system to compute |
| 2821 // the literal. | 2809 // the literal. |
| 2822 | |
| 2823 VirtualFrame* frame = cgen()->frame(); | |
| 2824 // Literal array (0). | 2810 // Literal array (0). |
| 2825 frame->EmitPush(r1); | 2811 __ push(r1); |
| 2826 // Literal index (1). | 2812 // Literal index (1). |
| 2827 __ mov(r0, Operand(Smi::FromInt(node_->literal_index()))); | 2813 __ mov(r0, Operand(Smi::FromInt(node_->literal_index()))); |
| 2828 frame->EmitPush(r0); | 2814 __ push(r0); |
| 2829 // Literals (2). | 2815 // Literals (2). |
| 2830 __ mov(r0, Operand(node_->literals())); | 2816 __ mov(r0, Operand(node_->literals())); |
| 2831 frame->EmitPush(r0); | 2817 __ push(r0); |
| 2832 Result boilerplate = | 2818 __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3); |
| 2833 frame->CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3); | 2819 __ mov(r2, Operand(r0)); |
| 2834 __ mov(r2, Operand(boilerplate.reg())); | |
| 2835 // Result is returned in r2. | 2820 // Result is returned in r2. |
| 2836 exit_.Jump(); | |
| 2837 } | 2821 } |
| 2838 | 2822 |
| 2839 | 2823 |
| 2840 #undef __ | |
| 2841 #define __ ACCESS_MASM(masm_) | |
| 2842 | |
| 2843 | |
| 2844 void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { | 2824 void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { |
| 2845 #ifdef DEBUG | 2825 #ifdef DEBUG |
| 2846 int original_height = frame_->height(); | 2826 int original_height = frame_->height(); |
| 2847 #endif | 2827 #endif |
| 2848 VirtualFrame::SpilledScope spilled_scope; | 2828 VirtualFrame::SpilledScope spilled_scope; |
| 2849 Comment cmnt(masm_, "[ ArrayLiteral"); | 2829 Comment cmnt(masm_, "[ ArrayLiteral"); |
| 2850 | 2830 |
| 2851 DeferredArrayLiteral* deferred = new DeferredArrayLiteral(node); | 2831 DeferredArrayLiteral* deferred = new DeferredArrayLiteral(node); |
| 2852 | 2832 |
| 2853 // Retrieve the literal array and check the allocated entry. | 2833 // Retrieve the literal array and check the allocated entry. |
| 2854 | 2834 |
| 2855 // Load the function of this activation. | 2835 // Load the function of this activation. |
| 2856 __ ldr(r1, frame_->Function()); | 2836 __ ldr(r1, frame_->Function()); |
| 2857 | 2837 |
| 2858 // Load the literals array of the function. | 2838 // Load the literals array of the function. |
| 2859 __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset)); | 2839 __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset)); |
| 2860 | 2840 |
| 2861 // Load the literal at the ast saved index. | 2841 // Load the literal at the ast saved index. |
| 2862 int literal_offset = | 2842 int literal_offset = |
| 2863 FixedArray::kHeaderSize + node->literal_index() * kPointerSize; | 2843 FixedArray::kHeaderSize + node->literal_index() * kPointerSize; |
| 2864 __ ldr(r2, FieldMemOperand(r1, literal_offset)); | 2844 __ ldr(r2, FieldMemOperand(r1, literal_offset)); |
| 2865 | 2845 |
| 2866 // Check whether we need to materialize the array literal boilerplate. | 2846 // Check whether we need to materialize the array literal boilerplate. |
| 2867 // If so, jump to the deferred code. | 2847 // If so, jump to the deferred code. |
| 2868 __ cmp(r2, Operand(Factory::undefined_value())); | 2848 __ cmp(r2, Operand(Factory::undefined_value())); |
| 2869 deferred->enter()->Branch(eq); | 2849 deferred->Branch(eq); |
| 2870 deferred->BindExit(); | 2850 deferred->BindExit(); |
| 2871 | 2851 |
| 2872 // Push the array literal boilerplate. | 2852 // Push the array literal boilerplate. |
| 2873 frame_->EmitPush(r2); | 2853 frame_->EmitPush(r2); |
| 2874 | 2854 |
| 2875 // Clone the boilerplate object. | 2855 // Clone the boilerplate object. |
| 2876 Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate; | 2856 Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate; |
| 2877 if (node->depth() == 1) { | 2857 if (node->depth() == 1) { |
| 2878 clone_function_id = Runtime::kCloneShallowLiteralBoilerplate; | 2858 clone_function_id = Runtime::kCloneShallowLiteralBoilerplate; |
| 2879 } | 2859 } |
| (...skipping 2330 matching lines...) |
| 5210 __ mov(r2, Operand(0)); | 5190 __ mov(r2, Operand(0)); |
| 5211 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); | 5191 __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); |
| 5212 __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)), | 5192 __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)), |
| 5213 RelocInfo::CODE_TARGET); | 5193 RelocInfo::CODE_TARGET); |
| 5214 } | 5194 } |
| 5215 | 5195 |
| 5216 | 5196 |
| 5217 #undef __ | 5197 #undef __ |
| 5218 | 5198 |
| 5219 } } // namespace v8::internal | 5199 } } // namespace v8::internal |