Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 668 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 679 Register scratch); | 679 Register scratch); |
| 680 // Allocate a heap number in new space with undefined value. | 680 // Allocate a heap number in new space with undefined value. |
| 681 // Returns tagged pointer in eax, or jumps to need_gc if new space is full. | 681 // Returns tagged pointer in eax, or jumps to need_gc if new space is full. |
| 682 static void AllocateHeapNumber(MacroAssembler* masm, | 682 static void AllocateHeapNumber(MacroAssembler* masm, |
| 683 Label* need_gc, | 683 Label* need_gc, |
| 684 Register scratch1, | 684 Register scratch1, |
| 685 Register scratch2); | 685 Register scratch2); |
| 686 }; | 686 }; |
| 687 | 687 |
| 688 | 688 |
| 689 // Flag that indicates whether or not the code for dealing with smis | |
| 690 // is inlined or should be dealt with in the stub. | |
| 691 enum GenericBinaryFlags { | |
| 692 SMI_CODE_IN_STUB, | |
| 693 SMI_CODE_INLINED | |
| 694 }; | |
| 695 | |
| 696 | |
| 689 class GenericBinaryOpStub: public CodeStub { | 697 class GenericBinaryOpStub: public CodeStub { |
| 690 public: | 698 public: |
| 691 GenericBinaryOpStub(Token::Value op, OverwriteMode mode) | 699 GenericBinaryOpStub(Token::Value op, |
| 692 : op_(op), mode_(mode) { } | 700 OverwriteMode mode, |
| 701 GenericBinaryFlags flags) | |
| 702 : op_(op), mode_(mode), flags_(flags) { } | |
| 703 | |
| 704 void GenerateSmiCode(MacroAssembler* masm, Label* slow); | |
| 693 | 705 |
| 694 private: | 706 private: |
| 695 Token::Value op_; | 707 Token::Value op_; |
| 696 OverwriteMode mode_; | 708 OverwriteMode mode_; |
| 709 GenericBinaryFlags flags_; | |
| 697 | 710 |
| 698 const char* GetName(); | 711 const char* GetName(); |
| 699 | 712 |
| 700 #ifdef DEBUG | 713 #ifdef DEBUG |
| 701 void Print() { | 714 void Print() { |
| 702 PrintF("GenericBinaryOpStub (op %s), (mode %d)\n", | 715 PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n", |
| 703 Token::String(op_), | 716 Token::String(op_), |
| 704 static_cast<int>(mode_)); | 717 static_cast<int>(mode_), |
| 718 static_cast<int>(flags_)); | |
| 705 } | 719 } |
| 706 #endif | 720 #endif |
| 707 | 721 |
| 708 // Minor key encoding in 16 bits OOOOOOOOOOOOOOMM. | 722 // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM. |
| 709 class ModeBits: public BitField<OverwriteMode, 0, 2> {}; | 723 class ModeBits: public BitField<OverwriteMode, 0, 2> {}; |
| 710 class OpBits: public BitField<Token::Value, 2, 14> {}; | 724 class OpBits: public BitField<Token::Value, 2, 13> {}; |
| 725 class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {}; | |
| 711 | 726 |
| 712 Major MajorKey() { return GenericBinaryOp; } | 727 Major MajorKey() { return GenericBinaryOp; } |
| 713 int MinorKey() { | 728 int MinorKey() { |
| 714 // Encode the parameters in a unique 16 bit value. | 729 // Encode the parameters in a unique 16 bit value. |
| 715 return OpBits::encode(op_) | | 730 return OpBits::encode(op_) | |
| 716 ModeBits::encode(mode_); | 731 ModeBits::encode(mode_) | |
| 732 FlagBits::encode(flags_); | |
| 717 } | 733 } |
| 718 void Generate(MacroAssembler* masm); | 734 void Generate(MacroAssembler* masm); |
| 719 }; | 735 }; |
| 720 | 736 |
| 721 | 737 |
| 722 const char* GenericBinaryOpStub::GetName() { | 738 const char* GenericBinaryOpStub::GetName() { |
| 723 switch (op_) { | 739 switch (op_) { |
| 724 case Token::ADD: return "GenericBinaryOpStub_ADD"; | 740 case Token::ADD: return "GenericBinaryOpStub_ADD"; |
| 725 case Token::SUB: return "GenericBinaryOpStub_SUB"; | 741 case Token::SUB: return "GenericBinaryOpStub_SUB"; |
| 726 case Token::MUL: return "GenericBinaryOpStub_MUL"; | 742 case Token::MUL: return "GenericBinaryOpStub_MUL"; |
| 727 case Token::DIV: return "GenericBinaryOpStub_DIV"; | 743 case Token::DIV: return "GenericBinaryOpStub_DIV"; |
| 728 case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR"; | 744 case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR"; |
| 729 case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND"; | 745 case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND"; |
| 730 case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR"; | 746 case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR"; |
| 731 case Token::SAR: return "GenericBinaryOpStub_SAR"; | 747 case Token::SAR: return "GenericBinaryOpStub_SAR"; |
| 732 case Token::SHL: return "GenericBinaryOpStub_SHL"; | 748 case Token::SHL: return "GenericBinaryOpStub_SHL"; |
| 733 case Token::SHR: return "GenericBinaryOpStub_SHR"; | 749 case Token::SHR: return "GenericBinaryOpStub_SHR"; |
| 734 default: return "GenericBinaryOpStub"; | 750 default: return "GenericBinaryOpStub"; |
| 735 } | 751 } |
| 736 } | 752 } |
| 737 | 753 |
| 738 | 754 |
| 755 class DeferredInlineBinaryOperation: public DeferredCode { | |
| 756 public: | |
| 757 DeferredInlineBinaryOperation(CodeGenerator* generator, | |
| 758 Token::Value op, | |
| 759 OverwriteMode mode, | |
| 760 GenericBinaryFlags flags) | |
| 761 : DeferredCode(generator), stub_(op, mode, flags) { } | |
| 762 | |
| 763 void GenerateInlineCode() { | |
| 764 stub_.GenerateSmiCode(masm(), enter()); | |
| 765 } | |
| 766 | |
| 767 virtual void Generate() { | |
| 768 __ push(ebx); | |
| 769 __ CallStub(&stub_); | |
| 770 // We must preserve the eax value here, because it will be written | |
| 771 // to the top-of-stack element when getting back to the fast case | |
| 772 // code. See comment in GenericBinaryOperation where | |
| 773 // deferred->exit() is bound. | |
| 774 __ push(eax); | |
|
iposva
2008/10/21 16:59:22
This push here relies on the fact that the code ge… [reviewer comment truncated in page export]
| |
| 775 } | |
| 776 | |
| 777 private: | |
| 778 GenericBinaryOpStub stub_; | |
| 779 }; | |
| 780 | |
| 781 | |
| 739 void CodeGenerator::GenericBinaryOperation(Token::Value op, | 782 void CodeGenerator::GenericBinaryOperation(Token::Value op, |
| 740 OverwriteMode overwrite_mode) { | 783 OverwriteMode overwrite_mode) { |
| 741 Comment cmnt(masm_, "[ BinaryOperation"); | 784 Comment cmnt(masm_, "[ BinaryOperation"); |
| 742 Comment cmnt_token(masm_, Token::String(op)); | 785 Comment cmnt_token(masm_, Token::String(op)); |
| 786 | |
| 787 if (op == Token::COMMA) { | |
| 788 // Simply discard left value. | |
| 789 frame_->Pop(eax); | |
| 790 frame_->Pop(); | |
| 791 frame_->Push(eax); | |
| 792 return; | |
| 793 } | |
| 794 | |
| 795 // For now, we keep the old behavior and only inline the smi code | |
| 796 // for the bitwise operations. | |
| 797 GenericBinaryFlags flags; | |
| 743 switch (op) { | 798 switch (op) { |
| 744 case Token::ADD: | |
| 745 case Token::SUB: | |
| 746 case Token::MUL: | |
| 747 case Token::DIV: | |
| 748 case Token::MOD: { | |
| 749 GenericBinaryOpStub stub(op, overwrite_mode); | |
| 750 __ CallStub(&stub); | |
| 751 frame_->Push(eax); | |
| 752 break; | |
| 753 } | |
| 754 case Token::BIT_OR: | 799 case Token::BIT_OR: |
| 755 case Token::BIT_AND: | 800 case Token::BIT_AND: |
| 756 case Token::BIT_XOR: { | 801 case Token::BIT_XOR: |
| 757 Label slow, exit; | |
| 758 frame_->Pop(eax); // get y | |
| 759 frame_->Pop(edx); // get x | |
| 760 __ mov(ecx, Operand(edx)); // Prepare smi check. | |
| 761 // tag check | |
| 762 __ or_(ecx, Operand(eax)); // ecx = x | y; | |
| 763 ASSERT(kSmiTag == 0); // adjust code below | |
| 764 __ test(ecx, Immediate(kSmiTagMask)); | |
| 765 __ j(not_zero, &slow, taken); | |
| 766 switch (op) { | |
| 767 case Token::BIT_OR: __ or_(eax, Operand(edx)); break; | |
| 768 case Token::BIT_AND: __ and_(eax, Operand(edx)); break; | |
| 769 case Token::BIT_XOR: __ xor_(eax, Operand(edx)); break; | |
| 770 default: UNREACHABLE(); | |
| 771 } | |
| 772 __ jmp(&exit); | |
| 773 __ bind(&slow); | |
| 774 frame_->Push(edx); // restore stack slots | |
| 775 frame_->Push(eax); | |
| 776 GenericBinaryOpStub stub(op, overwrite_mode); | |
| 777 __ CallStub(&stub); | |
| 778 __ bind(&exit); | |
| 779 frame_->Push(eax); // push the result to the stack | |
| 780 break; | |
| 781 } | |
| 782 case Token::SHL: | 802 case Token::SHL: |
| 783 case Token::SHR: | 803 case Token::SHR: |
| 784 case Token::SAR: { | 804 case Token::SAR: |
| 785 Label slow, exit; | 805 flags = SMI_CODE_INLINED; |
| 786 frame_->Pop(edx); // get y | |
| 787 frame_->Pop(eax); // get x | |
| 788 // tag check | |
| 789 __ mov(ecx, Operand(edx)); | |
| 790 __ or_(ecx, Operand(eax)); // ecx = x | y; | |
| 791 ASSERT(kSmiTag == 0); // adjust code below | |
| 792 __ test(ecx, Immediate(kSmiTagMask)); | |
| 793 __ j(not_zero, &slow, not_taken); | |
| 794 // get copies of operands | |
| 795 __ mov(ebx, Operand(eax)); | |
| 796 __ mov(ecx, Operand(edx)); | |
| 797 // remove tags from operands (but keep sign) | |
| 798 __ sar(ebx, kSmiTagSize); | |
| 799 __ sar(ecx, kSmiTagSize); | |
| 800 // perform operation | |
| 801 switch (op) { | |
| 802 case Token::SAR: | |
| 803 __ sar(ebx); | |
| 804 // no checks of result necessary | |
| 805 break; | |
| 806 case Token::SHR: | |
| 807 __ shr(ebx); | |
| 808 // Check that the *unsigned* result fits in a smi. | |
| 809 // neither of the two high-order bits can be set: | |
| 810 // - 0x80000000: high bit would be lost when smi tagging. | |
| 811 // - 0x40000000: this number would convert to negative when | |
| 812 // smi tagging these two cases can only happen with shifts | |
| 813 // by 0 or 1 when handed a valid smi. | |
| 814 __ test(ebx, Immediate(0xc0000000)); | |
| 815 __ j(not_zero, &slow, not_taken); | |
| 816 break; | |
| 817 case Token::SHL: | |
| 818 __ shl(ebx); | |
| 819 // Check that the *signed* result fits in a smi. | |
| 820 __ lea(ecx, Operand(ebx, 0x40000000)); | |
| 821 __ test(ecx, Immediate(0x80000000)); | |
| 822 __ j(not_zero, &slow, not_taken); | |
| 823 break; | |
| 824 default: UNREACHABLE(); | |
| 825 } | |
| 826 // tag result and store it in TOS (eax) | |
| 827 ASSERT(kSmiTagSize == times_2); // adjust code if not the case | |
| 828 __ lea(eax, Operand(ebx, times_2, kSmiTag)); | |
| 829 __ jmp(&exit); | |
| 830 // slow case | |
| 831 __ bind(&slow); | |
| 832 frame_->Push(eax); // restore stack | |
| 833 frame_->Push(edx); | |
| 834 GenericBinaryOpStub stub(op, overwrite_mode); | |
| 835 __ CallStub(&stub); | |
| 836 __ bind(&exit); | |
| 837 frame_->Push(eax); | |
| 838 break; | 806 break; |
| 839 } | 807 |
| 840 case Token::COMMA: { | 808 default: |
| 841 // simply discard left value | 809 flags = SMI_CODE_IN_STUB; |
| 842 frame_->Pop(eax); | |
| 843 frame_->Pop(); | |
| 844 frame_->Push(eax); | |
| 845 break; | 810 break; |
| 846 } | 811 } |
| 847 default: UNREACHABLE(); | 812 |
| 813 if (flags == SMI_CODE_INLINED) { | |
| 814 // Create a new deferred code for the slow-case part. | |
| 815 DeferredInlineBinaryOperation* deferred = | |
| 816 new DeferredInlineBinaryOperation(this, op, overwrite_mode, flags); | |
| 817 // Fetch the operands from the stack. | |
| 818 frame_->Pop(ebx); // get y | |
| 819 __ mov(eax, frame_->Top()); // get x | |
|
iposva
2008/10/21 16:59:22
This should be a Pop(reg) to balance it with the r… [reviewer comment truncated in page export]
| |
| 820 // Generate the inline part of the code. | |
| 821 deferred->GenerateInlineCode(); | |
| 822 // Put result back on the stack. It seems somewhat weird to let | |
| 823 // the deferred code jump back before the assignment to the frame | |
| 824 // top, but this is just to let the peephole optimizer get rid of | |
| 825 // more code. | |
| 826 __ bind(deferred->exit()); | |
| 827 __ mov(frame_->Top(), eax); | |
|
iposva
2008/10/21 16:59:22
We should frame_->Push(eax) here, since the deferr… [reviewer comment truncated in page export]
| |
| 828 } else { | |
| 829 // Call the stub and push the result to the stack. | |
| 830 GenericBinaryOpStub stub(op, overwrite_mode, flags); | |
| 831 __ CallStub(&stub); | |
| 832 frame_->Push(eax); | |
| 848 } | 833 } |
| 849 } | 834 } |
| 850 | 835 |
| 851 | 836 |
| 852 class DeferredInlinedSmiOperation: public DeferredCode { | 837 class DeferredInlinedSmiOperation: public DeferredCode { |
| 853 public: | 838 public: |
| 854 DeferredInlinedSmiOperation(CodeGenerator* generator, | 839 DeferredInlinedSmiOperation(CodeGenerator* generator, |
| 855 Token::Value op, int value, | 840 Token::Value op, int value, |
| 856 OverwriteMode overwrite_mode) : | 841 OverwriteMode overwrite_mode) : |
| 857 DeferredCode(generator), op_(op), value_(value), | 842 DeferredCode(generator), op_(op), value_(value), |
| 858 overwrite_mode_(overwrite_mode) { | 843 overwrite_mode_(overwrite_mode) { |
| 859 set_comment("[ DeferredInlinedSmiOperation"); | 844 set_comment("[ DeferredInlinedSmiOperation"); |
| 860 } | 845 } |
| 861 virtual void Generate() { | 846 virtual void Generate() { |
| 862 __ push(eax); | 847 __ push(eax); |
| 863 __ push(Immediate(Smi::FromInt(value_))); | 848 __ push(Immediate(Smi::FromInt(value_))); |
| 864 GenericBinaryOpStub igostub(op_, overwrite_mode_); | 849 GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED); |
| 865 __ CallStub(&igostub); | 850 __ CallStub(&igostub); |
| 866 } | 851 } |
| 867 | 852 |
| 868 private: | 853 private: |
| 869 Token::Value op_; | 854 Token::Value op_; |
| 870 int value_; | 855 int value_; |
| 871 OverwriteMode overwrite_mode_; | 856 OverwriteMode overwrite_mode_; |
| 872 }; | 857 }; |
| 873 | 858 |
| 874 | 859 |
| 875 class DeferredInlinedSmiOperationReversed: public DeferredCode { | 860 class DeferredInlinedSmiOperationReversed: public DeferredCode { |
| 876 public: | 861 public: |
| 877 DeferredInlinedSmiOperationReversed(CodeGenerator* generator, | 862 DeferredInlinedSmiOperationReversed(CodeGenerator* generator, |
| 878 Token::Value op, int value, | 863 Token::Value op, int value, |
| 879 OverwriteMode overwrite_mode) : | 864 OverwriteMode overwrite_mode) : |
| 880 DeferredCode(generator), op_(op), value_(value), | 865 DeferredCode(generator), op_(op), value_(value), |
| 881 overwrite_mode_(overwrite_mode) { | 866 overwrite_mode_(overwrite_mode) { |
| 882 set_comment("[ DeferredInlinedSmiOperationReversed"); | 867 set_comment("[ DeferredInlinedSmiOperationReversed"); |
| 883 } | 868 } |
| 884 virtual void Generate() { | 869 virtual void Generate() { |
| 885 __ push(Immediate(Smi::FromInt(value_))); | 870 __ push(Immediate(Smi::FromInt(value_))); |
| 886 __ push(eax); | 871 __ push(eax); |
| 887 GenericBinaryOpStub igostub(op_, overwrite_mode_); | 872 GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED); |
| 888 __ CallStub(&igostub); | 873 __ CallStub(&igostub); |
| 889 } | 874 } |
| 890 | 875 |
| 891 private: | 876 private: |
| 892 Token::Value op_; | 877 Token::Value op_; |
| 893 int value_; | 878 int value_; |
| 894 OverwriteMode overwrite_mode_; | 879 OverwriteMode overwrite_mode_; |
| 895 }; | 880 }; |
| 896 | 881 |
| 897 | 882 |
| 898 class DeferredInlinedSmiAdd: public DeferredCode { | 883 class DeferredInlinedSmiAdd: public DeferredCode { |
| 899 public: | 884 public: |
| 900 DeferredInlinedSmiAdd(CodeGenerator* generator, int value, | 885 DeferredInlinedSmiAdd(CodeGenerator* generator, int value, |
| 901 OverwriteMode overwrite_mode) : | 886 OverwriteMode overwrite_mode) : |
| 902 DeferredCode(generator), value_(value), overwrite_mode_(overwrite_mode) { | 887 DeferredCode(generator), value_(value), overwrite_mode_(overwrite_mode) { |
| 903 set_comment("[ DeferredInlinedSmiAdd"); | 888 set_comment("[ DeferredInlinedSmiAdd"); |
| 904 } | 889 } |
| 905 | 890 |
| 906 virtual void Generate() { | 891 virtual void Generate() { |
| 907 // Undo the optimistic add operation and call the shared stub. | 892 // Undo the optimistic add operation and call the shared stub. |
| 908 Immediate immediate(Smi::FromInt(value_)); | 893 Immediate immediate(Smi::FromInt(value_)); |
| 909 __ sub(Operand(eax), immediate); | 894 __ sub(Operand(eax), immediate); |
| 910 __ push(eax); | 895 __ push(eax); |
| 911 __ push(immediate); | 896 __ push(immediate); |
| 912 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_); | 897 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED); |
| 913 __ CallStub(&igostub); | 898 __ CallStub(&igostub); |
| 914 } | 899 } |
| 915 | 900 |
| 916 private: | 901 private: |
| 917 int value_; | 902 int value_; |
| 918 OverwriteMode overwrite_mode_; | 903 OverwriteMode overwrite_mode_; |
| 919 }; | 904 }; |
| 920 | 905 |
| 921 | 906 |
| 922 class DeferredInlinedSmiAddReversed: public DeferredCode { | 907 class DeferredInlinedSmiAddReversed: public DeferredCode { |
| 923 public: | 908 public: |
| 924 DeferredInlinedSmiAddReversed(CodeGenerator* generator, int value, | 909 DeferredInlinedSmiAddReversed(CodeGenerator* generator, int value, |
| 925 OverwriteMode overwrite_mode) : | 910 OverwriteMode overwrite_mode) : |
| 926 DeferredCode(generator), value_(value), overwrite_mode_(overwrite_mode) { | 911 DeferredCode(generator), value_(value), overwrite_mode_(overwrite_mode) { |
| 927 set_comment("[ DeferredInlinedSmiAddReversed"); | 912 set_comment("[ DeferredInlinedSmiAddReversed"); |
| 928 } | 913 } |
| 929 | 914 |
| 930 virtual void Generate() { | 915 virtual void Generate() { |
| 931 // Undo the optimistic add operation and call the shared stub. | 916 // Undo the optimistic add operation and call the shared stub. |
| 932 Immediate immediate(Smi::FromInt(value_)); | 917 Immediate immediate(Smi::FromInt(value_)); |
| 933 __ sub(Operand(eax), immediate); | 918 __ sub(Operand(eax), immediate); |
| 934 __ push(immediate); | 919 __ push(immediate); |
| 935 __ push(eax); | 920 __ push(eax); |
| 936 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_); | 921 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED); |
| 937 __ CallStub(&igostub); | 922 __ CallStub(&igostub); |
| 938 } | 923 } |
| 939 | 924 |
| 940 private: | 925 private: |
| 941 int value_; | 926 int value_; |
| 942 OverwriteMode overwrite_mode_; | 927 OverwriteMode overwrite_mode_; |
| 943 }; | 928 }; |
| 944 | 929 |
| 945 | 930 |
| 946 class DeferredInlinedSmiSub: public DeferredCode { | 931 class DeferredInlinedSmiSub: public DeferredCode { |
| 947 public: | 932 public: |
| 948 DeferredInlinedSmiSub(CodeGenerator* generator, int value, | 933 DeferredInlinedSmiSub(CodeGenerator* generator, int value, |
| 949 OverwriteMode overwrite_mode) : | 934 OverwriteMode overwrite_mode) : |
| 950 DeferredCode(generator), value_(value), overwrite_mode_(overwrite_mode) { | 935 DeferredCode(generator), value_(value), overwrite_mode_(overwrite_mode) { |
| 951 set_comment("[ DeferredInlinedSmiSub"); | 936 set_comment("[ DeferredInlinedSmiSub"); |
| 952 } | 937 } |
| 953 | 938 |
| 954 virtual void Generate() { | 939 virtual void Generate() { |
| 955 // Undo the optimistic sub operation and call the shared stub. | 940 // Undo the optimistic sub operation and call the shared stub. |
| 956 Immediate immediate(Smi::FromInt(value_)); | 941 Immediate immediate(Smi::FromInt(value_)); |
| 957 __ add(Operand(eax), immediate); | 942 __ add(Operand(eax), immediate); |
| 958 __ push(eax); | 943 __ push(eax); |
| 959 __ push(immediate); | 944 __ push(immediate); |
| 960 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_); | 945 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED); |
| 961 __ CallStub(&igostub); | 946 __ CallStub(&igostub); |
| 962 } | 947 } |
| 963 | 948 |
| 964 private: | 949 private: |
| 965 int value_; | 950 int value_; |
| 966 OverwriteMode overwrite_mode_; | 951 OverwriteMode overwrite_mode_; |
| 967 }; | 952 }; |
| 968 | 953 |
| 969 | 954 |
| 970 class DeferredInlinedSmiSubReversed: public DeferredCode { | 955 class DeferredInlinedSmiSubReversed: public DeferredCode { |
| 971 public: | 956 public: |
| 972 // tos_reg is used to save the TOS value before reversing the operands | 957 // tos_reg is used to save the TOS value before reversing the operands |
| 973 // eax will contain the immediate value after undoing the optimistic sub. | 958 // eax will contain the immediate value after undoing the optimistic sub. |
| 974 DeferredInlinedSmiSubReversed(CodeGenerator* generator, Register tos_reg, | 959 DeferredInlinedSmiSubReversed(CodeGenerator* generator, Register tos_reg, |
| 975 OverwriteMode overwrite_mode) : | 960 OverwriteMode overwrite_mode) : |
| 976 DeferredCode(generator), tos_reg_(tos_reg), | 961 DeferredCode(generator), tos_reg_(tos_reg), |
| 977 overwrite_mode_(overwrite_mode) { | 962 overwrite_mode_(overwrite_mode) { |
| 978 set_comment("[ DeferredInlinedSmiSubReversed"); | 963 set_comment("[ DeferredInlinedSmiSubReversed"); |
| 979 } | 964 } |
| 980 | 965 |
| 981 virtual void Generate() { | 966 virtual void Generate() { |
| 982 // Undo the optimistic sub operation and call the shared stub. | 967 // Undo the optimistic sub operation and call the shared stub. |
| 983 __ add(eax, Operand(tos_reg_)); | 968 __ add(eax, Operand(tos_reg_)); |
| 984 __ push(eax); | 969 __ push(eax); |
| 985 __ push(tos_reg_); | 970 __ push(tos_reg_); |
| 986 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_); | 971 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED); |
| 987 __ CallStub(&igostub); | 972 __ CallStub(&igostub); |
| 988 } | 973 } |
| 989 | 974 |
| 990 private: | 975 private: |
| 991 Register tos_reg_; | 976 Register tos_reg_; |
| 992 OverwriteMode overwrite_mode_; | 977 OverwriteMode overwrite_mode_; |
| 993 }; | 978 }; |
| 994 | 979 |
| 995 | 980 |
| 996 void CodeGenerator::SmiOperation(Token::Value op, | 981 void CodeGenerator::SmiOperation(Token::Value op, |
| (...skipping 2955 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 3952 // Return 1/0 for true/false in eax. | 3937 // Return 1/0 for true/false in eax. |
| 3953 __ bind(&true_result); | 3938 __ bind(&true_result); |
| 3954 __ mov(eax, 1); | 3939 __ mov(eax, 1); |
| 3955 __ ret(1 * kPointerSize); | 3940 __ ret(1 * kPointerSize); |
| 3956 __ bind(&false_result); | 3941 __ bind(&false_result); |
| 3957 __ mov(eax, 0); | 3942 __ mov(eax, 0); |
| 3958 __ ret(1 * kPointerSize); | 3943 __ ret(1 * kPointerSize); |
| 3959 } | 3944 } |
| 3960 | 3945 |
| 3961 | 3946 |
| 3947 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { | |
| 3948 // Perform fast-case smi code for the operation (eax <op> ebx) and | |
| 3949 // leave result in register eax. | |
| 3950 | |
| 3951 // Prepare the smi check of both operands by or'ing them together | |
| 3952 // before checking against the smi mask. | |
| 3953 __ mov(ecx, Operand(ebx)); | |
| 3954 __ or_(ecx, Operand(eax)); | |
| 3955 | |
| 3956 switch (op_) { | |
| 3957 case Token::ADD: | |
| 3958 __ add(eax, Operand(ebx)); // add optimistically | |
| 3959 __ j(overflow, slow, not_taken); | |
| 3960 break; | |
| 3961 | |
| 3962 case Token::SUB: | |
| 3963 __ sub(eax, Operand(ebx)); // subtract optimistically | |
| 3964 __ j(overflow, slow, not_taken); | |
| 3965 break; | |
| 3966 | |
| 3967 case Token::DIV: | |
| 3968 case Token::MOD: | |
| 3969 // Sign extend eax into edx:eax. | |
| 3970 __ cdq(); | |
| 3971 // Check for 0 divisor. | |
| 3972 __ test(ebx, Operand(ebx)); | |
| 3973 __ j(zero, slow, not_taken); | |
| 3974 break; | |
| 3975 | |
| 3976 default: | |
| 3977 // Fall-through to smi check. | |
| 3978 break; | |
| 3979 } | |
| 3980 | |
| 3981 // Perform the actual smi check. | |
| 3982 ASSERT(kSmiTag == 0); // adjust zero check if not the case | |
| 3983 __ test(ecx, Immediate(kSmiTagMask)); | |
| 3984 __ j(not_zero, slow, not_taken); | |
| 3985 | |
| 3986 switch (op_) { | |
| 3987 case Token::ADD: | |
| 3988 case Token::SUB: | |
| 3989 // Do nothing here. | |
| 3990 break; | |
| 3991 | |
| 3992 case Token::MUL: | |
| 3993 // If the smi tag is 0 we can just leave the tag on one operand. | |
| 3994 ASSERT(kSmiTag == 0); // adjust code below if not the case | |
| 3995 // Remove tag from one of the operands (but keep sign). | |
| 3996 __ sar(eax, kSmiTagSize); | |
| 3997 // Do multiplication. | |
| 3998 __ imul(eax, Operand(ebx)); // multiplication of smis; result in eax | |
| 3999 // Go slow on overflows. | |
| 4000 __ j(overflow, slow, not_taken); | |
| 4001 // Check for negative zero result. | |
| 4002 __ NegativeZeroTest(eax, ecx, slow); // use ecx = x | y | |
| 4003 break; | |
| 4004 | |
| 4005 case Token::DIV: | |
| 4006 // Divide edx:eax by ebx. | |
| 4007 __ idiv(ebx); | |
| 4008 // Check for the corner case of dividing the most negative smi | |
| 4009 // by -1. We cannot use the overflow flag, since it is not set | |
| 4010 // by idiv instruction. | |
| 4011 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); | |
| 4012 __ cmp(eax, 0x40000000); | |
| 4013 __ j(equal, slow); | |
| 4014 // Check for negative zero result. | |
| 4015 __ NegativeZeroTest(eax, ecx, slow); // use ecx = x | y | |
| 4016 // Check that the remainder is zero. | |
| 4017 __ test(edx, Operand(edx)); | |
| 4018 __ j(not_zero, slow); | |
| 4019 // Tag the result and store it in register eax. | |
| 4020 ASSERT(kSmiTagSize == times_2); // adjust code if not the case | |
| 4021 __ lea(eax, Operand(eax, times_2, kSmiTag)); | |
| 4022 break; | |
| 4023 | |
| 4024 case Token::MOD: | |
| 4025 // Divide edx:eax by ebx. | |
| 4026 __ idiv(ebx); | |
| 4027 // Check for negative zero result. | |
| 4028 __ NegativeZeroTest(edx, ecx, slow); // use ecx = x | y | |
| 4029 // Move remainder to register eax. | |
| 4030 __ mov(eax, Operand(edx)); | |
| 4031 break; | |
| 4032 | |
| 4033 case Token::BIT_OR: | |
| 4034 __ or_(eax, Operand(ebx)); | |
| 4035 break; | |
| 4036 | |
| 4037 case Token::BIT_AND: | |
| 4038 __ and_(eax, Operand(ebx)); | |
| 4039 break; | |
| 4040 | |
| 4041 case Token::BIT_XOR: | |
| 4042 __ xor_(eax, Operand(ebx)); | |
| 4043 break; | |
| 4044 | |
| 4045 case Token::SHL: | |
| 4046 case Token::SHR: | |
| 4047 case Token::SAR: | |
| 4048 // Move the second operand into register ecx. | |
| 4049 __ mov(ecx, Operand(ebx)); | |
| 4050 // Remove tags from operands (but keep sign). | |
| 4051 __ sar(eax, kSmiTagSize); | |
| 4052 __ sar(ecx, kSmiTagSize); | |
| 4053 // Perform the operation. | |
| 4054 switch (op_) { | |
| 4055 case Token::SAR: | |
| 4056 __ sar(eax); | |
| 4057 // No checks of result necessary | |
| 4058 break; | |
| 4059 case Token::SHR: | |
| 4060 __ shr(eax); | |
| 4061 // Check that the *unsigned* result fits in a smi. | |
| 4062 // Neither of the two high-order bits can be set: | |
| 4063 // - 0x80000000: high bit would be lost when smi tagging. | |
| 4064 // - 0x40000000: this number would convert to negative when | |
| 4065 // Smi tagging these two cases can only happen with shifts | |
| 4066 // by 0 or 1 when handed a valid smi. | |
| 4067 __ test(eax, Immediate(0xc0000000)); | |
| 4068 __ j(not_zero, slow, not_taken); | |
| 4069 break; | |
| 4070 case Token::SHL: | |
| 4071 __ shl(eax); | |
| 4072 // Check that the *signed* result fits in a smi. | |
| 4073 __ lea(ecx, Operand(eax, 0x40000000)); | |
| 4074 __ test(ecx, Immediate(0x80000000)); | |
| 4075 __ j(not_zero, slow, not_taken); | |
| 4076 break; | |
| 4077 default: | |
| 4078 UNREACHABLE(); | |
| 4079 } | |
| 4080 // Tag the result and store it in register eax. | |
| 4081 ASSERT(kSmiTagSize == times_2); // adjust code if not the case | |
| 4082 __ lea(eax, Operand(eax, times_2, kSmiTag)); | |
| 4083 break; | |
| 4084 | |
| 4085 default: | |
| 4086 UNREACHABLE(); | |
| 4087 break; | |
| 4088 } | |
| 4089 } | |
| 4090 | |
| 4091 | |
| 3962 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { | 4092 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { |
| 3963 Label call_runtime; | 4093 Label call_runtime; |
| 3964 __ mov(eax, Operand(esp, 1 * kPointerSize)); // Get y. | |
| 3965 __ mov(edx, Operand(esp, 2 * kPointerSize)); // Get x. | |
| 3966 | 4094 |
| 3967 // 1. Smi case. | 4095 if (flags_ == SMI_CODE_IN_STUB) { |
| 3968 switch (op_) { | 4096 // The fast case smi code wasn't inlined in the stub caller |
| 3969 case Token::ADD: { | 4097 // code. Generate it here to speed up common operations. |
| 3970 // eax: y. | 4098 Label slow; |
| 3971 // edx: x. | 4099 __ mov(ebx, Operand(esp, 1 * kPointerSize)); // get y |
| 3972 Label revert; | 4100 __ mov(eax, Operand(esp, 2 * kPointerSize)); // get x |
| 3973 __ mov(ecx, Operand(eax)); | 4101 GenerateSmiCode(masm, &slow); |
| 3974 __ or_(ecx, Operand(edx)); // ecx = x | y. | 4102 __ ret(2 * kPointerSize); // remove both operands |
| 3975 __ add(eax, Operand(edx)); // Add y optimistically. | |
| 3976 // Go slow-path in case of overflow. | |
| 3977 __ j(overflow, &revert, not_taken); | |
| 3978 // Go slow-path in case of non-smi operands. | |
| 3979 ASSERT(kSmiTag == 0); // adjust code below | |
| 3980 __ test(ecx, Immediate(kSmiTagMask)); | |
| 3981 __ j(not_zero, &revert, not_taken); | |
| 3982 __ ret(2 * kPointerSize); // Remove all operands. | |
| 3983 | 4103 |
| 3984 // Revert optimistic add. | 4104 // Too bad. The fast case smi code didn't succeed. |
| 3985 __ bind(&revert); | 4105 __ bind(&slow); |
| 3986 __ sub(eax, Operand(edx)); | |
| 3987 break; | |
| 3988 } | |
| 3989 case Token::SUB: { | |
| 3990 // eax: y. | |
| 3991 // edx: x. | |
| 3992 Label revert; | |
| 3993 __ mov(ecx, Operand(edx)); | |
| 3994 __ or_(ecx, Operand(eax)); // ecx = x | y. | |
| 3995 __ sub(edx, Operand(eax)); // Subtract y optimistically. | |
| 3996 // Go slow-path in case of overflow. | |
| 3997 __ j(overflow, &revert, not_taken); | |
| 3998 // Go slow-path in case of non-smi operands. | |
| 3999 ASSERT(kSmiTag == 0); // adjust code below | |
| 4000 __ test(ecx, Immediate(kSmiTagMask)); | |
| 4001 __ j(not_zero, &revert, not_taken); | |
| 4002 __ mov(eax, Operand(edx)); | |
| 4003 __ ret(2 * kPointerSize); // Remove all operands. | |
| 4004 | |
| 4005 // Revert optimistic sub. | |
| 4006 __ bind(&revert); | |
| 4007 __ add(edx, Operand(eax)); | |
| 4008 break; | |
| 4009 } | |
| 4010 case Token::MUL: { | |
| 4011 // eax: y | |
| 4012 // edx: x | |
| 4013 // a) both operands smi and result fits into a smi -> return. | |
| 4014 // b) at least one of operands non-smi -> non_smi_operands. | |
| 4015 // c) result does not fit in a smi -> non_smi_result. | |
| 4016 Label non_smi_operands, non_smi_result; | |
| 4017 // Tag check. | |
| 4018 __ mov(ecx, Operand(edx)); | |
| 4019 __ or_(ecx, Operand(eax)); // ecx = x | y. | |
| 4020 ASSERT(kSmiTag == 0); // Adjust code below. | |
| 4021 __ test(ecx, Immediate(kSmiTagMask)); | |
| 4022 // Jump if not both smi; check if float numbers. | |
| 4023 __ j(not_zero, &non_smi_operands, not_taken); | |
| 4024 | |
| 4025 // Get copies of operands. | |
| 4026 __ mov(ebx, Operand(eax)); | |
| 4027 __ mov(ecx, Operand(edx)); | |
| 4028 // If the smi tag is 0 we can just leave the tag on one operand. | |
| 4029 ASSERT(kSmiTag == 0); // adjust code below | |
| 4030 // Remove tag from one of the operands (but keep sign). | |
| 4031 __ sar(ecx, kSmiTagSize); | |
| 4032 // Do multiplication. | |
| 4033 __ imul(eax, Operand(ecx)); // Multiplication of Smis; result in eax. | |
| 4034 // Go slow on overflows. | |
| 4035 __ j(overflow, &non_smi_result, not_taken); | |
| 4036 // ...but operands OK for float arithmetic. | |
| 4037 | |
| 4038 // If the result is +0 we may need to check if the result should | |
| 4039 // really be -0. Welcome to the -0 fan club. | |
| 4040 __ NegativeZeroTest(eax, ebx, edx, ecx, &non_smi_result); | |
| 4041 | |
| 4042 __ ret(2 * kPointerSize); | |
| 4043 | |
| 4044 __ bind(&non_smi_result); | |
| 4045 // TODO(1243132): Do not check float operands here. | |
| 4046 __ bind(&non_smi_operands); | |
| 4047 __ mov(eax, Operand(esp, 1 * kPointerSize)); | |
| 4048 __ mov(edx, Operand(esp, 2 * kPointerSize)); | |
| 4049 break; | |
| 4050 } | |
| 4051 case Token::DIV: { | |
| 4052 // eax: y | |
| 4053 // edx: x | |
| 4054 Label non_smi_operands, non_smi_result, division_by_zero; | |
| 4055 __ mov(ebx, Operand(eax)); // Get y | |
| 4056 __ mov(eax, Operand(edx)); // Get x | |
| 4057 | |
| 4058 __ cdq(); // Sign extend eax into edx:eax. | |
| 4059 // Tag check. | |
| 4060 __ mov(ecx, Operand(ebx)); | |
| 4061 __ or_(ecx, Operand(eax)); // ecx = x | y. | |
| 4062 ASSERT(kSmiTag == 0); // Adjust code below. | |
| 4063 __ test(ecx, Immediate(kSmiTagMask)); | |
| 4064 // Jump if not both smi; check if float numbers. | |
| 4065 __ j(not_zero, &non_smi_operands, not_taken); | |
| 4066 __ test(ebx, Operand(ebx)); // Check for 0 divisor. | |
| 4067 __ j(zero, &division_by_zero, not_taken); | |
| 4068 | |
| 4069 __ idiv(ebx); | |
| 4070 // Check for the corner case of dividing the most negative smi by -1. | |
| 4071 // (We cannot use the overflow flag, since it is not set by idiv.) | |
| 4072 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); | |
| 4073 __ cmp(eax, 0x40000000); | |
| 4074 __ j(equal, &non_smi_result); | |
| 4075 // If the result is +0 we may need to check if the result should | |
| 4076 // really be -0. Welcome to the -0 fan club. | |
| 4077 __ NegativeZeroTest(eax, ecx, &non_smi_result); // Use ecx = x | y. | |
| 4078 __ test(edx, Operand(edx)); | |
| 4079 // Use floats if there's a remainder. | |
| 4080 __ j(not_zero, &non_smi_result, not_taken); | |
| 4081 __ shl(eax, kSmiTagSize); | |
| 4082 __ ret(2 * kPointerSize); // Remove all operands. | |
| 4083 | |
| 4084 __ bind(&division_by_zero); | |
| 4085 __ mov(eax, Operand(esp, 1 * kPointerSize)); | |
| 4086 __ mov(edx, Operand(esp, 2 * kPointerSize)); | |
| 4087 __ jmp(&call_runtime); // Division by zero must go through runtime. | |
| 4088 | |
| 4089 __ bind(&non_smi_result); | |
| 4090 // TODO(1243132): Do not check float operands here. | |
| 4091 __ bind(&non_smi_operands); | |
| 4092 __ mov(eax, Operand(esp, 1 * kPointerSize)); | |
| 4093 __ mov(edx, Operand(esp, 2 * kPointerSize)); | |
| 4094 break; | |
| 4095 } | |
| 4096 case Token::MOD: { | |
| 4097 Label slow; | |
| 4098 __ mov(ebx, Operand(eax)); // get y | |
| 4099 __ mov(eax, Operand(edx)); // get x | |
| 4100 __ cdq(); // sign extend eax into edx:eax | |
| 4101 // tag check | |
| 4102 __ mov(ecx, Operand(ebx)); | |
| 4103 __ or_(ecx, Operand(eax)); // ecx = x | y; | |
| 4104 ASSERT(kSmiTag == 0); // adjust code below | |
| 4105 __ test(ecx, Immediate(kSmiTagMask)); | |
| 4106 __ j(not_zero, &slow, not_taken); | |
| 4107 __ test(ebx, Operand(ebx)); // test for y == 0 | |
| 4108 __ j(zero, &slow); | |
| 4109 | |
| 4110 // Fast case: Do integer division and use remainder. | |
| 4111 __ idiv(ebx); | |
| 4112 __ NegativeZeroTest(edx, ecx, &slow); // use ecx = x | y | |
| 4113 __ mov(eax, Operand(edx)); | |
| 4114 __ ret(2 * kPointerSize); | |
| 4115 | |
| 4116 // Slow case: Call runtime operator implementation. | |
| 4117 __ bind(&slow); | |
| 4118 __ mov(eax, Operand(esp, 1 * kPointerSize)); | |
| 4119 __ mov(edx, Operand(esp, 2 * kPointerSize)); | |
| 4120 // Fall through to |call_runtime|. | |
| 4121 break; | |
| 4122 } | |
| 4123 case Token::BIT_OR: | |
| 4124 case Token::BIT_AND: | |
| 4125 case Token::BIT_XOR: | |
| 4126 case Token::SAR: | |
| 4127 case Token::SHL: | |
| 4128 case Token::SHR: { | |
| 4129 // Smi-case for bitops should already have been inlined. | |
| 4130 break; | |
| 4131 } | |
| 4132 default: { | |
| 4133 UNREACHABLE(); | |
| 4134 } | |
| 4135 } | 4106 } |
| 4136 | 4107 |
| 4137 // 2. Floating point case. | 4108 // Setup registers. |
| 4109 __ mov(eax, Operand(esp, 1 * kPointerSize)); // get y | |
| 4110 __ mov(edx, Operand(esp, 2 * kPointerSize)); // get x | |
| 4111 | |
| 4112 // Floating point case. | |
| 4138 switch (op_) { | 4113 switch (op_) { |
| 4139 case Token::ADD: | 4114 case Token::ADD: |
| 4140 case Token::SUB: | 4115 case Token::SUB: |
| 4141 case Token::MUL: | 4116 case Token::MUL: |
| 4142 case Token::DIV: { | 4117 case Token::DIV: { |
| 4143 // eax: y | 4118 // eax: y |
| 4144 // edx: x | 4119 // edx: x |
| 4145 FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx); | 4120 FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx); |
| 4146 // Fast-case: Both operands are numbers. | 4121 // Fast-case: Both operands are numbers. |
| 4147 // Allocate a heap number, if needed. | 4122 // Allocate a heap number, if needed. |
| (...skipping 121 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 4269 | 4244 |
| 4270 // SHR should return uint32 - go to runtime for non-smi/negative result. | 4245 // SHR should return uint32 - go to runtime for non-smi/negative result. |
| 4271 if (op_ == Token::SHR) __ bind(&non_smi_result); | 4246 if (op_ == Token::SHR) __ bind(&non_smi_result); |
| 4272 __ mov(eax, Operand(esp, 1 * kPointerSize)); | 4247 __ mov(eax, Operand(esp, 1 * kPointerSize)); |
| 4273 __ mov(edx, Operand(esp, 2 * kPointerSize)); | 4248 __ mov(edx, Operand(esp, 2 * kPointerSize)); |
| 4274 break; | 4249 break; |
| 4275 } | 4250 } |
| 4276 default: UNREACHABLE(); break; | 4251 default: UNREACHABLE(); break; |
| 4277 } | 4252 } |
| 4278 | 4253 |
| 4279 // 3. If all else fails, use the runtime system to get the correct result. | 4254 // If all else fails, use the runtime system to get the correct |
| 4255 // result. | |
| 4280 __ bind(&call_runtime); | 4256 __ bind(&call_runtime); |
| 4281 switch (op_) { | 4257 switch (op_) { |
| 4282 case Token::ADD: | 4258 case Token::ADD: |
| 4283 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); | 4259 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); |
| 4284 break; | 4260 break; |
| 4285 case Token::SUB: | 4261 case Token::SUB: |
| 4286 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); | 4262 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); |
| 4287 break; | 4263 break; |
| 4288 case Token::MUL: | 4264 case Token::MUL: |
| 4289 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); | 4265 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); |
| (...skipping 821 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 5111 | 5087 |
| 5112 // Slow-case: Go through the JavaScript implementation. | 5088 // Slow-case: Go through the JavaScript implementation. |
| 5113 __ bind(&slow); | 5089 __ bind(&slow); |
| 5114 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); | 5090 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); |
| 5115 } | 5091 } |
| 5116 | 5092 |
| 5117 | 5093 |
| 5118 #undef __ | 5094 #undef __ |
| 5119 | 5095 |
| 5120 } } // namespace v8::internal | 5096 } } // namespace v8::internal |
| OLD | NEW |