OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 670 matching lines...)
681 Register scratch); | 681 Register scratch); |
682 // Allocate a heap number in new space with undefined value. | 682 // Allocate a heap number in new space with undefined value. |
683 // Returns tagged pointer in eax, or jumps to need_gc if new space is full. | 683 // Returns tagged pointer in eax, or jumps to need_gc if new space is full. |
684 static void AllocateHeapNumber(MacroAssembler* masm, | 684 static void AllocateHeapNumber(MacroAssembler* masm, |
685 Label* need_gc, | 685 Label* need_gc, |
686 Register scratch1, | 686 Register scratch1, |
687 Register scratch2); | 687 Register scratch2); |
688 }; | 688 }; |
689 | 689 |
690 | 690 |
691 // Flag that indicates whether or not the code for dealing with smis | 691 // Flag that indicates whether the code that handles smi arguments |
692 // is inlined or should be dealt with in the stub. | 692 // should be inlined, placed in the stub, or omitted entirely. |
693 enum GenericBinaryFlags { | 693 enum GenericBinaryFlags { |
694 SMI_CODE_IN_STUB, | 694 SMI_CODE_IN_STUB, |
695 SMI_CODE_INLINED | 695 SMI_CODE_INLINED, |
| 696 // It is known at compile time that at least one argument is not a smi. |
| 697 NO_SMI_CODE |
696 }; | 698 }; |
697 | 699 |
698 | 700 |
699 class GenericBinaryOpStub: public CodeStub { | 701 class GenericBinaryOpStub: public CodeStub { |
700 public: | 702 public: |
701 GenericBinaryOpStub(Token::Value op, | 703 GenericBinaryOpStub(Token::Value op, |
702 OverwriteMode mode, | 704 OverwriteMode mode, |
703 GenericBinaryFlags flags) | 705 GenericBinaryFlags flags) |
704 : op_(op), mode_(mode), flags_(flags) { } | 706 : op_(op), mode_(mode), flags_(flags) { } |
705 | 707 |
(...skipping 10 matching lines...)
716 void Print() { | 718 void Print() { |
717 PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n", | 719 PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n", |
718 Token::String(op_), | 720 Token::String(op_), |
719 static_cast<int>(mode_), | 721 static_cast<int>(mode_), |
720 static_cast<int>(flags_)); | 722 static_cast<int>(flags_)); |
721 } | 723 } |
722 #endif | 724 #endif |
723 | 725 |
724 // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM. | 726 // Minor key encoding in 16 bits FFOOOOOOOOOOOOMM. |
725 class ModeBits: public BitField<OverwriteMode, 0, 2> {}; | 727 class ModeBits: public BitField<OverwriteMode, 0, 2> {}; |
726 class OpBits: public BitField<Token::Value, 2, 13> {}; | 728 class OpBits: public BitField<Token::Value, 2, 12> {}; |
727 class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {}; | 729 class FlagBits: public BitField<GenericBinaryFlags, 14, 2> {}; |
728 | 730 |
729 Major MajorKey() { return GenericBinaryOp; } | 731 Major MajorKey() { return GenericBinaryOp; } |
730 int MinorKey() { | 732 int MinorKey() { |
731 // Encode the parameters in a unique 16 bit value. | 733 // Encode the parameters in a unique 16 bit value. |
732 return OpBits::encode(op_) | | 734 return OpBits::encode(op_) | |
733 ModeBits::encode(mode_) | | 735 ModeBits::encode(mode_) | |
734 FlagBits::encode(flags_); | 736 FlagBits::encode(flags_); |
735 } | 737 } |
736 void Generate(MacroAssembler* masm); | 738 void Generate(MacroAssembler* masm); |
737 }; | 739 }; |
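A minimal sketch, not part of the patch, of how the new 16-bit minor key packs (2 mode bits, 12 op bits, 2 flag bits, matching the BitField parameters above); the helper names are illustrative only:

    // mode in bits 0..1, op in bits 2..13, flags in bits 14..15.
    inline int PackMinorKey(int op, int mode, int flags) {
      return (mode & 0x3) | ((op & 0xFFF) << 2) | ((flags & 0x3) << 14);
    }
    inline int UnpackMode(int key)  { return key & 0x3; }
    inline int UnpackOp(int key)    { return (key >> 2) & 0xFFF; }
    inline int UnpackFlags(int key) { return (key >> 14) & 0x3; }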
738 | 740 |
739 | 741 |
740 const char* GenericBinaryOpStub::GetName() { | 742 const char* GenericBinaryOpStub::GetName() { |
741 switch (op_) { | 743 switch (op_) { |
742 case Token::ADD: return "GenericBinaryOpStub_ADD"; | 744 case Token::ADD: return "GenericBinaryOpStub_ADD"; |
743 case Token::SUB: return "GenericBinaryOpStub_SUB"; | 745 case Token::SUB: return "GenericBinaryOpStub_SUB"; |
744 case Token::MUL: return "GenericBinaryOpStub_MUL"; | 746 case Token::MUL: return "GenericBinaryOpStub_MUL"; |
745 case Token::DIV: return "GenericBinaryOpStub_DIV"; | 747 case Token::DIV: return "GenericBinaryOpStub_DIV"; |
746 case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR"; | 748 case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR"; |
747 case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND"; | 749 case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND"; |
748 case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR"; | 750 case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR"; |
749 case Token::SAR: return "GenericBinaryOpStub_SAR"; | 751 case Token::SAR: return "GenericBinaryOpStub_SAR"; |
750 case Token::SHL: return "GenericBinaryOpStub_SHL"; | 752 case Token::SHL: return "GenericBinaryOpStub_SHL"; |
751 case Token::SHR: return "GenericBinaryOpStub_SHR"; | 753 case Token::SHR: return "GenericBinaryOpStub_SHR"; |
752 default: return "GenericBinaryOpStub"; | 754 default: return "GenericBinaryOpStub"; |
753 } | 755 } |
754 } | 756 } |
755 | 757 |
756 | 758 |
| 759 // A deferred code class implementing binary operations on likely smis. |
| 760 // This class generates both inline code and deferred code. |
| 761 // The fastest path is implemented inline. Deferred code calls |
| 762 // the GenericBinaryOpStub for slow cases. |
757 class DeferredInlineBinaryOperation: public DeferredCode { | 763 class DeferredInlineBinaryOperation: public DeferredCode { |
758 public: | 764 public: |
759 DeferredInlineBinaryOperation(CodeGenerator* generator, | 765 DeferredInlineBinaryOperation(CodeGenerator* generator, |
760 Token::Value op, | 766 Token::Value op, |
761 OverwriteMode mode, | 767 OverwriteMode mode, |
762 GenericBinaryFlags flags) | 768 GenericBinaryFlags flags) |
763 : DeferredCode(generator), stub_(op, mode, flags), op_(op) { | 769 : DeferredCode(generator), stub_(op, mode, flags), op_(op) { |
764 set_comment("[ DeferredInlineBinaryOperation"); | 770 set_comment("[ DeferredInlineBinaryOperation"); |
765 } | 771 } |
766 | 772 |
767 Result GenerateInlineCode(); | 773 // Consumes its arguments, left and right, leaving them invalid. |
| 774 Result GenerateInlineCode(Result* left, Result* right); |
768 | 775 |
769 virtual void Generate(); | 776 virtual void Generate(); |
770 | 777 |
771 private: | 778 private: |
772 GenericBinaryOpStub stub_; | 779 GenericBinaryOpStub stub_; |
773 Token::Value op_; | 780 Token::Value op_; |
774 }; | 781 }; |
775 | 782 |
776 | 783 |
777 void DeferredInlineBinaryOperation::Generate() { | 784 void DeferredInlineBinaryOperation::Generate() { |
(...skipping 37 matching lines...)
815 | 822 |
816 default: | 823 default: |
817 // By default only inline the Smi check code for likely smis if this | 824 // By default only inline the Smi check code for likely smis if this |
818 // operation is part of a loop. | 825 // operation is part of a loop. |
819 flags = ((loop_nesting() > 0) && type->IsLikelySmi()) | 826 flags = ((loop_nesting() > 0) && type->IsLikelySmi()) |
820 ? SMI_CODE_INLINED | 827 ? SMI_CODE_INLINED |
821 : SMI_CODE_IN_STUB; | 828 : SMI_CODE_IN_STUB; |
822 break; | 829 break; |
823 } | 830 } |
824 | 831 |
| 832 Result right = frame_->Pop(); |
| 833 Result left = frame_->Pop(); |
| 834 bool left_is_smi = left.is_constant() && left.handle()->IsSmi(); |
| 835 bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi(); |
| 836 bool right_is_smi = right.is_constant() && right.handle()->IsSmi(); |
| 837 bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi(); |
| 838 |
| 839 if (left_is_smi && right_is_smi) { |
| 840 // Compute the result, and return that as a constant on the frame. |
| 841 int left_int = Smi::cast(*left.handle())->value(); |
| 842 int right_int = Smi::cast(*right.handle())->value(); |
| 843 if (FoldConstantSmis(op, left_int, right_int)) return; |
| 844 } |
| 845 |
| 846 if (left_is_non_smi || right_is_non_smi) { |
| 847 // Set flag so that we go straight to the slow case, with no smi code. |
| 848 flags = NO_SMI_CODE; |
| 849 } else if (right_is_smi) { |
| 850 ConstantSmiBinaryOperation(op, &left, right.handle(), type, |
| 851 false, overwrite_mode); |
| 852 return; |
| 853 } else if (left_is_smi) { |
| 854 ConstantSmiBinaryOperation(op, &right, left.handle(), type, |
| 855 true, overwrite_mode); |
| 856 return; |
| 857 } |
| 858 |
825 if (flags == SMI_CODE_INLINED) { | 859 if (flags == SMI_CODE_INLINED) { |
826 // Create a new deferred code for the slow-case part. | 860 LikelySmiBinaryOperation(op, &left, &right, overwrite_mode); |
827 DeferredInlineBinaryOperation* deferred = | |
828 new DeferredInlineBinaryOperation(this, op, overwrite_mode, flags); | |
829 // Generate the inline part of the code. | |
830 // The operands are on the frame. | |
831 Result answer = deferred->GenerateInlineCode(); | |
832 deferred->BindExit(&answer); | |
833 frame_->Push(&answer); | |
834 } else { | 861 } else { |
835 // Call the stub and push the result to the stack. | 862 frame_->Push(&left); |
| 863 frame_->Push(&right); |
| 864 // If we know the arguments aren't smis, use the binary operation stub |
| 865 // that does not check for the fast smi case. |
| 866 // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED. |
| 867 if (flags == NO_SMI_CODE) { |
| 868 flags = SMI_CODE_INLINED; |
| 869 } |
836 GenericBinaryOpStub stub(op, overwrite_mode, flags); | 870 GenericBinaryOpStub stub(op, overwrite_mode, flags); |
837 Result answer = frame_->CallStub(&stub, 2); | 871 Result answer = frame_->CallStub(&stub, 2); |
838 frame_->Push(&answer); | 872 frame_->Push(&answer); |
839 } | 873 } |
840 } | 874 } |
841 | 875 |
842 | 876 |
| 877 bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) { |
| 878 Object* answer_object = Heap::undefined_value(); |
| 879 switch (op) { |
| 880 case Token::ADD: |
| 881 if (Smi::IsValid(left + right)) { |
| 882 answer_object = Smi::FromInt(left + right); |
| 883 } |
| 884 break; |
| 885 case Token::SUB: |
| 886 if (Smi::IsValid(left - right)) { |
| 887 answer_object = Smi::FromInt(left - right); |
| 888 } |
| 889 break; |
| 890 case Token::MUL: { |
| 891 double answer = static_cast<double>(left) * right; |
| 892 if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) { |
| 893 // If the product is zero and the non-zero factor is negative, |
| 894 // the spec requires us to return floating point negative zero. |
| 895 if (answer != 0 || (left >= 0 && right >= 0)) { |
| 896 answer_object = Smi::FromInt(static_cast<int>(answer)); |
| 897 } |
| 898 } |
| 899 } |
| 900 break; |
| 901 case Token::DIV: |
| 902 case Token::MOD: |
| 903 break; |
| 904 case Token::BIT_OR: |
| 905 answer_object = Smi::FromInt(left | right); |
| 906 break; |
| 907 case Token::BIT_AND: |
| 908 answer_object = Smi::FromInt(left & right); |
| 909 break; |
| 910 case Token::BIT_XOR: |
| 911 answer_object = Smi::FromInt(left ^ right); |
| 912 break; |
| 913 |
| 914 case Token::SHL: { |
| 915 int shift_amount = right & 0x1F; |
| 916 if (Smi::IsValid(left << shift_amount)) { |
| 917 answer_object = Smi::FromInt(left << shift_amount); |
| 918 } |
| 919 break; |
| 920 } |
| 921 case Token::SHR: { |
| 922 int shift_amount = right & 0x1F; |
| 923 unsigned int unsigned_left = left; |
| 924 unsigned_left >>= shift_amount; |
| 925 if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) { |
| 926 answer_object = Smi::FromInt(unsigned_left); |
| 927 } |
| 928 break; |
| 929 } |
| 930 case Token::SAR: { |
| 931 int shift_amount = right & 0x1F; |
| 932 unsigned int unsigned_left = left; |
| 933 if (left < 0) { |
| 934 // Perform an arithmetic shift of a negative number by |
| 935 // complementing the number, shifting logically, and complementing again. |
| 936 unsigned_left = ~unsigned_left; |
| 937 unsigned_left >>= shift_amount; |
| 938 unsigned_left = ~unsigned_left; |
| 939 } else { |
| 940 unsigned_left >>= shift_amount; |
| 941 } |
| 942 ASSERT(Smi::IsValid(unsigned_left)); // Converted to signed. |
| 943 answer_object = Smi::FromInt(unsigned_left); // Converted to signed. |
| 944 break; |
| 945 } |
| 946 default: |
| 947 UNREACHABLE(); |
| 948 break; |
| 949 } |
| 950 if (answer_object == Heap::undefined_value()) { |
| 951 return false; |
| 952 } |
| 953 frame_->Push(Handle<Object>(answer_object)); |
| 954 return true; |
| 955 } |
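A standalone sketch, not from the patch, of the complement/shift/complement trick used in the SAR case above: for a negative value it reproduces an arithmetic right shift using only logical (unsigned) shifts, which is what the constant folder does before re-tagging the result.

    int ArithmeticShiftRight(int value, int shift_amount) {
      unsigned int u = value;
      if (value < 0) {
        u = ~u;              // == -value - 1, a non-negative number
        u >>= shift_amount;  // logical shift is now equivalent to arithmetic
        u = ~u;              // == floor(value / 2^shift_amount)
      } else {
        u >>= shift_amount;
      }
      return static_cast<int>(u);
    }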
| 956 |
| 957 |
| 958 void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, |
| 959 Result* left, |
| 960 Result* right, |
| 961 OverwriteMode overwrite_mode) { |
| 962 // Create a new deferred code object that calls GenericBinaryOpStub |
| 963 // in the slow case. |
| 964 DeferredInlineBinaryOperation* deferred = |
| 965 new DeferredInlineBinaryOperation(this, op, overwrite_mode, |
| 966 SMI_CODE_INLINED); |
| 967 // Generate the inline code that handles some smi operations, |
| 968 // and jumps to the deferred code for everything else. |
| 969 Result answer = deferred->GenerateInlineCode(left, right); |
| 970 deferred->BindExit(&answer); |
| 971 frame_->Push(&answer); |
| 972 } |
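Background sketch, an assumption about the ia32 smi encoding rather than code from this patch: a smi is the integer value shifted left one bit with a zero tag bit, which is what the kSmiTagMask tests and the sar/add/lea tagging sequences below rely on. The helper names are illustrative.

    const int kSmiTag = 0;
    const int kSmiTagSize = 1;
    const int kSmiTagMask = (1 << kSmiTagSize) - 1;  // == 1

    inline bool IsSmiWord(int word) { return (word & kSmiTagMask) == kSmiTag; }
    inline int TagSmi(int value)    { return value << kSmiTagSize; }
    inline int UntagSmi(int smi)    { return smi >> kSmiTagSize; }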
| 973 |
| 974 |
843 class DeferredInlineSmiOperation: public DeferredCode { | 975 class DeferredInlineSmiOperation: public DeferredCode { |
844 public: | 976 public: |
845 DeferredInlineSmiOperation(CodeGenerator* generator, | 977 DeferredInlineSmiOperation(CodeGenerator* generator, |
846 Token::Value op, | 978 Token::Value op, |
847 Smi* value, | 979 Smi* value, |
848 OverwriteMode overwrite_mode) | 980 OverwriteMode overwrite_mode) |
849 : DeferredCode(generator), | 981 : DeferredCode(generator), |
850 op_(op), | 982 op_(op), |
851 value_(value), | 983 value_(value), |
852 overwrite_mode_(overwrite_mode) { | 984 overwrite_mode_(overwrite_mode) { |
(...skipping 179 matching lines...)
1032 Result right(generator()); | 1164 Result right(generator()); |
1033 enter()->Bind(&right); | 1165 enter()->Bind(&right); |
1034 generator()->frame()->Push(value_); | 1166 generator()->frame()->Push(value_); |
1035 generator()->frame()->Push(&right); | 1167 generator()->frame()->Push(&right); |
1036 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED); | 1168 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED); |
1037 Result answer = generator()->frame()->CallStub(&igostub, 2); | 1169 Result answer = generator()->frame()->CallStub(&igostub, 2); |
1038 exit_.Jump(&answer); | 1170 exit_.Jump(&answer); |
1039 } | 1171 } |
1040 | 1172 |
1041 | 1173 |
1042 void CodeGenerator::SmiOperation(Token::Value op, | 1174 void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, |
1043 StaticType* type, | 1175 Result* operand, |
1044 Handle<Object> value, | 1176 Handle<Object> value, |
1045 bool reversed, | 1177 StaticType* type, |
1046 OverwriteMode overwrite_mode) { | 1178 bool reversed, |
| 1179 OverwriteMode overwrite_mode) { |
1047 // NOTE: This is an attempt to inline (a bit) more of the code for | 1180 // NOTE: This is an attempt to inline (a bit) more of the code for |
1048 // some possible smi operations (like + and -) when (at least) one | 1181 // some possible smi operations (like + and -) when (at least) one |
1049 // of the operands is a literal smi. With this optimization, the | 1182 // of the operands is a constant smi. |
1050 // performance of the system is increased by ~15%, and the generated | 1183 // Consumes the argument "operand". |
1051 // code size is increased by ~1% (measured on a combination of | |
1052 // different benchmarks). | |
1053 | 1184 |
1054 // TODO(199): Optimize some special cases of operations involving a | 1185 // TODO(199): Optimize some special cases of operations involving a |
1055 // smi literal (multiply by 2, shift by 0, etc.). | 1186 // smi literal (multiply by 2, shift by 0, etc.). |
| 1187 if (IsUnsafeSmi(value)) { |
| 1188 Result unsafe_operand(value, this); |
| 1189 if (reversed) { |
| 1190 LikelySmiBinaryOperation(op, &unsafe_operand, operand, |
| 1191 overwrite_mode); |
| 1192 } else { |
| 1193 LikelySmiBinaryOperation(op, operand, &unsafe_operand, |
| 1194 overwrite_mode); |
| 1195 } |
| 1196 ASSERT(!operand->is_valid()); |
| 1197 return; |
| 1198 } |
1056 | 1199 |
1057 // Get the literal value. | 1200 // Get the literal value. |
1058 Smi* smi_value = Smi::cast(*value); | 1201 Smi* smi_value = Smi::cast(*value); |
1059 int int_value = smi_value->value(); | 1202 int int_value = smi_value->value(); |
1060 ASSERT(is_intn(int_value, kMaxSmiInlinedBits)); | |
1061 | 1203 |
1062 switch (op) { | 1204 switch (op) { |
1063 case Token::ADD: { | 1205 case Token::ADD: { |
1064 DeferredCode* deferred = NULL; | 1206 DeferredCode* deferred = NULL; |
1065 if (!reversed) { | 1207 if (reversed) { |
1066 deferred = new DeferredInlineSmiAdd(this, smi_value, overwrite_mode); | |
1067 } else { | |
1068 deferred = new DeferredInlineSmiAddReversed(this, smi_value, | 1208 deferred = new DeferredInlineSmiAddReversed(this, smi_value, |
1069 overwrite_mode); | 1209 overwrite_mode); |
| 1210 } else { |
| 1211 deferred = new DeferredInlineSmiAdd(this, smi_value, overwrite_mode); |
1070 } | 1212 } |
1071 Result operand = frame_->Pop(); | 1213 operand->ToRegister(); |
1072 operand.ToRegister(); | 1214 frame_->Spill(operand->reg()); |
1073 frame_->Spill(operand.reg()); | 1215 __ add(Operand(operand->reg()), Immediate(value)); |
1074 __ add(Operand(operand.reg()), Immediate(value)); | 1216 deferred->enter()->Branch(overflow, operand, not_taken); |
1075 deferred->enter()->Branch(overflow, &operand, not_taken); | 1217 __ test(operand->reg(), Immediate(kSmiTagMask)); |
1076 __ test(operand.reg(), Immediate(kSmiTagMask)); | 1218 deferred->enter()->Branch(not_zero, operand, not_taken); |
1077 deferred->enter()->Branch(not_zero, &operand, not_taken); | 1219 deferred->BindExit(operand); |
1078 deferred->BindExit(&operand); | 1220 frame_->Push(operand); |
1079 frame_->Push(&operand); | |
1080 break; | 1221 break; |
1081 } | 1222 } |
1082 | 1223 |
1083 case Token::SUB: { | 1224 case Token::SUB: { |
1084 DeferredCode* deferred = NULL; | 1225 DeferredCode* deferred = NULL; |
1085 Result operand = frame_->Pop(); | |
1086 Result answer(this); // Only allocated a new register if reversed. | 1226 Result answer(this); // Only allocated a new register if reversed. |
1087 if (!reversed) { | 1227 if (reversed) { |
1088 operand.ToRegister(); | |
1089 frame_->Spill(operand.reg()); | |
1090 deferred = new DeferredInlineSmiSub(this, | |
1091 smi_value, | |
1092 overwrite_mode); | |
1093 __ sub(Operand(operand.reg()), Immediate(value)); | |
1094 answer = operand; | |
1095 } else { | |
1096 answer = allocator()->Allocate(); | 1228 answer = allocator()->Allocate(); |
1097 ASSERT(answer.is_valid()); | 1229 ASSERT(answer.is_valid()); |
1098 deferred = new DeferredInlineSmiSubReversed(this, | 1230 deferred = new DeferredInlineSmiSubReversed(this, |
1099 smi_value, | 1231 smi_value, |
1100 overwrite_mode); | 1232 overwrite_mode); |
1101 __ mov(answer.reg(), Immediate(value)); | 1233 __ Set(answer.reg(), Immediate(value)); |
1102 if (operand.is_register()) { | 1234 if (operand->is_register()) { |
1103 __ sub(answer.reg(), Operand(operand.reg())); | 1235 __ sub(answer.reg(), Operand(operand->reg())); |
1104 } else { | 1236 } else { |
1105 ASSERT(operand.is_constant()); | 1237 ASSERT(operand->is_constant()); |
1106 __ sub(Operand(answer.reg()), Immediate(operand.handle())); | 1238 __ sub(Operand(answer.reg()), Immediate(operand->handle())); |
1107 } | 1239 } |
| 1240 } else { |
| 1241 operand->ToRegister(); |
| 1242 frame_->Spill(operand->reg()); |
| 1243 deferred = new DeferredInlineSmiSub(this, |
| 1244 smi_value, |
| 1245 overwrite_mode); |
| 1246 __ sub(Operand(operand->reg()), Immediate(value)); |
| 1247 answer = *operand; |
1108 } | 1248 } |
1109 deferred->enter()->Branch(overflow, &operand, not_taken); | 1249 deferred->enter()->Branch(overflow, operand, not_taken); |
1110 __ test(answer.reg(), Immediate(kSmiTagMask)); | 1250 __ test(answer.reg(), Immediate(kSmiTagMask)); |
1111 deferred->enter()->Branch(not_zero, &operand, not_taken); | 1251 deferred->enter()->Branch(not_zero, operand, not_taken); |
1112 operand.Unuse(); | 1252 operand->Unuse(); |
1113 deferred->BindExit(&answer); | 1253 deferred->BindExit(&answer); |
1114 frame_->Push(&answer); | 1254 frame_->Push(&answer); |
1115 break; | 1255 break; |
1116 } | 1256 } |
1117 | 1257 |
1118 case Token::SAR: { | 1258 case Token::SAR: { |
1119 if (reversed) { | 1259 if (reversed) { |
1120 Result top = frame_->Pop(); | 1260 Result constant_operand(value, this); |
1121 frame_->Push(value); | 1261 LikelySmiBinaryOperation(op, &constant_operand, operand, |
1122 frame_->Push(&top); | 1262 overwrite_mode); |
1123 GenericBinaryOperation(op, type, overwrite_mode); | |
1124 } else { | 1263 } else { |
1125 // Only the least significant 5 bits of the shift value are used. | 1264 // Only the least significant 5 bits of the shift value are used. |
1126 // In the slow case, this masking is done inside the runtime call. | 1265 // In the slow case, this masking is done inside the runtime call. |
1127 int shift_value = int_value & 0x1f; | 1266 int shift_value = int_value & 0x1f; |
1128 DeferredCode* deferred = | 1267 DeferredCode* deferred = |
1129 new DeferredInlineSmiOperation(this, Token::SAR, smi_value, | 1268 new DeferredInlineSmiOperation(this, Token::SAR, smi_value, |
1130 overwrite_mode); | 1269 overwrite_mode); |
1131 Result result = frame_->Pop(); | 1270 operand->ToRegister(); |
1132 result.ToRegister(); | 1271 __ test(operand->reg(), Immediate(kSmiTagMask)); |
1133 __ test(result.reg(), Immediate(kSmiTagMask)); | 1272 deferred->enter()->Branch(not_zero, operand, not_taken); |
1134 deferred->enter()->Branch(not_zero, &result, not_taken); | 1273 if (shift_value > 0) { |
1135 frame_->Spill(result.reg()); | 1274 frame_->Spill(operand->reg()); |
1136 __ sar(result.reg(), shift_value); | 1275 __ sar(operand->reg(), shift_value); |
1137 __ and_(result.reg(), ~kSmiTagMask); | 1276 __ and_(operand->reg(), ~kSmiTagMask); |
1138 deferred->BindExit(&result); | 1277 } |
1139 frame_->Push(&result); | 1278 deferred->BindExit(operand); |
| 1279 frame_->Push(operand); |
1140 } | 1280 } |
1141 break; | 1281 break; |
1142 } | 1282 } |
1143 | 1283 |
1144 case Token::SHR: { | 1284 case Token::SHR: { |
1145 if (reversed) { | 1285 if (reversed) { |
1146 Result top = frame_->Pop(); | 1286 Result constant_operand(value, this); |
1147 frame_->Push(value); | 1287 LikelySmiBinaryOperation(op, &constant_operand, operand, |
1148 frame_->Push(&top); | 1288 overwrite_mode); |
1149 GenericBinaryOperation(op, type, overwrite_mode); | |
1150 } else { | 1289 } else { |
1151 // Only the least significant 5 bits of the shift value are used. | 1290 // Only the least significant 5 bits of the shift value are used. |
1152 // In the slow case, this masking is done inside the runtime call. | 1291 // In the slow case, this masking is done inside the runtime call. |
1153 int shift_value = int_value & 0x1f; | 1292 int shift_value = int_value & 0x1f; |
1154 DeferredCode* deferred = | 1293 DeferredCode* deferred = |
1155 new DeferredInlineSmiOperation(this, Token::SHR, smi_value, | 1294 new DeferredInlineSmiOperation(this, Token::SHR, smi_value, |
1156 overwrite_mode); | 1295 overwrite_mode); |
1157 Result operand = frame_->Pop(); | 1296 operand->ToRegister(); |
1158 operand.ToRegister(); | 1297 __ test(operand->reg(), Immediate(kSmiTagMask)); |
1159 __ test(operand.reg(), Immediate(kSmiTagMask)); | 1298 deferred->enter()->Branch(not_zero, operand, not_taken); |
1160 deferred->enter()->Branch(not_zero, &operand, not_taken); | |
1161 Result answer = allocator()->Allocate(); | 1299 Result answer = allocator()->Allocate(); |
1162 ASSERT(answer.is_valid()); | 1300 ASSERT(answer.is_valid()); |
1163 __ mov(answer.reg(), Operand(operand.reg())); | 1301 __ mov(answer.reg(), operand->reg()); |
1164 __ sar(answer.reg(), kSmiTagSize); | 1302 __ sar(answer.reg(), kSmiTagSize); |
1165 __ shr(answer.reg(), shift_value); | 1303 __ shr(answer.reg(), shift_value); |
1166 // A negative Smi shifted right two is in the positive Smi range. | 1304 // A negative Smi shifted right two is in the positive Smi range. |
1167 if (shift_value < 2) { | 1305 if (shift_value < 2) { |
1168 __ test(answer.reg(), Immediate(0xc0000000)); | 1306 __ test(answer.reg(), Immediate(0xc0000000)); |
1169 deferred->enter()->Branch(not_zero, &operand, not_taken); | 1307 deferred->enter()->Branch(not_zero, operand, not_taken); |
1170 } | 1308 } |
1171 operand.Unuse(); | 1309 operand->Unuse(); |
1172 ASSERT(kSmiTagSize == times_2); // Adjust the code if not true. | 1310 ASSERT(kSmiTagSize == times_2); // Adjust the code if not true. |
1173 __ lea(answer.reg(), | 1311 __ lea(answer.reg(), |
1174 Operand(answer.reg(), answer.reg(), times_1, kSmiTag)); | 1312 Operand(answer.reg(), answer.reg(), times_1, kSmiTag)); |
1175 deferred->BindExit(&answer); | 1313 deferred->BindExit(&answer); |
1176 frame_->Push(&answer); | 1314 frame_->Push(&answer); |
1177 } | 1315 } |
1178 break; | 1316 break; |
1179 } | 1317 } |
1180 | 1318 |
1181 case Token::SHL: { | 1319 case Token::SHL: { |
1182 if (reversed) { | 1320 if (reversed) { |
1183 Result top = frame_->Pop(); | 1321 Result constant_operand(value, this); |
1184 frame_->Push(value); | 1322 LikelySmiBinaryOperation(op, &constant_operand, operand, |
1185 frame_->Push(&top); | 1323 overwrite_mode); |
1186 GenericBinaryOperation(op, type, overwrite_mode); | |
1187 } else { | 1324 } else { |
1188 // Only the least significant 5 bits of the shift value are used. | 1325 // Only the least significant 5 bits of the shift value are used. |
1189 // In the slow case, this masking is done inside the runtime call. | 1326 // In the slow case, this masking is done inside the runtime call. |
1190 int shift_value = int_value & 0x1f; | 1327 int shift_value = int_value & 0x1f; |
1191 DeferredCode* deferred = | 1328 DeferredCode* deferred = |
1192 new DeferredInlineSmiOperation(this, Token::SHL, smi_value, | 1329 new DeferredInlineSmiOperation(this, Token::SHL, smi_value, |
1193 overwrite_mode); | 1330 overwrite_mode); |
1194 Result operand = frame_->Pop(); | 1331 operand->ToRegister(); |
1195 operand.ToRegister(); | 1332 __ test(operand->reg(), Immediate(kSmiTagMask)); |
1196 __ test(operand.reg(), Immediate(kSmiTagMask)); | 1333 deferred->enter()->Branch(not_zero, operand, not_taken); |
1197 deferred->enter()->Branch(not_zero, &operand, not_taken); | |
1198 Result answer = allocator()->Allocate(); | 1334 Result answer = allocator()->Allocate(); |
1199 ASSERT(answer.is_valid()); | 1335 ASSERT(answer.is_valid()); |
1200 __ mov(answer.reg(), Operand(operand.reg())); | 1336 __ mov(answer.reg(), operand->reg()); |
1201 ASSERT(kSmiTag == 0); // adjust code if not the case | 1337 ASSERT(kSmiTag == 0); // adjust code if not the case |
1202 // We do no shifts, only the Smi conversion, if shift_value is 1. | 1338 // We do no shifts, only the Smi conversion, if shift_value is 1. |
1203 if (shift_value == 0) { | 1339 if (shift_value == 0) { |
1204 __ sar(answer.reg(), kSmiTagSize); | 1340 __ sar(answer.reg(), kSmiTagSize); |
1205 } else if (shift_value > 1) { | 1341 } else if (shift_value > 1) { |
1206 __ shl(answer.reg(), shift_value - 1); | 1342 __ shl(answer.reg(), shift_value - 1); |
1207 } | 1343 } |
1208 // Convert int result to Smi, checking that it is in int range. | 1344 // Convert int result to Smi, checking that it is in int range. |
1209 ASSERT(kSmiTagSize == times_2); // adjust code if not the case | 1345 ASSERT(kSmiTagSize == times_2); // adjust code if not the case |
1210 __ add(answer.reg(), Operand(answer.reg())); | 1346 __ add(answer.reg(), Operand(answer.reg())); |
1211 deferred->enter()->Branch(overflow, &operand, not_taken); | 1347 deferred->enter()->Branch(overflow, operand, not_taken); |
1212 operand.Unuse(); | 1348 operand->Unuse(); |
1213 deferred->BindExit(&answer); | 1349 deferred->BindExit(&answer); |
1214 frame_->Push(&answer); | 1350 frame_->Push(&answer); |
1215 } | 1351 } |
1216 break; | 1352 break; |
1217 } | 1353 } |
1218 | 1354 |
1219 case Token::BIT_OR: | 1355 case Token::BIT_OR: |
1220 case Token::BIT_XOR: | 1356 case Token::BIT_XOR: |
1221 case Token::BIT_AND: { | 1357 case Token::BIT_AND: { |
1222 DeferredCode* deferred = NULL; | 1358 DeferredCode* deferred = NULL; |
1223 if (!reversed) { | 1359 if (reversed) { |
| 1360 deferred = new DeferredInlineSmiOperationReversed(this, op, smi_value, |
| 1361 overwrite_mode); |
| 1362 } else { |
1224 deferred = new DeferredInlineSmiOperation(this, op, smi_value, | 1363 deferred = new DeferredInlineSmiOperation(this, op, smi_value, |
1225 overwrite_mode); | 1364 overwrite_mode); |
1226 } else { | |
1227 deferred = new DeferredInlineSmiOperationReversed(this, op, smi_value, | |
1228 overwrite_mode); | |
1229 } | 1365 } |
1230 Result operand = frame_->Pop(); | 1366 operand->ToRegister(); |
1231 operand.ToRegister(); | 1367 __ test(operand->reg(), Immediate(kSmiTagMask)); |
1232 __ test(operand.reg(), Immediate(kSmiTagMask)); | 1368 deferred->enter()->Branch(not_zero, operand, not_taken); |
1233 deferred->enter()->Branch(not_zero, &operand, not_taken); | 1369 frame_->Spill(operand->reg()); |
1234 frame_->Spill(operand.reg()); | |
1235 if (op == Token::BIT_AND) { | 1370 if (op == Token::BIT_AND) { |
1236 if (int_value == 0) { | 1371 if (int_value == 0) { |
1237 __ xor_(Operand(operand.reg()), operand.reg()); | 1372 __ xor_(Operand(operand->reg()), operand->reg()); |
1238 } else { | 1373 } else { |
1239 __ and_(Operand(operand.reg()), Immediate(value)); | 1374 __ and_(Operand(operand->reg()), Immediate(value)); |
1240 } | 1375 } |
1241 } else if (op == Token::BIT_XOR) { | 1376 } else if (op == Token::BIT_XOR) { |
1242 if (int_value != 0) { | 1377 if (int_value != 0) { |
1243 __ xor_(Operand(operand.reg()), Immediate(value)); | 1378 __ xor_(Operand(operand->reg()), Immediate(value)); |
1244 } | 1379 } |
1245 } else { | 1380 } else { |
1246 ASSERT(op == Token::BIT_OR); | 1381 ASSERT(op == Token::BIT_OR); |
1247 if (int_value != 0) { | 1382 if (int_value != 0) { |
1248 __ or_(Operand(operand.reg()), Immediate(value)); | 1383 __ or_(Operand(operand->reg()), Immediate(value)); |
1249 } | 1384 } |
1250 } | 1385 } |
1251 deferred->BindExit(&operand); | 1386 deferred->BindExit(operand); |
1252 frame_->Push(&operand); | 1387 frame_->Push(operand); |
1253 break; | 1388 break; |
1254 } | 1389 } |
1255 | 1390 |
1256 default: { | 1391 default: { |
1257 if (!reversed) { | 1392 Result constant_operand(value, this); |
1258 frame_->Push(value); | 1393 if (reversed) { |
| 1394 LikelySmiBinaryOperation(op, &constant_operand, operand, |
| 1395 overwrite_mode); |
1259 } else { | 1396 } else { |
1260 Result top = frame_->Pop(); | 1397 LikelySmiBinaryOperation(op, operand, &constant_operand, |
1261 frame_->Push(value); | 1398 overwrite_mode); |
1262 frame_->Push(&top); | |
1263 } | 1399 } |
1264 GenericBinaryOperation(op, type, overwrite_mode); | |
1265 break; | 1400 break; |
1266 } | 1401 } |
1267 } | 1402 } |
| 1403 ASSERT(!operand->is_valid()); |
1268 } | 1404 } |
1269 | 1405 |
1270 | 1406 |
1271 class CompareStub: public CodeStub { | 1407 class CompareStub: public CodeStub { |
1272 public: | 1408 public: |
1273 CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { } | 1409 CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { } |
1274 | 1410 |
1275 void Generate(MacroAssembler* masm); | 1411 void Generate(MacroAssembler* masm); |
1276 | 1412 |
1277 private: | 1413 private: |
(...skipping 2269 matching lines...)
3547 // assign the exception value to the catch variable. | 3683 // assign the exception value to the catch variable. |
3548 Comment cmnt(masm_, "[ CatchExtensionObject"); | 3684 Comment cmnt(masm_, "[ CatchExtensionObject"); |
3549 Load(node->key()); | 3685 Load(node->key()); |
3550 Load(node->value()); | 3686 Load(node->value()); |
3551 Result result = | 3687 Result result = |
3552 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2); | 3688 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2); |
3553 frame_->Push(&result); | 3689 frame_->Push(&result); |
3554 } | 3690 } |
3555 | 3691 |
3556 | 3692 |
3557 bool CodeGenerator::IsInlineSmi(Literal* literal) { | |
3558 if (literal == NULL || !literal->handle()->IsSmi()) return false; | |
3559 int int_value = Smi::cast(*literal->handle())->value(); | |
3560 return is_intn(int_value, kMaxSmiInlinedBits); | |
3561 } | |
3562 | |
3563 | |
3564 void CodeGenerator::VisitAssignment(Assignment* node) { | 3693 void CodeGenerator::VisitAssignment(Assignment* node) { |
3565 Comment cmnt(masm_, "[ Assignment"); | 3694 Comment cmnt(masm_, "[ Assignment"); |
3566 CodeForStatementPosition(node); | 3695 CodeForStatementPosition(node); |
3567 | 3696 |
3568 { Reference target(this, node->target()); | 3697 { Reference target(this, node->target()); |
3569 if (target.is_illegal()) { | 3698 if (target.is_illegal()) { |
3570 // Fool the virtual frame into thinking that we left the assignment's | 3699 // Fool the virtual frame into thinking that we left the assignment's |
3571 // value on the frame. | 3700 // value on the frame. |
3572 frame_->Push(Smi::FromInt(0)); | 3701 frame_->Push(Smi::FromInt(0)); |
3573 return; | 3702 return; |
(...skipping 10 matching lines...)
3584 // There are two cases where the target is not read in the right hand | 3713 // There are two cases where the target is not read in the right hand |
3585 // side, that are easy to test for: the right hand side is a literal, | 3714 // side, that are easy to test for: the right hand side is a literal, |
3586 // or the right hand side is a different variable. TakeValue invalidates | 3715 // or the right hand side is a different variable. TakeValue invalidates |
3587 // the target, with an implicit promise that it will be written to again | 3716 // the target, with an implicit promise that it will be written to again |
3588 // before it is read. | 3717 // before it is read. |
3589 if (literal != NULL || (right_var != NULL && right_var != var)) { | 3718 if (literal != NULL || (right_var != NULL && right_var != var)) { |
3590 target.TakeValue(NOT_INSIDE_TYPEOF); | 3719 target.TakeValue(NOT_INSIDE_TYPEOF); |
3591 } else { | 3720 } else { |
3592 target.GetValue(NOT_INSIDE_TYPEOF); | 3721 target.GetValue(NOT_INSIDE_TYPEOF); |
3593 } | 3722 } |
3594 if (IsInlineSmi(literal)) { | 3723 Load(node->value()); |
3595 SmiOperation(node->binary_op(), node->type(), literal->handle(), false, | 3724 GenericBinaryOperation(node->binary_op(), node->type()); |
3596 NO_OVERWRITE); | |
3597 } else { | |
3598 Load(node->value()); | |
3599 GenericBinaryOperation(node->binary_op(), node->type()); | |
3600 } | |
3601 } | 3725 } |
3602 | 3726 |
3603 if (var != NULL && | 3727 if (var != NULL && |
3604 var->mode() == Variable::CONST && | 3728 var->mode() == Variable::CONST && |
3605 node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) { | 3729 node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) { |
3606 // Assignment ignored - leave the value on the stack. | 3730 // Assignment ignored - leave the value on the stack. |
3607 } else { | 3731 } else { |
3608 CodeForSourcePosition(node->position()); | 3732 CodeForSourcePosition(node->position()); |
3609 if (node->op() == Token::INIT_CONST) { | 3733 if (node->op() == Token::INIT_CONST) { |
3610 // Dynamic constant initializations must use the function context | 3734 // Dynamic constant initializations must use the function context |
(...skipping 1134 matching lines...)
4745 // never return a constant/immutable object. | 4869 // never return a constant/immutable object. |
4746 OverwriteMode overwrite_mode = NO_OVERWRITE; | 4870 OverwriteMode overwrite_mode = NO_OVERWRITE; |
4747 if (node->left()->AsBinaryOperation() != NULL && | 4871 if (node->left()->AsBinaryOperation() != NULL && |
4748 node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) { | 4872 node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) { |
4749 overwrite_mode = OVERWRITE_LEFT; | 4873 overwrite_mode = OVERWRITE_LEFT; |
4750 } else if (node->right()->AsBinaryOperation() != NULL && | 4874 } else if (node->right()->AsBinaryOperation() != NULL && |
4751 node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) { | 4875 node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) { |
4752 overwrite_mode = OVERWRITE_RIGHT; | 4876 overwrite_mode = OVERWRITE_RIGHT; |
4753 } | 4877 } |
4754 | 4878 |
4755 // Optimize for the case where (at least) one of the expressions | 4879 Load(node->left()); |
4756 // is a literal small integer. | 4880 Load(node->right()); |
4757 Literal* lliteral = node->left()->AsLiteral(); | 4881 GenericBinaryOperation(node->op(), node->type(), overwrite_mode); |
4758 Literal* rliteral = node->right()->AsLiteral(); | |
4759 | |
4760 if (IsInlineSmi(rliteral)) { | |
4761 Load(node->left()); | |
4762 SmiOperation(node->op(), node->type(), rliteral->handle(), false, | |
4763 overwrite_mode); | |
4764 } else if (IsInlineSmi(lliteral)) { | |
4765 Load(node->right()); | |
4766 SmiOperation(node->op(), node->type(), lliteral->handle(), true, | |
4767 overwrite_mode); | |
4768 } else { | |
4769 Load(node->left()); | |
4770 Load(node->right()); | |
4771 GenericBinaryOperation(node->op(), node->type(), overwrite_mode); | |
4772 } | |
4773 } | 4882 } |
4774 } | 4883 } |
4775 | 4884 |
4776 | 4885 |
4777 void CodeGenerator::VisitThisFunction(ThisFunction* node) { | 4886 void CodeGenerator::VisitThisFunction(ThisFunction* node) { |
4778 frame_->PushFunction(); | 4887 frame_->PushFunction(); |
4779 } | 4888 } |
4780 | 4889 |
4781 | 4890 |
4782 class InstanceofStub: public CodeStub { | 4891 class InstanceofStub: public CodeStub { |
(...skipping 546 matching lines...)
5329 __ ret(1 * kPointerSize); | 5438 __ ret(1 * kPointerSize); |
5330 __ bind(&false_result); | 5439 __ bind(&false_result); |
5331 __ mov(eax, 0); | 5440 __ mov(eax, 0); |
5332 __ ret(1 * kPointerSize); | 5441 __ ret(1 * kPointerSize); |
5333 } | 5442 } |
5334 | 5443 |
5335 | 5444 |
5336 #undef __ | 5445 #undef __ |
5337 #define __ masm_-> | 5446 #define __ masm_-> |
5338 | 5447 |
5339 Result DeferredInlineBinaryOperation::GenerateInlineCode() { | 5448 Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left, |
| 5449 Result* right) { |
5340 // Perform fast-case smi code for the operation (left <op> right) and | 5450 // Perform fast-case smi code for the operation (left <op> right) and |
5341 // returns the result in a Result. | 5451 // returns the result in a Result. |
5342 // If any fast-case tests fail, it jumps to the slow-case deferred code, | 5452 // If any fast-case tests fail, it jumps to the slow-case deferred code, |
5343 // which calls the binary operation stub, with the arguments (in registers) | 5453 // which calls the binary operation stub, with the arguments (in registers) |
5344 // on top of the frame. | 5454 // on top of the frame. |
| 5455 // Consumes its arguments (sets left and right to invalid and frees their |
| 5456 // registers). |
5345 | 5457 |
5346 VirtualFrame* frame = generator()->frame(); | 5458 left->ToRegister(); |
5347 // If operation is division or modulus, ensure | 5459 right->ToRegister(); |
5348 // that the special registers needed are free. | 5460 // A newly allocated register answer is used to hold the answer. |
5349 Result reg_eax(generator()); // Valid only if op is DIV or MOD. | 5461 // The registers containing left and right are not modified in |
5350 Result reg_edx(generator()); // Valid only if op is DIV or MOD. | 5462 // most cases, so they usually don't need to be spilled in the fast case. |
5351 if (op_ == Token::DIV || op_ == Token::MOD) { | 5463 Result answer = generator()->allocator()->Allocate(); |
5352 reg_eax = generator()->allocator()->Allocate(eax); | |
5353 ASSERT(reg_eax.is_valid()); | |
5354 reg_edx = generator()->allocator()->Allocate(edx); | |
5355 ASSERT(reg_edx.is_valid()); | |
5356 } | |
5357 | 5464 |
5358 Result right = frame->Pop(); | |
5359 Result left = frame->Pop(); | |
5360 left.ToRegister(); | |
5361 right.ToRegister(); | |
5362 // Answer is used to compute the answer, leaving left and right unchanged. | |
5363 // It is also returned from this function. | |
5364 // It is used as a temporary register in a few places, as well. | |
5365 Result answer(generator()); | |
5366 if (reg_eax.is_valid()) { | |
5367 answer = reg_eax; | |
5368 } else { | |
5369 answer = generator()->allocator()->Allocate(); | |
5370 } | |
5371 ASSERT(answer.is_valid()); | 5465 ASSERT(answer.is_valid()); |
5372 // Perform the smi check. | 5466 // Perform the smi check. |
5373 __ mov(answer.reg(), Operand(left.reg())); | 5467 __ mov(answer.reg(), left->reg()); |
5374 __ or_(answer.reg(), Operand(right.reg())); | 5468 __ or_(answer.reg(), Operand(right->reg())); |
5375 ASSERT(kSmiTag == 0); // adjust zero check if not the case | 5469 ASSERT(kSmiTag == 0); // adjust zero check if not the case |
5376 __ test(answer.reg(), Immediate(kSmiTagMask)); | 5470 __ test(answer.reg(), Immediate(kSmiTagMask)); |
5377 enter()->Branch(not_zero, &left, &right, not_taken); | 5471 enter()->Branch(not_zero, left, right, not_taken); |
5378 | 5472 |
5379 // All operations start by copying the left argument into answer. | 5473 // All operations start by copying the left argument into answer. |
5380 __ mov(answer.reg(), Operand(left.reg())); | 5474 __ mov(answer.reg(), left->reg()); |
5381 switch (op_) { | 5475 switch (op_) { |
5382 case Token::ADD: | 5476 case Token::ADD: |
5383 __ add(answer.reg(), Operand(right.reg())); // add optimistically | 5477 __ add(answer.reg(), Operand(right->reg())); // add optimistically |
5384 enter()->Branch(overflow, &left, &right, not_taken); | 5478 enter()->Branch(overflow, left, right, not_taken); |
5385 break; | 5479 break; |
5386 | 5480 |
5387 case Token::SUB: | 5481 case Token::SUB: |
5388 __ sub(answer.reg(), Operand(right.reg())); // subtract optimistically | 5482 __ sub(answer.reg(), Operand(right->reg())); // subtract optimistically |
5389 enter()->Branch(overflow, &left, &right, not_taken); | 5483 enter()->Branch(overflow, left, right, not_taken); |
5390 break; | 5484 break; |
5391 | 5485 |
5392 | |
5393 case Token::MUL: { | 5486 case Token::MUL: { |
5394 // If the smi tag is 0 we can just leave the tag on one operand. | 5487 // If the smi tag is 0 we can just leave the tag on one operand. |
5395 ASSERT(kSmiTag == 0); // adjust code below if not the case | 5488 ASSERT(kSmiTag == 0); // adjust code below if not the case |
5396 // Remove tag from the left operand (but keep sign). | 5489 // Remove tag from the left operand (but keep sign). |
5397 // Left hand operand has been copied into answer. | 5490 // Left hand operand has been copied into answer. |
5398 __ sar(answer.reg(), kSmiTagSize); | 5491 __ sar(answer.reg(), kSmiTagSize); |
5399 // Do multiplication of smis, leaving result in answer. | 5492 // Do multiplication of smis, leaving result in answer. |
5400 __ imul(answer.reg(), Operand(right.reg())); | 5493 __ imul(answer.reg(), Operand(right->reg())); |
5401 // Go slow on overflows. | 5494 // Go slow on overflows. |
5402 enter()->Branch(overflow, &left, &right, not_taken); | 5495 enter()->Branch(overflow, left, right, not_taken); |
5403 // Check for negative zero result. If product is zero, | 5496 // Check for negative zero result. If product is zero, |
5404 // and one argument is negative, go to slow case. | 5497 // and one argument is negative, go to slow case. |
5405 // The frame is unchanged in this block, so local control flow can | 5498 // The frame is unchanged in this block, so local control flow can |
5406 // use a Label rather than a JumpTarget. | 5499 // use a Label rather than a JumpTarget. |
5407 Label non_zero_result; | 5500 Label non_zero_result; |
5408 __ test(answer.reg(), Operand(answer.reg())); | 5501 __ test(answer.reg(), Operand(answer.reg())); |
5409 __ j(not_zero, &non_zero_result, taken); | 5502 __ j(not_zero, &non_zero_result, taken); |
5410 __ mov(answer.reg(), Operand(left.reg())); | 5503 __ mov(answer.reg(), left->reg()); |
5411 __ or_(answer.reg(), Operand(right.reg())); | 5504 __ or_(answer.reg(), Operand(right->reg())); |
5412 enter()->Branch(negative, &left, &right, not_taken); | 5505 enter()->Branch(negative, left, right, not_taken); |
5413 __ xor_(answer.reg(), Operand(answer.reg())); // Positive 0 is correct. | 5506 __ xor_(answer.reg(), Operand(answer.reg())); // Positive 0 is correct. |
5414 __ bind(&non_zero_result); | 5507 __ bind(&non_zero_result); |
5415 break; | 5508 break; |
5416 } | 5509 } |
5417 | 5510 |
5418 case Token::DIV: { | 5511 case Token::DIV: // Fall through. |
5419 // Left hand argument has been copied into answer, which is eax. | 5512 case Token::MOD: { |
| 5513 // Div and mod use the registers eax and edx. Left and right must |
| 5514 // be preserved, because the original operands are needed if we switch |
| 5515 // to the slow case. Move them if either is in eax or edx. |
| 5516 // The Result answer should be changed into an alias for eax. |
| 5517 // Precondition: |
| 5518 // The Results left and right are valid. They may be the same register, |
| 5519 // and may be unspilled. The Result answer is valid and is distinct |
| 5520 // from left and right, and is spilled. |
| 5521 // The value in left is copied to answer. |
| 5522 |
| 5523 Result reg_eax = generator()->allocator()->Allocate(eax); |
| 5524 Result reg_edx = generator()->allocator()->Allocate(edx); |
| 5525 // These allocations may have failed, if one of left, right, or answer |
| 5526 // is in register eax or edx. |
| 5527 bool left_copied_to_eax = false; // We will make sure this becomes true. |
| 5528 |
| 5529 // Part 1: Get eax |
| 5530 if (answer.reg().is(eax)) { |
| 5531 reg_eax = answer; |
| 5532 left_copied_to_eax = true; |
| 5533 } else if (right->reg().is(eax) || left->reg().is(eax)) { |
| 5534 // We need a non-edx register to move one or both of left and right to. |
| 5535 // We use answer if it is not edx, otherwise we allocate one. |
| 5536 if (answer.reg().is(edx)) { |
| 5537 reg_edx = answer; |
| 5538 answer = generator()->allocator()->Allocate(); |
| 5539 ASSERT(answer.is_valid()); |
| 5540 } |
| 5541 |
| 5542 if (left->reg().is(eax)) { |
| 5543 reg_eax = *left; |
| 5544 left_copied_to_eax = true; |
| 5545 *left = answer; |
| 5546 } |
| 5547 if (right->reg().is(eax)) { |
| 5548 reg_eax = *right; |
| 5549 *right = answer; |
| 5550 } |
| 5551 __ mov(answer.reg(), eax); |
| 5552 } |
| 5553 // End of Part 1. |
| 5554 // reg_eax is valid, and neither left nor right is in eax. |
| 5555 ASSERT(reg_eax.is_valid()); |
| 5556 ASSERT(!left->reg().is(eax)); |
| 5557 ASSERT(!right->reg().is(eax)); |
| 5558 |
| 5559 // Part 2: Get edx |
| 5560 // reg_edx is invalid if and only if either left, right, |
| 5561 // or answer is in edx. If edx is valid, then either edx |
| 5562 // was free, or it was answer, but answer was reallocated. |
| 5563 if (answer.reg().is(edx)) { |
| 5564 reg_edx = answer; |
| 5565 } else if (right->reg().is(edx) || left->reg().is(edx)) { |
| 5566 // Is answer used? |
| 5567 if (answer.reg().is(eax) || answer.reg().is(left->reg()) || |
| 5568 answer.reg().is(right->reg())) { |
| 5569 answer = generator()->allocator()->Allocate(); |
| 5570 ASSERT(answer.is_valid()); // We cannot hit both Allocate() calls. |
| 5571 } |
| 5572 if (left->reg().is(edx)) { |
| 5573 reg_edx = *left; |
| 5574 *left = answer; |
| 5575 } |
| 5576 if (right->reg().is(edx)) { |
| 5577 reg_edx = *right; |
| 5578 *right = answer; |
| 5579 } |
| 5580 __ mov(answer.reg(), edx); |
| 5581 } |
| 5582 // End of Part 2 |
| 5583 ASSERT(reg_edx.is_valid()); |
| 5584 ASSERT(!left->reg().is(edx)); |
| 5585 ASSERT(!right->reg().is(edx)); |
| 5586 |
| 5587 answer = reg_eax; // May free answer, if it was never used. |
| 5588 generator()->frame()->Spill(eax); |
| 5589 if (!left_copied_to_eax) { |
| 5590 __ mov(eax, left->reg()); |
| 5591 left_copied_to_eax = true; |
| 5592 } |
| 5593 generator()->frame()->Spill(edx); |
| 5594 |
| 5595 // Postcondition: |
| 5596 // reg_eax, reg_edx are valid, correct, and spilled. |
| 5597 // reg_eax contains the value originally in left |
| 5598 // left and right are not eax or edx. They may or may not be |
| 5599 // spilled or distinct. |
| 5600 // answer is an alias for reg_eax. |
| 5601 |
5420 // Sign extend eax into edx:eax. | 5602 // Sign extend eax into edx:eax. |
5421 __ cdq(); | 5603 __ cdq(); |
5422 // Check for 0 divisor. | 5604 // Check for 0 divisor. |
5423 __ test(right.reg(), Operand(right.reg())); | 5605 __ test(right->reg(), Operand(right->reg())); |
5424 enter()->Branch(zero, &left, &right, not_taken); | 5606 enter()->Branch(zero, left, right, not_taken); |
5425 // Divide edx:eax by ebx. | 5607 // Divide edx:eax by the right operand. |
5426 __ idiv(right.reg()); | 5608 __ idiv(right->reg()); |
5427 // Check for negative zero result. If result is zero, and divisor | 5609 if (op_ == Token::DIV) { |
5428 // is negative, return a floating point negative zero. | 5610 // Check for negative zero result. If result is zero, and divisor |
5429 // The frame is unchanged in this block, so local control flow can | 5611 // is negative, return a floating point negative zero. |
5430 // use a Label rather than a JumpTarget. | 5612 // The frame is unchanged in this block, so local control flow can |
5431 Label non_zero_result; | 5613 // use a Label rather than a JumpTarget. |
5432 __ test(left.reg(), Operand(left.reg())); | 5614 Label non_zero_result; |
5433 __ j(not_zero, &non_zero_result, taken); | 5615 __ test(left->reg(), Operand(left->reg())); |
5434 __ test(right.reg(), Operand(right.reg())); | 5616 __ j(not_zero, &non_zero_result, taken); |
5435 enter()->Branch(negative, &left, &right, not_taken); | 5617 __ test(right->reg(), Operand(right->reg())); |
5436 __ bind(&non_zero_result); | 5618 enter()->Branch(negative, left, right, not_taken); |
5437 // Check for the corner case of dividing the most negative smi | 5619 __ bind(&non_zero_result); |
5438 // by -1. We cannot use the overflow flag, since it is not set | 5620 // Check for the corner case of dividing the most negative smi |
5439 // by idiv instruction. | 5621 // by -1. We cannot use the overflow flag, since it is not set |
5440 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); | 5622 // by idiv instruction. |
5441 __ cmp(reg_eax.reg(), 0x40000000); | 5623 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); |
5442 enter()->Branch(equal, &left, &right, not_taken); | 5624 __ cmp(eax, 0x40000000); |
5443 // Check that the remainder is zero. | 5625 enter()->Branch(equal, left, right, not_taken); |
5444 __ test(reg_edx.reg(), Operand(reg_edx.reg())); | 5626 // Check that the remainder is zero. |
5445 enter()->Branch(not_zero, &left, &right, not_taken); | 5627 __ test(edx, Operand(edx)); |
5446 // Tag the result and store it in register temp. | 5628 enter()->Branch(not_zero, left, right, not_taken); |
5447 ASSERT(kSmiTagSize == times_2); // adjust code if not the case | 5629 // Tag the result and store it in register temp. |
5448 __ lea(answer.reg(), Operand(eax, eax, times_1, kSmiTag)); | 5630 ASSERT(kSmiTagSize == times_2); // adjust code if not the case |
| 5631 __ lea(answer.reg(), Operand(eax, eax, times_1, kSmiTag)); |
| 5632 } else { |
| 5633 ASSERT(op_ == Token::MOD); |
| 5634 // Check for a negative zero result. If the result is zero, and the |
| 5635 // dividend is negative, return a floating point negative zero. |
| 5636 // The frame is unchanged in this block, so local control flow can |
| 5637 // use a Label rather than a JumpTarget. |
| 5638 Label non_zero_result; |
| 5639 __ test(edx, Operand(edx)); |
| 5640 __ j(not_zero, &non_zero_result, taken); |
| 5641 __ test(left->reg(), Operand(left->reg())); |
| 5642 enter()->Branch(negative, left, right, not_taken); |
| 5643 __ bind(&non_zero_result); |
| 5644 // The answer is in edx. |
| 5645 answer = reg_edx; |
| 5646 } |
5449 break; | 5647 break; |
5450 } | 5648 } |
5451 | |
5452 case Token::MOD: { | |
5453 // Left hand argument has been copied into answer, which is eax. | |
5454 // Sign extend eax into edx:eax. | |
5455 __ cdq(); | |
5456 // Check for 0 divisor. | |
5457 __ test(right.reg(), Operand(right.reg())); | |
5458 enter()->Branch(zero, &left, &right, not_taken); | |
5459 | |
5460 // Divide edx:eax by ebx. | |
5461 __ idiv(right.reg()); | |
5462 // Check for negative zero result. If result is zero, and divisor | |
5463 // is negative, return a floating point negative zero. | |
5464 // The frame is unchanged in this block, so local control flow can | |
5465 // use a Label rather than a JumpTarget. | |
5466 Label non_zero_result; | |
5467 __ test(reg_edx.reg(), Operand(reg_edx.reg())); | |
5468 __ j(not_zero, &non_zero_result, taken); | |
5469 __ test(left.reg(), Operand(left.reg())); | |
5470 enter()->Branch(negative, &left, &right, not_taken); | |
5471 __ bind(&non_zero_result); | |
5472 // The answer is in edx. | |
5473 answer = reg_edx; | |
5474 break; | |
5475 } | |
5476 | |
5477 case Token::BIT_OR: | 5649 case Token::BIT_OR: |
5478 __ or_(answer.reg(), Operand(right.reg())); | 5650 __ or_(answer.reg(), Operand(right->reg())); |
5479 break; | 5651 break; |
5480 | 5652 |
5481 case Token::BIT_AND: | 5653 case Token::BIT_AND: |
5482 __ and_(answer.reg(), Operand(right.reg())); | 5654 __ and_(answer.reg(), Operand(right->reg())); |
5483 break; | 5655 break; |
5484 | 5656 |
5485 case Token::BIT_XOR: | 5657 case Token::BIT_XOR: |
5486 __ xor_(answer.reg(), Operand(right.reg())); | 5658 __ xor_(answer.reg(), Operand(right->reg())); |
5487 break; | 5659 break; |
5488 | 5660 |
5489 case Token::SHL: | 5661 case Token::SHL: |
5490 case Token::SHR: | 5662 case Token::SHR: |
5491 case Token::SAR: | 5663 case Token::SAR: |
5492 // Move right into ecx. | 5664 // Move right into ecx. |
5493 // Left is in two registers already, so even if left or answer is ecx, | 5665 // Left is in two registers already, so even if left or answer is ecx, |
5494 // we can move right to it, and use the other one. | 5666 // we can move right to it, and use the other one. |
5495 // Right operand must be in register cl because x86 likes it that way. | 5667 // Right operand must be in register cl because x86 likes it that way. |
5496 if (right.reg().is(ecx)) { | 5668 if (right->reg().is(ecx)) { |
5497 // Right is already in the right place. Left may be in the | 5669 // Right is already in the right place. Left may be in the |
5498 // same register, which causes problems. Use answer instead. | 5670 // same register, which causes problems. Use answer instead. |
5499 if (left.reg().is(ecx)) { | 5671 if (left->reg().is(ecx)) { |
5500 left = answer; | 5672 *left = answer; |
5501 } | 5673 } |
5502 } else if (left.reg().is(ecx)) { | 5674 } else if (left->reg().is(ecx)) { |
5503 generator()->frame()->Spill(left.reg()); | 5675 generator()->frame()->Spill(left->reg()); |
5504 __ mov(left.reg(), Operand(right.reg())); | 5676 __ mov(left->reg(), right->reg()); |
5505 right = left; | 5677 *right = *left; |
5506 left = answer; // Use copy of left in answer as left. | 5678 *left = answer; // Use copy of left in answer as left. |
5507 } else if (answer.reg().is(ecx)) { | 5679 } else if (answer.reg().is(ecx)) { |
5508 __ mov(answer.reg(), Operand(right.reg())); | 5680 __ mov(answer.reg(), right->reg()); |
5509 right = answer; | 5681 *right = answer; |
5510 } else { | 5682 } else { |
5511 Result reg_ecx = generator()->allocator()->Allocate(ecx); | 5683 Result reg_ecx = generator()->allocator()->Allocate(ecx); |
5512 ASSERT(reg_ecx.is_valid()); | 5684 ASSERT(reg_ecx.is_valid()); |
5513 __ mov(reg_ecx.reg(), Operand(right.reg())); | 5685 __ mov(ecx, right->reg()); |
5514 right = reg_ecx; | 5686 *right = reg_ecx; |
5515 } | 5687 } |
5516 ASSERT(left.reg().is_valid()); | 5688 ASSERT(left->reg().is_valid()); |
5517 ASSERT(!left.reg().is(ecx)); | 5689 ASSERT(!left->reg().is(ecx)); |
5518 ASSERT(right.reg().is(ecx)); | 5690 ASSERT(right->reg().is(ecx)); |
5519 answer.Unuse(); // Answer may now be in use as left or right. | 5691 answer.Unuse(); // Answer may now be in use as left or right. |
5520 // We will modify left and right, which we do not do in any other | 5692 // We will modify left and right, which we do not do in any other |
5521 // binary operation. The exits to slow code need to restore the | 5693 // binary operation. The exits to slow code need to restore the |
5522 // original values of left and right, or at least values that give | 5694 // original values of left and right, or at least values that give |
5523 // the same answer. | 5695 // the same answer. |
5524 | 5696 |
5525 // We are modifying left and right. They must be spilled! | 5697 // We are modifying left and right. They must be spilled! |
5526 generator()->frame()->Spill(left.reg()); | 5698 generator()->frame()->Spill(left->reg()); |
5527 generator()->frame()->Spill(right.reg()); | 5699 generator()->frame()->Spill(right->reg()); |
5528 | 5700 |
5529 // Remove tags from operands (but keep sign). | 5701 // Remove tags from operands (but keep sign). |
5530 __ sar(left.reg(), kSmiTagSize); | 5702 __ sar(left->reg(), kSmiTagSize); |
5531 __ sar(ecx, kSmiTagSize); | 5703 __ sar(ecx, kSmiTagSize); |
5532 // Perform the operation. | 5704 // Perform the operation. |
5533 switch (op_) { | 5705 switch (op_) { |
5534 case Token::SAR: | 5706 case Token::SAR: |
5535 __ sar(left.reg()); | 5707 __ sar(left->reg()); |
5536 // No checks of result necessary | 5708 // No checks of result necessary |
5537 break; | 5709 break; |
5538 case Token::SHR: { | 5710 case Token::SHR: { |
5539 __ shr(left.reg()); | 5711 __ shr(left->reg()); |
5540 // Check that the *unsigned* result fits in a smi. | 5712 // Check that the *unsigned* result fits in a smi. |
5541 // Neither of the two high-order bits can be set: | 5713 // Neither of the two high-order bits can be set: |
5542 // - 0x80000000: high bit would be lost when smi tagging. | 5714 // - 0x80000000: high bit would be lost when smi tagging. |
5543 // - 0x40000000: this number would convert to negative when | 5715 // - 0x40000000: this number would convert to negative when |
5544 // smi tagging. These two cases can only happen with shifts | 5716 // smi tagging. These two cases can only happen with shifts |
5545 // by 0 or 1 when handed a valid smi. | 5717 // by 0 or 1 when handed a valid smi. |
5546 // If the answer cannot be represented by a SMI, restore | 5718 // If the answer cannot be represented by a SMI, restore |
5547 // the left and right arguments, and jump to slow case. | 5719 // the left and right arguments, and jump to slow case. |
5548 // The low bit of the left argument may be lost, but only | 5720 // The low bit of the left argument may be lost, but only |
5549 // in a case where it is dropped anyway. | 5721 // in a case where it is dropped anyway. |
5550 JumpTarget result_ok(generator()); | 5722 JumpTarget result_ok(generator()); |
5551 __ test(left.reg(), Immediate(0xc0000000)); | 5723 __ test(left->reg(), Immediate(0xc0000000)); |
5552 result_ok.Branch(zero, &left, &right, taken); | 5724 result_ok.Branch(zero, left, taken); |
5553 __ shl(left.reg()); | 5725 __ shl(left->reg()); |
5554 ASSERT(kSmiTag == 0); | 5726 ASSERT(kSmiTag == 0); |
5555 __ shl(left.reg(), kSmiTagSize); | 5727 __ shl(left->reg(), kSmiTagSize); |
5556 __ shl(right.reg(), kSmiTagSize); | 5728 __ shl(right->reg(), kSmiTagSize); |
5557 enter()->Jump(&left, &right); | 5729 enter()->Jump(left, right); |
5558 result_ok.Bind(&left, &right); | 5730 result_ok.Bind(left); |
5559 break; | 5731 break; |
5560 } | 5732 } |
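A hedged C++ restatement of the SHR range check above (illustration only; SmiShr is a hypothetical name): the unsigned shift result fits back into a smi only when neither of the two high-order bits is set, which is exactly what the test against 0xc0000000 verifies.

#include <cstdint>
#include <optional>

// Hypothetical helper: logical shift right on an untagged smi value.
std::optional<int32_t> SmiShr(int32_t left, int shift_count) {
  // x86 shifts mask the count to five bits; mirrored here with & 0x1f.
  uint32_t result = static_cast<uint32_t>(left) >> (shift_count & 0x1f);
  // 0x80000000 would lose its high bit when tagged; 0x40000000 would turn
  // negative. Either way the stub has to build a heap number instead.
  if ((result & 0xc0000000u) != 0) return std::nullopt;
  return static_cast<int32_t>(result);
}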
5561 case Token::SHL: { | 5733 case Token::SHL: { |
5562 __ shl(left.reg()); | 5734 __ shl(left->reg()); |
5563 // Check that the *signed* result fits in a smi. | 5735 // Check that the *signed* result fits in a smi. |
5564 // | 5736 // |
5565 // TODO(207): Can reduce registers from 4 to 3 by | 5737 // TODO(207): Can reduce registers from 4 to 3 by |
5566 // preallocating ecx. | 5738 // preallocating ecx. |
5567 JumpTarget result_ok(generator()); | 5739 JumpTarget result_ok(generator()); |
5568 Result smi_test_reg = generator()->allocator()->Allocate(); | 5740 Result smi_test_reg = generator()->allocator()->Allocate(); |
5569 ASSERT(smi_test_reg.is_valid()); | 5741 ASSERT(smi_test_reg.is_valid()); |
5570 __ lea(smi_test_reg.reg(), Operand(left.reg(), 0x40000000)); | 5742 __ lea(smi_test_reg.reg(), Operand(left->reg(), 0x40000000)); |
5571 __ test(smi_test_reg.reg(), Immediate(0x80000000)); | 5743 __ test(smi_test_reg.reg(), Immediate(0x80000000)); |
5572 smi_test_reg.Unuse(); | 5744 smi_test_reg.Unuse(); |
5573 result_ok.Branch(zero, &left, &right, taken); | 5745 result_ok.Branch(zero, left, taken); |
5574 __ shr(left.reg()); | 5746 __ shr(left->reg()); |
5575 ASSERT(kSmiTag == 0); | 5747 ASSERT(kSmiTag == 0); |
5576 __ shl(left.reg(), kSmiTagSize); | 5748 __ shl(left->reg(), kSmiTagSize); |
5577 __ shl(right.reg(), kSmiTagSize); | 5749 __ shl(right->reg(), kSmiTagSize); |
5578 enter()->Jump(&left, &right); | 5750 enter()->Jump(left, right); |
5579 result_ok.Bind(&left, &right); | 5751 result_ok.Bind(left); |
5580 break; | 5752 break; |
5581 } | 5753 } |
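The lea/test pair above is a branch-free range check; a small C++ sketch of the same idea (illustration only; FitsInSmi is a hypothetical name):

#include <cstdint>

// A signed 32-bit value fits in a 31-bit smi exactly when it lies in
// [-2^30, 2^30). Adding 0x40000000 shifts that window to [0, 2^31), so the
// value fits iff the top bit of (v + 0x40000000) is clear -- the same thing
// the lea/test sequence computes without disturbing v.
bool FitsInSmi(int32_t v) {
  return ((static_cast<uint32_t>(v) + 0x40000000u) & 0x80000000u) == 0;
}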
5582 default: | 5754 default: |
5583 UNREACHABLE(); | 5755 UNREACHABLE(); |
5584 } | 5756 } |
5585 // Smi-tag the result, in left, and make answer an alias for left. | 5757 // Smi-tag the result, in left, and make answer an alias for left. |
5586 answer = left; | 5758 answer = *left; |
5587 answer.ToRegister(); | 5759 answer.ToRegister(); |
5588 ASSERT(kSmiTagSize == times_2); // adjust code if not the case | 5760 ASSERT(kSmiTagSize == times_2); // adjust code if not the case |
5589 __ lea(answer.reg(), | 5761 __ lea(answer.reg(), |
5590 Operand(answer.reg(), answer.reg(), times_1, kSmiTag)); | 5762 Operand(answer.reg(), answer.reg(), times_1, kSmiTag)); |
5591 break; | 5763 break; |
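For reference, what the final lea computes (illustration only; SmiTag and SmiUntag are hypothetical names): with kSmiTag == 0 and kSmiTagSize == 1, tagging is simply doubling, and lea answer, [answer + answer*1 + 0] does the doubling in one instruction without touching the flags.

#include <cstdint>

int32_t SmiTag(int32_t value) { return value + value; }  // same as value << 1
int32_t SmiUntag(int32_t smi) { return smi >> 1; }       // arithmetic shift restores the sign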
5592 | 5764 |
5593 default: | 5765 default: |
5594 UNREACHABLE(); | 5766 UNREACHABLE(); |
5595 break; | 5767 break; |
5596 } | 5768 } |
| 5769 left->Unuse(); |
| 5770 right->Unuse(); |
5597 return answer; | 5771 return answer; |
5598 } | 5772 } |
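The register shuffling in the shift case above exists because x86 variable-count shifts read their count from cl only. A minimal GCC/Clang-style illustration of the same constraint (x86 only, not part of the patch; ShrByCl is a hypothetical name):

#include <cstdint>

// The "c" constraint forces the shift count into ecx/cl, just as the code
// generator above has to route the right operand into ecx before shifting.
uint32_t ShrByCl(uint32_t value, uint8_t count) {
  __asm__("shrl %%cl, %0" : "+r"(value) : "c"(count) : "cc");
  return value;
}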
5599 | 5773 |
5600 | 5774 |
5601 #undef __ | 5775 #undef __ |
5602 #define __ masm-> | 5776 #define __ masm-> |
5603 | 5777 |
5604 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { | 5778 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { |
5605 // Perform fast-case smi code for the operation (eax <op> ebx) and | 5779 // Perform fast-case smi code for the operation (eax <op> ebx) and |
5606 // leave result in register eax. | 5780 // leave result in register eax. |
(...skipping 1154 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
6761 | 6935 |
6762 // Slow-case: Go through the JavaScript implementation. | 6936 // Slow-case: Go through the JavaScript implementation. |
6763 __ bind(&slow); | 6937 __ bind(&slow); |
6764 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); | 6938 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); |
6765 } | 6939 } |
6766 | 6940 |
6767 | 6941 |
6768 #undef __ | 6942 #undef __ |
6769 | 6943 |
6770 } } // namespace v8::internal | 6944 } } // namespace v8::internal |