| OLD | NEW |
| 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 680 matching lines...) |
| 691 Register scratch); | 691 Register scratch); |
| 692 // Allocate a heap number in new space with undefined value. | 692 // Allocate a heap number in new space with undefined value. |
| 693 // Returns tagged pointer in eax, or jumps to need_gc if new space is full. | 693 // Returns tagged pointer in eax, or jumps to need_gc if new space is full. |
| 694 static void AllocateHeapNumber(MacroAssembler* masm, | 694 static void AllocateHeapNumber(MacroAssembler* masm, |
| 695 Label* need_gc, | 695 Label* need_gc, |
| 696 Register scratch1, | 696 Register scratch1, |
| 697 Register scratch2); | 697 Register scratch2); |
| 698 }; | 698 }; |
| 699 | 699 |
| 700 | 700 |
| 701 // Flag that indicates whether or not the code for dealing with smis | 701 // Flag that indicates whether or not the code that handles smi arguments |
| 702 // is inlined or should be dealt with in the stub. | 702 // should be inlined, placed in the stub, or omitted entirely. |
| 703 enum GenericBinaryFlags { | 703 enum GenericBinaryFlags { |
| 704 SMI_CODE_IN_STUB, | 704 SMI_CODE_IN_STUB, |
| 705 SMI_CODE_INLINED | 705 SMI_CODE_INLINED, |
| 706 // It is known at compile time that at least one argument is not a smi. |
| 707 NO_SMI_CODE |
| 706 }; | 708 }; |
| 707 | 709 |
| 708 | 710 |
| 709 class GenericBinaryOpStub: public CodeStub { | 711 class GenericBinaryOpStub: public CodeStub { |
| 710 public: | 712 public: |
| 711 GenericBinaryOpStub(Token::Value op, | 713 GenericBinaryOpStub(Token::Value op, |
| 712 OverwriteMode mode, | 714 OverwriteMode mode, |
| 713 GenericBinaryFlags flags) | 715 GenericBinaryFlags flags) |
| 714 : op_(op), mode_(mode), flags_(flags) { } | 716 : op_(op), mode_(mode), flags_(flags) { } |
| 715 | 717 |
| (...skipping 10 matching lines...) |
| 726 void Print() { | 728 void Print() { |
| 727 PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n", | 729 PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n", |
| 728 Token::String(op_), | 730 Token::String(op_), |
| 729 static_cast<int>(mode_), | 731 static_cast<int>(mode_), |
| 730 static_cast<int>(flags_)); | 732 static_cast<int>(flags_)); |
| 731 } | 733 } |
| 732 #endif | 734 #endif |
| 733 | 735 |
| 734 // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM. | 736 // Minor key encoding in 16 bits FFOOOOOOOOOOOOMM. |
| 735 class ModeBits: public BitField<OverwriteMode, 0, 2> {}; | 737 class ModeBits: public BitField<OverwriteMode, 0, 2> {}; |
| 736 class OpBits: public BitField<Token::Value, 2, 13> {}; | 738 class OpBits: public BitField<Token::Value, 2, 12> {}; |
| 737 class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {}; | 739 class FlagBits: public BitField<GenericBinaryFlags, 14, 2> {}; |
| 738 | 740 |
| 739 Major MajorKey() { return GenericBinaryOp; } | 741 Major MajorKey() { return GenericBinaryOp; } |
| 740 int MinorKey() { | 742 int MinorKey() { |
| 741 // Encode the parameters in a unique 16 bit value. | 743 // Encode the parameters in a unique 16 bit value. |
| 742 return OpBits::encode(op_) | | 744 return OpBits::encode(op_) |
| 743 ModeBits::encode(mode_) | | 745 | ModeBits::encode(mode_) |
| 744 FlagBits::encode(flags_); | 746 | FlagBits::encode(flags_); |
| 745 } | 747 } |
| 746 void Generate(MacroAssembler* masm); | 748 void Generate(MacroAssembler* masm); |
| 747 }; | 749 }; |
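Since the NEW layout widens FlagBits to two bits and narrows OpBits to twelve, it may help to see the packing spelled out with plain shifts and masks. The following standalone sketch mirrors that layout; EncodeMinorKey and the decode helpers are illustrative names, not part of V8's BitField template.

```cpp
#include <cassert>
#include <cstdint>

// Minimal sketch of the new 16-bit minor key layout FFOOOOOOOOOOOOMM
// (2 flag bits, 12 op bits, 2 mode bits) using plain shifts and masks.
uint16_t EncodeMinorKey(int mode, int op, int flags) {
  assert(mode < (1 << 2) && op < (1 << 12) && flags < (1 << 2));
  return static_cast<uint16_t>(mode | (op << 2) | (flags << 14));
}

int DecodeMode(uint16_t key) { return key & 0x3; }
int DecodeOp(uint16_t key) { return (key >> 2) & 0xFFF; }
int DecodeFlags(uint16_t key) { return (key >> 14) & 0x3; }

int main() {
  uint16_t key = EncodeMinorKey(1, 42, 2);
  assert(DecodeMode(key) == 1 && DecodeOp(key) == 42 && DecodeFlags(key) == 2);
  return 0;
}
```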
| 748 | 750 |
| 749 | 751 |
| 750 const char* GenericBinaryOpStub::GetName() { | 752 const char* GenericBinaryOpStub::GetName() { |
| 751 switch (op_) { | 753 switch (op_) { |
| 752 case Token::ADD: return "GenericBinaryOpStub_ADD"; | 754 case Token::ADD: return "GenericBinaryOpStub_ADD"; |
| 753 case Token::SUB: return "GenericBinaryOpStub_SUB"; | 755 case Token::SUB: return "GenericBinaryOpStub_SUB"; |
| 754 case Token::MUL: return "GenericBinaryOpStub_MUL"; | 756 case Token::MUL: return "GenericBinaryOpStub_MUL"; |
| 755 case Token::DIV: return "GenericBinaryOpStub_DIV"; | 757 case Token::DIV: return "GenericBinaryOpStub_DIV"; |
| 756 case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR"; | 758 case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR"; |
| 757 case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND"; | 759 case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND"; |
| 758 case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR"; | 760 case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR"; |
| 759 case Token::SAR: return "GenericBinaryOpStub_SAR"; | 761 case Token::SAR: return "GenericBinaryOpStub_SAR"; |
| 760 case Token::SHL: return "GenericBinaryOpStub_SHL"; | 762 case Token::SHL: return "GenericBinaryOpStub_SHL"; |
| 761 case Token::SHR: return "GenericBinaryOpStub_SHR"; | 763 case Token::SHR: return "GenericBinaryOpStub_SHR"; |
| 762 default: return "GenericBinaryOpStub"; | 764 default: return "GenericBinaryOpStub"; |
| 763 } | 765 } |
| 764 } | 766 } |
| 765 | 767 |
| 766 | 768 |
| 769 // A deferred code class implementing binary operations on likely smis. |
| 770 // This class generates both inline code and deferred code. |
| 771 // The fastest path is implemented inline. Deferred code calls |
| 772 // the GenericBinaryOpStub stub for slow cases. |
| 767 class DeferredInlineBinaryOperation: public DeferredCode { | 773 class DeferredInlineBinaryOperation: public DeferredCode { |
| 768 public: | 774 public: |
| 769 DeferredInlineBinaryOperation(CodeGenerator* generator, | 775 DeferredInlineBinaryOperation(CodeGenerator* generator, |
| 770 Token::Value op, | 776 Token::Value op, |
| 771 OverwriteMode mode, | 777 OverwriteMode mode, |
| 772 GenericBinaryFlags flags) | 778 GenericBinaryFlags flags) |
| 773 : DeferredCode(generator), stub_(op, mode, flags), op_(op) { | 779 : DeferredCode(generator), stub_(op, mode, flags), op_(op) { |
| 774 set_comment("[ DeferredInlineBinaryOperation"); | 780 set_comment("[ DeferredInlineBinaryOperation"); |
| 775 } | 781 } |
| 776 | 782 |
| 777 Result GenerateInlineCode(); | 783 // Consumes its arguments, left and right, leaving them invalid. |
| 784 Result GenerateInlineCode(Result* left, Result* right); |
| 778 | 785 |
| 779 virtual void Generate(); | 786 virtual void Generate(); |
| 780 | 787 |
| 781 private: | 788 private: |
| 782 GenericBinaryOpStub stub_; | 789 GenericBinaryOpStub stub_; |
| 783 Token::Value op_; | 790 Token::Value op_; |
| 784 }; | 791 }; |
| 785 | 792 |
| 786 | 793 |
| 787 void DeferredInlineBinaryOperation::Generate() { | 794 void DeferredInlineBinaryOperation::Generate() { |
| (...skipping 37 matching lines...) |
| 825 | 832 |
| 826 default: | 833 default: |
| 827 // By default only inline the Smi check code for likely smis if this | 834 // By default only inline the Smi check code for likely smis if this |
| 828 // operation is part of a loop. | 835 // operation is part of a loop. |
| 829 flags = ((loop_nesting() > 0) && type->IsLikelySmi()) | 836 flags = ((loop_nesting() > 0) && type->IsLikelySmi()) |
| 830 ? SMI_CODE_INLINED | 837 ? SMI_CODE_INLINED |
| 831 : SMI_CODE_IN_STUB; | 838 : SMI_CODE_IN_STUB; |
| 832 break; | 839 break; |
| 833 } | 840 } |
| 834 | 841 |
| 842 Result right = frame_->Pop(); |
| 843 Result left = frame_->Pop(); |
| 844 bool left_is_smi = left.is_constant() && left.handle()->IsSmi(); |
| 845 bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi(); |
| 846 bool right_is_smi = right.is_constant() && right.handle()->IsSmi(); |
| 847 bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi(); |
| 848 |
| 849 if (left_is_smi && right_is_smi) { |
| 850 // Compute the result, and return that as a constant on the frame. |
| 851 int left_int = Smi::cast(*left.handle())->value(); |
| 852 int right_int = Smi::cast(*right.handle())->value(); |
| 853 if (FoldConstantSmis(op, left_int, right_int)) return; |
| 854 } |
| 855 |
| 856 if (left_is_non_smi || right_is_non_smi) { |
| 857 // Set flag so that we go straight to the slow case, with no smi code. |
| 858 flags = NO_SMI_CODE; |
| 859 } else if (right_is_smi) { |
| 860 ConstantSmiBinaryOperation(op, &left, right.handle(), type, |
| 861 false, overwrite_mode); |
| 862 return; |
| 863 } else if (left_is_smi) { |
| 864 ConstantSmiBinaryOperation(op, &right, left.handle(), type, |
| 865 true, overwrite_mode); |
| 866 return; |
| 867 } |
| 868 |
| 835 if (flags == SMI_CODE_INLINED) { | 869 if (flags == SMI_CODE_INLINED) { |
| 836 // Create a new deferred code for the slow-case part. | 870 LikelySmiBinaryOperation(op, &left, &right, overwrite_mode); |
| 837 DeferredInlineBinaryOperation* deferred = | |
| 838 new DeferredInlineBinaryOperation(this, op, overwrite_mode, flags); | |
| 839 // Generate the inline part of the code. | |
| 840 // The operands are on the frame. | |
| 841 Result answer = deferred->GenerateInlineCode(); | |
| 842 deferred->BindExit(&answer); | |
| 843 frame_->Push(&answer); | |
| 844 } else { | 871 } else { |
| 845 // Call the stub and push the result to the stack. | 872 frame_->Push(&left); |
| 873 frame_->Push(&right); |
| 874 // If we know the arguments aren't smis, use the binary operation stub |
| 875 // that does not check for the fast smi case. |
| 876 // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED. |
| 877 if (flags == NO_SMI_CODE) { |
| 878 flags = SMI_CODE_INLINED; |
| 879 } |
| 846 GenericBinaryOpStub stub(op, overwrite_mode, flags); | 880 GenericBinaryOpStub stub(op, overwrite_mode, flags); |
| 847 Result answer = frame_->CallStub(&stub, 2); | 881 Result answer = frame_->CallStub(&stub, 2); |
| 848 frame_->Push(&answer); | 882 frame_->Push(&answer); |
| 849 } | 883 } |
| 850 } | 884 } |
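Every fast path below leans on the ia32 smi representation (kSmiTag == 0, kSmiTagSize == 1): a smi is its value shifted left by one, so the low bit distinguishes smis from heap pointers and tagged addition adds the underlying values directly. A small standalone sketch of that encoding, with locally defined constants and illustrative helper names rather than the real V8 declarations:

```cpp
#include <cassert>
#include <cstdint>

// Local stand-ins for the smi constants assumed by the generated code:
// tag value 0 stored in the single low bit of a 32-bit word.
const int32_t kSmiTag = 0;
const int32_t kSmiTagSize = 1;
const int32_t kSmiTagMask = (1 << kSmiTagSize) - 1;  // 0x1

int32_t TagSmi(int32_t value) { return value << kSmiTagSize; }
int32_t UntagSmi(int32_t smi) { return smi >> kSmiTagSize; }
bool IsSmi(int32_t word) { return (word & kSmiTagMask) == kSmiTag; }

int main() {
  int32_t three = TagSmi(3);
  assert(IsSmi(three) && UntagSmi(three) == 3);
  // Adding two tagged smis adds the untagged values and keeps the zero tag,
  // which is why the inline ADD path can add the tagged words directly.
  assert(UntagSmi(TagSmi(3) + TagSmi(4)) == 7);
  // Heap object pointers carry a nonzero low tag, so test(reg, kSmiTagMask)
  // sends them to the slow path.
  assert(!IsSmi(0x12345679));
  return 0;
}
```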
| 851 | 885 |
| 852 | 886 |
| 887 bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) { |
| 888 Object* answer_object = Heap::undefined_value(); |
| 889 switch (op) { |
| 890 case Token::ADD: |
| 891 if (Smi::IsValid(left + right)) { |
| 892 answer_object = Smi::FromInt(left + right); |
| 893 } |
| 894 break; |
| 895 case Token::SUB: |
| 896 if (Smi::IsValid(left - right)) { |
| 897 answer_object = Smi::FromInt(left - right); |
| 898 } |
| 899 break; |
| 900 case Token::MUL: { |
| 901 double answer = static_cast<double>(left) * right; |
| 902 if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) { |
| 903 // If the product is zero and the non-zero factor is negative, |
| 904 // the spec requires us to return floating point negative zero. |
| 905 if (answer != 0 || (left >= 0 && right >= 0)) { |
| 906 answer_object = Smi::FromInt(static_cast<int>(answer)); |
| 907 } |
| 908 } |
| 909 } |
| 910 break; |
| 911 case Token::DIV: |
| 912 case Token::MOD: |
| 913 break; |
| 914 case Token::BIT_OR: |
| 915 answer_object = Smi::FromInt(left | right); |
| 916 break; |
| 917 case Token::BIT_AND: |
| 918 answer_object = Smi::FromInt(left & right); |
| 919 break; |
| 920 case Token::BIT_XOR: |
| 921 answer_object = Smi::FromInt(left ^ right); |
| 922 break; |
| 923 |
| 924 case Token::SHL: { |
| 925 int shift_amount = right & 0x1F; |
| 926 if (Smi::IsValid(left << shift_amount)) { |
| 927 answer_object = Smi::FromInt(left << shift_amount); |
| 928 } |
| 929 break; |
| 930 } |
| 931 case Token::SHR: { |
| 932 int shift_amount = right & 0x1F; |
| 933 unsigned int unsigned_left = left; |
| 934 unsigned_left >>= shift_amount; |
| 935 if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) { |
| 936 answer_object = Smi::FromInt(unsigned_left); |
| 937 } |
| 938 break; |
| 939 } |
| 940 case Token::SAR: { |
| 941 int shift_amount = right & 0x1F; |
| 942 unsigned int unsigned_left = left; |
| 943 if (left < 0) { |
| 944 // Perform arithmetic shift of a negative number by |
| 945 // complementing number, logical shifting, complementing again. |
| 946 unsigned_left = ~unsigned_left; |
| 947 unsigned_left >>= shift_amount; |
| 948 unsigned_left = ~unsigned_left; |
| 949 } else { |
| 950 unsigned_left >>= shift_amount; |
| 951 } |
| 952 ASSERT(Smi::IsValid(unsigned_left)); // Converted to signed. |
| 953 answer_object = Smi::FromInt(unsigned_left); // Converted to signed. |
| 954 break; |
| 955 } |
| 956 default: |
| 957 UNREACHABLE(); |
| 958 break; |
| 959 } |
| 960 if (answer_object == Heap::undefined_value()) { |
| 961 return false; |
| 962 } |
| 963 frame_->Push(Handle<Object>(answer_object)); |
| 964 return true; |
| 965 } |
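Two details of FoldConstantSmis deserve a worked illustration: the refusal to fold a zero product with a negative factor (ECMAScript requires -0 there, which no smi can represent) and the complement / logical-shift / complement sequence that emulates an arithmetic right shift. The snippet below is a standalone sketch of those two rules only, not the V8 routine:

```cpp
#include <cassert>

// Arithmetic right shift written with unsigned shifts, mirroring the
// complement / shift / complement sequence in the SAR case above.
int ArithmeticShiftRight(int left, int shift_amount) {
  unsigned int unsigned_left = static_cast<unsigned int>(left);
  if (left < 0) {
    unsigned_left = ~unsigned_left;   // complement the number
    unsigned_left >>= shift_amount;   // logical shift
    unsigned_left = ~unsigned_left;   // complement again
  } else {
    unsigned_left >>= shift_amount;
  }
  return static_cast<int>(unsigned_left);
}

int main() {
  assert(ArithmeticShiftRight(-8, 1) == -4);
  assert(ArithmeticShiftRight(-7, 1) == -4);  // rounds toward -infinity
  // The MUL case refuses to fold 0 * -5: the spec result is -0, which
  // compares equal to 0 but is not representable as a smi, so the sign
  // check on the factors routes this case to the stub instead.
  assert(0.0 * -5.0 == 0.0);
  return 0;
}
```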
| 966 |
| 967 |
| 968 void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, |
| 969 Result* left, |
| 970 Result* right, |
| 971 OverwriteMode overwrite_mode) { |
| 972 // Create a new deferred code object that calls GenericBinaryOpStub |
| 973 // in the slow case. |
| 974 DeferredInlineBinaryOperation* deferred = |
| 975 new DeferredInlineBinaryOperation(this, op, overwrite_mode, |
| 976 SMI_CODE_INLINED); |
| 977 // Generate the inline code that handles some smi operations, |
| 978 // and jumps to the deferred code for everything else. |
| 979 Result answer = deferred->GenerateInlineCode(left, right); |
| 980 deferred->BindExit(&answer); |
| 981 frame_->Push(&answer); |
| 982 } |
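The inline code this creates (GenerateInlineCode, near the end of this diff) tests both operands for smi-ness with a single instruction pair by OR-ing them first: with a zero smi tag, the OR of the two words has a zero low bit only when both low bits are zero. A hedged sketch of that check, with illustrative local constants and names:

```cpp
#include <cassert>
#include <cstdint>

// Mirrors the generated sequence
//   mov answer, left
//   or  answer, right
//   test answer, kSmiTagMask   ; branch to deferred code if non-zero
const int32_t kSmiTagMask = 1;

bool BothAreSmis(int32_t left, int32_t right) {
  return ((left | right) & kSmiTagMask) == 0;
}

int main() {
  int32_t smi_2 = 2 << 1;          // tagged smi 2
  int32_t smi_3 = 3 << 1;          // tagged smi 3
  int32_t heap_object = 0x1001;    // low bit set: a heap pointer, not a smi
  assert(BothAreSmis(smi_2, smi_3));
  assert(!BothAreSmis(smi_2, heap_object));
  return 0;
}
```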
| 983 |
| 984 |
| 853 class DeferredInlineSmiOperation: public DeferredCode { | 985 class DeferredInlineSmiOperation: public DeferredCode { |
| 854 public: | 986 public: |
| 855 DeferredInlineSmiOperation(CodeGenerator* generator, | 987 DeferredInlineSmiOperation(CodeGenerator* generator, |
| 856 Token::Value op, | 988 Token::Value op, |
| 857 Smi* value, | 989 Smi* value, |
| 858 OverwriteMode overwrite_mode) | 990 OverwriteMode overwrite_mode) |
| 859 : DeferredCode(generator), | 991 : DeferredCode(generator), |
| 860 op_(op), | 992 op_(op), |
| 861 value_(value), | 993 value_(value), |
| 862 overwrite_mode_(overwrite_mode) { | 994 overwrite_mode_(overwrite_mode) { |
| (...skipping 179 matching lines...) |
| 1042 Result right(generator()); | 1174 Result right(generator()); |
| 1043 enter()->Bind(&right); | 1175 enter()->Bind(&right); |
| 1044 generator()->frame()->Push(value_); | 1176 generator()->frame()->Push(value_); |
| 1045 generator()->frame()->Push(&right); | 1177 generator()->frame()->Push(&right); |
| 1046 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED); | 1178 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED); |
| 1047 Result answer = generator()->frame()->CallStub(&igostub, 2); | 1179 Result answer = generator()->frame()->CallStub(&igostub, 2); |
| 1048 exit_.Jump(&answer); | 1180 exit_.Jump(&answer); |
| 1049 } | 1181 } |
| 1050 | 1182 |
| 1051 | 1183 |
| 1052 void CodeGenerator::SmiOperation(Token::Value op, | 1184 void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, |
| 1053 StaticType* type, | 1185 Result* operand, |
| 1054 Handle<Object> value, | 1186 Handle<Object> value, |
| 1055 bool reversed, | 1187 StaticType* type, |
| 1056 OverwriteMode overwrite_mode) { | 1188 bool reversed, |
| 1189 OverwriteMode overwrite_mode) { |
| 1057 // NOTE: This is an attempt to inline (a bit) more of the code for | 1190 // NOTE: This is an attempt to inline (a bit) more of the code for |
| 1058 // some possible smi operations (like + and -) when (at least) one | 1191 // some possible smi operations (like + and -) when (at least) one |
| 1059 // of the operands is a literal smi. With this optimization, the | 1192 // of the operands is a constant smi. |
| 1060 // performance of the system is increased by ~15%, and the generated | 1193 // Consumes the argument "operand". |
| 1061 // code size is increased by ~1% (measured on a combination of | |
| 1062 // different benchmarks). | |
| 1063 | 1194 |
| 1064 // TODO(199): Optimize some special cases of operations involving a | 1195 // TODO(199): Optimize some special cases of operations involving a |
| 1065 // smi literal (multiply by 2, shift by 0, etc.). | 1196 // smi literal (multiply by 2, shift by 0, etc.). |
| 1197 if (IsUnsafeSmi(value)) { |
| 1198 Result unsafe_operand(value, this); |
| 1199 if (reversed) { |
| 1200 LikelySmiBinaryOperation(op, &unsafe_operand, operand, |
| 1201 overwrite_mode); |
| 1202 } else { |
| 1203 LikelySmiBinaryOperation(op, operand, &unsafe_operand, |
| 1204 overwrite_mode); |
| 1205 } |
| 1206 ASSERT(!operand->is_valid()); |
| 1207 return; |
| 1208 } |
| 1066 | 1209 |
| 1067 // Get the literal value. | 1210 // Get the literal value. |
| 1068 Smi* smi_value = Smi::cast(*value); | 1211 Smi* smi_value = Smi::cast(*value); |
| 1069 int int_value = smi_value->value(); | 1212 int int_value = smi_value->value(); |
| 1070 ASSERT(is_intn(int_value, kMaxSmiInlinedBits)); | |
| 1071 | 1213 |
| 1072 switch (op) { | 1214 switch (op) { |
| 1073 case Token::ADD: { | 1215 case Token::ADD: { |
| 1074 DeferredCode* deferred = NULL; | 1216 DeferredCode* deferred = NULL; |
| 1075 if (!reversed) { | 1217 if (reversed) { |
| 1076 deferred = new DeferredInlineSmiAdd(this, smi_value, overwrite_mode); | |
| 1077 } else { | |
| 1078 deferred = new DeferredInlineSmiAddReversed(this, smi_value, | 1218 deferred = new DeferredInlineSmiAddReversed(this, smi_value, |
| 1079 overwrite_mode); | 1219 overwrite_mode); |
| 1220 } else { |
| 1221 deferred = new DeferredInlineSmiAdd(this, smi_value, overwrite_mode); |
| 1080 } | 1222 } |
| 1081 Result operand = frame_->Pop(); | 1223 operand->ToRegister(); |
| 1082 operand.ToRegister(); | 1224 frame_->Spill(operand->reg()); |
| 1083 frame_->Spill(operand.reg()); | 1225 __ add(Operand(operand->reg()), Immediate(value)); |
| 1084 __ add(Operand(operand.reg()), Immediate(value)); | 1226 deferred->enter()->Branch(overflow, operand, not_taken); |
| 1085 deferred->enter()->Branch(overflow, &operand, not_taken); | 1227 __ test(operand->reg(), Immediate(kSmiTagMask)); |
| 1086 __ test(operand.reg(), Immediate(kSmiTagMask)); | 1228 deferred->enter()->Branch(not_zero, operand, not_taken); |
| 1087 deferred->enter()->Branch(not_zero, &operand, not_taken); | 1229 deferred->BindExit(operand); |
| 1088 deferred->BindExit(&operand); | 1230 frame_->Push(operand); |
| 1089 frame_->Push(&operand); | |
| 1090 break; | 1231 break; |
| 1091 } | 1232 } |
| 1092 | 1233 |
| 1093 case Token::SUB: { | 1234 case Token::SUB: { |
| 1094 DeferredCode* deferred = NULL; | 1235 DeferredCode* deferred = NULL; |
| 1095 Result operand = frame_->Pop(); | |
| 1096 Result answer(this); // Only allocated a new register if reversed. | 1236 Result answer(this); // Only allocated a new register if reversed. |
| 1097 if (!reversed) { | 1237 if (reversed) { |
| 1098 operand.ToRegister(); | |
| 1099 frame_->Spill(operand.reg()); | |
| 1100 deferred = new DeferredInlineSmiSub(this, | |
| 1101 smi_value, | |
| 1102 overwrite_mode); | |
| 1103 __ sub(Operand(operand.reg()), Immediate(value)); | |
| 1104 answer = operand; | |
| 1105 } else { | |
| 1106 answer = allocator()->Allocate(); | 1238 answer = allocator()->Allocate(); |
| 1107 ASSERT(answer.is_valid()); | 1239 ASSERT(answer.is_valid()); |
| 1108 deferred = new DeferredInlineSmiSubReversed(this, | 1240 deferred = new DeferredInlineSmiSubReversed(this, |
| 1109 smi_value, | 1241 smi_value, |
| 1110 overwrite_mode); | 1242 overwrite_mode); |
| 1111 __ mov(answer.reg(), Immediate(value)); | 1243 __ Set(answer.reg(), Immediate(value)); |
| 1112 if (operand.is_register()) { | 1244 if (operand->is_register()) { |
| 1113 __ sub(answer.reg(), Operand(operand.reg())); | 1245 __ sub(answer.reg(), Operand(operand->reg())); |
| 1114 } else { | 1246 } else { |
| 1115 ASSERT(operand.is_constant()); | 1247 ASSERT(operand->is_constant()); |
| 1116 __ sub(Operand(answer.reg()), Immediate(operand.handle())); | 1248 __ sub(Operand(answer.reg()), Immediate(operand->handle())); |
| 1117 } | 1249 } |
| 1250 } else { |
| 1251 operand->ToRegister(); |
| 1252 frame_->Spill(operand->reg()); |
| 1253 deferred = new DeferredInlineSmiSub(this, |
| 1254 smi_value, |
| 1255 overwrite_mode); |
| 1256 __ sub(Operand(operand->reg()), Immediate(value)); |
| 1257 answer = *operand; |
| 1118 } | 1258 } |
| 1119 deferred->enter()->Branch(overflow, &operand, not_taken); | 1259 deferred->enter()->Branch(overflow, operand, not_taken); |
| 1120 __ test(answer.reg(), Immediate(kSmiTagMask)); | 1260 __ test(answer.reg(), Immediate(kSmiTagMask)); |
| 1121 deferred->enter()->Branch(not_zero, &operand, not_taken); | 1261 deferred->enter()->Branch(not_zero, operand, not_taken); |
| 1122 operand.Unuse(); | 1262 operand->Unuse(); |
| 1123 deferred->BindExit(&answer); | 1263 deferred->BindExit(&answer); |
| 1124 frame_->Push(&answer); | 1264 frame_->Push(&answer); |
| 1125 break; | 1265 break; |
| 1126 } | 1266 } |
| 1127 | 1267 |
| 1128 case Token::SAR: { | 1268 case Token::SAR: { |
| 1129 if (reversed) { | 1269 if (reversed) { |
| 1130 Result top = frame_->Pop(); | 1270 Result constant_operand(value, this); |
| 1131 frame_->Push(value); | 1271 LikelySmiBinaryOperation(op, &constant_operand, operand, |
| 1132 frame_->Push(&top); | 1272 overwrite_mode); |
| 1133 GenericBinaryOperation(op, type, overwrite_mode); | |
| 1134 } else { | 1273 } else { |
| 1135 // Only the least significant 5 bits of the shift value are used. | 1274 // Only the least significant 5 bits of the shift value are used. |
| 1136 // In the slow case, this masking is done inside the runtime call. | 1275 // In the slow case, this masking is done inside the runtime call. |
| 1137 int shift_value = int_value & 0x1f; | 1276 int shift_value = int_value & 0x1f; |
| 1138 DeferredCode* deferred = | 1277 DeferredCode* deferred = |
| 1139 new DeferredInlineSmiOperation(this, Token::SAR, smi_value, | 1278 new DeferredInlineSmiOperation(this, Token::SAR, smi_value, |
| 1140 overwrite_mode); | 1279 overwrite_mode); |
| 1141 Result result = frame_->Pop(); | 1280 operand->ToRegister(); |
| 1142 result.ToRegister(); | 1281 __ test(operand->reg(), Immediate(kSmiTagMask)); |
| 1143 __ test(result.reg(), Immediate(kSmiTagMask)); | 1282 deferred->enter()->Branch(not_zero, operand, not_taken); |
| 1144 deferred->enter()->Branch(not_zero, &result, not_taken); | 1283 if (shift_value > 0) { |
| 1145 frame_->Spill(result.reg()); | 1284 frame_->Spill(operand->reg()); |
| 1146 __ sar(result.reg(), shift_value); | 1285 __ sar(operand->reg(), shift_value); |
| 1147 __ and_(result.reg(), ~kSmiTagMask); | 1286 __ and_(operand->reg(), ~kSmiTagMask); |
| 1148 deferred->BindExit(&result); | 1287 } |
| 1149 frame_->Push(&result); | 1288 deferred->BindExit(operand); |
| 1289 frame_->Push(operand); |
| 1150 } | 1290 } |
| 1151 break; | 1291 break; |
| 1152 } | 1292 } |
| 1153 | 1293 |
| 1154 case Token::SHR: { | 1294 case Token::SHR: { |
| 1155 if (reversed) { | 1295 if (reversed) { |
| 1156 Result top = frame_->Pop(); | 1296 Result constant_operand(value, this); |
| 1157 frame_->Push(value); | 1297 LikelySmiBinaryOperation(op, &constant_operand, operand, |
| 1158 frame_->Push(&top); | 1298 overwrite_mode); |
| 1159 GenericBinaryOperation(op, type, overwrite_mode); | |
| 1160 } else { | 1299 } else { |
| 1161 // Only the least significant 5 bits of the shift value are used. | 1300 // Only the least significant 5 bits of the shift value are used. |
| 1162 // In the slow case, this masking is done inside the runtime call. | 1301 // In the slow case, this masking is done inside the runtime call. |
| 1163 int shift_value = int_value & 0x1f; | 1302 int shift_value = int_value & 0x1f; |
| 1164 DeferredCode* deferred = | 1303 DeferredCode* deferred = |
| 1165 new DeferredInlineSmiOperation(this, Token::SHR, smi_value, | 1304 new DeferredInlineSmiOperation(this, Token::SHR, smi_value, |
| 1166 overwrite_mode); | 1305 overwrite_mode); |
| 1167 Result operand = frame_->Pop(); | 1306 operand->ToRegister(); |
| 1168 operand.ToRegister(); | 1307 __ test(operand->reg(), Immediate(kSmiTagMask)); |
| 1169 __ test(operand.reg(), Immediate(kSmiTagMask)); | 1308 deferred->enter()->Branch(not_zero, operand, not_taken); |
| 1170 deferred->enter()->Branch(not_zero, &operand, not_taken); | |
| 1171 Result answer = allocator()->Allocate(); | 1309 Result answer = allocator()->Allocate(); |
| 1172 ASSERT(answer.is_valid()); | 1310 ASSERT(answer.is_valid()); |
| 1173 __ mov(answer.reg(), Operand(operand.reg())); | 1311 __ mov(answer.reg(), operand->reg()); |
| 1174 __ sar(answer.reg(), kSmiTagSize); | 1312 __ sar(answer.reg(), kSmiTagSize); |
| 1175 __ shr(answer.reg(), shift_value); | 1313 __ shr(answer.reg(), shift_value); |
| 1176 // A negative Smi shifted right two is in the positive Smi range. | 1314 // A negative Smi shifted right two is in the positive Smi range. |
| 1177 if (shift_value < 2) { | 1315 if (shift_value < 2) { |
| 1178 __ test(answer.reg(), Immediate(0xc0000000)); | 1316 __ test(answer.reg(), Immediate(0xc0000000)); |
| 1179 deferred->enter()->Branch(not_zero, &operand, not_taken); | 1317 deferred->enter()->Branch(not_zero, operand, not_taken); |
| 1180 } | 1318 } |
| 1181 operand.Unuse(); | 1319 operand->Unuse(); |
| 1182 ASSERT(kSmiTagSize == times_2); // Adjust the code if not true. | 1320 ASSERT(kSmiTagSize == times_2); // Adjust the code if not true. |
| 1183 __ lea(answer.reg(), | 1321 __ lea(answer.reg(), |
| 1184 Operand(answer.reg(), answer.reg(), times_1, kSmiTag)); | 1322 Operand(answer.reg(), answer.reg(), times_1, kSmiTag)); |
| 1185 deferred->BindExit(&answer); | 1323 deferred->BindExit(&answer); |
| 1186 frame_->Push(&answer); | 1324 frame_->Push(&answer); |
| 1187 } | 1325 } |
| 1188 break; | 1326 break; |
| 1189 } | 1327 } |
| 1190 | 1328 |
| 1191 case Token::SHL: { | 1329 case Token::SHL: { |
| 1192 if (reversed) { | 1330 if (reversed) { |
| 1193 Result top = frame_->Pop(); | 1331 Result constant_operand(value, this); |
| 1194 frame_->Push(value); | 1332 LikelySmiBinaryOperation(op, &constant_operand, operand, |
| 1195 frame_->Push(&top); | 1333 overwrite_mode); |
| 1196 GenericBinaryOperation(op, type, overwrite_mode); | |
| 1197 } else { | 1334 } else { |
| 1198 // Only the least significant 5 bits of the shift value are used. | 1335 // Only the least significant 5 bits of the shift value are used. |
| 1199 // In the slow case, this masking is done inside the runtime call. | 1336 // In the slow case, this masking is done inside the runtime call. |
| 1200 int shift_value = int_value & 0x1f; | 1337 int shift_value = int_value & 0x1f; |
| 1201 DeferredCode* deferred = | 1338 DeferredCode* deferred = |
| 1202 new DeferredInlineSmiOperation(this, Token::SHL, smi_value, | 1339 new DeferredInlineSmiOperation(this, Token::SHL, smi_value, |
| 1203 overwrite_mode); | 1340 overwrite_mode); |
| 1204 Result operand = frame_->Pop(); | 1341 operand->ToRegister(); |
| 1205 operand.ToRegister(); | 1342 __ test(operand->reg(), Immediate(kSmiTagMask)); |
| 1206 __ test(operand.reg(), Immediate(kSmiTagMask)); | 1343 deferred->enter()->Branch(not_zero, operand, not_taken); |
| 1207 deferred->enter()->Branch(not_zero, &operand, not_taken); | |
| 1208 Result answer = allocator()->Allocate(); | 1344 Result answer = allocator()->Allocate(); |
| 1209 ASSERT(answer.is_valid()); | 1345 ASSERT(answer.is_valid()); |
| 1210 __ mov(answer.reg(), Operand(operand.reg())); | 1346 __ mov(answer.reg(), operand->reg()); |
| 1211 ASSERT(kSmiTag == 0); // adjust code if not the case | 1347 ASSERT(kSmiTag == 0); // adjust code if not the case |
| 1212 // We do no shifts, only the Smi conversion, if shift_value is 1. | 1348 // We do no shifts, only the Smi conversion, if shift_value is 1. |
| 1213 if (shift_value == 0) { | 1349 if (shift_value == 0) { |
| 1214 __ sar(answer.reg(), kSmiTagSize); | 1350 __ sar(answer.reg(), kSmiTagSize); |
| 1215 } else if (shift_value > 1) { | 1351 } else if (shift_value > 1) { |
| 1216 __ shl(answer.reg(), shift_value - 1); | 1352 __ shl(answer.reg(), shift_value - 1); |
| 1217 } | 1353 } |
| 1218 // Convert int result to Smi, checking that it is in int range. | 1354 // Convert int result to Smi, checking that it is in int range. |
| 1219 ASSERT(kSmiTagSize == times_2); // adjust code if not the case | 1355 ASSERT(kSmiTagSize == times_2); // adjust code if not the case |
| 1220 __ add(answer.reg(), Operand(answer.reg())); | 1356 __ add(answer.reg(), Operand(answer.reg())); |
| 1221 deferred->enter()->Branch(overflow, &operand, not_taken); | 1357 deferred->enter()->Branch(overflow, operand, not_taken); |
| 1222 operand.Unuse(); | 1358 operand->Unuse(); |
| 1223 deferred->BindExit(&answer); | 1359 deferred->BindExit(&answer); |
| 1224 frame_->Push(&answer); | 1360 frame_->Push(&answer); |
| 1225 } | 1361 } |
| 1226 break; | 1362 break; |
| 1227 } | 1363 } |
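The SHL fast path above avoids a full untag/shift/retag sequence: it shifts the still-tagged word by shift_value - 1 (or only untags when the shift is zero) and then doubles it with add, which supplies both the final shift step and the retagging, while the overflow flag on that add reports a result outside smi range. A standalone sketch of the same arithmetic on 32-bit tagged values, with an illustrative helper name:

```cpp
#include <cassert>
#include <cstdint>

// Sketch of the inline SHL-by-constant path on a tagged smi (stored as 2*x).
// Returns false where the generated code branches to the deferred case.
bool InlineShlByConstant(int32_t tagged, int shift_value, int32_t* result) {
  int32_t answer = tagged;                // answer = 2 * x
  if (shift_value == 0) {
    answer >>= 1;                         // untag only: answer = x
  } else if (shift_value > 1) {
    answer <<= (shift_value - 1);         // answer = x << shift_value
  }                                       // (shift_value == 1: nothing to do)
  // The final doubling retags the value; signed overflow here means the
  // result does not fit in a smi.
  int64_t doubled = static_cast<int64_t>(answer) * 2;
  if (doubled != static_cast<int32_t>(doubled)) return false;
  *result = static_cast<int32_t>(doubled);
  return true;
}

int main() {
  int32_t result = 0;
  assert(InlineShlByConstant(3 << 1, 2, &result) && result == (12 << 1));
  assert(InlineShlByConstant(5 << 1, 0, &result) && result == (5 << 1));
  assert(InlineShlByConstant(5 << 1, 1, &result) && result == (10 << 1));
  return 0;
}
```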
| 1228 | 1364 |
| 1229 case Token::BIT_OR: | 1365 case Token::BIT_OR: |
| 1230 case Token::BIT_XOR: | 1366 case Token::BIT_XOR: |
| 1231 case Token::BIT_AND: { | 1367 case Token::BIT_AND: { |
| 1232 DeferredCode* deferred = NULL; | 1368 DeferredCode* deferred = NULL; |
| 1233 if (!reversed) { | 1369 if (reversed) { |
| 1370 deferred = new DeferredInlineSmiOperationReversed(this, op, smi_value, |
| 1371 overwrite_mode); |
| 1372 } else { |
| 1234 deferred = new DeferredInlineSmiOperation(this, op, smi_value, | 1373 deferred = new DeferredInlineSmiOperation(this, op, smi_value, |
| 1235 overwrite_mode); | 1374 overwrite_mode); |
| 1236 } else { | |
| 1237 deferred = new DeferredInlineSmiOperationReversed(this, op, smi_value, | |
| 1238 overwrite_mode); | |
| 1239 } | 1375 } |
| 1240 Result operand = frame_->Pop(); | 1376 operand->ToRegister(); |
| 1241 operand.ToRegister(); | 1377 __ test(operand->reg(), Immediate(kSmiTagMask)); |
| 1242 __ test(operand.reg(), Immediate(kSmiTagMask)); | 1378 deferred->enter()->Branch(not_zero, operand, not_taken); |
| 1243 deferred->enter()->Branch(not_zero, &operand, not_taken); | 1379 frame_->Spill(operand->reg()); |
| 1244 frame_->Spill(operand.reg()); | |
| 1245 if (op == Token::BIT_AND) { | 1380 if (op == Token::BIT_AND) { |
| 1246 if (int_value == 0) { | 1381 if (int_value == 0) { |
| 1247 __ xor_(Operand(operand.reg()), operand.reg()); | 1382 __ xor_(Operand(operand->reg()), operand->reg()); |
| 1248 } else { | 1383 } else { |
| 1249 __ and_(Operand(operand.reg()), Immediate(value)); | 1384 __ and_(Operand(operand->reg()), Immediate(value)); |
| 1250 } | 1385 } |
| 1251 } else if (op == Token::BIT_XOR) { | 1386 } else if (op == Token::BIT_XOR) { |
| 1252 if (int_value != 0) { | 1387 if (int_value != 0) { |
| 1253 __ xor_(Operand(operand.reg()), Immediate(value)); | 1388 __ xor_(Operand(operand->reg()), Immediate(value)); |
| 1254 } | 1389 } |
| 1255 } else { | 1390 } else { |
| 1256 ASSERT(op == Token::BIT_OR); | 1391 ASSERT(op == Token::BIT_OR); |
| 1257 if (int_value != 0) { | 1392 if (int_value != 0) { |
| 1258 __ or_(Operand(operand.reg()), Immediate(value)); | 1393 __ or_(Operand(operand->reg()), Immediate(value)); |
| 1259 } | 1394 } |
| 1260 } | 1395 } |
| 1261 deferred->BindExit(&operand); | 1396 deferred->BindExit(operand); |
| 1262 frame_->Push(&operand); | 1397 frame_->Push(operand); |
| 1263 break; | 1398 break; |
| 1264 } | 1399 } |
| 1265 | 1400 |
| 1266 default: { | 1401 default: { |
| 1267 if (!reversed) { | 1402 Result constant_operand(value, this); |
| 1268 frame_->Push(value); | 1403 if (reversed) { |
| 1404 LikelySmiBinaryOperation(op, &constant_operand, operand, |
| 1405 overwrite_mode); |
| 1269 } else { | 1406 } else { |
| 1270 Result top = frame_->Pop(); | 1407 LikelySmiBinaryOperation(op, operand, &constant_operand, |
| 1271 frame_->Push(value); | 1408 overwrite_mode); |
| 1272 frame_->Push(&top); | |
| 1273 } | 1409 } |
| 1274 GenericBinaryOperation(op, type, overwrite_mode); | |
| 1275 break; | 1410 break; |
| 1276 } | 1411 } |
| 1277 } | 1412 } |
| 1413 ASSERT(!operand->is_valid()); |
| 1278 } | 1414 } |
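The ADD case above is the template for these constant fast paths: because the immediate is itself a tagged smi, the raw 32-bit add both performs the addition and preserves a zero low bit when the other operand really is a smi, so one overflow check plus one tag test covers every failure. A standalone sketch of that reasoning, with an illustrative helper name:

```cpp
#include <cassert>
#include <cstdint>

// Sketch of the ADD-constant fast path. Both the register word and the
// immediate are tagged smis (low bit 0); adding the raw words adds the
// values and keeps a zero low bit. Overflow of the 32-bit add means the
// sum left smi range; a set low bit means the operand was never a smi.
bool InlineAddConstant(int32_t operand_word, int32_t tagged_constant,
                       int32_t* result) {
  int64_t wide = static_cast<int64_t>(operand_word) + tagged_constant;
  int32_t sum = static_cast<int32_t>(wide);
  if (wide != sum) return false;        // overflow -> deferred code
  if ((sum & 1) != 0) return false;     // operand was not a smi -> deferred
  *result = sum;
  return true;
}

int main() {
  int32_t result = 0;
  assert(InlineAddConstant(7 << 1, 5 << 1, &result) && result == (12 << 1));
  assert(!InlineAddConstant(0x1003, 5 << 1, &result));  // heap-pointer-like word
  return 0;
}
```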
| 1279 | 1415 |
| 1280 | 1416 |
| 1281 class CompareStub: public CodeStub { | 1417 class CompareStub: public CodeStub { |
| 1282 public: | 1418 public: |
| 1283 CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { } | 1419 CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { } |
| 1284 | 1420 |
| 1285 void Generate(MacroAssembler* masm); | 1421 void Generate(MacroAssembler* masm); |
| 1286 | 1422 |
| 1287 private: | 1423 private: |
| (...skipping 2436 matching lines...) |
| 3724 // assign the exception value to the catch variable. | 3860 // assign the exception value to the catch variable. |
| 3725 Comment cmnt(masm_, "[ CatchExtensionObject"); | 3861 Comment cmnt(masm_, "[ CatchExtensionObject"); |
| 3726 Load(node->key()); | 3862 Load(node->key()); |
| 3727 Load(node->value()); | 3863 Load(node->value()); |
| 3728 Result result = | 3864 Result result = |
| 3729 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2); | 3865 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2); |
| 3730 frame_->Push(&result); | 3866 frame_->Push(&result); |
| 3731 } | 3867 } |
| 3732 | 3868 |
| 3733 | 3869 |
| 3734 bool CodeGenerator::IsInlineSmi(Literal* literal) { | |
| 3735 if (literal == NULL || !literal->handle()->IsSmi()) return false; | |
| 3736 int int_value = Smi::cast(*literal->handle())->value(); | |
| 3737 return is_intn(int_value, kMaxSmiInlinedBits); | |
| 3738 } | |
| 3739 | |
| 3740 | |
| 3741 void CodeGenerator::VisitAssignment(Assignment* node) { | 3870 void CodeGenerator::VisitAssignment(Assignment* node) { |
| 3742 Comment cmnt(masm_, "[ Assignment"); | 3871 Comment cmnt(masm_, "[ Assignment"); |
| 3743 CodeForStatementPosition(node); | 3872 CodeForStatementPosition(node); |
| 3744 | 3873 |
| 3745 { Reference target(this, node->target()); | 3874 { Reference target(this, node->target()); |
| 3746 if (target.is_illegal()) { | 3875 if (target.is_illegal()) { |
| 3747 // Fool the virtual frame into thinking that we left the assignment's | 3876 // Fool the virtual frame into thinking that we left the assignment's |
| 3748 // value on the frame. | 3877 // value on the frame. |
| 3749 frame_->Push(Smi::FromInt(0)); | 3878 frame_->Push(Smi::FromInt(0)); |
| 3750 return; | 3879 return; |
| (...skipping 24 matching lines...) |
| 3775 // There are two cases where the target is not read in the right hand | 3904 // There are two cases where the target is not read in the right hand |
| 3776 // side, that are easy to test for: the right hand side is a literal, | 3905 // side, that are easy to test for: the right hand side is a literal, |
| 3777 // or the right hand side is a different variable. TakeValue invalidates | 3906 // or the right hand side is a different variable. TakeValue invalidates |
| 3778 // the target, with an implicit promise that it will be written to again | 3907 // the target, with an implicit promise that it will be written to again |
| 3779 // before it is read. | 3908 // before it is read. |
| 3780 if (literal != NULL || (right_var != NULL && right_var != var)) { | 3909 if (literal != NULL || (right_var != NULL && right_var != var)) { |
| 3781 target.TakeValue(NOT_INSIDE_TYPEOF); | 3910 target.TakeValue(NOT_INSIDE_TYPEOF); |
| 3782 } else { | 3911 } else { |
| 3783 target.GetValue(NOT_INSIDE_TYPEOF); | 3912 target.GetValue(NOT_INSIDE_TYPEOF); |
| 3784 } | 3913 } |
| 3785 if (IsInlineSmi(literal)) { | 3914 Load(node->value()); |
| 3786 SmiOperation(node->binary_op(), node->type(), literal->handle(), false, | 3915 GenericBinaryOperation(node->binary_op(), node->type()); |
| 3787 NO_OVERWRITE); | |
| 3788 } else { | |
| 3789 Load(node->value()); | |
| 3790 GenericBinaryOperation(node->binary_op(), node->type()); | |
| 3791 } | |
| 3792 } | 3916 } |
| 3793 | 3917 |
| 3794 if (var != NULL && | 3918 if (var != NULL && |
| 3795 var->mode() == Variable::CONST && | 3919 var->mode() == Variable::CONST && |
| 3796 node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) { | 3920 node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) { |
| 3797 // Assignment ignored - leave the value on the stack. | 3921 // Assignment ignored - leave the value on the stack. |
| 3798 } else { | 3922 } else { |
| 3799 CodeForSourcePosition(node->position()); | 3923 CodeForSourcePosition(node->position()); |
| 3800 if (node->op() == Token::INIT_CONST) { | 3924 if (node->op() == Token::INIT_CONST) { |
| 3801 // Dynamic constant initializations must use the function context | 3925 // Dynamic constant initializations must use the function context |
| (...skipping 1110 matching lines...) |
| 4912 // never return a constant/immutable object. | 5036 // never return a constant/immutable object. |
| 4913 OverwriteMode overwrite_mode = NO_OVERWRITE; | 5037 OverwriteMode overwrite_mode = NO_OVERWRITE; |
| 4914 if (node->left()->AsBinaryOperation() != NULL && | 5038 if (node->left()->AsBinaryOperation() != NULL && |
| 4915 node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) { | 5039 node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) { |
| 4916 overwrite_mode = OVERWRITE_LEFT; | 5040 overwrite_mode = OVERWRITE_LEFT; |
| 4917 } else if (node->right()->AsBinaryOperation() != NULL && | 5041 } else if (node->right()->AsBinaryOperation() != NULL && |
| 4918 node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) { | 5042 node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) { |
| 4919 overwrite_mode = OVERWRITE_RIGHT; | 5043 overwrite_mode = OVERWRITE_RIGHT; |
| 4920 } | 5044 } |
| 4921 | 5045 |
| 4922 // Optimize for the case where (at least) one of the expressions | 5046 Load(node->left()); |
| 4923 // is a literal small integer. | 5047 Load(node->right()); |
| 4924 Literal* lliteral = node->left()->AsLiteral(); | 5048 GenericBinaryOperation(node->op(), node->type(), overwrite_mode); |
| 4925 Literal* rliteral = node->right()->AsLiteral(); | |
| 4926 | |
| 4927 if (IsInlineSmi(rliteral)) { | |
| 4928 Load(node->left()); | |
| 4929 SmiOperation(node->op(), node->type(), rliteral->handle(), false, | |
| 4930 overwrite_mode); | |
| 4931 } else if (IsInlineSmi(lliteral)) { | |
| 4932 Load(node->right()); | |
| 4933 SmiOperation(node->op(), node->type(), lliteral->handle(), true, | |
| 4934 overwrite_mode); | |
| 4935 } else { | |
| 4936 Load(node->left()); | |
| 4937 Load(node->right()); | |
| 4938 GenericBinaryOperation(node->op(), node->type(), overwrite_mode); | |
| 4939 } | |
| 4940 } | 5049 } |
| 4941 } | 5050 } |
| 4942 | 5051 |
| 4943 | 5052 |
| 4944 void CodeGenerator::VisitThisFunction(ThisFunction* node) { | 5053 void CodeGenerator::VisitThisFunction(ThisFunction* node) { |
| 4945 frame_->PushFunction(); | 5054 frame_->PushFunction(); |
| 4946 } | 5055 } |
| 4947 | 5056 |
| 4948 | 5057 |
| 4949 class InstanceofStub: public CodeStub { | 5058 class InstanceofStub: public CodeStub { |
| (...skipping 511 matching lines...) |
| 5461 __ ret(1 * kPointerSize); | 5570 __ ret(1 * kPointerSize); |
| 5462 __ bind(&false_result); | 5571 __ bind(&false_result); |
| 5463 __ mov(eax, 0); | 5572 __ mov(eax, 0); |
| 5464 __ ret(1 * kPointerSize); | 5573 __ ret(1 * kPointerSize); |
| 5465 } | 5574 } |
| 5466 | 5575 |
| 5467 | 5576 |
| 5468 #undef __ | 5577 #undef __ |
| 5469 #define __ masm_-> | 5578 #define __ masm_-> |
| 5470 | 5579 |
| 5471 Result DeferredInlineBinaryOperation::GenerateInlineCode() { | 5580 Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left, |
| 5581 Result* right) { |
| 5472 // Perform fast-case smi code for the operation (left <op> right) and | 5582 // Performs fast-case smi code for the operation (left <op> right) and |
| 5473 // returns the result in a Result. | 5583 // returns the result in a Result. |
| 5474 // If any fast-case tests fail, it jumps to the slow-case deferred code, | 5584 // If any fast-case tests fail, it jumps to the slow-case deferred code, |
| 5475 // which calls the binary operation stub, with the arguments (in registers) | 5585 // which calls the binary operation stub, with the arguments (in registers) |
| 5476 // on top of the frame. | 5586 // on top of the frame. |
| 5587 // Consumes its arguments (sets left and right to invalid and frees their |
| 5588 // registers). |
| 5477 | 5589 |
| 5478 VirtualFrame* frame = generator()->frame(); | 5590 left->ToRegister(); |
| 5479 // If operation is division or modulus, ensure | 5591 right->ToRegister(); |
| 5480 // that the special registers needed are free. | 5592 // A newly allocated register answer is used to hold the answer. |
| 5481 Result reg_eax(generator()); // Valid only if op is DIV or MOD. | 5593 // The registers containing left and right are not modified in |
| 5482 Result reg_edx(generator()); // Valid only if op is DIV or MOD. | 5594 // most cases, so they usually don't need to be spilled in the fast case. |
| 5483 if (op_ == Token::DIV || op_ == Token::MOD) { | 5595 Result answer = generator()->allocator()->Allocate(); |
| 5484 reg_eax = generator()->allocator()->Allocate(eax); | |
| 5485 ASSERT(reg_eax.is_valid()); | |
| 5486 reg_edx = generator()->allocator()->Allocate(edx); | |
| 5487 ASSERT(reg_edx.is_valid()); | |
| 5488 } | |
| 5489 | 5596 |
| 5490 Result right = frame->Pop(); | |
| 5491 Result left = frame->Pop(); | |
| 5492 left.ToRegister(); | |
| 5493 right.ToRegister(); | |
| 5494 // Answer is used to compute the answer, leaving left and right unchanged. | |
| 5495 // It is also returned from this function. | |
| 5496 // It is used as a temporary register in a few places, as well. | |
| 5497 Result answer(generator()); | |
| 5498 if (reg_eax.is_valid()) { | |
| 5499 answer = reg_eax; | |
| 5500 } else { | |
| 5501 answer = generator()->allocator()->Allocate(); | |
| 5502 } | |
| 5503 ASSERT(answer.is_valid()); | 5597 ASSERT(answer.is_valid()); |
| 5504 // Perform the smi check. | 5598 // Perform the smi check. |
| 5505 __ mov(answer.reg(), Operand(left.reg())); | 5599 __ mov(answer.reg(), left->reg()); |
| 5506 __ or_(answer.reg(), Operand(right.reg())); | 5600 __ or_(answer.reg(), Operand(right->reg())); |
| 5507 ASSERT(kSmiTag == 0); // adjust zero check if not the case | 5601 ASSERT(kSmiTag == 0); // adjust zero check if not the case |
| 5508 __ test(answer.reg(), Immediate(kSmiTagMask)); | 5602 __ test(answer.reg(), Immediate(kSmiTagMask)); |
| 5509 enter()->Branch(not_zero, &left, &right, not_taken); | 5603 enter()->Branch(not_zero, left, right, not_taken); |
| 5510 | 5604 |
| 5511 // All operations start by copying the left argument into answer. | 5605 // All operations start by copying the left argument into answer. |
| 5512 __ mov(answer.reg(), Operand(left.reg())); | 5606 __ mov(answer.reg(), left->reg()); |
| 5513 switch (op_) { | 5607 switch (op_) { |
| 5514 case Token::ADD: | 5608 case Token::ADD: |
| 5515 __ add(answer.reg(), Operand(right.reg())); // add optimistically | 5609 __ add(answer.reg(), Operand(right->reg())); // add optimistically |
| 5516 enter()->Branch(overflow, &left, &right, not_taken); | 5610 enter()->Branch(overflow, left, right, not_taken); |
| 5517 break; | 5611 break; |
| 5518 | 5612 |
| 5519 case Token::SUB: | 5613 case Token::SUB: |
| 5520 __ sub(answer.reg(), Operand(right.reg())); // subtract optimistically | 5614 __ sub(answer.reg(), Operand(right->reg())); // subtract optimistically |
| 5521 enter()->Branch(overflow, &left, &right, not_taken); | 5615 enter()->Branch(overflow, left, right, not_taken); |
| 5522 break; | 5616 break; |
| 5523 | 5617 |
| 5524 | |
| 5525 case Token::MUL: { | 5618 case Token::MUL: { |
| 5526 // If the smi tag is 0 we can just leave the tag on one operand. | 5619 // If the smi tag is 0 we can just leave the tag on one operand. |
| 5527 ASSERT(kSmiTag == 0); // adjust code below if not the case | 5620 ASSERT(kSmiTag == 0); // adjust code below if not the case |
| 5528 // Remove tag from the left operand (but keep sign). | 5621 // Remove tag from the left operand (but keep sign). |
| 5529 // Left hand operand has been copied into answer. | 5622 // Left hand operand has been copied into answer. |
| 5530 __ sar(answer.reg(), kSmiTagSize); | 5623 __ sar(answer.reg(), kSmiTagSize); |
| 5531 // Do multiplication of smis, leaving result in answer. | 5624 // Do multiplication of smis, leaving result in answer. |
| 5532 __ imul(answer.reg(), Operand(right.reg())); | 5625 __ imul(answer.reg(), Operand(right->reg())); |
| 5533 // Go slow on overflows. | 5626 // Go slow on overflows. |
| 5534 enter()->Branch(overflow, &left, &right, not_taken); | 5627 enter()->Branch(overflow, left, right, not_taken); |
| 5535 // Check for negative zero result. If product is zero, | 5628 // Check for negative zero result. If product is zero, |
| 5536 // and one argument is negative, go to slow case. | 5629 // and one argument is negative, go to slow case. |
| 5537 // The frame is unchanged in this block, so local control flow can | 5630 // The frame is unchanged in this block, so local control flow can |
| 5538 // use a Label rather than a JumpTarget. | 5631 // use a Label rather than a JumpTarget. |
| 5539 Label non_zero_result; | 5632 Label non_zero_result; |
| 5540 __ test(answer.reg(), Operand(answer.reg())); | 5633 __ test(answer.reg(), Operand(answer.reg())); |
| 5541 __ j(not_zero, &non_zero_result, taken); | 5634 __ j(not_zero, &non_zero_result, taken); |
| 5542 __ mov(answer.reg(), Operand(left.reg())); | 5635 __ mov(answer.reg(), left->reg()); |
| 5543 __ or_(answer.reg(), Operand(right.reg())); | 5636 __ or_(answer.reg(), Operand(right->reg())); |
| 5544 enter()->Branch(negative, &left, &right, not_taken); | 5637 enter()->Branch(negative, left, right, not_taken); |
| 5545 __ xor_(answer.reg(), Operand(answer.reg())); // Positive 0 is correct. | 5638 __ xor_(answer.reg(), Operand(answer.reg())); // Positive 0 is correct. |
| 5546 __ bind(&non_zero_result); | 5639 __ bind(&non_zero_result); |
| 5547 break; | 5640 break; |
| 5548 } | 5641 } |
| 5549 | 5642 |
| 5550 case Token::DIV: { | 5643 case Token::DIV: // Fall through. |
| 5551 // Left hand argument has been copied into answer, which is eax. | 5644 case Token::MOD: { |
| 5645 // Div and mod use the registers eax and edx. Left and right must |
| 5646 // be preserved, because the original operands are needed if we switch |
| 5647 // to the slow case. Move them if either is in eax or edx. |
| 5648 // The Result answer should be changed into an alias for eax. |
| 5649 // Precondition: |
| 5650 // The Results left and right are valid. They may be the same register, |
| 5651 // and may be unspilled. The Result answer is valid and is distinct |
| 5652 // from left and right, and is spilled. |
| 5653 // The value in left is copied to answer. |
| 5654 |
| 5655 Result reg_eax = generator()->allocator()->Allocate(eax); |
| 5656 Result reg_edx = generator()->allocator()->Allocate(edx); |
| 5657 // These allocations may have failed, if one of left, right, or answer |
| 5658 // is in register eax or edx. |
| 5659 bool left_copied_to_eax = false; // We will make sure this becomes true. |
| 5660 |
| 5661 // Part 1: Get eax |
| 5662 if (answer.reg().is(eax)) { |
| 5663 reg_eax = answer; |
| 5664 left_copied_to_eax = true; |
| 5665 } else if (right->reg().is(eax) || left->reg().is(eax)) { |
| 5666 // We need a non-edx register to move one or both of left and right to. |
| 5667 // We use answer if it is not edx, otherwise we allocate one. |
| 5668 if (answer.reg().is(edx)) { |
| 5669 reg_edx = answer; |
| 5670 answer = generator()->allocator()->Allocate(); |
| 5671 ASSERT(answer.is_valid()); |
| 5672 } |
| 5673 |
| 5674 if (left->reg().is(eax)) { |
| 5675 reg_eax = *left; |
| 5676 left_copied_to_eax = true; |
| 5677 *left = answer; |
| 5678 } |
| 5679 if (right->reg().is(eax)) { |
| 5680 reg_eax = *right; |
| 5681 *right = answer; |
| 5682 } |
| 5683 __ mov(answer.reg(), eax); |
| 5684 } |
| 5685 // End of Part 1. |
| 5686 // reg_eax is valid, and neither left nor right is in eax. |
| 5687 ASSERT(reg_eax.is_valid()); |
| 5688 ASSERT(!left->reg().is(eax)); |
| 5689 ASSERT(!right->reg().is(eax)); |
| 5690 |
| 5691 // Part 2: Get edx |
| 5692 // reg_edx is invalid if and only if either left, right, |
| 5693 // or answer is in edx. If edx is valid, then either edx |
| 5694 // was free, or it was answer, but answer was reallocated. |
| 5695 if (answer.reg().is(edx)) { |
| 5696 reg_edx = answer; |
| 5697 } else if (right->reg().is(edx) || left->reg().is(edx)) { |
| 5698 // Is answer used? |
| 5699 if (answer.reg().is(eax) || answer.reg().is(left->reg()) || |
| 5700 answer.reg().is(right->reg())) { |
| 5701 answer = generator()->allocator()->Allocate(); |
| 5702 ASSERT(answer.is_valid()); // We cannot hit both Allocate() calls. |
| 5703 } |
| 5704 if (left->reg().is(edx)) { |
| 5705 reg_edx = *left; |
| 5706 *left = answer; |
| 5707 } |
| 5708 if (right->reg().is(edx)) { |
| 5709 reg_edx = *right; |
| 5710 *right = answer; |
| 5711 } |
| 5712 __ mov(answer.reg(), edx); |
| 5713 } |
| 5714 // End of Part 2 |
| 5715 ASSERT(reg_edx.is_valid()); |
| 5716 ASSERT(!left->reg().is(eax)); |
| 5717 ASSERT(!right->reg().is(eax)); |
| 5718 |
| 5719 answer = reg_eax; // May free answer, if it was never used. |
| 5720 generator()->frame()->Spill(eax); |
| 5721 if (!left_copied_to_eax) { |
| 5722 __ mov(eax, left->reg()); |
| 5723 left_copied_to_eax = true; |
| 5724 } |
| 5725 generator()->frame()->Spill(edx); |
| 5726 |
| 5727 // Postcondition: |
| 5728 // reg_eax, reg_edx are valid, correct, and spilled. |
| 5729 // reg_eax contains the value originally in left |
| 5730 // left and right are not eax or edx. They may or may not be |
| 5731 // spilled or distinct. |
| 5732 // answer is an alias for reg_eax. |
| 5733 |
| 5552 // Sign extend eax into edx:eax. | 5734 // Sign extend eax into edx:eax. |
| 5553 __ cdq(); | 5735 __ cdq(); |
| 5554 // Check for 0 divisor. | 5736 // Check for 0 divisor. |
| 5555 __ test(right.reg(), Operand(right.reg())); | 5737 __ test(right->reg(), Operand(right->reg())); |
| 5556 enter()->Branch(zero, &left, &right, not_taken); | 5738 enter()->Branch(zero, left, right, not_taken); |
| 5557 // Divide edx:eax by ebx. | 5739 // Divide edx:eax by the right operand. |
| 5558 __ idiv(right.reg()); | 5740 __ idiv(right->reg()); |
| 5559 // Check for negative zero result. If result is zero, and divisor | 5741 if (op_ == Token::DIV) { |
| 5560 // is negative, return a floating point negative zero. | 5742 // Check for negative zero result. If result is zero, and divisor |
| 5561 // The frame is unchanged in this block, so local control flow can | 5743 // is negative, return a floating point negative zero. |
| 5562 // use a Label rather than a JumpTarget. | 5744 // The frame is unchanged in this block, so local control flow can |
| 5563 Label non_zero_result; | 5745 // use a Label rather than a JumpTarget. |
| 5564 __ test(left.reg(), Operand(left.reg())); | 5746 Label non_zero_result; |
| 5565 __ j(not_zero, &non_zero_result, taken); | 5747 __ test(left->reg(), Operand(left->reg())); |
| 5566 __ test(right.reg(), Operand(right.reg())); | 5748 __ j(not_zero, &non_zero_result, taken); |
| 5567 enter()->Branch(negative, &left, &right, not_taken); | 5749 __ test(right->reg(), Operand(right->reg())); |
| 5568 __ bind(&non_zero_result); | 5750 enter()->Branch(negative, left, right, not_taken); |
| 5569 // Check for the corner case of dividing the most negative smi | 5751 __ bind(&non_zero_result); |
| 5570 // by -1. We cannot use the overflow flag, since it is not set | 5752 // Check for the corner case of dividing the most negative smi |
| 5571 // by idiv instruction. | 5753 // by -1. We cannot use the overflow flag, since it is not set |
| 5572 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); | 5754 // by idiv instruction. |
| 5573 __ cmp(reg_eax.reg(), 0x40000000); | 5755 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); |
| 5574 enter()->Branch(equal, &left, &right, not_taken); | 5756 __ cmp(eax, 0x40000000); |
| 5575 // Check that the remainder is zero. | 5757 enter()->Branch(equal, left, right, not_taken); |
| 5576 __ test(reg_edx.reg(), Operand(reg_edx.reg())); | 5758 // Check that the remainder is zero. |
| 5577 enter()->Branch(not_zero, &left, &right, not_taken); | 5759 __ test(edx, Operand(edx)); |
| 5578 // Tag the result and store it in register temp. | 5760 enter()->Branch(not_zero, left, right, not_taken); |
| 5579 ASSERT(kSmiTagSize == times_2); // adjust code if not the case | 5761 // Tag the result and store it in register temp. |
| 5580 __ lea(answer.reg(), Operand(eax, eax, times_1, kSmiTag)); | 5762 ASSERT(kSmiTagSize == times_2); // adjust code if not the case |
| 5763 __ lea(answer.reg(), Operand(eax, eax, times_1, kSmiTag)); |
| 5764 } else { |
| 5765 ASSERT(op_ == Token::MOD); |
| 5766 // Check for a negative zero result. If the result is zero, and the |
| 5767 // dividend is negative, return a floating point negative zero. |
| 5768 // The frame is unchanged in this block, so local control flow can |
| 5769 // use a Label rather than a JumpTarget. |
| 5770 Label non_zero_result; |
| 5771 __ test(edx, Operand(edx)); |
| 5772 __ j(not_zero, &non_zero_result, taken); |
| 5773 __ test(left->reg(), Operand(left->reg())); |
| 5774 enter()->Branch(negative, left, right, not_taken); |
| 5775 __ bind(&non_zero_result); |
| 5776 // The answer is in edx. |
| 5777 answer = reg_edx; |
| 5778 } |
| 5581 break; | 5779 break; |
| 5582 } | 5780 } |
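Smi division has more bail-out conditions than a zero divisor, and the DIV half of the merged case above checks each of them. The helper below is an illustrative standalone model (not V8 code) of when the inline path may keep its result and when it must fall through to the generic stub, assuming 31-bit smi values:

```cpp
#include <cassert>
#include <cstdint>

const int32_t kSmiMinValue = -(1 << 30);   // 31-bit smi range, as on ia32

// Returns true and sets *result when an integer division of two smi values
// can complete inline; returns false for the cases the generated code sends
// to the deferred slow path.
bool TryInlineSmiDiv(int32_t left, int32_t right, int32_t* result) {
  if (right == 0) return false;                 // division by zero
  if (left == 0 && right < 0) return false;     // result would be -0
  // Dividing the most negative smi by -1 yields 2^30, one past the largest
  // smi; this is the cmp(eax, 0x40000000) check in the generated code.
  if (left == kSmiMinValue && right == -1) return false;
  if (left % right != 0) return false;          // non-zero remainder
  *result = left / right;
  return true;
}

int main() {
  int32_t r = 0;
  assert(TryInlineSmiDiv(12, 3, &r) && r == 4);
  assert(!TryInlineSmiDiv(7, 2, &r));            // remainder -> slow path
  assert(!TryInlineSmiDiv(0, -5, &r));           // -0 -> slow path
  assert(!TryInlineSmiDiv(kSmiMinValue, -1, &r));
  return 0;
}
```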
| 5583 | |
| 5584 case Token::MOD: { | |
| 5585 // Left hand argument has been copied into answer, which is eax. | |
| 5586 // Sign extend eax into edx:eax. | |
| 5587 __ cdq(); | |
| 5588 // Check for 0 divisor. | |
| 5589 __ test(right.reg(), Operand(right.reg())); | |
| 5590 enter()->Branch(zero, &left, &right, not_taken); | |
| 5591 | |
| 5592 // Divide edx:eax by ebx. | |
| 5593 __ idiv(right.reg()); | |
| 5594 // Check for negative zero result. If result is zero, and divisor | |
| 5595 // is negative, return a floating point negative zero. | |
| 5596 // The frame is unchanged in this block, so local control flow can | |
| 5597 // use a Label rather than a JumpTarget. | |
| 5598 Label non_zero_result; | |
| 5599 __ test(reg_edx.reg(), Operand(reg_edx.reg())); | |
| 5600 __ j(not_zero, &non_zero_result, taken); | |
| 5601 __ test(left.reg(), Operand(left.reg())); | |
| 5602 enter()->Branch(negative, &left, &right, not_taken); | |
| 5603 __ bind(&non_zero_result); | |
| 5604 // The answer is in edx. | |
| 5605 answer = reg_edx; | |
| 5606 break; | |
| 5607 } | |
| 5608 | |
| 5609 case Token::BIT_OR: | 5781 case Token::BIT_OR: |
| 5610 __ or_(answer.reg(), Operand(right.reg())); | 5782 __ or_(answer.reg(), Operand(right->reg())); |
| 5611 break; | 5783 break; |
| 5612 | 5784 |
| 5613 case Token::BIT_AND: | 5785 case Token::BIT_AND: |
| 5614 __ and_(answer.reg(), Operand(right.reg())); | 5786 __ and_(answer.reg(), Operand(right->reg())); |
| 5615 break; | 5787 break; |
| 5616 | 5788 |
| 5617 case Token::BIT_XOR: | 5789 case Token::BIT_XOR: |
| 5618 __ xor_(answer.reg(), Operand(right.reg())); | 5790 __ xor_(answer.reg(), Operand(right->reg())); |
| 5619 break; | 5791 break; |
| 5620 | 5792 |
| 5621 case Token::SHL: | 5793 case Token::SHL: |
| 5622 case Token::SHR: | 5794 case Token::SHR: |
| 5623 case Token::SAR: | 5795 case Token::SAR: |
| 5624 // Move right into ecx. | 5796 // Move right into ecx. |
| 5625 // Left is in two registers already, so even if left or answer is ecx, | 5797 // Left is in two registers already, so even if left or answer is ecx, |
| 5626 // we can move right to it, and use the other one. | 5798 // we can move right to it, and use the other one. |
| 5627 // Right operand must be in register cl because x86 likes it that way. | 5799 // Right operand must be in register cl because x86 likes it that way. |
| 5628 if (right.reg().is(ecx)) { | 5800 if (right->reg().is(ecx)) { |
| 5629 // Right is already in the right place. Left may be in the | 5801 // Right is already in the right place. Left may be in the |
| 5630 // same register, which causes problems. Use answer instead. | 5802 // same register, which causes problems. Use answer instead. |
| 5631 if (left.reg().is(ecx)) { | 5803 if (left->reg().is(ecx)) { |
| 5632 left = answer; | 5804 *left = answer; |
| 5633 } | 5805 } |
| 5634 } else if (left.reg().is(ecx)) { | 5806 } else if (left->reg().is(ecx)) { |
| 5635 generator()->frame()->Spill(left.reg()); | 5807 generator()->frame()->Spill(left->reg()); |
| 5636 __ mov(left.reg(), Operand(right.reg())); | 5808 __ mov(left->reg(), right->reg()); |
| 5637 right = left; | 5809 *right = *left; |
| 5638 left = answer; // Use copy of left in answer as left. | 5810 *left = answer; // Use copy of left in answer as left. |
| 5639 } else if (answer.reg().is(ecx)) { | 5811 } else if (answer.reg().is(ecx)) { |
| 5640 __ mov(answer.reg(), Operand(right.reg())); | 5812 __ mov(answer.reg(), right->reg()); |
| 5641 right = answer; | 5813 *right = answer; |
| 5642 } else { | 5814 } else { |
| 5643 Result reg_ecx = generator()->allocator()->Allocate(ecx); | 5815 Result reg_ecx = generator()->allocator()->Allocate(ecx); |
| 5644 ASSERT(reg_ecx.is_valid()); | 5816 ASSERT(reg_ecx.is_valid()); |
| 5645 __ mov(reg_ecx.reg(), Operand(right.reg())); | 5817 __ mov(ecx, right->reg()); |
| 5646 right = reg_ecx; | 5818 *right = reg_ecx; |
| 5647 } | 5819 } |
| 5648 ASSERT(left.reg().is_valid()); | 5820 ASSERT(left->reg().is_valid()); |
| 5649 ASSERT(!left.reg().is(ecx)); | 5821 ASSERT(!left->reg().is(ecx)); |
| 5650 ASSERT(right.reg().is(ecx)); | 5822 ASSERT(right->reg().is(ecx)); |
| 5651 answer.Unuse(); // Answer may now be in use as left or right. | 5823 answer.Unuse(); // Answer may now be in use as left or right. |
| 5652 // We will modify left and right, which we do not do in any other | 5824 // We will modify left and right, which we do not do in any other |
| 5653 // binary operation. The exits to slow code need to restore the | 5825 // binary operation. The exits to slow code need to restore the |
| 5654 // original values of left and right, or at least values that give | 5826 // original values of left and right, or at least values that give |
| 5655 // the same answer. | 5827 // the same answer. |
| 5656 | 5828 |
| 5657 // We are modifying left and right. They must be spilled! | 5829 // We are modifying left and right. They must be spilled! |
| 5658 generator()->frame()->Spill(left.reg()); | 5830 generator()->frame()->Spill(left->reg()); |
| 5659 generator()->frame()->Spill(right.reg()); | 5831 generator()->frame()->Spill(right->reg()); |
| 5660 | 5832 |
| 5661 // Remove tags from operands (but keep sign). | 5833 // Remove tags from operands (but keep sign). |
| 5662 __ sar(left.reg(), kSmiTagSize); | 5834 __ sar(left->reg(), kSmiTagSize); |
| 5663 __ sar(ecx, kSmiTagSize); | 5835 __ sar(ecx, kSmiTagSize); |
| 5664 // Perform the operation. | 5836 // Perform the operation. |
| 5665 switch (op_) { | 5837 switch (op_) { |
| 5666 case Token::SAR: | 5838 case Token::SAR: |
| 5667 __ sar(left.reg()); | 5839 __ sar(left->reg()); |
| 5668 // No checks of result necessary | 5840 // No checks of result necessary |
| 5669 break; | 5841 break; |
| 5670 case Token::SHR: { | 5842 case Token::SHR: { |
| 5671 __ shr(left.reg()); | 5843 __ shr(left->reg()); |
| 5672 // Check that the *unsigned* result fits in a smi. | 5844 // Check that the *unsigned* result fits in a smi. |
| 5673 // Neither of the two high-order bits can be set: | 5845 // Neither of the two high-order bits can be set: |
| 5674 // - 0x80000000: high bit would be lost when smi tagging. | 5846 // - 0x80000000: high bit would be lost when smi tagging. |
| 5675 // - 0x40000000: this number would convert to negative when | 5847 // - 0x40000000: this number would convert to negative when |
| 5676 // Smi tagging. These two cases can only happen with shifts | 5848 // Smi tagging. These two cases can only happen with shifts |
| 5677 // by 0 or 1 when handed a valid smi. | 5849 // by 0 or 1 when handed a valid smi. |
| 5678 // If the answer cannot be represented by a SMI, restore | 5850 // If the answer cannot be represented by a SMI, restore |
| 5679 // the left and right arguments, and jump to slow case. | 5851 // the left and right arguments, and jump to slow case. |
| 5680 // The low bit of the left argument may be lost, but only | 5852 // The low bit of the left argument may be lost, but only |
| 5681 // in a case where it is dropped anyway. | 5853 // in a case where it is dropped anyway. |
| 5682 JumpTarget result_ok(generator()); | 5854 JumpTarget result_ok(generator()); |
| 5683 __ test(left.reg(), Immediate(0xc0000000)); | 5855 __ test(left->reg(), Immediate(0xc0000000)); |
| 5684 result_ok.Branch(zero, &left, &right, taken); | 5856 result_ok.Branch(zero, left, taken); |
| 5685 __ shl(left.reg()); | 5857 __ shl(left->reg()); |
| 5686 ASSERT(kSmiTag == 0); | 5858 ASSERT(kSmiTag == 0); |
| 5687 __ shl(left.reg(), kSmiTagSize); | 5859 __ shl(left->reg(), kSmiTagSize); |
| 5688 __ shl(right.reg(), kSmiTagSize); | 5860 __ shl(right->reg(), kSmiTagSize); |
| 5689 enter()->Jump(&left, &right); | 5861 enter()->Jump(left, right); |
| 5690 result_ok.Bind(&left, &right); | 5862 result_ok.Bind(left); |
| 5691 break; | 5863 break; |
| 5692 } | 5864 } |
| 5693 case Token::SHL: { | 5865 case Token::SHL: { |
| 5694 __ shl(left.reg()); | 5866 __ shl(left->reg()); |
| 5695 // Check that the *signed* result fits in a smi. | 5867 // Check that the *signed* result fits in a smi. |
| 5696 // | 5868 // |
| 5697 // TODO(207): Can reduce registers from 4 to 3 by | 5869 // TODO(207): Can reduce registers from 4 to 3 by |
| 5698 // preallocating ecx. | 5870 // preallocating ecx. |
| 5699 JumpTarget result_ok(generator()); | 5871 JumpTarget result_ok(generator()); |
| 5700 Result smi_test_reg = generator()->allocator()->Allocate(); | 5872 Result smi_test_reg = generator()->allocator()->Allocate(); |
| 5701 ASSERT(smi_test_reg.is_valid()); | 5873 ASSERT(smi_test_reg.is_valid()); |
| 5702 __ lea(smi_test_reg.reg(), Operand(left.reg(), 0x40000000)); | 5874 __ lea(smi_test_reg.reg(), Operand(left->reg(), 0x40000000)); |
| 5703 __ test(smi_test_reg.reg(), Immediate(0x80000000)); | 5875 __ test(smi_test_reg.reg(), Immediate(0x80000000)); |
| 5704 smi_test_reg.Unuse(); | 5876 smi_test_reg.Unuse(); |
| 5705 result_ok.Branch(zero, &left, &right, taken); | 5877 result_ok.Branch(zero, left, taken); |
| 5706 __ shr(left.reg()); | 5878 __ shr(left->reg()); |
| 5707 ASSERT(kSmiTag == 0); | 5879 ASSERT(kSmiTag == 0); |
| 5708 __ shl(left.reg(), kSmiTagSize); | 5880 __ shl(left->reg(), kSmiTagSize); |
| 5709 __ shl(right.reg(), kSmiTagSize); | 5881 __ shl(right->reg(), kSmiTagSize); |
| 5710 enter()->Jump(&left, &right); | 5882 enter()->Jump(left, right); |
| 5711 result_ok.Bind(&left, &right); | 5883 result_ok.Bind(left); |
| 5712 break; | 5884 break; |
| 5713 } | 5885 } |
| 5714 default: | 5886 default: |
| 5715 UNREACHABLE(); | 5887 UNREACHABLE(); |
| 5716 } | 5888 } |
| 5717 // Smi-tag the result, in left, and make answer an alias for left. | 5889 // Smi-tag the result, in left, and make answer an alias for left. |
| 5718 answer = left; | 5890 answer = *left; |
| 5719 answer.ToRegister(); | 5891 answer.ToRegister(); |
| 5720 ASSERT(kSmiTagSize == times_2); // adjust code if not the case | 5892 ASSERT(kSmiTagSize == times_2); // adjust code if not the case |
| 5721 __ lea(answer.reg(), | 5893 __ lea(answer.reg(), |
| 5722 Operand(answer.reg(), answer.reg(), times_1, kSmiTag)); | 5894 Operand(answer.reg(), answer.reg(), times_1, kSmiTag)); |
| 5723 break; | 5895 break; |
| 5724 | 5896 |
| 5725 default: | 5897 default: |
| 5726 UNREACHABLE(); | 5898 UNREACHABLE(); |
| 5727 break; | 5899 break; |
| 5728 } | 5900 } |
| 5901 left->Unuse(); |
| 5902 right->Unuse(); |
| 5729 return answer; | 5903 return answer; |
| 5730 } | 5904 } |
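For the shift cases the operands are untagged first (arithmetic shift right by kSmiTagSize), the count is forced into cl, and the untagged result has to be checked against the 31-bit smi range before the final lea re-tags it. The two range tests used above amount to the following small sketch (assumes the 2 * value smi encoding; the helper names are invented for this note and are not part of the CL):

    #include <stdint.h>

    // SHR: the unsigned result may not have either of its two top bits set.
    // 0x80000000 would lose its high bit when tagged and 0x40000000 would turn
    // negative, so a single test against 0xc0000000 catches both.
    static bool ShrResultFitsSmi(uint32_t result) {
      return (result & 0xc0000000u) == 0;
    }

    // SHL: the signed result must fit in 31 bits. The lea adds 0x40000000,
    // which maps the valid range [-0x40000000, 0x3fffffff] onto values whose
    // sign bit is clear; the test against 0x80000000 then checks that bit.
    static bool ShlResultFitsSmi(int32_t result) {
      return ((static_cast<uint32_t>(result) + 0x40000000u) & 0x80000000u) == 0;
    }

    // Re-tagging is just a doubling, which
    //   lea(answer.reg(), Operand(answer.reg(), answer.reg(), times_1, kSmiTag))
    // performs in one instruction without clobbering the flags.
    static int32_t SmiTag(int32_t untagged) { return untagged * 2; }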
| 5731 | 5905 |
| 5732 | 5906 |
| 5733 #undef __ | 5907 #undef __ |
| 5734 #define __ masm-> | 5908 #define __ masm-> |
| 5735 | 5909 |
| 5736 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { | 5910 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { |
| 5737 // Perform fast-case smi code for the operation (eax <op> ebx) and | 5911 // Perform fast-case smi code for the operation (eax <op> ebx) and |
| 5738 // leave result in register eax. | 5912 // leave result in register eax. |
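The stub body that follows is in the elided lines; as a general illustration of the smi fast case it emits (not the stub's actual code), addition with the 2 * value encoding can operate on the tagged values directly, with the overflow flag deciding whether to jump to the slow label. A hedged sketch, with an invented helper name and false standing for the jump to slow:

    #include <stdint.h>

    // Assumes the 2 * value smi encoding.
    static bool FastSmiAdd(int32_t left_tagged, int32_t right_tagged,
                           int32_t* result) {
      int64_t sum = static_cast<int64_t>(left_tagged) + right_tagged;   // add; jo
      if (sum < INT32_MIN || sum > INT32_MAX) return false;   // overflow: slow case
      *result = static_cast<int32_t>(sum);   // 2a + 2b == 2 * (a + b): still tagged
      return true;
    }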
| (...skipping 1181 matching lines...) |
| 6920 | 7094 |
| 6921 // Slow-case: Go through the JavaScript implementation. | 7095 // Slow-case: Go through the JavaScript implementation. |
| 6922 __ bind(&slow); | 7096 __ bind(&slow); |
| 6923 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); | 7097 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); |
| 6924 } | 7098 } |
| 6925 | 7099 |
| 6926 | 7100 |
| 6927 #undef __ | 7101 #undef __ |
| 6928 | 7102 |
| 6929 } } // namespace v8::internal | 7103 } } // namespace v8::internal |