OLD | NEW |
1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 300 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
311 loop_nesting_ -= fun->loop_nesting(); | 311 loop_nesting_ -= fun->loop_nesting(); |
312 | 312 |
313 // Code generation state must be reset. | 313 // Code generation state must be reset. |
314 ASSERT(state_ == NULL); | 314 ASSERT(state_ == NULL); |
315 ASSERT(loop_nesting() == 0); | 315 ASSERT(loop_nesting() == 0); |
316 ASSERT(!function_return_is_shadowed_); | 316 ASSERT(!function_return_is_shadowed_); |
317 function_return_.Unuse(); | 317 function_return_.Unuse(); |
318 DeleteFrame(); | 318 DeleteFrame(); |
319 | 319 |
320 // Process any deferred code using the register allocator. | 320 // Process any deferred code using the register allocator. |
321 if (HasStackOverflow()) { | 321 if (!HasStackOverflow()) { |
322 ClearDeferred(); | |
323 } else { | |
324 HistogramTimerScope deferred_timer(&Counters::deferred_code_generation); | 322 HistogramTimerScope deferred_timer(&Counters::deferred_code_generation); |
325 JumpTarget::set_compiling_deferred_code(true); | 323 JumpTarget::set_compiling_deferred_code(true); |
326 ProcessDeferred(); | 324 ProcessDeferred(); |
327 JumpTarget::set_compiling_deferred_code(false); | 325 JumpTarget::set_compiling_deferred_code(false); |
328 } | 326 } |
329 | 327 |
330 // There is no need to delete the register allocator, it is a | 328 // There is no need to delete the register allocator, it is a |
331 // stack-allocated local. | 329 // stack-allocated local. |
332 allocator_ = NULL; | 330 allocator_ = NULL; |
333 scope_ = NULL; | 331 scope_ = NULL; |
(...skipping 443 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
777 } | 775 } |
778 } | 776 } |
779 | 777 |
780 | 778 |
781 // A deferred code class implementing binary operations on likely smis. | 779 // A deferred code class implementing binary operations on likely smis. |
782 // This class generates both inline code and deferred code. | 780 // This class generates both inline code and deferred code. |
783 // The fastest path is implemented inline. Deferred code calls | 781 // The fastest path is implemented inline. Deferred code calls |
784 // the GenericBinaryOpStub stub for slow cases. | 782 // the GenericBinaryOpStub stub for slow cases. |
785 class DeferredInlineBinaryOperation: public DeferredCode { | 783 class DeferredInlineBinaryOperation: public DeferredCode { |
786 public: | 784 public: |
787 DeferredInlineBinaryOperation(CodeGenerator* generator, | 785 DeferredInlineBinaryOperation(Token::Value op, |
788 Token::Value op, | |
789 OverwriteMode mode, | 786 OverwriteMode mode, |
790 GenericBinaryFlags flags) | 787 GenericBinaryFlags flags) |
791 : DeferredCode(generator), stub_(op, mode, flags), op_(op) { | 788 : stub_(op, mode, flags), op_(op) { |
792 set_comment("[ DeferredInlineBinaryOperation"); | 789 set_comment("[ DeferredInlineBinaryOperation"); |
793 } | 790 } |
794 | 791 |
795 // Consumes its arguments, left and right, leaving them invalid. | 792 // Consumes its arguments, left and right, leaving them invalid. |
796 Result GenerateInlineCode(Result* left, Result* right); | 793 Result GenerateInlineCode(Result* left, Result* right); |
797 | 794 |
798 virtual void Generate(); | 795 virtual void Generate(); |
799 | 796 |
800 private: | 797 private: |
801 GenericBinaryOpStub stub_; | 798 GenericBinaryOpStub stub_; |
802 Token::Value op_; | 799 Token::Value op_; |
803 }; | 800 }; |
804 | 801 |
805 | 802 |
806 void DeferredInlineBinaryOperation::Generate() { | 803 void DeferredInlineBinaryOperation::Generate() { |
807 Result left; | 804 Result left; |
808 Result right; | 805 Result right; |
809 enter()->Bind(&left, &right); | 806 enter()->Bind(&left, &right); |
810 generator()->frame()->Push(&left); | 807 cgen()->frame()->Push(&left); |
811 generator()->frame()->Push(&right); | 808 cgen()->frame()->Push(&right); |
812 Result answer = generator()->frame()->CallStub(&stub_, 2); | 809 Result answer = cgen()->frame()->CallStub(&stub_, 2); |
813 exit_.Jump(&answer); | 810 exit_.Jump(&answer); |
814 } | 811 } |
815 | 812 |
816 | 813 |
817 void CodeGenerator::GenericBinaryOperation(Token::Value op, | 814 void CodeGenerator::GenericBinaryOperation(Token::Value op, |
818 SmiAnalysis* type, | 815 SmiAnalysis* type, |
819 OverwriteMode overwrite_mode) { | 816 OverwriteMode overwrite_mode) { |
820 Comment cmnt(masm_, "[ BinaryOperation"); | 817 Comment cmnt(masm_, "[ BinaryOperation"); |
821 Comment cmnt_token(masm_, Token::String(op)); | 818 Comment cmnt_token(masm_, Token::String(op)); |
822 | 819 |
(...skipping 183 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1006 } | 1003 } |
1007 | 1004 |
1008 | 1005 |
1009 void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, | 1006 void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, |
1010 Result* left, | 1007 Result* left, |
1011 Result* right, | 1008 Result* right, |
1012 OverwriteMode overwrite_mode) { | 1009 OverwriteMode overwrite_mode) { |
1013 // Implements a binary operation using a deferred code object | 1010 // Implements a binary operation using a deferred code object |
1014 // and some inline code to operate on smis quickly. | 1011 // and some inline code to operate on smis quickly. |
1015 DeferredInlineBinaryOperation* deferred = | 1012 DeferredInlineBinaryOperation* deferred = |
1016 new DeferredInlineBinaryOperation(this, op, overwrite_mode, | 1013 new DeferredInlineBinaryOperation(op, overwrite_mode, SMI_CODE_INLINED); |
1017 SMI_CODE_INLINED); | |
1018 // Generate the inline code that handles some smi operations, | 1014 // Generate the inline code that handles some smi operations, |
1019 // and jumps to the deferred code for everything else. | 1015 // and jumps to the deferred code for everything else. |
1020 Result answer = deferred->GenerateInlineCode(left, right); | 1016 Result answer = deferred->GenerateInlineCode(left, right); |
1021 deferred->BindExit(&answer); | 1017 deferred->BindExit(&answer); |
1022 frame_->Push(&answer); | 1018 frame_->Push(&answer); |
1023 } | 1019 } |
1024 | 1020 |
1025 | 1021 |
1026 class DeferredInlineSmiOperation: public DeferredCode { | 1022 class DeferredInlineSmiOperation: public DeferredCode { |
1027 public: | 1023 public: |
1028 DeferredInlineSmiOperation(CodeGenerator* generator, | 1024 DeferredInlineSmiOperation(Token::Value op, |
1029 Token::Value op, | |
1030 Smi* value, | 1025 Smi* value, |
1031 OverwriteMode overwrite_mode) | 1026 OverwriteMode overwrite_mode) |
1032 : DeferredCode(generator), | 1027 : op_(op), |
1033 op_(op), | |
1034 value_(value), | 1028 value_(value), |
1035 overwrite_mode_(overwrite_mode) { | 1029 overwrite_mode_(overwrite_mode) { |
1036 set_comment("[ DeferredInlineSmiOperation"); | 1030 set_comment("[ DeferredInlineSmiOperation"); |
1037 } | 1031 } |
1038 | 1032 |
1039 virtual void Generate(); | 1033 virtual void Generate(); |
1040 | 1034 |
1041 private: | 1035 private: |
1042 Token::Value op_; | 1036 Token::Value op_; |
1043 Smi* value_; | 1037 Smi* value_; |
1044 OverwriteMode overwrite_mode_; | 1038 OverwriteMode overwrite_mode_; |
1045 }; | 1039 }; |
1046 | 1040 |
1047 | 1041 |
1048 void DeferredInlineSmiOperation::Generate() { | 1042 void DeferredInlineSmiOperation::Generate() { |
1049 Result left; | 1043 Result left; |
1050 enter()->Bind(&left); | 1044 enter()->Bind(&left); |
1051 generator()->frame()->Push(&left); | 1045 cgen()->frame()->Push(&left); |
1052 generator()->frame()->Push(value_); | 1046 cgen()->frame()->Push(value_); |
1053 GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED); | 1047 GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED); |
1054 Result answer = generator()->frame()->CallStub(&igostub, 2); | 1048 Result answer = cgen()->frame()->CallStub(&igostub, 2); |
1055 exit_.Jump(&answer); | 1049 exit_.Jump(&answer); |
1056 } | 1050 } |
1057 | 1051 |
1058 | 1052 |
1059 class DeferredInlineSmiOperationReversed: public DeferredCode { | 1053 class DeferredInlineSmiOperationReversed: public DeferredCode { |
1060 public: | 1054 public: |
1061 DeferredInlineSmiOperationReversed(CodeGenerator* generator, | 1055 DeferredInlineSmiOperationReversed(Token::Value op, |
1062 Token::Value op, | |
1063 Smi* value, | 1056 Smi* value, |
1064 OverwriteMode overwrite_mode) | 1057 OverwriteMode overwrite_mode) |
1065 : DeferredCode(generator), | 1058 : op_(op), |
1066 op_(op), | |
1067 value_(value), | 1059 value_(value), |
1068 overwrite_mode_(overwrite_mode) { | 1060 overwrite_mode_(overwrite_mode) { |
1069 set_comment("[ DeferredInlineSmiOperationReversed"); | 1061 set_comment("[ DeferredInlineSmiOperationReversed"); |
1070 } | 1062 } |
1071 | 1063 |
1072 virtual void Generate(); | 1064 virtual void Generate(); |
1073 | 1065 |
1074 private: | 1066 private: |
1075 Token::Value op_; | 1067 Token::Value op_; |
1076 Smi* value_; | 1068 Smi* value_; |
1077 OverwriteMode overwrite_mode_; | 1069 OverwriteMode overwrite_mode_; |
1078 }; | 1070 }; |
1079 | 1071 |
1080 | 1072 |
1081 void DeferredInlineSmiOperationReversed::Generate() { | 1073 void DeferredInlineSmiOperationReversed::Generate() { |
1082 Result right; | 1074 Result right; |
1083 enter()->Bind(&right); | 1075 enter()->Bind(&right); |
1084 generator()->frame()->Push(value_); | 1076 cgen()->frame()->Push(value_); |
1085 generator()->frame()->Push(&right); | 1077 cgen()->frame()->Push(&right); |
1086 GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED); | 1078 GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED); |
1087 Result answer = generator()->frame()->CallStub(&igostub, 2); | 1079 Result answer = cgen()->frame()->CallStub(&igostub, 2); |
1088 exit_.Jump(&answer); | 1080 exit_.Jump(&answer); |
1089 } | 1081 } |
1090 | 1082 |
1091 | 1083 |
1092 class DeferredInlineSmiAdd: public DeferredCode { | 1084 class DeferredInlineSmiAdd: public DeferredCode { |
1093 public: | 1085 public: |
1094 DeferredInlineSmiAdd(CodeGenerator* generator, | 1086 DeferredInlineSmiAdd(Smi* value, |
1095 Smi* value, | |
1096 OverwriteMode overwrite_mode) | 1087 OverwriteMode overwrite_mode) |
1097 : DeferredCode(generator), | 1088 : value_(value), |
1098 value_(value), | |
1099 overwrite_mode_(overwrite_mode) { | 1089 overwrite_mode_(overwrite_mode) { |
1100 set_comment("[ DeferredInlineSmiAdd"); | 1090 set_comment("[ DeferredInlineSmiAdd"); |
1101 } | 1091 } |
1102 | 1092 |
1103 virtual void Generate(); | 1093 virtual void Generate(); |
1104 | 1094 |
1105 private: | 1095 private: |
1106 Smi* value_; | 1096 Smi* value_; |
1107 OverwriteMode overwrite_mode_; | 1097 OverwriteMode overwrite_mode_; |
1108 }; | 1098 }; |
1109 | 1099 |
1110 | 1100 |
1111 void DeferredInlineSmiAdd::Generate() { | |
1112 // Undo the optimistic add operation and call the shared stub. | |
1113 Result left; // Initially left + value_. | |
1114 enter()->Bind(&left); | |
1115 left.ToRegister(); | |
1116 generator()->frame()->Spill(left.reg()); | |
1117 __ sub(Operand(left.reg()), Immediate(value_)); | |
1118 generator()->frame()->Push(&left); | |
1119 generator()->frame()->Push(value_); | |
1120 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED); | |
1121 Result answer = generator()->frame()->CallStub(&igostub, 2); | |
1122 exit_.Jump(&answer); | |
1123 } | |
1124 | |
1125 | |
1126 class DeferredInlineSmiAddReversed: public DeferredCode { | 1101 class DeferredInlineSmiAddReversed: public DeferredCode { |
1127 public: | 1102 public: |
1128 DeferredInlineSmiAddReversed(CodeGenerator* generator, | 1103 DeferredInlineSmiAddReversed(Smi* value, |
1129 Smi* value, | |
1130 OverwriteMode overwrite_mode) | 1104 OverwriteMode overwrite_mode) |
1131 : DeferredCode(generator), | 1105 : value_(value), |
1132 value_(value), | |
1133 overwrite_mode_(overwrite_mode) { | 1106 overwrite_mode_(overwrite_mode) { |
1134 set_comment("[ DeferredInlineSmiAddReversed"); | 1107 set_comment("[ DeferredInlineSmiAddReversed"); |
1135 } | 1108 } |
1136 | 1109 |
1137 virtual void Generate(); | 1110 virtual void Generate(); |
1138 | 1111 |
1139 private: | 1112 private: |
1140 Smi* value_; | 1113 Smi* value_; |
1141 OverwriteMode overwrite_mode_; | 1114 OverwriteMode overwrite_mode_; |
1142 }; | 1115 }; |
1143 | 1116 |
1144 | 1117 |
1145 void DeferredInlineSmiAddReversed::Generate() { | |
1146 // Undo the optimistic add operation and call the shared stub. | |
1147 Result right; // Initially value_ + right. | |
1148 enter()->Bind(&right); | |
1149 right.ToRegister(); | |
1150 generator()->frame()->Spill(right.reg()); | |
1151 __ sub(Operand(right.reg()), Immediate(value_)); | |
1152 generator()->frame()->Push(value_); | |
1153 generator()->frame()->Push(&right); | |
1154 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED); | |
1155 Result answer = generator()->frame()->CallStub(&igostub, 2); | |
1156 exit_.Jump(&answer); | |
1157 } | |
1158 | |
1159 | |
1160 class DeferredInlineSmiSub: public DeferredCode { | 1118 class DeferredInlineSmiSub: public DeferredCode { |
1161 public: | 1119 public: |
1162 DeferredInlineSmiSub(CodeGenerator* generator, | 1120 DeferredInlineSmiSub(Smi* value, |
1163 Smi* value, | |
1164 OverwriteMode overwrite_mode) | 1121 OverwriteMode overwrite_mode) |
1165 : DeferredCode(generator), | 1122 : value_(value), |
1166 value_(value), | |
1167 overwrite_mode_(overwrite_mode) { | 1123 overwrite_mode_(overwrite_mode) { |
1168 set_comment("[ DeferredInlineSmiSub"); | 1124 set_comment("[ DeferredInlineSmiSub"); |
1169 } | 1125 } |
1170 | 1126 |
1171 virtual void Generate(); | 1127 virtual void Generate(); |
1172 | 1128 |
1173 private: | 1129 private: |
1174 Smi* value_; | 1130 Smi* value_; |
1175 OverwriteMode overwrite_mode_; | 1131 OverwriteMode overwrite_mode_; |
1176 }; | 1132 }; |
1177 | 1133 |
1178 | 1134 |
| 1135 #undef __ |
| 1136 #define __ ACCESS_MASM(cgen()->masm()) |
| 1137 |
| 1138 |
| 1139 void DeferredInlineSmiAdd::Generate() { |
| 1140 // Undo the optimistic add operation and call the shared stub. |
| 1141 Result left; // Initially left + value_. |
| 1142 enter()->Bind(&left); |
| 1143 left.ToRegister(); |
| 1144 cgen()->frame()->Spill(left.reg()); |
| 1145 __ sub(Operand(left.reg()), Immediate(value_)); |
| 1146 cgen()->frame()->Push(&left); |
| 1147 cgen()->frame()->Push(value_); |
| 1148 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED); |
| 1149 Result answer = cgen()->frame()->CallStub(&igostub, 2); |
| 1150 exit_.Jump(&answer); |
| 1151 } |
| 1152 |
| 1153 |
| 1154 void DeferredInlineSmiAddReversed::Generate() { |
| 1155 // Undo the optimistic add operation and call the shared stub. |
| 1156 Result right; // Initially value_ + right. |
| 1157 enter()->Bind(&right); |
| 1158 right.ToRegister(); |
| 1159 cgen()->frame()->Spill(right.reg()); |
| 1160 __ sub(Operand(right.reg()), Immediate(value_)); |
| 1161 cgen()->frame()->Push(value_); |
| 1162 cgen()->frame()->Push(&right); |
| 1163 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED); |
| 1164 Result answer = cgen()->frame()->CallStub(&igostub, 2); |
| 1165 exit_.Jump(&answer); |
| 1166 } |
| 1167 |
| 1168 |
1179 void DeferredInlineSmiSub::Generate() { | 1169 void DeferredInlineSmiSub::Generate() { |
1180 // Undo the optimistic sub operation and call the shared stub. | 1170 // Undo the optimistic sub operation and call the shared stub. |
1181 Result left; // Initially left - value_. | 1171 Result left; // Initially left - value_. |
1182 enter()->Bind(&left); | 1172 enter()->Bind(&left); |
1183 left.ToRegister(); | 1173 left.ToRegister(); |
1184 generator()->frame()->Spill(left.reg()); | 1174 cgen()->frame()->Spill(left.reg()); |
1185 __ add(Operand(left.reg()), Immediate(value_)); | 1175 __ add(Operand(left.reg()), Immediate(value_)); |
1186 generator()->frame()->Push(&left); | 1176 cgen()->frame()->Push(&left); |
1187 generator()->frame()->Push(value_); | 1177 cgen()->frame()->Push(value_); |
1188 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED); | 1178 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED); |
1189 Result answer = generator()->frame()->CallStub(&igostub, 2); | 1179 Result answer = cgen()->frame()->CallStub(&igostub, 2); |
1190 exit_.Jump(&answer); | 1180 exit_.Jump(&answer); |
1191 } | 1181 } |
1192 | 1182 |
1193 | 1183 |
| 1184 #undef __ |
| 1185 #define __ ACCESS_MASM(masm_) |
| 1186 |
| 1187 |
1194 class DeferredInlineSmiSubReversed: public DeferredCode { | 1188 class DeferredInlineSmiSubReversed: public DeferredCode { |
1195 public: | 1189 public: |
1196 DeferredInlineSmiSubReversed(CodeGenerator* generator, | 1190 DeferredInlineSmiSubReversed(Smi* value, |
1197 Smi* value, | |
1198 OverwriteMode overwrite_mode) | 1191 OverwriteMode overwrite_mode) |
1199 : DeferredCode(generator), | 1192 : value_(value), |
1200 value_(value), | |
1201 overwrite_mode_(overwrite_mode) { | 1193 overwrite_mode_(overwrite_mode) { |
1202 set_comment("[ DeferredInlineSmiSubReversed"); | 1194 set_comment("[ DeferredInlineSmiSubReversed"); |
1203 } | 1195 } |
1204 | 1196 |
1205 virtual void Generate(); | 1197 virtual void Generate(); |
1206 | 1198 |
1207 private: | 1199 private: |
1208 Smi* value_; | 1200 Smi* value_; |
1209 OverwriteMode overwrite_mode_; | 1201 OverwriteMode overwrite_mode_; |
1210 }; | 1202 }; |
1211 | 1203 |
1212 | 1204 |
1213 void DeferredInlineSmiSubReversed::Generate() { | 1205 void DeferredInlineSmiSubReversed::Generate() { |
1214 // Call the shared stub. | 1206 // Call the shared stub. |
1215 Result right; | 1207 Result right; |
1216 enter()->Bind(&right); | 1208 enter()->Bind(&right); |
1217 generator()->frame()->Push(value_); | 1209 cgen()->frame()->Push(value_); |
1218 generator()->frame()->Push(&right); | 1210 cgen()->frame()->Push(&right); |
1219 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED); | 1211 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED); |
1220 Result answer = generator()->frame()->CallStub(&igostub, 2); | 1212 Result answer = cgen()->frame()->CallStub(&igostub, 2); |
1221 exit_.Jump(&answer); | 1213 exit_.Jump(&answer); |
1222 } | 1214 } |
1223 | 1215 |
1224 | 1216 |
1225 void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, | 1217 void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, |
1226 Result* operand, | 1218 Result* operand, |
1227 Handle<Object> value, | 1219 Handle<Object> value, |
1228 SmiAnalysis* type, | 1220 SmiAnalysis* type, |
1229 bool reversed, | 1221 bool reversed, |
1230 OverwriteMode overwrite_mode) { | 1222 OverwriteMode overwrite_mode) { |
(...skipping 22 matching lines...) Expand all Loading... |
1253 int int_value = smi_value->value(); | 1245 int int_value = smi_value->value(); |
1254 | 1246 |
1255 switch (op) { | 1247 switch (op) { |
1256 case Token::ADD: { | 1248 case Token::ADD: { |
1257 operand->ToRegister(); | 1249 operand->ToRegister(); |
1258 frame_->Spill(operand->reg()); | 1250 frame_->Spill(operand->reg()); |
1259 __ add(Operand(operand->reg()), Immediate(value)); | 1251 __ add(Operand(operand->reg()), Immediate(value)); |
1260 | 1252 |
1261 DeferredCode* deferred = NULL; | 1253 DeferredCode* deferred = NULL; |
1262 if (reversed) { | 1254 if (reversed) { |
1263 deferred = new DeferredInlineSmiAddReversed(this, smi_value, | 1255 deferred = new DeferredInlineSmiAddReversed(smi_value, overwrite_mode); |
1264 overwrite_mode); | |
1265 } else { | 1256 } else { |
1266 deferred = new DeferredInlineSmiAdd(this, smi_value, overwrite_mode); | 1257 deferred = new DeferredInlineSmiAdd(smi_value, overwrite_mode); |
1267 } | 1258 } |
1268 deferred->SetEntryFrame(operand); | 1259 deferred->SetEntryFrame(operand); |
1269 deferred->enter()->Branch(overflow, operand, not_taken); | 1260 deferred->enter()->Branch(overflow, operand, not_taken); |
1270 __ test(operand->reg(), Immediate(kSmiTagMask)); | 1261 __ test(operand->reg(), Immediate(kSmiTagMask)); |
1271 deferred->enter()->Branch(not_zero, operand, not_taken); | 1262 deferred->enter()->Branch(not_zero, operand, not_taken); |
1272 deferred->BindExit(operand); | 1263 deferred->BindExit(operand); |
1273 frame_->Push(operand); | 1264 frame_->Push(operand); |
1274 break; | 1265 break; |
1275 } | 1266 } |
1276 | 1267 |
1277 case Token::SUB: { | 1268 case Token::SUB: { |
1278 DeferredCode* deferred = NULL; | 1269 DeferredCode* deferred = NULL; |
1279 Result answer; // Only allocate a new register if reversed. | 1270 Result answer; // Only allocate a new register if reversed. |
1280 if (reversed) { | 1271 if (reversed) { |
1281 answer = allocator()->Allocate(); | 1272 answer = allocator()->Allocate(); |
1282 ASSERT(answer.is_valid()); | 1273 ASSERT(answer.is_valid()); |
1283 deferred = new DeferredInlineSmiSubReversed(this, smi_value, | 1274 deferred = new DeferredInlineSmiSubReversed(smi_value, overwrite_mode); |
1284 overwrite_mode); | |
1285 __ Set(answer.reg(), Immediate(value)); | 1275 __ Set(answer.reg(), Immediate(value)); |
1286 // We are in the reversed case so they can't both be Smi constants. | 1276 // We are in the reversed case so they can't both be Smi constants. |
1287 ASSERT(operand->is_register()); | 1277 ASSERT(operand->is_register()); |
1288 __ sub(answer.reg(), Operand(operand->reg())); | 1278 __ sub(answer.reg(), Operand(operand->reg())); |
1289 } else { | 1279 } else { |
1290 operand->ToRegister(); | 1280 operand->ToRegister(); |
1291 frame_->Spill(operand->reg()); | 1281 frame_->Spill(operand->reg()); |
1292 deferred = new DeferredInlineSmiSub(this, smi_value, overwrite_mode); | 1282 deferred = new DeferredInlineSmiSub(smi_value, overwrite_mode); |
1293 __ sub(Operand(operand->reg()), Immediate(value)); | 1283 __ sub(Operand(operand->reg()), Immediate(value)); |
1294 answer = *operand; | 1284 answer = *operand; |
1295 } | 1285 } |
1296 deferred->SetEntryFrame(operand); | 1286 deferred->SetEntryFrame(operand); |
1297 deferred->enter()->Branch(overflow, operand, not_taken); | 1287 deferred->enter()->Branch(overflow, operand, not_taken); |
1298 __ test(answer.reg(), Immediate(kSmiTagMask)); | 1288 __ test(answer.reg(), Immediate(kSmiTagMask)); |
1299 deferred->enter()->Branch(not_zero, operand, not_taken); | 1289 deferred->enter()->Branch(not_zero, operand, not_taken); |
1300 operand->Unuse(); | 1290 operand->Unuse(); |
1301 deferred->BindExit(&answer); | 1291 deferred->BindExit(&answer); |
1302 frame_->Push(&answer); | 1292 frame_->Push(&answer); |
1303 break; | 1293 break; |
1304 } | 1294 } |
1305 | 1295 |
1306 case Token::SAR: { | 1296 case Token::SAR: { |
1307 if (reversed) { | 1297 if (reversed) { |
1308 Result constant_operand(value); | 1298 Result constant_operand(value); |
1309 LikelySmiBinaryOperation(op, &constant_operand, operand, | 1299 LikelySmiBinaryOperation(op, &constant_operand, operand, |
1310 overwrite_mode); | 1300 overwrite_mode); |
1311 } else { | 1301 } else { |
1312 // Only the least significant 5 bits of the shift value are used. | 1302 // Only the least significant 5 bits of the shift value are used. |
1313 // In the slow case, this masking is done inside the runtime call. | 1303 // In the slow case, this masking is done inside the runtime call. |
1314 int shift_value = int_value & 0x1f; | 1304 int shift_value = int_value & 0x1f; |
1315 DeferredCode* deferred = | 1305 DeferredCode* deferred = |
1316 new DeferredInlineSmiOperation(this, Token::SAR, smi_value, | 1306 new DeferredInlineSmiOperation(op, smi_value, overwrite_mode); |
1317 overwrite_mode); | |
1318 operand->ToRegister(); | 1307 operand->ToRegister(); |
1319 __ test(operand->reg(), Immediate(kSmiTagMask)); | 1308 __ test(operand->reg(), Immediate(kSmiTagMask)); |
1320 deferred->enter()->Branch(not_zero, operand, not_taken); | 1309 deferred->enter()->Branch(not_zero, operand, not_taken); |
1321 if (shift_value > 0) { | 1310 if (shift_value > 0) { |
1322 frame_->Spill(operand->reg()); | 1311 frame_->Spill(operand->reg()); |
1323 __ sar(operand->reg(), shift_value); | 1312 __ sar(operand->reg(), shift_value); |
1324 __ and_(operand->reg(), ~kSmiTagMask); | 1313 __ and_(operand->reg(), ~kSmiTagMask); |
1325 } | 1314 } |
1326 deferred->BindExit(operand); | 1315 deferred->BindExit(operand); |
1327 frame_->Push(operand); | 1316 frame_->Push(operand); |
1328 } | 1317 } |
1329 break; | 1318 break; |
1330 } | 1319 } |
1331 | 1320 |
1332 case Token::SHR: { | 1321 case Token::SHR: { |
1333 if (reversed) { | 1322 if (reversed) { |
1334 Result constant_operand(value); | 1323 Result constant_operand(value); |
1335 LikelySmiBinaryOperation(op, &constant_operand, operand, | 1324 LikelySmiBinaryOperation(op, &constant_operand, operand, |
1336 overwrite_mode); | 1325 overwrite_mode); |
1337 } else { | 1326 } else { |
1338 // Only the least significant 5 bits of the shift value are used. | 1327 // Only the least significant 5 bits of the shift value are used. |
1339 // In the slow case, this masking is done inside the runtime call. | 1328 // In the slow case, this masking is done inside the runtime call. |
1340 int shift_value = int_value & 0x1f; | 1329 int shift_value = int_value & 0x1f; |
1341 DeferredCode* deferred = | 1330 DeferredCode* deferred = |
1342 new DeferredInlineSmiOperation(this, Token::SHR, smi_value, | 1331 new DeferredInlineSmiOperation(op, smi_value, overwrite_mode); |
1343 overwrite_mode); | |
1344 operand->ToRegister(); | 1332 operand->ToRegister(); |
1345 __ test(operand->reg(), Immediate(kSmiTagMask)); | 1333 __ test(operand->reg(), Immediate(kSmiTagMask)); |
1346 deferred->enter()->Branch(not_zero, operand, not_taken); | 1334 deferred->enter()->Branch(not_zero, operand, not_taken); |
1347 Result answer = allocator()->Allocate(); | 1335 Result answer = allocator()->Allocate(); |
1348 ASSERT(answer.is_valid()); | 1336 ASSERT(answer.is_valid()); |
1349 __ mov(answer.reg(), operand->reg()); | 1337 __ mov(answer.reg(), operand->reg()); |
1350 __ sar(answer.reg(), kSmiTagSize); | 1338 __ sar(answer.reg(), kSmiTagSize); |
1351 __ shr(answer.reg(), shift_value); | 1339 __ shr(answer.reg(), shift_value); |
1352 // A negative Smi shifted right two is in the positive Smi range. | 1340 // A negative Smi shifted right two is in the positive Smi range. |
1353 if (shift_value < 2) { | 1341 if (shift_value < 2) { |
(...skipping 13 matching lines...) Expand all Loading... |
1367 case Token::SHL: { | 1355 case Token::SHL: { |
1368 if (reversed) { | 1356 if (reversed) { |
1369 Result constant_operand(value); | 1357 Result constant_operand(value); |
1370 LikelySmiBinaryOperation(op, &constant_operand, operand, | 1358 LikelySmiBinaryOperation(op, &constant_operand, operand, |
1371 overwrite_mode); | 1359 overwrite_mode); |
1372 } else { | 1360 } else { |
1373 // Only the least significant 5 bits of the shift value are used. | 1361 // Only the least significant 5 bits of the shift value are used. |
1374 // In the slow case, this masking is done inside the runtime call. | 1362 // In the slow case, this masking is done inside the runtime call. |
1375 int shift_value = int_value & 0x1f; | 1363 int shift_value = int_value & 0x1f; |
1376 DeferredCode* deferred = | 1364 DeferredCode* deferred = |
1377 new DeferredInlineSmiOperation(this, Token::SHL, smi_value, | 1365 new DeferredInlineSmiOperation(op, smi_value, overwrite_mode); |
1378 overwrite_mode); | |
1379 operand->ToRegister(); | 1366 operand->ToRegister(); |
1380 __ test(operand->reg(), Immediate(kSmiTagMask)); | 1367 __ test(operand->reg(), Immediate(kSmiTagMask)); |
1381 deferred->enter()->Branch(not_zero, operand, not_taken); | 1368 deferred->enter()->Branch(not_zero, operand, not_taken); |
1382 if (shift_value != 0) { | 1369 if (shift_value != 0) { |
1383 Result answer = allocator()->Allocate(); | 1370 Result answer = allocator()->Allocate(); |
1384 ASSERT(answer.is_valid()); | 1371 ASSERT(answer.is_valid()); |
1385 __ mov(answer.reg(), operand->reg()); | 1372 __ mov(answer.reg(), operand->reg()); |
1386 ASSERT(kSmiTag == 0); // adjust code if not the case | 1373 ASSERT(kSmiTag == 0); // adjust code if not the case |
1387 // We do no shifts, only the Smi conversion, if shift_value is 1. | 1374 // We do no shifts, only the Smi conversion, if shift_value is 1. |
1388 if (shift_value > 1) { | 1375 if (shift_value > 1) { |
(...skipping 12 matching lines...) Expand all Loading... |
1401 } | 1388 } |
1402 } | 1389 } |
1403 break; | 1390 break; |
1404 } | 1391 } |
1405 | 1392 |
1406 case Token::BIT_OR: | 1393 case Token::BIT_OR: |
1407 case Token::BIT_XOR: | 1394 case Token::BIT_XOR: |
1408 case Token::BIT_AND: { | 1395 case Token::BIT_AND: { |
1409 DeferredCode* deferred = NULL; | 1396 DeferredCode* deferred = NULL; |
1410 if (reversed) { | 1397 if (reversed) { |
1411 deferred = new DeferredInlineSmiOperationReversed(this, op, smi_value, | 1398 deferred = new DeferredInlineSmiOperationReversed(op, smi_value, |
1412 overwrite_mode); | 1399 overwrite_mode); |
1413 } else { | 1400 } else { |
1414 deferred = new DeferredInlineSmiOperation(this, op, smi_value, | 1401 deferred = new DeferredInlineSmiOperation(op, smi_value, |
1415 overwrite_mode); | 1402 overwrite_mode); |
1416 } | 1403 } |
1417 operand->ToRegister(); | 1404 operand->ToRegister(); |
1418 __ test(operand->reg(), Immediate(kSmiTagMask)); | 1405 __ test(operand->reg(), Immediate(kSmiTagMask)); |
1419 deferred->enter()->Branch(not_zero, operand, not_taken); | 1406 deferred->enter()->Branch(not_zero, operand, not_taken); |
1420 frame_->Spill(operand->reg()); | 1407 frame_->Spill(operand->reg()); |
1421 if (op == Token::BIT_AND) { | 1408 if (op == Token::BIT_AND) { |
1422 __ and_(Operand(operand->reg()), Immediate(value)); | 1409 __ and_(Operand(operand->reg()), Immediate(value)); |
1423 } else if (op == Token::BIT_XOR) { | 1410 } else if (op == Token::BIT_XOR) { |
1424 if (int_value != 0) { | 1411 if (int_value != 0) { |
(...skipping 265 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1690 Result answer = frame_->CallStub(&call_function, arg_count + 1); | 1677 Result answer = frame_->CallStub(&call_function, arg_count + 1); |
1691 // Restore context and replace function on the stack with the | 1678 // Restore context and replace function on the stack with the |
1692 // result of the stub invocation. | 1679 // result of the stub invocation. |
1693 frame_->RestoreContextRegister(); | 1680 frame_->RestoreContextRegister(); |
1694 frame_->SetElementAt(0, &answer); | 1681 frame_->SetElementAt(0, &answer); |
1695 } | 1682 } |
1696 | 1683 |
1697 | 1684 |
1698 class DeferredStackCheck: public DeferredCode { | 1685 class DeferredStackCheck: public DeferredCode { |
1699 public: | 1686 public: |
1700 explicit DeferredStackCheck(CodeGenerator* generator) | 1687 explicit DeferredStackCheck() { |
1701 : DeferredCode(generator) { | |
1702 set_comment("[ DeferredStackCheck"); | 1688 set_comment("[ DeferredStackCheck"); |
1703 } | 1689 } |
1704 | 1690 |
1705 virtual void Generate(); | 1691 virtual void Generate(); |
1706 }; | 1692 }; |
1707 | 1693 |
1708 | 1694 |
1709 void DeferredStackCheck::Generate() { | 1695 void DeferredStackCheck::Generate() { |
1710 enter()->Bind(); | 1696 enter()->Bind(); |
1711 StackCheckStub stub; | 1697 StackCheckStub stub; |
1712 Result ignored = generator()->frame()->CallStub(&stub, 0); | 1698 Result ignored = cgen()->frame()->CallStub(&stub, 0); |
1713 ignored.Unuse(); | 1699 ignored.Unuse(); |
1714 exit_.Jump(); | 1700 exit_.Jump(); |
1715 } | 1701 } |
1716 | 1702 |
1717 | 1703 |
// Emit an inline stack-limit check.  Compares esp against the stack
// guard limit and branches to deferred code (which calls the
// stack-check stub) when the limit has been exceeded.  Controlled by
// the --check-stack flag.
void CodeGenerator::CheckStack() {
  if (FLAG_check_stack) {
    DeferredStackCheck* deferred = new DeferredStackCheck;
    ExternalReference stack_guard_limit =
        ExternalReference::address_of_stack_guard_limit();
    __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
    // The deferred path is expected to be rare, hence the not_taken hint.
    deferred->enter()->Branch(below, not_taken);
    deferred->BindExit();
  }
}
1728 | 1714 |
1729 | 1715 |
1730 void CodeGenerator::VisitAndSpill(Statement* statement) { | 1716 void CodeGenerator::VisitAndSpill(Statement* statement) { |
(...skipping 1837 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3568 | 3554 |
3569 bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) { | 3555 bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) { |
3570 if (!value->IsSmi()) return false; | 3556 if (!value->IsSmi()) return false; |
3571 int int_value = Smi::cast(*value)->value(); | 3557 int int_value = Smi::cast(*value)->value(); |
3572 return !is_intn(int_value, kMaxSmiInlinedBits); | 3558 return !is_intn(int_value, kMaxSmiInlinedBits); |
3573 } | 3559 } |
3574 | 3560 |
3575 | 3561 |
3576 class DeferredRegExpLiteral: public DeferredCode { | 3562 class DeferredRegExpLiteral: public DeferredCode { |
3577 public: | 3563 public: |
3578 DeferredRegExpLiteral(CodeGenerator* generator, RegExpLiteral* node) | 3564 DeferredRegExpLiteral(RegExpLiteral* node) : node_(node) { |
3579 : DeferredCode(generator), node_(node) { | |
3580 set_comment("[ DeferredRegExpLiteral"); | 3565 set_comment("[ DeferredRegExpLiteral"); |
3581 } | 3566 } |
3582 | 3567 |
3583 virtual void Generate(); | 3568 virtual void Generate(); |
3584 | 3569 |
3585 private: | 3570 private: |
3586 RegExpLiteral* node_; | 3571 RegExpLiteral* node_; |
3587 }; | 3572 }; |
3588 | 3573 |
3589 | 3574 |
// Out-of-line materialization of the regexp boilerplate: push the four
// runtime-call arguments (in the order the runtime function expects)
// and jump back to the exit with the boilerplate as the result.
void DeferredRegExpLiteral::Generate() {
  Result literals;
  enter()->Bind(&literals);
  // Since the entry is undefined we call the runtime system to
  // compute the literal.

  VirtualFrame* frame = cgen()->frame();
  // Literal array (0).
  frame->Push(&literals);
  // Literal index (1).
  frame->Push(Smi::FromInt(node_->literal_index()));
  // RegExp pattern (2).
  frame->Push(node_->pattern());
  // RegExp flags (3).
  frame->Push(node_->flags());
  Result boilerplate =
      frame->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
  exit_.Jump(&boilerplate);
}
3609 | 3594 |
3610 | 3595 |
3611 void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) { | 3596 void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) { |
3612 Comment cmnt(masm_, "[ RegExp Literal"); | 3597 Comment cmnt(masm_, "[ RegExp Literal"); |
3613 DeferredRegExpLiteral* deferred = new DeferredRegExpLiteral(this, node); | 3598 DeferredRegExpLiteral* deferred = new DeferredRegExpLiteral(node); |
3614 | 3599 |
3615 // Retrieve the literals array and check the allocated entry. Begin | 3600 // Retrieve the literals array and check the allocated entry. Begin |
3616 // with a writable copy of the function of this activation in a | 3601 // with a writable copy of the function of this activation in a |
3617 // register. | 3602 // register. |
3618 frame_->PushFunction(); | 3603 frame_->PushFunction(); |
3619 Result literals = frame_->Pop(); | 3604 Result literals = frame_->Pop(); |
3620 literals.ToRegister(); | 3605 literals.ToRegister(); |
3621 frame_->Spill(literals.reg()); | 3606 frame_->Spill(literals.reg()); |
3622 | 3607 |
3623 // Load the literals array of the function. | 3608 // Load the literals array of the function. |
(...skipping 20 matching lines...) Expand all Loading... |
3644 frame_->Push(&boilerplate); | 3629 frame_->Push(&boilerplate); |
3645 } | 3630 } |
3646 | 3631 |
3647 | 3632 |
3648 // This deferred code stub will be used for creating the boilerplate | 3633 // This deferred code stub will be used for creating the boilerplate |
3649 // by calling Runtime_CreateObjectLiteral. | 3634 // by calling Runtime_CreateObjectLiteral. |
3650 // Each created boilerplate is stored in the JSFunction and they are | 3635 // Each created boilerplate is stored in the JSFunction and they are |
3651 // therefore context dependent. | 3636 // therefore context dependent. |
3652 class DeferredObjectLiteral: public DeferredCode { | 3637 class DeferredObjectLiteral: public DeferredCode { |
3653 public: | 3638 public: |
3654 DeferredObjectLiteral(CodeGenerator* generator, | 3639 DeferredObjectLiteral(ObjectLiteral* node) : node_(node) { |
3655 ObjectLiteral* node) | |
3656 : DeferredCode(generator), node_(node) { | |
3657 set_comment("[ DeferredObjectLiteral"); | 3640 set_comment("[ DeferredObjectLiteral"); |
3658 } | 3641 } |
3659 | 3642 |
3660 virtual void Generate(); | 3643 virtual void Generate(); |
3661 | 3644 |
3662 private: | 3645 private: |
3663 ObjectLiteral* node_; | 3646 ObjectLiteral* node_; |
3664 }; | 3647 }; |
3665 | 3648 |
3666 | 3649 |
// Out-of-line creation of the object-literal boilerplate: push the
// three runtime-call arguments and jump back to the exit with the
// boilerplate as the result.
void DeferredObjectLiteral::Generate() {
  Result literals;
  enter()->Bind(&literals);
  // Since the entry is undefined we call the runtime system to
  // compute the literal.

  VirtualFrame* frame = cgen()->frame();
  // Literal array (0).
  frame->Push(&literals);
  // Literal index (1).
  frame->Push(Smi::FromInt(node_->literal_index()));
  // Constant properties (2).
  frame->Push(node_->constant_properties());
  Result boilerplate =
      frame->CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
  exit_.Jump(&boilerplate);
}
3684 | 3667 |
3685 | 3668 |
3686 void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) { | 3669 void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) { |
3687 Comment cmnt(masm_, "[ ObjectLiteral"); | 3670 Comment cmnt(masm_, "[ ObjectLiteral"); |
3688 DeferredObjectLiteral* deferred = new DeferredObjectLiteral(this, node); | 3671 DeferredObjectLiteral* deferred = new DeferredObjectLiteral(node); |
3689 | 3672 |
3690 // Retrieve the literals array and check the allocated entry. Begin | 3673 // Retrieve the literals array and check the allocated entry. Begin |
3691 // with a writable copy of the function of this activation in a | 3674 // with a writable copy of the function of this activation in a |
3692 // register. | 3675 // register. |
3693 frame_->PushFunction(); | 3676 frame_->PushFunction(); |
3694 Result literals = frame_->Pop(); | 3677 Result literals = frame_->Pop(); |
3695 literals.ToRegister(); | 3678 literals.ToRegister(); |
3696 frame_->Spill(literals.reg()); | 3679 frame_->Spill(literals.reg()); |
3697 | 3680 |
3698 // Load the literals array of the function. | 3681 // Load the literals array of the function. |
(...skipping 83 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3782 } | 3765 } |
3783 } | 3766 } |
3784 | 3767 |
3785 | 3768 |
3786 // This deferred code stub will be used for creating the boilerplate | 3769 // This deferred code stub will be used for creating the boilerplate |
3787 // by calling Runtime_CreateArrayLiteralBoilerplate. | 3770 // by calling Runtime_CreateArrayLiteralBoilerplate. |
3788 // Each created boilerplate is stored in the JSFunction and they are | 3771 // Each created boilerplate is stored in the JSFunction and they are |
3789 // therefore context dependent. | 3772 // therefore context dependent. |
3790 class DeferredArrayLiteral: public DeferredCode { | 3773 class DeferredArrayLiteral: public DeferredCode { |
3791 public: | 3774 public: |
3792 DeferredArrayLiteral(CodeGenerator* generator, | 3775 DeferredArrayLiteral(ArrayLiteral* node) : node_(node) { |
3793 ArrayLiteral* node) | |
3794 : DeferredCode(generator), node_(node) { | |
3795 set_comment("[ DeferredArrayLiteral"); | 3776 set_comment("[ DeferredArrayLiteral"); |
3796 } | 3777 } |
3797 | 3778 |
3798 virtual void Generate(); | 3779 virtual void Generate(); |
3799 | 3780 |
3800 private: | 3781 private: |
3801 ArrayLiteral* node_; | 3782 ArrayLiteral* node_; |
3802 }; | 3783 }; |
3803 | 3784 |
3804 | 3785 |
// Out-of-line creation of the array-literal boilerplate: push the
// three runtime-call arguments and jump back to the exit with the
// boilerplate as the result.
void DeferredArrayLiteral::Generate() {
  Result literals;
  enter()->Bind(&literals);
  // Since the entry is undefined we call the runtime system to
  // compute the literal.

  VirtualFrame* frame = cgen()->frame();
  // Literal array (0).
  frame->Push(&literals);
  // Literal index (1).
  frame->Push(Smi::FromInt(node_->literal_index()));
  // Literals of the node, i.e. the constant elements (2).  (The old
  // comment said "Constant properties", copy-pasted from the object
  // literal case.)
  frame->Push(node_->literals());
  Result boilerplate =
      frame->CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
  exit_.Jump(&boilerplate);
}
3822 | 3803 |
3823 | 3804 |
3824 void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { | 3805 void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { |
3825 Comment cmnt(masm_, "[ ArrayLiteral"); | 3806 Comment cmnt(masm_, "[ ArrayLiteral"); |
3826 DeferredArrayLiteral* deferred = new DeferredArrayLiteral(this, node); | 3807 DeferredArrayLiteral* deferred = new DeferredArrayLiteral(node); |
3827 | 3808 |
3828 // Retrieve the literals array and check the allocated entry. Begin | 3809 // Retrieve the literals array and check the allocated entry. Begin |
3829 // with a writable copy of the function of this activation in a | 3810 // with a writable copy of the function of this activation in a |
3830 // register. | 3811 // register. |
3831 frame_->PushFunction(); | 3812 frame_->PushFunction(); |
3832 Result literals = frame_->Pop(); | 3813 Result literals = frame_->Pop(); |
3833 literals.ToRegister(); | 3814 literals.ToRegister(); |
3834 frame_->Spill(literals.reg()); | 3815 frame_->Spill(literals.reg()); |
3835 | 3816 |
3836 // Load the literals array of the function. | 3817 // Load the literals array of the function. |
(...skipping 934 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4771 | 4752 |
4772 default: | 4753 default: |
4773 UNREACHABLE(); | 4754 UNREACHABLE(); |
4774 } | 4755 } |
4775 } | 4756 } |
4776 } | 4757 } |
4777 | 4758 |
4778 | 4759 |
// Deferred code for count operations (++/--) whose optimistic inline
// smi path failed.  Undoes the speculative smi arithmetic and performs
// the operation through the runtime (see Generate below).
class DeferredCountOperation: public DeferredCode {
 public:
  DeferredCountOperation(bool is_postfix,
                         bool is_increment,
                         int target_size)
      : is_postfix_(is_postfix),
        is_increment_(is_increment),
        target_size_(target_size) {
    set_comment("[ DeferredCountOperation");
  }

  virtual void Generate();

 private:
  bool is_postfix_;    // True for x++/x-- (the old value is the result).
  bool is_increment_;  // True for increment (INC), false for decrement.
  int target_size_;    // Size of the target reference on the frame,
                       // as passed from VisitCountOperation.
};
4799 | 4778 |
4800 | 4779 |
| 4780 #undef __ |
| 4781 #define __ ACCESS_MASM(cgen()->masm()) |
| 4782 |
| 4783 |
// Slow path for ++/--: the operand was not a smi (or the smi operation
// overflowed).  Undo the optimistic smi add/sub, convert the operand
// with ToNumber, then perform the operation via the runtime.
void DeferredCountOperation::Generate() {
  Result value;
  enter()->Bind(&value);
  VirtualFrame* frame = cgen()->frame();
  // Undo the optimistic smi operation.
  value.ToRegister();
  frame->Spill(value.reg());
  if (is_increment_) {
    __ sub(Operand(value.reg()), Immediate(Smi::FromInt(1)));
  } else {
    __ add(Operand(value.reg()), Immediate(Smi::FromInt(1)));
  }
  frame->Push(&value);
  value = frame->InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION, 1);
  frame->Push(&value);
  if (is_postfix_) {  // Fix up copy of old value with ToNumber(value).
    // This is only safe because VisitCountOperation makes this frame slot
    // beneath the reference a register, which is spilled at the above call.
    // We cannot safely write to constants or copies below the water line.
    frame->StoreToElementAt(target_size_ + 1);
  }
  // Second runtime argument: the constant 1 being added or subtracted.
  frame->Push(Smi::FromInt(1));
  if (is_increment_) {
    value = frame->CallRuntime(Runtime::kNumberAdd, 2);
  } else {
    value = frame->CallRuntime(Runtime::kNumberSub, 2);
  }
  exit_.Jump(&value);
}
4831 | 4813 |
4832 | 4814 |
| 4815 #undef __ |
| 4816 #define __ ACCESS_MASM(masm_) |
| 4817 |
| 4818 |
4833 void CodeGenerator::VisitCountOperation(CountOperation* node) { | 4819 void CodeGenerator::VisitCountOperation(CountOperation* node) { |
4834 Comment cmnt(masm_, "[ CountOperation"); | 4820 Comment cmnt(masm_, "[ CountOperation"); |
4835 | 4821 |
4836 bool is_postfix = node->is_postfix(); | 4822 bool is_postfix = node->is_postfix(); |
4837 bool is_increment = node->op() == Token::INC; | 4823 bool is_increment = node->op() == Token::INC; |
4838 | 4824 |
4839 Variable* var = node->expression()->AsVariableProxy()->AsVariable(); | 4825 Variable* var = node->expression()->AsVariableProxy()->AsVariable(); |
4840 bool is_const = (var != NULL && var->mode() == Variable::CONST); | 4826 bool is_const = (var != NULL && var->mode() == Variable::CONST); |
4841 | 4827 |
4842 // Postfix operators need a stack slot under the reference to hold | 4828 // Postfix operators need a stack slot under the reference to hold |
4843 // the old value while the new one is being stored. | 4829 // the old value while the new one is being stored. |
4844 if (is_postfix) { | 4830 if (is_postfix) { |
4845 frame_->Push(Smi::FromInt(0)); | 4831 frame_->Push(Smi::FromInt(0)); |
4846 } | 4832 } |
4847 | 4833 |
4848 { Reference target(this, node->expression()); | 4834 { Reference target(this, node->expression()); |
4849 if (target.is_illegal()) { | 4835 if (target.is_illegal()) { |
4850 // Spoof the virtual frame to have the expected height (one higher | 4836 // Spoof the virtual frame to have the expected height (one higher |
4851 // than on entry). | 4837 // than on entry). |
4852 if (!is_postfix) { | 4838 if (!is_postfix) { |
4853 frame_->Push(Smi::FromInt(0)); | 4839 frame_->Push(Smi::FromInt(0)); |
4854 } | 4840 } |
4855 return; | 4841 return; |
4856 } | 4842 } |
4857 target.TakeValue(NOT_INSIDE_TYPEOF); | 4843 target.TakeValue(NOT_INSIDE_TYPEOF); |
4858 | 4844 |
4859 DeferredCountOperation* deferred = | 4845 DeferredCountOperation* deferred = |
4860 new DeferredCountOperation(this, is_postfix, | 4846 new DeferredCountOperation(is_postfix, is_increment, target.size()); |
4861 is_increment, target.size()); | |
4862 | 4847 |
4863 Result value = frame_->Pop(); | 4848 Result value = frame_->Pop(); |
4864 value.ToRegister(); | 4849 value.ToRegister(); |
4865 | 4850 |
4866 // Postfix: Store the old value as the result. | 4851 // Postfix: Store the old value as the result. |
4867 if (is_postfix) { | 4852 if (is_postfix) { |
4868 // Explicitly back the slot for the old value with a new register. | 4853 // Explicitly back the slot for the old value with a new register. |
4869 // This improves performance in some cases. | 4854 // This improves performance in some cases. |
4870 Result old_value = allocator_->Allocate(); | 4855 Result old_value = allocator_->Allocate(); |
4871 ASSERT(old_value.is_valid()); | 4856 ASSERT(old_value.is_valid()); |
(...skipping 403 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5275 && (allocator()->count(ebx) == (frame()->is_used(ebx) ? 1 : 0)) | 5260 && (allocator()->count(ebx) == (frame()->is_used(ebx) ? 1 : 0)) |
5276 && (allocator()->count(ecx) == (frame()->is_used(ecx) ? 1 : 0)) | 5261 && (allocator()->count(ecx) == (frame()->is_used(ecx) ? 1 : 0)) |
5277 && (allocator()->count(edx) == (frame()->is_used(edx) ? 1 : 0)) | 5262 && (allocator()->count(edx) == (frame()->is_used(edx) ? 1 : 0)) |
5278 && (allocator()->count(edi) == (frame()->is_used(edi) ? 1 : 0)); | 5263 && (allocator()->count(edi) == (frame()->is_used(edi) ? 1 : 0)); |
5279 } | 5264 } |
5280 #endif | 5265 #endif |
5281 | 5266 |
5282 | 5267 |
5283 class DeferredReferenceGetNamedValue: public DeferredCode { | 5268 class DeferredReferenceGetNamedValue: public DeferredCode { |
5284 public: | 5269 public: |
5285 DeferredReferenceGetNamedValue(CodeGenerator* cgen, Handle<String> name) | 5270 DeferredReferenceGetNamedValue(Handle<String> name) : name_(name) { |
5286 : DeferredCode(cgen), name_(name) { | |
5287 set_comment("[ DeferredReferenceGetNamedValue"); | 5271 set_comment("[ DeferredReferenceGetNamedValue"); |
5288 } | 5272 } |
5289 | 5273 |
5290 virtual void Generate(); | 5274 virtual void Generate(); |
5291 | 5275 |
5292 Label* patch_site() { return &patch_site_; } | 5276 Label* patch_site() { return &patch_site_; } |
5293 | 5277 |
5294 private: | 5278 private: |
5295 Label patch_site_; | 5279 Label patch_site_; |
5296 Handle<String> name_; | 5280 Handle<String> name_; |
5297 }; | 5281 }; |
5298 | 5282 |
5299 | 5283 |
5300 void DeferredReferenceGetNamedValue::Generate() { | |
5301 CodeGenerator* cgen = generator(); | |
5302 Result receiver; | |
5303 enter()->Bind(&receiver); | |
5304 | |
5305 cgen->frame()->Push(&receiver); | |
5306 cgen->frame()->Push(name_); | |
5307 Result answer = cgen->frame()->CallLoadIC(RelocInfo::CODE_TARGET); | |
5308 // The call must be followed by a test eax instruction to indicate | |
5309 // that the inobject property case was inlined. | |
5310 ASSERT(answer.is_register() && answer.reg().is(eax)); | |
5311 // Store the delta to the map check instruction here in the test instruction. | |
5312 // Use masm_-> instead of the double underscore macro since the latter can't | |
5313 // return a value. | |
5314 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); | |
5315 // Here we use masm_-> instead of the double underscore macro because | |
5316 // this is the instruction that gets patched and coverage code gets in | |
5317 // the way. | |
5318 masm_->test(answer.reg(), Immediate(-delta_to_patch_site)); | |
5319 __ IncrementCounter(&Counters::named_load_inline_miss, 1); | |
5320 receiver = cgen->frame()->Pop(); | |
5321 exit_.Jump(&receiver, &answer); | |
5322 } | |
5323 | |
5324 | |
5325 class DeferredReferenceGetKeyedValue: public DeferredCode { | 5284 class DeferredReferenceGetKeyedValue: public DeferredCode { |
5326 public: | 5285 public: |
5327 DeferredReferenceGetKeyedValue(CodeGenerator* generator, bool is_global) | 5286 DeferredReferenceGetKeyedValue(bool is_global) : is_global_(is_global) { |
5328 : DeferredCode(generator), is_global_(is_global) { | |
5329 set_comment("[ DeferredReferenceGetKeyedValue"); | 5287 set_comment("[ DeferredReferenceGetKeyedValue"); |
5330 } | 5288 } |
5331 | 5289 |
5332 virtual void Generate(); | 5290 virtual void Generate(); |
5333 | 5291 |
5334 Label* patch_site() { return &patch_site_; } | 5292 Label* patch_site() { return &patch_site_; } |
5335 | 5293 |
5336 private: | 5294 private: |
5337 Label patch_site_; | 5295 Label patch_site_; |
5338 bool is_global_; | 5296 bool is_global_; |
5339 }; | 5297 }; |
5340 | 5298 |
5341 | 5299 |
| 5300 #undef __ |
| 5301 #define __ ACCESS_MASM(cgen()->masm()) |
| 5302 |
| 5303 |
// Slow path for an inlined named load: call the load IC, then emit the
// 'test eax' instruction whose immediate records the distance back to
// the inlined map check so the IC can patch it.
void DeferredReferenceGetNamedValue::Generate() {
  Result receiver;
  enter()->Bind(&receiver);

  cgen()->frame()->Push(&receiver);
  cgen()->frame()->Push(name_);
  Result answer = cgen()->frame()->CallLoadIC(RelocInfo::CODE_TARGET);
  // The call must be followed by a test eax instruction to indicate
  // that the inobject property case was inlined.
  ASSERT(answer.is_register() && answer.reg().is(eax));
  // Store the delta to the map check instruction here in the test
  // instruction.  Use cgen()->masm()-> instead of the __ macro since
  // the latter can't return a value.
  int delta_to_patch_site =
      cgen()->masm()->SizeOfCodeGeneratedSince(patch_site());
  // Here we use cgen()->masm()-> instead of the __ macro because this
  // is the instruction that gets patched and coverage code gets in the
  // way.
  cgen()->masm()->test(answer.reg(), Immediate(-delta_to_patch_site));
  __ IncrementCounter(&Counters::named_load_inline_miss, 1);
  receiver = cgen()->frame()->Pop();
  exit_.Jump(&receiver, &answer);
}
| 5327 |
| 5328 |
// Slow path for an inlined keyed load: call the keyed load IC, then
// emit the 'test eax' instruction whose immediate records the distance
// back to the inlined map check so the IC can patch it.
void DeferredReferenceGetKeyedValue::Generate() {
  Result receiver;
  Result key;
  enter()->Bind(&receiver, &key);
  cgen()->frame()->Push(&receiver);  // First IC argument.
  cgen()->frame()->Push(&key);       // Second IC argument.

  // Calculate the delta from the IC call instruction to the map check
  // cmp instruction in the inlined version.  This delta is stored in
  // a test(eax, delta) instruction after the call so that we can find
  // it in the IC initialization code and patch the cmp instruction.
  // This means that we cannot allow test instructions after calls to
  // KeyedLoadIC stubs in other places.
  RelocInfo::Mode mode = is_global_
      ? RelocInfo::CODE_TARGET_CONTEXT
      : RelocInfo::CODE_TARGET;
  Result value = cgen()->frame()->CallKeyedLoadIC(mode);
  // The result needs to be specifically the eax register because the
  // offset to the patch site will be expected in a test eax
  // instruction.
  ASSERT(value.is_register() && value.reg().is(eax));
  // The delta from the start of the map-compare instruction to the test
  // instruction.  We use cgen()->masm() directly here instead of the __
  // macro because the macro sometimes uses macro expansion to turn into
  // something that can't return a value.  This is encountered when
  // doing generated code coverage tests.
  int delta_to_patch_site =
      cgen()->masm()->SizeOfCodeGeneratedSince(patch_site());
  // Here we use cgen()->masm()-> instead of the __ macro because this
  // is the instruction that gets patched and coverage code gets in the
  // way.
  cgen()->masm()->test(value.reg(), Immediate(-delta_to_patch_site));
  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);

  // The receiver and key were spilled by the call, so their state as
  // constants or copies has been changed.  Thus, they need to be
  // "mergable" in the block at the exit label and are therefore
  // passed as return results here.
  key = cgen()->frame()->Pop();
  receiver = cgen()->frame()->Pop();
  exit_.Jump(&receiver, &key, &value);
}
5383 | 5371 |
5384 | 5372 |
5385 #undef __ | 5373 #undef __ |
5386 #define __ ACCESS_MASM(masm) | 5374 #define __ ACCESS_MASM(masm) |
5387 | 5375 |
5388 Handle<String> Reference::GetName() { | 5376 Handle<String> Reference::GetName() { |
5389 ASSERT(type_ == NAMED); | 5377 ASSERT(type_ == NAMED); |
5390 Property* property = expression_->AsProperty(); | 5378 Property* property = expression_->AsProperty(); |
(...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5443 Result answer = cgen_->frame()->CallLoadIC(mode); | 5431 Result answer = cgen_->frame()->CallLoadIC(mode); |
5444 // A test eax instruction following the call signals that the | 5432 // A test eax instruction following the call signals that the |
5445 // inobject property case was inlined. Ensure that there is not | 5433 // inobject property case was inlined. Ensure that there is not |
5446 // a test eax instruction here. | 5434 // a test eax instruction here. |
5447 __ nop(); | 5435 __ nop(); |
5448 cgen_->frame()->Push(&answer); | 5436 cgen_->frame()->Push(&answer); |
5449 } else { | 5437 } else { |
5450 // Inline the inobject property case. | 5438 // Inline the inobject property case. |
5451 Comment cmnt(masm, "[ Inlined named property load"); | 5439 Comment cmnt(masm, "[ Inlined named property load"); |
5452 DeferredReferenceGetNamedValue* deferred = | 5440 DeferredReferenceGetNamedValue* deferred = |
5453 new DeferredReferenceGetNamedValue(cgen_, GetName()); | 5441 new DeferredReferenceGetNamedValue(GetName()); |
5454 Result receiver = cgen_->frame()->Pop(); | 5442 Result receiver = cgen_->frame()->Pop(); |
5455 receiver.ToRegister(); | 5443 receiver.ToRegister(); |
5456 | 5444 |
5457 // Try to preallocate the value register so that all frames | 5445 // Try to preallocate the value register so that all frames |
5458 // reaching the deferred code are identical. | 5446 // reaching the deferred code are identical. |
5459 Result value = cgen_->allocator()->AllocateWithoutSpilling(); | 5447 Result value = cgen_->allocator()->AllocateWithoutSpilling(); |
5460 if (value.is_valid()) { | 5448 if (value.is_valid()) { |
5461 deferred->SetEntryFrame(&receiver); | 5449 deferred->SetEntryFrame(&receiver); |
5462 } | 5450 } |
5463 | 5451 |
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5508 Variable* var = expression_->AsVariableProxy()->AsVariable(); | 5496 Variable* var = expression_->AsVariableProxy()->AsVariable(); |
5509 bool is_global = var != NULL; | 5497 bool is_global = var != NULL; |
5510 ASSERT(!is_global || var->is_global()); | 5498 ASSERT(!is_global || var->is_global()); |
5511 // Inline array load code if inside of a loop. We do not know | 5499 // Inline array load code if inside of a loop. We do not know |
5512 // the receiver map yet, so we initially generate the code with | 5500 // the receiver map yet, so we initially generate the code with |
5513 // a check against an invalid map. In the inline cache code, we | 5501 // a check against an invalid map. In the inline cache code, we |
5514 // patch the map check if appropriate. | 5502 // patch the map check if appropriate. |
5515 if (cgen_->loop_nesting() > 0) { | 5503 if (cgen_->loop_nesting() > 0) { |
5516 Comment cmnt(masm, "[ Inlined array index load"); | 5504 Comment cmnt(masm, "[ Inlined array index load"); |
5517 DeferredReferenceGetKeyedValue* deferred = | 5505 DeferredReferenceGetKeyedValue* deferred = |
5518 new DeferredReferenceGetKeyedValue(cgen_, is_global); | 5506 new DeferredReferenceGetKeyedValue(is_global); |
5519 | 5507 |
5520 Result key = cgen_->frame()->Pop(); | 5508 Result key = cgen_->frame()->Pop(); |
5521 Result receiver = cgen_->frame()->Pop(); | 5509 Result receiver = cgen_->frame()->Pop(); |
5522 key.ToRegister(); | 5510 key.ToRegister(); |
5523 receiver.ToRegister(); | 5511 receiver.ToRegister(); |
5524 | 5512 |
5525 // Try to preallocate the elements and index scratch registers | 5513 // Try to preallocate the elements and index scratch registers |
5526 // so that all frames reaching the deferred code are identical. | 5514 // so that all frames reaching the deferred code are identical. |
5527 Result elements = cgen_->allocator()->AllocateWithoutSpilling(); | 5515 Result elements = cgen_->allocator()->AllocateWithoutSpilling(); |
5528 Result index = cgen_->allocator()->AllocateWithoutSpilling(); | 5516 Result index = cgen_->allocator()->AllocateWithoutSpilling(); |
(...skipping 207 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5736 // Return 1/0 for true/false in eax. | 5724 // Return 1/0 for true/false in eax. |
5737 __ bind(&true_result); | 5725 __ bind(&true_result); |
5738 __ mov(eax, 1); | 5726 __ mov(eax, 1); |
5739 __ ret(1 * kPointerSize); | 5727 __ ret(1 * kPointerSize); |
5740 __ bind(&false_result); | 5728 __ bind(&false_result); |
5741 __ mov(eax, 0); | 5729 __ mov(eax, 0); |
5742 __ ret(1 * kPointerSize); | 5730 __ ret(1 * kPointerSize); |
5743 } | 5731 } |
5744 | 5732 |
5745 | 5733 |
5746 #undef __ | |
5747 #define __ ACCESS_MASM(masm_) | |
5748 | |
5749 Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left, | 5734 Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left, |
5750 Result* right) { | 5735 Result* right) { |
| 5736 MacroAssembler* masm = cgen()->masm(); |
5751 // Perform fast-case smi code for the operation (left <op> right) and | 5737 // Perform fast-case smi code for the operation (left <op> right) and |
5752 // returns the result in a Result. | 5738 // returns the result in a Result. |
5753 // If any fast-case tests fail, it jumps to the slow-case deferred code, | 5739 // If any fast-case tests fail, it jumps to the slow-case deferred code, |
5754 // which calls the binary operation stub, with the arguments (in registers) | 5740 // which calls the binary operation stub, with the arguments (in registers) |
5755 // on top of the frame. | 5741 // on top of the frame. |
5756 // Consumes its arguments (sets left and right to invalid and frees their | 5742 // Consumes its arguments (sets left and right to invalid and frees their |
5757 // registers). | 5743 // registers). |
5758 | 5744 |
5759 left->ToRegister(); | 5745 left->ToRegister(); |
5760 right->ToRegister(); | 5746 right->ToRegister(); |
5761 // A newly allocated register answer is used to hold the answer. | 5747 // A newly allocated register answer is used to hold the answer. |
5762 // The registers containing left and right are not modified in | 5748 // The registers containing left and right are not modified in |
5763 // most cases, so they usually don't need to be spilled in the fast case. | 5749 // most cases, so they usually don't need to be spilled in the fast case. |
5764 Result answer = generator()->allocator()->Allocate(); | 5750 Result answer = cgen()->allocator()->Allocate(); |
5765 | 5751 |
5766 ASSERT(answer.is_valid()); | 5752 ASSERT(answer.is_valid()); |
5767 // Perform the smi check. | 5753 // Perform the smi check. |
5768 if (left->reg().is(right->reg())) { | 5754 if (left->reg().is(right->reg())) { |
5769 __ test(left->reg(), Immediate(kSmiTagMask)); | 5755 __ test(left->reg(), Immediate(kSmiTagMask)); |
5770 } else { | 5756 } else { |
5771 __ mov(answer.reg(), left->reg()); | 5757 __ mov(answer.reg(), left->reg()); |
5772 __ or_(answer.reg(), Operand(right->reg())); | 5758 __ or_(answer.reg(), Operand(right->reg())); |
5773 ASSERT(kSmiTag == 0); // adjust zero check if not the case | 5759 ASSERT(kSmiTag == 0); // adjust zero check if not the case |
5774 __ test(answer.reg(), Immediate(kSmiTagMask)); | 5760 __ test(answer.reg(), Immediate(kSmiTagMask)); |
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5825 // Div and mod use the registers eax and edx. Left and right must | 5811 // Div and mod use the registers eax and edx. Left and right must |
5826 // be preserved, because the original operands are needed if we switch | 5812 // be preserved, because the original operands are needed if we switch |
5827 // to the slow case. Move them if either is in eax or edx. | 5813 // to the slow case. Move them if either is in eax or edx. |
5828 // The Result answer should be changed into an alias for eax. | 5814 // The Result answer should be changed into an alias for eax. |
5829 // Precondition: | 5815 // Precondition: |
5830 // The Results left and right are valid. They may be the same register, | 5816 // The Results left and right are valid. They may be the same register, |
5831 // and may be unspilled. The Result answer is valid and is distinct | 5817 // and may be unspilled. The Result answer is valid and is distinct |
5832 // from left and right, and is spilled. | 5818 // from left and right, and is spilled. |
5833 // The value in left is copied to answer. | 5819 // The value in left is copied to answer. |
5834 | 5820 |
5835 Result reg_eax = generator()->allocator()->Allocate(eax); | 5821 Result reg_eax = cgen()->allocator()->Allocate(eax); |
5836 Result reg_edx = generator()->allocator()->Allocate(edx); | 5822 Result reg_edx = cgen()->allocator()->Allocate(edx); |
5837 // These allocations may have failed, if one of left, right, or answer | 5823 // These allocations may have failed, if one of left, right, or answer |
5838 // is in register eax or edx. | 5824 // is in register eax or edx. |
5839 bool left_copied_to_eax = false; // We will make sure this becomes true. | 5825 bool left_copied_to_eax = false; // We will make sure this becomes true. |
5840 | 5826 |
5841 // Part 1: Get eax | 5827 // Part 1: Get eax |
5842 if (answer.reg().is(eax)) { | 5828 if (answer.reg().is(eax)) { |
5843 reg_eax = answer; | 5829 reg_eax = answer; |
5844 left_copied_to_eax = true; | 5830 left_copied_to_eax = true; |
5845 } else if (right->reg().is(eax) || left->reg().is(eax)) { | 5831 } else if (right->reg().is(eax) || left->reg().is(eax)) { |
5846 // We need a non-edx register to move one or both of left and right to. | 5832 // We need a non-edx register to move one or both of left and right to. |
5847 // We use answer if it is not edx, otherwise we allocate one. | 5833 // We use answer if it is not edx, otherwise we allocate one. |
5848 if (answer.reg().is(edx)) { | 5834 if (answer.reg().is(edx)) { |
5849 reg_edx = answer; | 5835 reg_edx = answer; |
5850 answer = generator()->allocator()->Allocate(); | 5836 answer = cgen()->allocator()->Allocate(); |
5851 ASSERT(answer.is_valid()); | 5837 ASSERT(answer.is_valid()); |
5852 } | 5838 } |
5853 | 5839 |
5854 if (left->reg().is(eax)) { | 5840 if (left->reg().is(eax)) { |
5855 reg_eax = *left; | 5841 reg_eax = *left; |
5856 left_copied_to_eax = true; | 5842 left_copied_to_eax = true; |
5857 *left = answer; | 5843 *left = answer; |
5858 } | 5844 } |
5859 if (right->reg().is(eax)) { | 5845 if (right->reg().is(eax)) { |
5860 reg_eax = *right; | 5846 reg_eax = *right; |
(...skipping 10 matching lines...) Expand all Loading... |
5871 // Part 2: Get edx | 5857 // Part 2: Get edx |
5872 // reg_edx is invalid if and only if either left, right, | 5858 // reg_edx is invalid if and only if either left, right, |
5873 // or answer is in edx. If edx is valid, then either edx | 5859 // or answer is in edx. If edx is valid, then either edx |
5874 // was free, or it was answer, but answer was reallocated. | 5860 // was free, or it was answer, but answer was reallocated. |
5875 if (answer.reg().is(edx)) { | 5861 if (answer.reg().is(edx)) { |
5876 reg_edx = answer; | 5862 reg_edx = answer; |
5877 } else if (right->reg().is(edx) || left->reg().is(edx)) { | 5863 } else if (right->reg().is(edx) || left->reg().is(edx)) { |
5878 // Is answer used? | 5864 // Is answer used? |
5879 if (answer.reg().is(eax) || answer.reg().is(left->reg()) || | 5865 if (answer.reg().is(eax) || answer.reg().is(left->reg()) || |
5880 answer.reg().is(right->reg())) { | 5866 answer.reg().is(right->reg())) { |
5881 answer = generator()->allocator()->Allocate(); | 5867 answer = cgen()->allocator()->Allocate(); |
5882 ASSERT(answer.is_valid()); // We cannot hit both Allocate() calls. | 5868 ASSERT(answer.is_valid()); // We cannot hit both Allocate() calls. |
5883 } | 5869 } |
5884 if (left->reg().is(edx)) { | 5870 if (left->reg().is(edx)) { |
5885 reg_edx = *left; | 5871 reg_edx = *left; |
5886 *left = answer; | 5872 *left = answer; |
5887 } | 5873 } |
5888 if (right->reg().is(edx)) { | 5874 if (right->reg().is(edx)) { |
5889 reg_edx = *right; | 5875 reg_edx = *right; |
5890 *right = answer; | 5876 *right = answer; |
5891 } | 5877 } |
5892 __ mov(answer.reg(), edx); | 5878 __ mov(answer.reg(), edx); |
5893 } | 5879 } |
5894 // End of Part 2 | 5880 // End of Part 2 |
5895 ASSERT(reg_edx.is_valid()); | 5881 ASSERT(reg_edx.is_valid()); |
5896 ASSERT(!left->reg().is(eax)); | 5882 ASSERT(!left->reg().is(eax)); |
5897 ASSERT(!right->reg().is(eax)); | 5883 ASSERT(!right->reg().is(eax)); |
5898 | 5884 |
5899 answer = reg_eax; // May free answer, if it was never used. | 5885 answer = reg_eax; // May free answer, if it was never used. |
5900 generator()->frame()->Spill(eax); | 5886 cgen()->frame()->Spill(eax); |
5901 if (!left_copied_to_eax) { | 5887 if (!left_copied_to_eax) { |
5902 __ mov(eax, left->reg()); | 5888 __ mov(eax, left->reg()); |
5903 left_copied_to_eax = true; | 5889 left_copied_to_eax = true; |
5904 } | 5890 } |
5905 generator()->frame()->Spill(edx); | 5891 cgen()->frame()->Spill(edx); |
5906 | 5892 |
5907 // Postcondition: | 5893 // Postcondition: |
5908 // reg_eax, reg_edx are valid, correct, and spilled. | 5894 // reg_eax, reg_edx are valid, correct, and spilled. |
5909 // reg_eax contains the value originally in left | 5895 // reg_eax contains the value originally in left |
5910 // left and right are not eax or edx. They may or may not be | 5896 // left and right are not eax or edx. They may or may not be |
5911 // spilled or distinct. | 5897 // spilled or distinct. |
5912 // answer is an alias for reg_eax. | 5898 // answer is an alias for reg_eax. |
5913 | 5899 |
5914 // Sign extend eax into edx:eax. | 5900 // Sign extend eax into edx:eax. |
5915 __ cdq(); | 5901 __ cdq(); |
(...skipping 69 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5985 // Left is in two registers already, so even if left or answer is ecx, | 5971 // Left is in two registers already, so even if left or answer is ecx, |
5986 // we can move right to it, and use the other one. | 5972 // we can move right to it, and use the other one. |
5987 // Right operand must be in register cl because x86 likes it that way. | 5973 // Right operand must be in register cl because x86 likes it that way. |
5988 if (right->reg().is(ecx)) { | 5974 if (right->reg().is(ecx)) { |
5989 // Right is already in the right place. Left may be in the | 5975 // Right is already in the right place. Left may be in the |
5990 // same register, which causes problems. Always use answer | 5976 // same register, which causes problems. Always use answer |
5991 // instead of left, even if left is not ecx, since this avoids | 5977 // instead of left, even if left is not ecx, since this avoids |
5992 // spilling left. | 5978 // spilling left. |
5993 *left = answer; | 5979 *left = answer; |
5994 } else if (left->reg().is(ecx)) { | 5980 } else if (left->reg().is(ecx)) { |
5995 generator()->frame()->Spill(left->reg()); | 5981 cgen()->frame()->Spill(left->reg()); |
5996 __ mov(left->reg(), right->reg()); | 5982 __ mov(left->reg(), right->reg()); |
5997 *right = *left; | 5983 *right = *left; |
5998 *left = answer; // Use copy of left in answer as left. | 5984 *left = answer; // Use copy of left in answer as left. |
5999 } else if (answer.reg().is(ecx)) { | 5985 } else if (answer.reg().is(ecx)) { |
6000 __ mov(answer.reg(), right->reg()); | 5986 __ mov(answer.reg(), right->reg()); |
6001 *right = answer; | 5987 *right = answer; |
6002 } else { | 5988 } else { |
6003 Result reg_ecx = generator()->allocator()->Allocate(ecx); | 5989 Result reg_ecx = cgen()->allocator()->Allocate(ecx); |
6004 ASSERT(reg_ecx.is_valid()); | 5990 ASSERT(reg_ecx.is_valid()); |
6005 __ mov(ecx, right->reg()); | 5991 __ mov(ecx, right->reg()); |
6006 *right = reg_ecx; | 5992 *right = reg_ecx; |
6007 // Answer and left both contain the left operand. Use answer, so | 5993 // Answer and left both contain the left operand. Use answer, so |
6008 // left is not spilled. | 5994 // left is not spilled. |
6009 *left = answer; | 5995 *left = answer; |
6010 } | 5996 } |
6011 ASSERT(left->reg().is_valid()); | 5997 ASSERT(left->reg().is_valid()); |
6012 ASSERT(!left->reg().is(ecx)); | 5998 ASSERT(!left->reg().is(ecx)); |
6013 ASSERT(right->reg().is(ecx)); | 5999 ASSERT(right->reg().is(ecx)); |
6014 answer.Unuse(); // Answer may now be being used for left or right. | 6000 answer.Unuse(); // Answer may now be being used for left or right. |
6015 // We will modify left and right, which we do not do in any other | 6001 // We will modify left and right, which we do not do in any other |
6016 // binary operation. The exits to slow code need to restore the | 6002 // binary operation. The exits to slow code need to restore the |
6017 // original values of left and right, or at least values that give | 6003 // original values of left and right, or at least values that give |
6018 // the same answer. | 6004 // the same answer. |
6019 | 6005 |
6020 // We are modifying left and right. They must be spilled! | 6006 // We are modifying left and right. They must be spilled! |
6021 generator()->frame()->Spill(left->reg()); | 6007 cgen()->frame()->Spill(left->reg()); |
6022 generator()->frame()->Spill(right->reg()); | 6008 cgen()->frame()->Spill(right->reg()); |
6023 | 6009 |
6024 // Remove tags from operands (but keep sign). | 6010 // Remove tags from operands (but keep sign). |
6025 __ sar(left->reg(), kSmiTagSize); | 6011 __ sar(left->reg(), kSmiTagSize); |
6026 __ sar(ecx, kSmiTagSize); | 6012 __ sar(ecx, kSmiTagSize); |
6027 // Perform the operation. | 6013 // Perform the operation. |
6028 switch (op_) { | 6014 switch (op_) { |
6029 case Token::SAR: | 6015 case Token::SAR: |
6030 __ sar(left->reg()); | 6016 __ sar(left->reg()); |
6031 // No checks of result necessary | 6017 // No checks of result necessary |
6032 break; | 6018 break; |
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
6082 default: | 6068 default: |
6083 UNREACHABLE(); | 6069 UNREACHABLE(); |
6084 break; | 6070 break; |
6085 } | 6071 } |
6086 left->Unuse(); | 6072 left->Unuse(); |
6087 right->Unuse(); | 6073 right->Unuse(); |
6088 return answer; | 6074 return answer; |
6089 } | 6075 } |
6090 | 6076 |
6091 | 6077 |
6092 #undef __ | |
6093 #define __ ACCESS_MASM(masm) | |
6094 | |
6095 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { | 6078 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { |
6096 // Perform fast-case smi code for the operation (eax <op> ebx) and | 6079 // Perform fast-case smi code for the operation (eax <op> ebx) and |
6097 // leave result in register eax. | 6080 // leave result in register eax. |
6098 | 6081 |
6099 // Prepare the smi check of both operands by or'ing them together | 6082 // Prepare the smi check of both operands by or'ing them together |
6100 // before checking against the smi mask. | 6083 // before checking against the smi mask. |
6101 __ mov(ecx, Operand(ebx)); | 6084 __ mov(ecx, Operand(ebx)); |
6102 __ or_(ecx, Operand(eax)); | 6085 __ or_(ecx, Operand(eax)); |
6103 | 6086 |
6104 switch (op_) { | 6087 switch (op_) { |
(...skipping 1200 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
7305 | 7288 |
7306 // Slow-case: Go through the JavaScript implementation. | 7289 // Slow-case: Go through the JavaScript implementation. |
7307 __ bind(&slow); | 7290 __ bind(&slow); |
7308 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); | 7291 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); |
7309 } | 7292 } |
7310 | 7293 |
7311 | 7294 |
7312 #undef __ | 7295 #undef __ |
7313 | 7296 |
7314 } } // namespace v8::internal | 7297 } } // namespace v8::internal |
OLD | NEW |