Chromium Code Reviews

Side by Side Diff: src/ia32/codegen-ia32.cc

Issue 118226: Simplify the processing of deferred code in the code generator. Our... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 11 years, 6 months ago
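
This patch replaces the JumpTarget-based protocol for deferred code, in which a deferred section bound its entry with enter()->Bind(...), moved operands through the virtual frame as Results, and jumped back through exit_ with an answer. In the new scheme each DeferredCode subclass captures its operand and result registers in its constructor, call sites branch to the slow path with deferred->Branch(cc) and rejoin with deferred->BindExit(), and registers are saved and restored around the deferred call by the new platform-specific SaveRegisters() and RestoreRegisters(). A minimal before/after sketch of the Generate() shape, condensed from the hunks below (illustrative only, not a compilable excerpt of V8):

    // Before: operands and results flow through JumpTargets and the frame.
    void DeferredOp::Generate() {
      Result left, right;
      enter()->Bind(&left, &right);        // entry JumpTarget carries operands
      cgen()->frame()->Push(&left);
      cgen()->frame()->Push(&right);
      GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
      Result answer = cgen()->frame()->CallStub(&stub, 2);
      exit_.Jump(&answer);                 // exit JumpTarget carries the result
    }

    // After: operands live in registers fixed when the deferred object is built.
    void DeferredOp::Generate() {
      __ push(left_);
      __ push(right_);
      GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
      __ CallStub(&stub);
      if (!dst_.is(eax)) __ mov(dst_, eax);  // stub leaves its result in eax
    }

DeferredOp here stands in for the concrete classes in the diff (DeferredInlineBinaryOperation and friends); the before/after bodies are lifted from those hunks.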
1 // Copyright 2006-2009 the V8 project authors. All rights reserved. 1 // Copyright 2006-2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 24 matching lines...)
35 #include "register-allocator-inl.h" 35 #include "register-allocator-inl.h"
36 #include "runtime.h" 36 #include "runtime.h"
37 #include "scopes.h" 37 #include "scopes.h"
38 38
39 namespace v8 { 39 namespace v8 {
40 namespace internal { 40 namespace internal {
41 41
42 #define __ ACCESS_MASM(masm_) 42 #define __ ACCESS_MASM(masm_)
43 43
44 // ------------------------------------------------------------------------- 44 // -------------------------------------------------------------------------
45 // Platform-specific DeferredCode functions.
46
47 void DeferredCode::SaveRegisters() {
48 for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
49 int action = registers_[i];
50 if (action == kPush) {
51 __ push(RegisterAllocator::ToRegister(i));
52 } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
53 __ mov(Operand(ebp, action), RegisterAllocator::ToRegister(i));
54 }
55 }
56 }
57
58
59 void DeferredCode::RestoreRegisters() {
60 // Restore registers in reverse order due to the stack.
61 for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
62 int action = registers_[i];
63 if (action == kPush) {
64 __ pop(RegisterAllocator::ToRegister(i));
65 } else if (action != kIgnore) {
66 action &= ~kSyncedFlag;
67 __ mov(RegisterAllocator::ToRegister(i), Operand(ebp, action));
68 }
69 }
70 }
71
72
73 // -------------------------------------------------------------------------
45 // CodeGenState implementation. 74 // CodeGenState implementation.
46 75
47 CodeGenState::CodeGenState(CodeGenerator* owner) 76 CodeGenState::CodeGenState(CodeGenerator* owner)
48 : owner_(owner), 77 : owner_(owner),
49 typeof_state_(NOT_INSIDE_TYPEOF), 78 typeof_state_(NOT_INSIDE_TYPEOF),
50 destination_(NULL), 79 destination_(NULL),
51 previous_(NULL) { 80 previous_(NULL) {
52 owner_->set_state(this); 81 owner_->set_state(this);
53 } 82 }
54 83
(...skipping 11 matching lines...)
66 95
67 CodeGenState::~CodeGenState() { 96 CodeGenState::~CodeGenState() {
68 ASSERT(owner_->state() == this); 97 ASSERT(owner_->state() == this);
69 owner_->set_state(previous_); 98 owner_->set_state(previous_);
70 } 99 }
71 100
72 101
73 // ------------------------------------------------------------------------- 102 // -------------------------------------------------------------------------
74 // CodeGenerator implementation 103 // CodeGenerator implementation
75 104
76 CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script, 105 CodeGenerator::CodeGenerator(int buffer_size,
106 Handle<Script> script,
77 bool is_eval) 107 bool is_eval)
78 : is_eval_(is_eval), 108 : is_eval_(is_eval),
79 script_(script), 109 script_(script),
80 deferred_(8), 110 deferred_(8),
81 masm_(new MacroAssembler(NULL, buffer_size)), 111 masm_(new MacroAssembler(NULL, buffer_size)),
82 scope_(NULL), 112 scope_(NULL),
83 frame_(NULL), 113 frame_(NULL),
84 allocator_(NULL), 114 allocator_(NULL),
85 state_(NULL), 115 state_(NULL),
86 loop_nesting_(0), 116 loop_nesting_(0),
(...skipping 685 matching lines...)
772 case Token::SHL: return "GenericBinaryOpStub_SHL"; 802 case Token::SHL: return "GenericBinaryOpStub_SHL";
773 case Token::SHR: return "GenericBinaryOpStub_SHR"; 803 case Token::SHR: return "GenericBinaryOpStub_SHR";
774 default: return "GenericBinaryOpStub"; 804 default: return "GenericBinaryOpStub";
775 } 805 }
776 } 806 }
777 807
778 808
779 // Call the specialized stub for a binary operation. 809 // Call the specialized stub for a binary operation.
780 class DeferredInlineBinaryOperation: public DeferredCode { 810 class DeferredInlineBinaryOperation: public DeferredCode {
781 public: 811 public:
782 DeferredInlineBinaryOperation(Token::Value op, OverwriteMode mode) 812 DeferredInlineBinaryOperation(Token::Value op,
783 : op_(op), mode_(mode) { 813 Register dst,
814 Register left,
815 Register right,
816 OverwriteMode mode)
817 : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
784 set_comment("[ DeferredInlineBinaryOperation"); 818 set_comment("[ DeferredInlineBinaryOperation");
785 } 819 }
786 820
787 virtual void Generate(); 821 virtual void Generate();
788 822
789 private: 823 private:
790 Token::Value op_; 824 Token::Value op_;
825 Register dst_;
826 Register left_;
827 Register right_;
791 OverwriteMode mode_; 828 OverwriteMode mode_;
792 }; 829 };
793 830
794 831
795 void DeferredInlineBinaryOperation::Generate() { 832 void DeferredInlineBinaryOperation::Generate() {
796 Result left; 833 __ push(left_);
797 Result right; 834 __ push(right_);
798 enter()->Bind(&left, &right);
799 cgen()->frame()->Push(&left);
800 cgen()->frame()->Push(&right);
801 GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED); 835 GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
802 Result answer = cgen()->frame()->CallStub(&stub, 2); 836 __ CallStub(&stub);
803 exit_.Jump(&answer); 837 if (!dst_.is(eax)) __ mov(dst_, eax);
804 } 838 }
805 839
806 840
807 void CodeGenerator::GenericBinaryOperation(Token::Value op, 841 void CodeGenerator::GenericBinaryOperation(Token::Value op,
808 SmiAnalysis* type, 842 SmiAnalysis* type,
809 OverwriteMode overwrite_mode) { 843 OverwriteMode overwrite_mode) {
810 Comment cmnt(masm_, "[ BinaryOperation"); 844 Comment cmnt(masm_, "[ BinaryOperation");
811 Comment cmnt_token(masm_, Token::String(op)); 845 Comment cmnt_token(masm_, Token::String(op));
812 846
813 if (op == Token::COMMA) { 847 if (op == Token::COMMA) {
(...skipping 175 matching lines...)
989 break; 1023 break;
990 } 1024 }
991 if (answer_object == Heap::undefined_value()) { 1025 if (answer_object == Heap::undefined_value()) {
992 return false; 1026 return false;
993 } 1027 }
994 frame_->Push(Handle<Object>(answer_object)); 1028 frame_->Push(Handle<Object>(answer_object));
995 return true; 1029 return true;
996 } 1030 }
997 1031
998 1032
1033 // Implements a binary operation using a deferred code object and some
1034 // inline code to operate on smis quickly.
999 void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, 1035 void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
1000 Result* left, 1036 Result* left,
1001 Result* right, 1037 Result* right,
1002 OverwriteMode overwrite_mode) { 1038 OverwriteMode overwrite_mode) {
1003 // Implements a binary operation using a deferred code object and some
1004 // inline code to operate on smis quickly.
1005 DeferredInlineBinaryOperation* deferred =
1006 new DeferredInlineBinaryOperation(op, overwrite_mode);
1007
1008 // Special handling of div and mod because they use fixed registers. 1039 // Special handling of div and mod because they use fixed registers.
1009 if (op == Token::DIV || op == Token::MOD) { 1040 if (op == Token::DIV || op == Token::MOD) {
1010 // We need eax as the quotient register, edx as the remainder 1041 // We need eax as the quotient register, edx as the remainder
1011 // register, neither left nor right in eax or edx, and left copied 1042 // register, neither left nor right in eax or edx, and left copied
1012 // to eax. 1043 // to eax.
1013 Result quotient; 1044 Result quotient;
1014 Result remainder; 1045 Result remainder;
1015 bool left_is_in_eax = false; 1046 bool left_is_in_eax = false;
1016 // Step 1: get eax for quotient. 1047 // Step 1: get eax for quotient.
1017 if ((left->is_register() && left->reg().is(eax)) || 1048 if ((left->is_register() && left->reg().is(eax)) ||
(...skipping 44 matching lines...)
1062 // Neither left nor right is in edx. 1093 // Neither left nor right is in edx.
1063 remainder = allocator_->Allocate(edx); 1094 remainder = allocator_->Allocate(edx);
1064 } 1095 }
1065 } 1096 }
1066 ASSERT(remainder.is_register() && remainder.reg().is(edx)); 1097 ASSERT(remainder.is_register() && remainder.reg().is(edx));
1067 ASSERT(!(left->is_register() && left->reg().is(edx))); 1098 ASSERT(!(left->is_register() && left->reg().is(edx)));
1068 ASSERT(!(right->is_register() && right->reg().is(edx))); 1099 ASSERT(!(right->is_register() && right->reg().is(edx)));
1069 1100
1070 left->ToRegister(); 1101 left->ToRegister();
1071 right->ToRegister(); 1102 right->ToRegister();
1072 frame_->Spill(quotient.reg()); 1103 frame_->Spill(eax);
1073 frame_->Spill(remainder.reg()); 1104 frame_->Spill(edx);
1074 1105
1075 // Check that left and right are smi tagged. 1106 // Check that left and right are smi tagged.
1107 DeferredInlineBinaryOperation* deferred =
1108 new DeferredInlineBinaryOperation(op,
1109 (op == Token::DIV) ? eax : edx,
1110 left->reg(),
1111 right->reg(),
1112 overwrite_mode);
1076 if (left->reg().is(right->reg())) { 1113 if (left->reg().is(right->reg())) {
1077 __ test(left->reg(), Immediate(kSmiTagMask)); 1114 __ test(left->reg(), Immediate(kSmiTagMask));
1078 } else { 1115 } else {
1079 // Use the quotient register as a scratch for the tag check. 1116 // Use the quotient register as a scratch for the tag check.
1080 if (!left_is_in_eax) __ mov(quotient.reg(), left->reg()); 1117 if (!left_is_in_eax) __ mov(eax, left->reg());
1081 left_is_in_eax = false; 1118 left_is_in_eax = false; // About to destroy the value in eax.
1082 __ or_(quotient.reg(), Operand(right->reg())); 1119 __ or_(eax, Operand(right->reg()));
1083 ASSERT(kSmiTag == 0); // Adjust test if not the case. 1120 ASSERT(kSmiTag == 0); // Adjust test if not the case.
1084 __ test(quotient.reg(), Immediate(kSmiTagMask)); 1121 __ test(eax, Immediate(kSmiTagMask));
1085 } 1122 }
1086 deferred->SetEntryFrame(left, right); 1123 deferred->Branch(not_zero);
1087 deferred->enter()->Branch(not_zero, left, right);
1088 1124
1089 if (!left_is_in_eax) __ mov(quotient.reg(), left->reg()); 1125 if (!left_is_in_eax) __ mov(eax, left->reg());
1090
1091 // Sign extend eax into edx:eax. 1126 // Sign extend eax into edx:eax.
1092 __ cdq(); 1127 __ cdq();
1093 // Check for 0 divisor. 1128 // Check for 0 divisor.
1094 __ test(right->reg(), Operand(right->reg())); 1129 __ test(right->reg(), Operand(right->reg()));
1095 deferred->enter()->Branch(zero, left, right); 1130 deferred->Branch(zero);
1096 // Divide edx:eax by the right operand. 1131 // Divide edx:eax by the right operand.
1097 __ idiv(right->reg()); 1132 __ idiv(right->reg());
1098 1133
1099 // Complete the operation. 1134 // Complete the operation.
1100 if (op == Token::DIV) { 1135 if (op == Token::DIV) {
1101 // Check for negative zero result. If result is zero, and divisor 1136 // Check for negative zero result. If result is zero, and divisor
1102 // is negative, return a floating point negative zero. The 1137 // is negative, return a floating point negative zero. The
1103 // virtual frame is unchanged in this block, so local control flow 1138 // virtual frame is unchanged in this block, so local control flow
1104 // can use a Label rather than a JumpTarget. 1139 // can use a Label rather than a JumpTarget.
1105 Label non_zero_result; 1140 Label non_zero_result;
1106 __ test(left->reg(), Operand(left->reg())); 1141 __ test(left->reg(), Operand(left->reg()));
1107 __ j(not_zero, &non_zero_result); 1142 __ j(not_zero, &non_zero_result);
1108 __ test(right->reg(), Operand(right->reg())); 1143 __ test(right->reg(), Operand(right->reg()));
1109 deferred->enter()->Branch(negative, left, right); 1144 deferred->Branch(negative);
1110 __ bind(&non_zero_result); 1145 __ bind(&non_zero_result);
1111 // Check for the corner case of dividing the most negative smi by 1146 // Check for the corner case of dividing the most negative smi by
1112 // -1. We cannot use the overflow flag, since it is not set by 1147 // -1. We cannot use the overflow flag, since it is not set by
1113 // idiv instruction. 1148 // idiv instruction.
1114 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); 1149 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
1115 __ cmp(quotient.reg(), 0x40000000); 1150 __ cmp(eax, 0x40000000);
1116 deferred->enter()->Branch(equal, left, right); 1151 deferred->Branch(equal);
1117 // Check that the remainder is zero. 1152 // Check that the remainder is zero.
1118 __ test(remainder.reg(), Operand(remainder.reg())); 1153 __ test(edx, Operand(edx));
1119 remainder.Unuse(); 1154 deferred->Branch(not_zero);
1120 deferred->enter()->Branch(not_zero, left, right); 1155 // Tag the result and store it in the quotient register.
1156 ASSERT(kSmiTagSize == times_2); // adjust code if not the case
1157 __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
1158 deferred->BindExit();
1121 left->Unuse(); 1159 left->Unuse();
1122 right->Unuse(); 1160 right->Unuse();
1123 // Tag the result and store it in the quotient register.
1124 ASSERT(kSmiTagSize == times_2); // adjust code if not the case
1125 __ lea(quotient.reg(),
1126 Operand(quotient.reg(), quotient.reg(), times_1, kSmiTag));
1127 deferred->BindExit(&quotient);
1128 frame_->Push(&quotient); 1161 frame_->Push(&quotient);
1129 } else { 1162 } else {
1130 ASSERT(op == Token::MOD); 1163 ASSERT(op == Token::MOD);
1131 quotient.Unuse();
1132 // Check for a negative zero result. If the result is zero, and 1164 // Check for a negative zero result. If the result is zero, and
1133 // the dividend is negative, return a floating point negative 1165 // the dividend is negative, return a floating point negative
1134 // zero. The frame is unchanged in this block, so local control 1166 // zero. The frame is unchanged in this block, so local control
1135 // flow can use a Label rather than a JumpTarget. 1167 // flow can use a Label rather than a JumpTarget.
1136 Label non_zero_result; 1168 Label non_zero_result;
1137 __ test(remainder.reg(), Operand(remainder.reg())); 1169 __ test(edx, Operand(edx));
1138 __ j(not_zero, &non_zero_result, taken); 1170 __ j(not_zero, &non_zero_result, taken);
1139 __ test(left->reg(), Operand(left->reg())); 1171 __ test(left->reg(), Operand(left->reg()));
1140 deferred->enter()->Branch(negative, left, right); 1172 deferred->Branch(negative);
1173 __ bind(&non_zero_result);
1174 deferred->BindExit();
1141 left->Unuse(); 1175 left->Unuse();
1142 right->Unuse(); 1176 right->Unuse();
1143 __ bind(&non_zero_result);
1144 deferred->BindExit(&remainder);
1145 frame_->Push(&remainder); 1177 frame_->Push(&remainder);
1146 } 1178 }
1147 return; 1179 return;
1148 } 1180 }
1149 1181
1150 // Special handling of shift operations because they use fixed 1182 // Special handling of shift operations because they use fixed
1151 // registers. 1183 // registers.
1152 if (op == Token::SHL || op == Token::SHR || op == Token::SAR) { 1184 if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
1153 // Move left out of ecx if necessary. 1185 // Move left out of ecx if necessary.
1154 if (left->is_register() && left->reg().is(ecx)) { 1186 if (left->is_register() && left->reg().is(ecx)) {
1155 *left = allocator_->Allocate(); 1187 *left = allocator_->Allocate();
1156 ASSERT(left->is_valid()); 1188 ASSERT(left->is_valid());
1157 __ mov(left->reg(), ecx); 1189 __ mov(left->reg(), ecx);
1158 } 1190 }
1159 right->ToRegister(ecx); 1191 right->ToRegister(ecx);
1160 left->ToRegister(); 1192 left->ToRegister();
1161 ASSERT(left->is_register() && !left->reg().is(ecx)); 1193 ASSERT(left->is_register() && !left->reg().is(ecx));
1162 ASSERT(right->is_register() && right->reg().is(ecx)); 1194 ASSERT(right->is_register() && right->reg().is(ecx));
1163 1195
1164 // We will modify right, it must be spilled. 1196 // We will modify right, it must be spilled.
1165 frame_->Spill(ecx); 1197 frame_->Spill(ecx);
1166 1198
1167 // Use a fresh answer register to avoid spilling the left operand. 1199 // Use a fresh answer register to avoid spilling the left operand.
1168 Result answer = allocator_->Allocate(); 1200 Result answer = allocator_->Allocate();
1169 ASSERT(answer.is_valid()); 1201 ASSERT(answer.is_valid());
1170 // Check that both operands are smis using the answer register as a 1202 // Check that both operands are smis using the answer register as a
1171 // temporary. 1203 // temporary.
1204 DeferredInlineBinaryOperation* deferred =
1205 new DeferredInlineBinaryOperation(op,
1206 answer.reg(),
1207 left->reg(),
1208 ecx,
1209 overwrite_mode);
1172 __ mov(answer.reg(), left->reg()); 1210 __ mov(answer.reg(), left->reg());
1173 __ or_(answer.reg(), Operand(ecx)); 1211 __ or_(answer.reg(), Operand(ecx));
1174 __ test(answer.reg(), Immediate(kSmiTagMask)); 1212 __ test(answer.reg(), Immediate(kSmiTagMask));
1175 deferred->enter()->Branch(not_zero, left, right); 1213 deferred->Branch(not_zero);
1176 1214
1177 // Untag both operands. 1215 // Untag both operands.
1178 __ mov(answer.reg(), left->reg()); 1216 __ mov(answer.reg(), left->reg());
1179 __ sar(answer.reg(), kSmiTagSize); 1217 __ sar(answer.reg(), kSmiTagSize);
1180 __ sar(ecx, kSmiTagSize); 1218 __ sar(ecx, kSmiTagSize);
1181 // Perform the operation. 1219 // Perform the operation.
1182 switch (op) { 1220 switch (op) {
1183 case Token::SAR: 1221 case Token::SAR:
1184 __ sar(answer.reg()); 1222 __ sar(answer.reg());
1185 // No checks of result necessary 1223 // No checks of result necessary
1186 break; 1224 break;
1187 case Token::SHR: { 1225 case Token::SHR: {
1188 JumpTarget result_ok; 1226 Label result_ok;
1189 __ shr(answer.reg()); 1227 __ shr(answer.reg());
1190 // Check that the *unsigned* result fits in a smi. Neither of 1228 // Check that the *unsigned* result fits in a smi. Neither of
1191 // the two high-order bits can be set: 1229 // the two high-order bits can be set:
1192 // * 0x80000000: high bit would be lost when smi tagging. 1230 // * 0x80000000: high bit would be lost when smi tagging.
1193 // * 0x40000000: this number would convert to negative when smi 1231 // * 0x40000000: this number would convert to negative when smi
1194 // tagging. 1232 // tagging.
1195 // These two cases can only happen with shifts by 0 or 1 when 1233 // These two cases can only happen with shifts by 0 or 1 when
1196 // handed a valid smi. If the answer cannot be represented by a 1234 // handed a valid smi. If the answer cannot be represented by a
1197 // smi, restore the left and right arguments, and jump to slow 1235 // smi, restore the left and right arguments, and jump to slow
1198 // case. The low bit of the left argument may be lost, but only 1236 // case. The low bit of the left argument may be lost, but only
1199 // in a case where it is dropped anyway. 1237 // in a case where it is dropped anyway.
1200 __ test(answer.reg(), Immediate(0xc0000000)); 1238 __ test(answer.reg(), Immediate(0xc0000000));
1201 result_ok.Branch(zero, &answer); 1239 __ j(zero, &result_ok);
1202 ASSERT(kSmiTag == 0); 1240 ASSERT(kSmiTag == 0);
1203 __ shl(ecx, kSmiTagSize); 1241 __ shl(ecx, kSmiTagSize);
1204 answer.Unuse(); 1242 deferred->Jump();
1205 deferred->enter()->Jump(left, right); 1243 __ bind(&result_ok);
1206 result_ok.Bind(&answer);
1207 break; 1244 break;
1208 } 1245 }
1209 case Token::SHL: { 1246 case Token::SHL: {
1210 JumpTarget result_ok; 1247 Label result_ok;
1211 __ shl(answer.reg()); 1248 __ shl(answer.reg());
1212 // Check that the *signed* result fits in a smi. 1249 // Check that the *signed* result fits in a smi.
1213 __ cmp(answer.reg(), 0xc0000000); 1250 __ cmp(answer.reg(), 0xc0000000);
1214 result_ok.Branch(positive, &answer); 1251 __ j(positive, &result_ok);
1215 ASSERT(kSmiTag == 0); 1252 ASSERT(kSmiTag == 0);
1216 __ shl(ecx, kSmiTagSize); 1253 __ shl(ecx, kSmiTagSize);
1217 answer.Unuse(); 1254 deferred->Jump();
1218 deferred->enter()->Jump(left, right); 1255 __ bind(&result_ok);
1219 result_ok.Bind(&answer);
1220 break; 1256 break;
1221 } 1257 }
1222 default: 1258 default:
1223 UNREACHABLE(); 1259 UNREACHABLE();
1224 } 1260 }
1225 left->Unuse();
1226 right->Unuse();
1227 // Smi-tag the result in answer. 1261 // Smi-tag the result in answer.
1228 ASSERT(kSmiTagSize == 1); // Adjust code if not the case. 1262 ASSERT(kSmiTagSize == 1); // Adjust code if not the case.
1229 __ lea(answer.reg(), 1263 __ lea(answer.reg(),
1230 Operand(answer.reg(), answer.reg(), times_1, kSmiTag)); 1264 Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
1231 deferred->BindExit(&answer); 1265 deferred->BindExit();
1266 left->Unuse();
1267 right->Unuse();
1232 frame_->Push(&answer); 1268 frame_->Push(&answer);
1233 return; 1269 return;
1234 } 1270 }
1235 1271
1236 // Handle the other binary operations. 1272 // Handle the other binary operations.
1237 left->ToRegister(); 1273 left->ToRegister();
1238 right->ToRegister(); 1274 right->ToRegister();
1239 // A newly allocated register answer is used to hold the answer. The 1275 // A newly allocated register answer is used to hold the answer. The
1240 // registers containing left and right are not modified so they don't 1276 // registers containing left and right are not modified so they don't
1241 // need to be spilled in the fast case. 1277 // need to be spilled in the fast case.
1242 Result answer = allocator_->Allocate(); 1278 Result answer = allocator_->Allocate();
1279 ASSERT(answer.is_valid());
1243 1280
1244 ASSERT(answer.is_valid());
1245 // Perform the smi tag check. 1281 // Perform the smi tag check.
1282 DeferredInlineBinaryOperation* deferred =
1283 new DeferredInlineBinaryOperation(op,
1284 answer.reg(),
1285 left->reg(),
1286 right->reg(),
1287 overwrite_mode);
1246 if (left->reg().is(right->reg())) { 1288 if (left->reg().is(right->reg())) {
1247 __ test(left->reg(), Immediate(kSmiTagMask)); 1289 __ test(left->reg(), Immediate(kSmiTagMask));
1248 } else { 1290 } else {
1249 __ mov(answer.reg(), left->reg()); 1291 __ mov(answer.reg(), left->reg());
1250 __ or_(answer.reg(), Operand(right->reg())); 1292 __ or_(answer.reg(), Operand(right->reg()));
1251 ASSERT(kSmiTag == 0); // Adjust test if not the case. 1293 ASSERT(kSmiTag == 0); // Adjust test if not the case.
1252 __ test(answer.reg(), Immediate(kSmiTagMask)); 1294 __ test(answer.reg(), Immediate(kSmiTagMask));
1253 } 1295 }
1296 deferred->Branch(not_zero);
1297 __ mov(answer.reg(), left->reg());
1254 switch (op) { 1298 switch (op) {
1255 case Token::ADD: 1299 case Token::ADD:
1256 deferred->SetEntryFrame(left, right);
1257 deferred->enter()->Branch(not_zero, left, right, not_taken);
1258 __ mov(answer.reg(), left->reg());
1259 __ add(answer.reg(), Operand(right->reg())); // Add optimistically. 1300 __ add(answer.reg(), Operand(right->reg())); // Add optimistically.
1260 deferred->enter()->Branch(overflow, left, right, not_taken); 1301 deferred->Branch(overflow);
1261 break; 1302 break;
1262 1303
1263 case Token::SUB: 1304 case Token::SUB:
1264 deferred->SetEntryFrame(left, right);
1265 deferred->enter()->Branch(not_zero, left, right, not_taken);
1266 __ mov(answer.reg(), left->reg());
1267 __ sub(answer.reg(), Operand(right->reg())); // Subtract optimistically. 1305 __ sub(answer.reg(), Operand(right->reg())); // Subtract optimistically.
1268 deferred->enter()->Branch(overflow, left, right, not_taken); 1306 deferred->Branch(overflow);
1269 break; 1307 break;
1270 1308
1271 case Token::MUL: { 1309 case Token::MUL: {
1272 deferred->SetEntryFrame(left, right);
1273 deferred->enter()->Branch(not_zero, left, right, not_taken);
1274 __ mov(answer.reg(), left->reg());
1275 // If the smi tag is 0 we can just leave the tag on one operand. 1310 // If the smi tag is 0 we can just leave the tag on one operand.
1276 ASSERT(kSmiTag == 0); // Adjust code below if not the case. 1311 ASSERT(kSmiTag == 0); // Adjust code below if not the case.
1277 // Remove smi tag from the left operand (but keep sign). 1312 // Remove smi tag from the left operand (but keep sign).
1278 // Left-hand operand has been copied into answer. 1313 // Left-hand operand has been copied into answer.
1279 __ sar(answer.reg(), kSmiTagSize); 1314 __ sar(answer.reg(), kSmiTagSize);
1280 // Do multiplication of smis, leaving result in answer. 1315 // Do multiplication of smis, leaving result in answer.
1281 __ imul(answer.reg(), Operand(right->reg())); 1316 __ imul(answer.reg(), Operand(right->reg()));
1282 // Go slow on overflows. 1317 // Go slow on overflows.
1283 deferred->enter()->Branch(overflow, left, right, not_taken); 1318 deferred->Branch(overflow);
1284 // Check for negative zero result. If product is zero, and one 1319 // Check for negative zero result. If product is zero, and one
1285 // argument is negative, go to slow case. The frame is unchanged 1320 // argument is negative, go to slow case. The frame is unchanged
1286 // in this block, so local control flow can use a Label rather 1321 // in this block, so local control flow can use a Label rather
1287 // than a JumpTarget. 1322 // than a JumpTarget.
1288 Label non_zero_result; 1323 Label non_zero_result;
1289 __ test(answer.reg(), Operand(answer.reg())); 1324 __ test(answer.reg(), Operand(answer.reg()));
1290 __ j(not_zero, &non_zero_result, taken); 1325 __ j(not_zero, &non_zero_result, taken);
1291 __ mov(answer.reg(), left->reg()); 1326 __ mov(answer.reg(), left->reg());
1292 __ or_(answer.reg(), Operand(right->reg())); 1327 __ or_(answer.reg(), Operand(right->reg()));
1293 deferred->enter()->Branch(negative, left, right, not_taken); 1328 deferred->Branch(negative);
1294 __ xor_(answer.reg(), Operand(answer.reg())); // Positive 0 is correct. 1329 __ xor_(answer.reg(), Operand(answer.reg())); // Positive 0 is correct.
1295 __ bind(&non_zero_result); 1330 __ bind(&non_zero_result);
1296 break; 1331 break;
1297 } 1332 }
1298 1333
1299 case Token::BIT_OR: 1334 case Token::BIT_OR:
1300 deferred->enter()->Branch(not_zero, left, right, not_taken);
1301 __ mov(answer.reg(), left->reg());
1302 __ or_(answer.reg(), Operand(right->reg())); 1335 __ or_(answer.reg(), Operand(right->reg()));
1303 break; 1336 break;
1304 1337
1305 case Token::BIT_AND: 1338 case Token::BIT_AND:
1306 deferred->enter()->Branch(not_zero, left, right, not_taken);
1307 __ mov(answer.reg(), left->reg());
1308 __ and_(answer.reg(), Operand(right->reg())); 1339 __ and_(answer.reg(), Operand(right->reg()));
1309 break; 1340 break;
1310 1341
1311 case Token::BIT_XOR: 1342 case Token::BIT_XOR:
1312 deferred->enter()->Branch(not_zero, left, right, not_taken);
1313 __ mov(answer.reg(), left->reg());
1314 __ xor_(answer.reg(), Operand(right->reg())); 1343 __ xor_(answer.reg(), Operand(right->reg()));
1315 break; 1344 break;
1316 1345
1317 default: 1346 default:
1318 UNREACHABLE(); 1347 UNREACHABLE();
1319 break; 1348 break;
1320 } 1349 }
1350 deferred->BindExit();
1321 left->Unuse(); 1351 left->Unuse();
1322 right->Unuse(); 1352 right->Unuse();
1323 deferred->BindExit(&answer);
1324 frame_->Push(&answer); 1353 frame_->Push(&answer);
1325 } 1354 }
1326 1355
1327 1356
1357 // Call the appropriate binary operation stub to compute src op value
1358 // and leave the result in dst.
1328 class DeferredInlineSmiOperation: public DeferredCode { 1359 class DeferredInlineSmiOperation: public DeferredCode {
1329 public: 1360 public:
1330 DeferredInlineSmiOperation(Token::Value op, 1361 DeferredInlineSmiOperation(Token::Value op,
1362 Register dst,
1363 Register src,
1331 Smi* value, 1364 Smi* value,
1332 OverwriteMode overwrite_mode) 1365 OverwriteMode overwrite_mode)
1333 : op_(op), 1366 : op_(op),
1367 dst_(dst),
1368 src_(src),
1334 value_(value), 1369 value_(value),
1335 overwrite_mode_(overwrite_mode) { 1370 overwrite_mode_(overwrite_mode) {
1336 set_comment("[ DeferredInlineSmiOperation"); 1371 set_comment("[ DeferredInlineSmiOperation");
1337 } 1372 }
1338 1373
1339 virtual void Generate(); 1374 virtual void Generate();
1340 1375
1341 private: 1376 private:
1342 Token::Value op_; 1377 Token::Value op_;
1378 Register dst_;
1379 Register src_;
1343 Smi* value_; 1380 Smi* value_;
1344 OverwriteMode overwrite_mode_; 1381 OverwriteMode overwrite_mode_;
1345 }; 1382 };
1346 1383
1347 1384
1348 void DeferredInlineSmiOperation::Generate() { 1385 void DeferredInlineSmiOperation::Generate() {
1349 Result left; 1386 __ push(src_);
1350 enter()->Bind(&left); 1387 __ push(Immediate(value_));
1351 cgen()->frame()->Push(&left); 1388 GenericBinaryOpStub stub(op_, overwrite_mode_, SMI_CODE_INLINED);
1352 cgen()->frame()->Push(value_); 1389 __ CallStub(&stub);
1353 GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED); 1390 if (!dst_.is(eax)) __ mov(dst_, eax);
1354 Result answer = cgen()->frame()->CallStub(&igostub, 2);
1355 exit_.Jump(&answer);
1356 } 1391 }
1357 1392
1358 1393
1394 // Call the appropriate binary operation stub to compute value op src
1395 // and leave the result in dst.
1359 class DeferredInlineSmiOperationReversed: public DeferredCode { 1396 class DeferredInlineSmiOperationReversed: public DeferredCode {
1360 public: 1397 public:
1361 DeferredInlineSmiOperationReversed(Token::Value op, 1398 DeferredInlineSmiOperationReversed(Token::Value op,
1399 Register dst,
1362 Smi* value, 1400 Smi* value,
1401 Register src,
1363 OverwriteMode overwrite_mode) 1402 OverwriteMode overwrite_mode)
1364 : op_(op), 1403 : op_(op),
1404 dst_(dst),
1365 value_(value), 1405 value_(value),
1406 src_(src),
1366 overwrite_mode_(overwrite_mode) { 1407 overwrite_mode_(overwrite_mode) {
1367 set_comment("[ DeferredInlineSmiOperationReversed"); 1408 set_comment("[ DeferredInlineSmiOperationReversed");
1368 } 1409 }
1369 1410
1370 virtual void Generate(); 1411 virtual void Generate();
1371 1412
1372 private: 1413 private:
1373 Token::Value op_; 1414 Token::Value op_;
1415 Register dst_;
1374 Smi* value_; 1416 Smi* value_;
1417 Register src_;
1375 OverwriteMode overwrite_mode_; 1418 OverwriteMode overwrite_mode_;
1376 }; 1419 };
1377 1420
1378 1421
1379 void DeferredInlineSmiOperationReversed::Generate() { 1422 void DeferredInlineSmiOperationReversed::Generate() {
1380 Result right; 1423 __ push(Immediate(value_));
1381 enter()->Bind(&right); 1424 __ push(src_);
1382 cgen()->frame()->Push(value_);
1383 cgen()->frame()->Push(&right);
1384 GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED); 1425 GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
1385 Result answer = cgen()->frame()->CallStub(&igostub, 2); 1426 __ CallStub(&igostub);
1386 exit_.Jump(&answer); 1427 if (!dst_.is(eax)) __ mov(dst_, eax);
1387 } 1428 }
1388 1429
1389 1430
1431 // The result of src + value is in dst. It either overflowed or was not
1432 // smi tagged. Undo the speculative addition and call the appropriate
1433 // specialized stub for add. The result is left in dst.
1390 class DeferredInlineSmiAdd: public DeferredCode { 1434 class DeferredInlineSmiAdd: public DeferredCode {
1391 public: 1435 public:
1392 DeferredInlineSmiAdd(Smi* value, 1436 DeferredInlineSmiAdd(Register dst,
1437 Smi* value,
1393 OverwriteMode overwrite_mode) 1438 OverwriteMode overwrite_mode)
1394 : value_(value), 1439 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
1395 overwrite_mode_(overwrite_mode) {
1396 set_comment("[ DeferredInlineSmiAdd"); 1440 set_comment("[ DeferredInlineSmiAdd");
1397 } 1441 }
1398 1442
1399 virtual void Generate(); 1443 virtual void Generate();
1400 1444
1401 private: 1445 private:
1446 Register dst_;
1402 Smi* value_; 1447 Smi* value_;
1403 OverwriteMode overwrite_mode_; 1448 OverwriteMode overwrite_mode_;
1404 }; 1449 };
1405 1450
1406 1451
1452 void DeferredInlineSmiAdd::Generate() {
1453 // Undo the optimistic add operation and call the shared stub.
1454 __ sub(Operand(dst_), Immediate(value_));
1455 __ push(dst_);
1456 __ push(Immediate(value_));
1457 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
1458 __ CallStub(&igostub);
1459 if (!dst_.is(eax)) __ mov(dst_, eax);
1460 }
1461
1462
1463 // The result of value + src is in dst. It either overflowed or was not
1464 // smi tagged. Undo the speculative addition and call the appropriate
1465 // specialized stub for add. The result is left in dst.
1407 class DeferredInlineSmiAddReversed: public DeferredCode { 1466 class DeferredInlineSmiAddReversed: public DeferredCode {
1408 public: 1467 public:
1409 DeferredInlineSmiAddReversed(Smi* value, 1468 DeferredInlineSmiAddReversed(Register dst,
1469 Smi* value,
1410 OverwriteMode overwrite_mode) 1470 OverwriteMode overwrite_mode)
1411 : value_(value), 1471 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
1412 overwrite_mode_(overwrite_mode) {
1413 set_comment("[ DeferredInlineSmiAddReversed"); 1472 set_comment("[ DeferredInlineSmiAddReversed");
1414 } 1473 }
1415 1474
1416 virtual void Generate(); 1475 virtual void Generate();
1417 1476
1418 private: 1477 private:
1478 Register dst_;
1419 Smi* value_; 1479 Smi* value_;
1420 OverwriteMode overwrite_mode_; 1480 OverwriteMode overwrite_mode_;
1421 }; 1481 };
1422 1482
1423 1483
1484 void DeferredInlineSmiAddReversed::Generate() {
1485 // Undo the optimistic add operation and call the shared stub.
1486 __ sub(Operand(dst_), Immediate(value_));
1487 __ push(Immediate(value_));
1488 __ push(dst_);
1489 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
1490 __ CallStub(&igostub);
1491 if (!dst_.is(eax)) __ mov(dst_, eax);
1492 }
1493
1494
1495 // The result of src - value is in dst. It either overflowed or was not
1496 // smi tagged. Undo the speculative subtraction and call the
1497 // appropriate specialized stub for subtract. The result is left in
1498 // dst.
1424 class DeferredInlineSmiSub: public DeferredCode { 1499 class DeferredInlineSmiSub: public DeferredCode {
1425 public: 1500 public:
1426 DeferredInlineSmiSub(Smi* value, 1501 DeferredInlineSmiSub(Register dst,
1502 Smi* value,
1427 OverwriteMode overwrite_mode) 1503 OverwriteMode overwrite_mode)
1428 : value_(value), 1504 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
1429 overwrite_mode_(overwrite_mode) {
1430 set_comment("[ DeferredInlineSmiSub"); 1505 set_comment("[ DeferredInlineSmiSub");
1431 } 1506 }
1432 1507
1433 virtual void Generate(); 1508 virtual void Generate();
1434 1509
1435 private: 1510 private:
1511 Register dst_;
1436 Smi* value_; 1512 Smi* value_;
1437 OverwriteMode overwrite_mode_; 1513 OverwriteMode overwrite_mode_;
1438 }; 1514 };
1439 1515
1440 1516
1441 #undef __ 1517 void DeferredInlineSmiSub::Generate() {
1442 #define __ ACCESS_MASM(cgen()->masm()) 1518 // Undo the optimistic sub operation and call the shared stub.
1443 1519 __ add(Operand(dst_), Immediate(value_));
1444 1520 __ push(dst_);
1445 void DeferredInlineSmiAdd::Generate() { 1521 __ push(Immediate(value_));
1446 // Undo the optimistic add operation and call the shared stub. 1522 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
1447 Result left; // Initially left + value_. 1523 __ CallStub(&igostub);
1448 enter()->Bind(&left); 1524 if (!dst_.is(eax)) __ mov(dst_, eax);
1449 left.ToRegister();
1450 cgen()->frame()->Spill(left.reg());
1451 __ sub(Operand(left.reg()), Immediate(value_));
1452 cgen()->frame()->Push(&left);
1453 cgen()->frame()->Push(value_);
1454 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
1455 Result answer = cgen()->frame()->CallStub(&igostub, 2);
1456 exit_.Jump(&answer);
1457 } 1525 }
1458 1526
1459 1527
1460 void DeferredInlineSmiAddReversed::Generate() {
1461 // Undo the optimistic add operation and call the shared stub.
1462 Result right; // Initially value_ + right.
1463 enter()->Bind(&right);
1464 right.ToRegister();
1465 cgen()->frame()->Spill(right.reg());
1466 __ sub(Operand(right.reg()), Immediate(value_));
1467 cgen()->frame()->Push(value_);
1468 cgen()->frame()->Push(&right);
1469 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
1470 Result answer = cgen()->frame()->CallStub(&igostub, 2);
1471 exit_.Jump(&answer);
1472 }
1473
1474
1475 void DeferredInlineSmiSub::Generate() {
1476 // Undo the optimistic sub operation and call the shared stub.
1477 Result left; // Initially left - value_.
1478 enter()->Bind(&left);
1479 left.ToRegister();
1480 cgen()->frame()->Spill(left.reg());
1481 __ add(Operand(left.reg()), Immediate(value_));
1482 cgen()->frame()->Push(&left);
1483 cgen()->frame()->Push(value_);
1484 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
1485 Result answer = cgen()->frame()->CallStub(&igostub, 2);
1486 exit_.Jump(&answer);
1487 }
1488
1489
1490 #undef __
1491 #define __ ACCESS_MASM(masm_)
1492
1493
1494 class DeferredInlineSmiSubReversed: public DeferredCode {
1495 public:
1496 DeferredInlineSmiSubReversed(Smi* value,
1497 OverwriteMode overwrite_mode)
1498 : value_(value),
1499 overwrite_mode_(overwrite_mode) {
1500 set_comment("[ DeferredInlineSmiSubReversed");
1501 }
1502
1503 virtual void Generate();
1504
1505 private:
1506 Smi* value_;
1507 OverwriteMode overwrite_mode_;
1508 };
1509
1510
1511 void DeferredInlineSmiSubReversed::Generate() {
1512 // Call the shared stub.
1513 Result right;
1514 enter()->Bind(&right);
1515 cgen()->frame()->Push(value_);
1516 cgen()->frame()->Push(&right);
1517 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
1518 Result answer = cgen()->frame()->CallStub(&igostub, 2);
1519 exit_.Jump(&answer);
1520 }
1521
1522
1523 void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, 1528 void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
1524 Result* operand, 1529 Result* operand,
1525 Handle<Object> value, 1530 Handle<Object> value,
1526 SmiAnalysis* type, 1531 SmiAnalysis* type,
1527 bool reversed, 1532 bool reversed,
1528 OverwriteMode overwrite_mode) { 1533 OverwriteMode overwrite_mode) {
1529 // NOTE: This is an attempt to inline (a bit) more of the code for 1534 // NOTE: This is an attempt to inline (a bit) more of the code for
1530 // some possible smi operations (like + and -) when (at least) one 1535 // some possible smi operations (like + and -) when (at least) one
1531 // of the operands is a constant smi. 1536 // of the operands is a constant smi.
1532 // Consumes the argument "operand". 1537 // Consumes the argument "operand".
(...skipping 14 matching lines...)
1547 } 1552 }
1548 1553
1549 // Get the literal value. 1554 // Get the literal value.
1550 Smi* smi_value = Smi::cast(*value); 1555 Smi* smi_value = Smi::cast(*value);
1551 int int_value = smi_value->value(); 1556 int int_value = smi_value->value();
1552 1557
1553 switch (op) { 1558 switch (op) {
1554 case Token::ADD: { 1559 case Token::ADD: {
1555 operand->ToRegister(); 1560 operand->ToRegister();
1556 frame_->Spill(operand->reg()); 1561 frame_->Spill(operand->reg());
1557 __ add(Operand(operand->reg()), Immediate(value));
1558 1562
1563 // Optimistically add. Call the specialized add stub if the
1564 // result is not a smi or overflows.
1559 DeferredCode* deferred = NULL; 1565 DeferredCode* deferred = NULL;
1560 if (reversed) { 1566 if (reversed) {
1561 deferred = new DeferredInlineSmiAddReversed(smi_value, overwrite_mode); 1567 deferred = new DeferredInlineSmiAddReversed(operand->reg(),
1568 smi_value,
1569 overwrite_mode);
1562 } else { 1570 } else {
1563 deferred = new DeferredInlineSmiAdd(smi_value, overwrite_mode); 1571 deferred = new DeferredInlineSmiAdd(operand->reg(),
1572 smi_value,
1573 overwrite_mode);
1564 } 1574 }
1565 deferred->SetEntryFrame(operand); 1575 __ add(Operand(operand->reg()), Immediate(value));
1566 deferred->enter()->Branch(overflow, operand, not_taken); 1576 deferred->Branch(overflow);
1567 __ test(operand->reg(), Immediate(kSmiTagMask)); 1577 __ test(operand->reg(), Immediate(kSmiTagMask));
1568 deferred->enter()->Branch(not_zero, operand, not_taken); 1578 deferred->Branch(not_zero);
1569 deferred->BindExit(operand); 1579 deferred->BindExit();
1570 frame_->Push(operand); 1580 frame_->Push(operand);
1571 break; 1581 break;
1572 } 1582 }
1573 1583
1574 case Token::SUB: { 1584 case Token::SUB: {
1575 DeferredCode* deferred = NULL; 1585 DeferredCode* deferred = NULL;
1576 Result answer; // Only allocate a new register if reversed. 1586 Result answer; // Only allocate a new register if reversed.
1577 if (reversed) { 1587 if (reversed) {
1588 // The reversed case is only hit when the right operand is not a
1589 // constant.
1590 ASSERT(operand->is_register());
1578 answer = allocator()->Allocate(); 1591 answer = allocator()->Allocate();
1579 ASSERT(answer.is_valid()); 1592 ASSERT(answer.is_valid());
1580 deferred = new DeferredInlineSmiSubReversed(smi_value, overwrite_mode);
1581 __ Set(answer.reg(), Immediate(value)); 1593 __ Set(answer.reg(), Immediate(value));
1582 // We are in the reversed case so they can't both be Smi constants. 1594 deferred = new DeferredInlineSmiOperationReversed(op,
1583 ASSERT(operand->is_register()); 1595 answer.reg(),
1596 smi_value,
1597 operand->reg(),
1598 overwrite_mode);
1584 __ sub(answer.reg(), Operand(operand->reg())); 1599 __ sub(answer.reg(), Operand(operand->reg()));
1585 } else { 1600 } else {
1586 operand->ToRegister(); 1601 operand->ToRegister();
1587 frame_->Spill(operand->reg()); 1602 frame_->Spill(operand->reg());
1588 deferred = new DeferredInlineSmiSub(smi_value, overwrite_mode); 1603 answer = *operand;
1604 deferred = new DeferredInlineSmiSub(operand->reg(),
1605 smi_value,
1606 overwrite_mode);
1589 __ sub(Operand(operand->reg()), Immediate(value)); 1607 __ sub(Operand(operand->reg()), Immediate(value));
1590 answer = *operand;
1591 } 1608 }
1592 deferred->SetEntryFrame(operand); 1609 deferred->Branch(overflow);
1593 deferred->enter()->Branch(overflow, operand, not_taken);
1594 __ test(answer.reg(), Immediate(kSmiTagMask)); 1610 __ test(answer.reg(), Immediate(kSmiTagMask));
1595 deferred->enter()->Branch(not_zero, operand, not_taken); 1611 deferred->Branch(not_zero);
1612 deferred->BindExit();
1596 operand->Unuse(); 1613 operand->Unuse();
1597 deferred->BindExit(&answer);
1598 frame_->Push(&answer); 1614 frame_->Push(&answer);
1599 break; 1615 break;
1600 } 1616 }
1601 1617
1602 case Token::SAR: { 1618 case Token::SAR:
1603 if (reversed) { 1619 if (reversed) {
1604 Result constant_operand(value); 1620 Result constant_operand(value);
1605 LikelySmiBinaryOperation(op, &constant_operand, operand, 1621 LikelySmiBinaryOperation(op, &constant_operand, operand,
1606 overwrite_mode); 1622 overwrite_mode);
1607 } else { 1623 } else {
1608 // Only the least significant 5 bits of the shift value are used. 1624 // Only the least significant 5 bits of the shift value are used.
1609 // In the slow case, this masking is done inside the runtime call. 1625 // In the slow case, this masking is done inside the runtime call.
1610 int shift_value = int_value & 0x1f; 1626 int shift_value = int_value & 0x1f;
1611 DeferredCode* deferred =
1612 new DeferredInlineSmiOperation(op, smi_value, overwrite_mode);
1613 operand->ToRegister(); 1627 operand->ToRegister();
1628 frame_->Spill(operand->reg());
1629 DeferredInlineSmiOperation* deferred =
1630 new DeferredInlineSmiOperation(op,
1631 operand->reg(),
1632 operand->reg(),
1633 smi_value,
1634 overwrite_mode);
1614 __ test(operand->reg(), Immediate(kSmiTagMask)); 1635 __ test(operand->reg(), Immediate(kSmiTagMask));
1615 deferred->enter()->Branch(not_zero, operand, not_taken); 1636 deferred->Branch(not_zero);
1616 if (shift_value > 0) { 1637 if (shift_value > 0) {
1617 frame_->Spill(operand->reg());
1618 __ sar(operand->reg(), shift_value); 1638 __ sar(operand->reg(), shift_value);
1619 __ and_(operand->reg(), ~kSmiTagMask); 1639 __ and_(operand->reg(), ~kSmiTagMask);
1620 } 1640 }
1621 deferred->BindExit(operand); 1641 deferred->BindExit();
1622 frame_->Push(operand); 1642 frame_->Push(operand);
1623 } 1643 }
1624 break; 1644 break;
1625 }
1626 1645
1627 case Token::SHR: { 1646 case Token::SHR:
1628 if (reversed) { 1647 if (reversed) {
1629 Result constant_operand(value); 1648 Result constant_operand(value);
1630 LikelySmiBinaryOperation(op, &constant_operand, operand, 1649 LikelySmiBinaryOperation(op, &constant_operand, operand,
1631 overwrite_mode); 1650 overwrite_mode);
1632 } else { 1651 } else {
1633 // Only the least significant 5 bits of the shift value are used. 1652 // Only the least significant 5 bits of the shift value are used.
1634 // In the slow case, this masking is done inside the runtime call. 1653 // In the slow case, this masking is done inside the runtime call.
1635 int shift_value = int_value & 0x1f; 1654 int shift_value = int_value & 0x1f;
1636 DeferredCode* deferred =
1637 new DeferredInlineSmiOperation(op, smi_value, overwrite_mode);
1638 operand->ToRegister(); 1655 operand->ToRegister();
1639 __ test(operand->reg(), Immediate(kSmiTagMask));
1640 deferred->enter()->Branch(not_zero, operand, not_taken);
1641 Result answer = allocator()->Allocate(); 1656 Result answer = allocator()->Allocate();
1642 ASSERT(answer.is_valid()); 1657 ASSERT(answer.is_valid());
1658 DeferredInlineSmiOperation* deferred =
1659 new DeferredInlineSmiOperation(op,
1660 answer.reg(),
1661 operand->reg(),
1662 smi_value,
1663 overwrite_mode);
1664 __ test(operand->reg(), Immediate(kSmiTagMask));
1665 deferred->Branch(not_zero);
1643 __ mov(answer.reg(), operand->reg()); 1666 __ mov(answer.reg(), operand->reg());
1644 __ sar(answer.reg(), kSmiTagSize); 1667 __ sar(answer.reg(), kSmiTagSize);
1645 __ shr(answer.reg(), shift_value); 1668 __ shr(answer.reg(), shift_value);
1646 // A negative Smi shifted right two is in the positive Smi range. 1669 // A negative Smi shifted right two is in the positive Smi range.
1647 if (shift_value < 2) { 1670 if (shift_value < 2) {
1648 __ test(answer.reg(), Immediate(0xc0000000)); 1671 __ test(answer.reg(), Immediate(0xc0000000));
1649 deferred->enter()->Branch(not_zero, operand, not_taken); 1672 deferred->Branch(not_zero);
1650 } 1673 }
1651 operand->Unuse(); 1674 operand->Unuse();
1652 ASSERT(kSmiTagSize == times_2); // Adjust the code if not true. 1675 ASSERT(kSmiTagSize == times_2); // Adjust the code if not true.
1653 __ lea(answer.reg(), 1676 __ lea(answer.reg(),
1654 Operand(answer.reg(), answer.reg(), times_1, kSmiTag)); 1677 Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
1655 deferred->BindExit(&answer); 1678 deferred->BindExit();
1656 frame_->Push(&answer); 1679 frame_->Push(&answer);
1657 } 1680 }
1658 break; 1681 break;
1659 }
1660 1682
1661 case Token::SHL: { 1683 case Token::SHL:
1662 if (reversed) { 1684 if (reversed) {
1663 Result constant_operand(value); 1685 Result constant_operand(value);
1664 LikelySmiBinaryOperation(op, &constant_operand, operand, 1686 LikelySmiBinaryOperation(op, &constant_operand, operand,
1665 overwrite_mode); 1687 overwrite_mode);
1666 } else { 1688 } else {
1667 // Only the least significant 5 bits of the shift value are used. 1689 // Only the least significant 5 bits of the shift value are used.
1668 // In the slow case, this masking is done inside the runtime call. 1690 // In the slow case, this masking is done inside the runtime call.
1669 int shift_value = int_value & 0x1f; 1691 int shift_value = int_value & 0x1f;
1670 DeferredCode* deferred =
1671 new DeferredInlineSmiOperation(op, smi_value, overwrite_mode);
1672 operand->ToRegister(); 1692 operand->ToRegister();
1673 __ test(operand->reg(), Immediate(kSmiTagMask)); 1693 if (shift_value == 0) {
1674 deferred->enter()->Branch(not_zero, operand, not_taken); 1694 DeferredInlineSmiOperation* deferred =
1675 if (shift_value != 0) { 1695 new DeferredInlineSmiOperation(op,
1696 operand->reg(),
1697 operand->reg(),
1698 smi_value,
1699 overwrite_mode);
1700 __ test(operand->reg(), Immediate(kSmiTagMask));
1701 deferred->Branch(not_zero);
1702 deferred->BindExit();
1703 frame_->Push(operand);
1704 } else {
1705 // Use a fresh temporary for nonzero shift values.
1676 Result answer = allocator()->Allocate(); 1706 Result answer = allocator()->Allocate();
1677 ASSERT(answer.is_valid()); 1707 ASSERT(answer.is_valid());
1708 DeferredInlineSmiOperation* deferred =
1709 new DeferredInlineSmiOperation(op,
1710 answer.reg(),
1711 operand->reg(),
1712 smi_value,
1713 overwrite_mode);
1714 __ test(operand->reg(), Immediate(kSmiTagMask));
1715 deferred->Branch(not_zero);
1678 __ mov(answer.reg(), operand->reg()); 1716 __ mov(answer.reg(), operand->reg());
1679 ASSERT(kSmiTag == 0); // adjust code if not the case 1717 ASSERT(kSmiTag == 0); // adjust code if not the case
1680 // We do no shifts, only the Smi conversion, if shift_value is 1. 1718 // We do no shifts, only the Smi conversion, if shift_value is 1.
1681 if (shift_value > 1) { 1719 if (shift_value > 1) {
1682 __ shl(answer.reg(), shift_value - 1); 1720 __ shl(answer.reg(), shift_value - 1);
1683 } 1721 }
1684 // Convert int result to Smi, checking that it is in int range. 1722 // Convert int result to Smi, checking that it is in int range.
1685 ASSERT(kSmiTagSize == times_2); // adjust code if not the case 1723 ASSERT(kSmiTagSize == 1); // adjust code if not the case
1686 __ add(answer.reg(), Operand(answer.reg())); 1724 __ add(answer.reg(), Operand(answer.reg()));
1687 deferred->enter()->Branch(overflow, operand, not_taken); 1725 deferred->Branch(overflow);
1726 deferred->BindExit();
1688 operand->Unuse(); 1727 operand->Unuse();
1689 deferred->BindExit(&answer);
1690 frame_->Push(&answer); 1728 frame_->Push(&answer);
1691 } else {
1692 deferred->BindExit(operand);
1693 frame_->Push(operand);
1694 } 1729 }
1695 } 1730 }
1696 break; 1731 break;
1697 }
1698 1732
1699 case Token::BIT_OR: 1733 case Token::BIT_OR:
1700 case Token::BIT_XOR: 1734 case Token::BIT_XOR:
1701 case Token::BIT_AND: { 1735 case Token::BIT_AND: {
1736 operand->ToRegister();
1737 frame_->Spill(operand->reg());
1702 DeferredCode* deferred = NULL; 1738 DeferredCode* deferred = NULL;
1703 if (reversed) { 1739 if (reversed) {
1704 deferred = new DeferredInlineSmiOperationReversed(op, smi_value, 1740 deferred = new DeferredInlineSmiOperationReversed(op,
1741 operand->reg(),
1742 smi_value,
1743 operand->reg(),
1705 overwrite_mode); 1744 overwrite_mode);
1706 } else { 1745 } else {
1707 deferred = new DeferredInlineSmiOperation(op, smi_value, 1746 deferred = new DeferredInlineSmiOperation(op,
1747 operand->reg(),
1748 operand->reg(),
1749 smi_value,
1708 overwrite_mode); 1750 overwrite_mode);
1709 } 1751 }
1710 operand->ToRegister();
1711 __ test(operand->reg(), Immediate(kSmiTagMask)); 1752 __ test(operand->reg(), Immediate(kSmiTagMask));
1712 deferred->enter()->Branch(not_zero, operand, not_taken); 1753 deferred->Branch(not_zero);
1713 frame_->Spill(operand->reg());
1714 if (op == Token::BIT_AND) { 1754 if (op == Token::BIT_AND) {
1715 __ and_(Operand(operand->reg()), Immediate(value)); 1755 __ and_(Operand(operand->reg()), Immediate(value));
1716 } else if (op == Token::BIT_XOR) { 1756 } else if (op == Token::BIT_XOR) {
1717 if (int_value != 0) { 1757 if (int_value != 0) {
1718 __ xor_(Operand(operand->reg()), Immediate(value)); 1758 __ xor_(Operand(operand->reg()), Immediate(value));
1719 } 1759 }
1720 } else { 1760 } else {
1721 ASSERT(op == Token::BIT_OR); 1761 ASSERT(op == Token::BIT_OR);
1722 if (int_value != 0) { 1762 if (int_value != 0) {
1723 __ or_(Operand(operand->reg()), Immediate(value)); 1763 __ or_(Operand(operand->reg()), Immediate(value));
1724 } 1764 }
1725 } 1765 }
1726 deferred->BindExit(operand); 1766 deferred->BindExit();
1727 frame_->Push(operand); 1767 frame_->Push(operand);
1728 break; 1768 break;
1729 } 1769 }
1730 1770
1731 default: { 1771 default: {
1732 Result constant_operand(value); 1772 Result constant_operand(value);
1733 if (reversed) { 1773 if (reversed) {
1734 LikelySmiBinaryOperation(op, &constant_operand, operand, 1774 LikelySmiBinaryOperation(op, &constant_operand, operand,
1735 overwrite_mode); 1775 overwrite_mode);
1736 } else { 1776 } else {
(...skipping 246 matching lines...)
1983 Result answer = frame_->CallStub(&call_function, arg_count + 1); 2023 Result answer = frame_->CallStub(&call_function, arg_count + 1);
1984 // Restore context and replace function on the stack with the 2024 // Restore context and replace function on the stack with the
1985 // result of the stub invocation. 2025 // result of the stub invocation.
1986 frame_->RestoreContextRegister(); 2026 frame_->RestoreContextRegister();
1987 frame_->SetElementAt(0, &answer); 2027 frame_->SetElementAt(0, &answer);
1988 } 2028 }
1989 2029
1990 2030
1991 class DeferredStackCheck: public DeferredCode { 2031 class DeferredStackCheck: public DeferredCode {
1992 public: 2032 public:
1993 explicit DeferredStackCheck() { 2033 DeferredStackCheck() {
1994 set_comment("[ DeferredStackCheck"); 2034 set_comment("[ DeferredStackCheck");
1995 } 2035 }
1996 2036
1997 virtual void Generate(); 2037 virtual void Generate();
1998 }; 2038 };
1999 2039
2000 2040
2001 void DeferredStackCheck::Generate() { 2041 void DeferredStackCheck::Generate() {
2002 enter()->Bind();
2003 StackCheckStub stub; 2042 StackCheckStub stub;
2004 Result ignored = cgen()->frame()->CallStub(&stub, 0); 2043 __ CallStub(&stub);
2005 ignored.Unuse();
2006 exit_.Jump();
2007 } 2044 }
2008 2045
2009 2046
2010 void CodeGenerator::CheckStack() { 2047 void CodeGenerator::CheckStack() {
2011 if (FLAG_check_stack) { 2048 if (FLAG_check_stack) {
2012 DeferredStackCheck* deferred = new DeferredStackCheck; 2049 DeferredStackCheck* deferred = new DeferredStackCheck;
2013 ExternalReference stack_guard_limit = 2050 ExternalReference stack_guard_limit =
2014 ExternalReference::address_of_stack_guard_limit(); 2051 ExternalReference::address_of_stack_guard_limit();
2015 __ cmp(esp, Operand::StaticVariable(stack_guard_limit)); 2052 __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
2016 deferred->enter()->Branch(below, not_taken); 2053 deferred->Branch(below);
2017 deferred->BindExit(); 2054 deferred->BindExit();
2018 } 2055 }
2019 } 2056 }
2020 2057
2021 2058
2022 void CodeGenerator::VisitAndSpill(Statement* statement) { 2059 void CodeGenerator::VisitAndSpill(Statement* statement) {
2023 ASSERT(in_spilled_code()); 2060 ASSERT(in_spilled_code());
2024 set_in_spilled_code(false); 2061 set_in_spilled_code(false);
2025 Visit(statement); 2062 Visit(statement);
2026 if (frame_ != NULL) { 2063 if (frame_ != NULL) {
(...skipping 1831 matching lines...)
3858 } 3895 }
3859 3896
3860 3897
3861 bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) { 3898 bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
3862 if (!value->IsSmi()) return false; 3899 if (!value->IsSmi()) return false;
3863 int int_value = Smi::cast(*value)->value(); 3900 int int_value = Smi::cast(*value)->value();
3864 return !is_intn(int_value, kMaxSmiInlinedBits); 3901 return !is_intn(int_value, kMaxSmiInlinedBits);
3865 } 3902 }
3866 3903
3867 3904
3905 // Materialize the regexp literal 'node' in the literals array
3906 // 'literals' of the function. Leave the regexp boilerplate in
3907 // 'boilerplate'.
3868 class DeferredRegExpLiteral: public DeferredCode { 3908 class DeferredRegExpLiteral: public DeferredCode {
3869 public: 3909 public:
3870 explicit DeferredRegExpLiteral(RegExpLiteral* node) : node_(node) { 3910 DeferredRegExpLiteral(Register boilerplate,
3911 Register literals,
3912 RegExpLiteral* node)
3913 : boilerplate_(boilerplate), literals_(literals), node_(node) {
3871 set_comment("[ DeferredRegExpLiteral"); 3914 set_comment("[ DeferredRegExpLiteral");
3872 } 3915 }
3873 3916
3874 virtual void Generate(); 3917 void Generate();
3875 3918
3876 private: 3919 private:
3920 Register boilerplate_;
3921 Register literals_;
3877 RegExpLiteral* node_; 3922 RegExpLiteral* node_;
3878 }; 3923 };
3879 3924
3880 3925
3881 void DeferredRegExpLiteral::Generate() { 3926 void DeferredRegExpLiteral::Generate() {
3882 Result literals;
3883 enter()->Bind(&literals);
3884 // Since the entry is undefined we call the runtime system to 3927 // Since the entry is undefined we call the runtime system to
3885 // compute the literal. 3928 // compute the literal.
3886
3887 VirtualFrame* frame = cgen()->frame();
3888 // Literal array (0). 3929 // Literal array (0).
3889 frame->Push(&literals); 3930 __ push(literals_);
3890 // Literal index (1). 3931 // Literal index (1).
3891 frame->Push(Smi::FromInt(node_->literal_index())); 3932 __ push(Immediate(Smi::FromInt(node_->literal_index())));
3892 // RegExp pattern (2). 3933 // RegExp pattern (2).
3893 frame->Push(node_->pattern()); 3934 __ push(Immediate(node_->pattern()));
3894 // RegExp flags (3). 3935 // RegExp flags (3).
3895 frame->Push(node_->flags()); 3936 __ push(Immediate(node_->flags()));
3896 Result boilerplate = 3937 __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
3897 frame->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4); 3938 if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
3898 exit_.Jump(&boilerplate);
3899 } 3939 }
3900 3940
3901 3941
3902 void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) { 3942 void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
3903 Comment cmnt(masm_, "[ RegExp Literal"); 3943 Comment cmnt(masm_, "[ RegExp Literal");
3904 DeferredRegExpLiteral* deferred = new DeferredRegExpLiteral(node);
3905 3944
3906 // Retrieve the literals array and check the allocated entry. Begin 3945 // Retrieve the literals array and check the allocated entry. Begin
3907 // with a writable copy of the function of this activation in a 3946 // with a writable copy of the function of this activation in a
3908 // register. 3947 // register.
3909 frame_->PushFunction(); 3948 frame_->PushFunction();
3910 Result literals = frame_->Pop(); 3949 Result literals = frame_->Pop();
3911 literals.ToRegister(); 3950 literals.ToRegister();
3912 frame_->Spill(literals.reg()); 3951 frame_->Spill(literals.reg());
3913 3952
3914 // Load the literals array of the function. 3953 // Load the literals array of the function.
3915 __ mov(literals.reg(), 3954 __ mov(literals.reg(),
3916 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset)); 3955 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
3917 3956
3918 // Load the literal at the ast saved index. 3957 // Load the literal at the ast saved index.
3958 Result boilerplate = allocator_->Allocate();
3959 ASSERT(boilerplate.is_valid());
3919 int literal_offset = 3960 int literal_offset =
3920 FixedArray::kHeaderSize + node->literal_index() * kPointerSize; 3961 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
3921 Result boilerplate = allocator_->Allocate();
3922 ASSERT(boilerplate.is_valid());
3923 __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset)); 3962 __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
3924 3963
3925 // Check whether we need to materialize the RegExp object. If so, 3964 // Check whether we need to materialize the RegExp object. If so,
3926 // jump to the deferred code passing the literals array. 3965 // jump to the deferred code passing the literals array.
3966 DeferredRegExpLiteral* deferred =
3967 new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
3927 __ cmp(boilerplate.reg(), Factory::undefined_value()); 3968 __ cmp(boilerplate.reg(), Factory::undefined_value());
3928 deferred->enter()->Branch(equal, &literals, not_taken); 3969 deferred->Branch(equal);
3929 3970 deferred->BindExit();
3930 literals.Unuse(); 3971 literals.Unuse();
3931 // The deferred code returns the boilerplate object.
3932 deferred->BindExit(&boilerplate);
3933 3972
3934 // Push the boilerplate object. 3973 // Push the boilerplate object.
3935 frame_->Push(&boilerplate); 3974 frame_->Push(&boilerplate);
3936 } 3975 }
3937 3976
3938 3977
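VisitRegExpLiteral implements materialize-on-first-use: the function's literals array slot starts out undefined, and the deferred runtime call fills it in exactly once. A plain C++ sketch of that caching pattern (hypothetical types; in the real code the store back into the literals array happens inside the runtime function):

#include <vector>
#include <cstddef>

struct HeapObject { bool is_undefined; };
static HeapObject undefined_value{true};

// Models Runtime::kMaterializeRegExpLiteral building the boilerplate.
static HeapObject* MaterializeBoilerplate() {
  static HeapObject boilerplate{false};
  return &boilerplate;
}

static HeapObject* LoadRegExpBoilerplate(std::vector<HeapObject*>& literals,
                                         std::size_t literal_index) {
  HeapObject* boilerplate = literals[literal_index];  // load saved slot
  if (boilerplate->is_undefined) {                    // deferred->Branch(equal)
    boilerplate = MaterializeBoilerplate();           // deferred runtime call
    literals[literal_index] = boilerplate;            // cached for next time
  }
  return boilerplate;                                 // deferred->BindExit()
}

int main() {
  std::vector<HeapObject*> literals(1, &undefined_value);
  LoadRegExpBoilerplate(literals, 0);  // first use: materializes
  LoadRegExpBoilerplate(literals, 0);  // second use: cache hit
  return 0;
}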
3939 // This deferred code stub will be used for creating the boilerplate 3978 // Materialize the object literal 'node' in the literals array
3940 // by calling Runtime_CreateObjectLiteral. 3979 // 'literals' of the function. Leave the object boilerplate in
3941 // Each created boilerplate is stored in the JSFunction and they are 3980 // 'boilerplate'.
3942 // therefore context dependent.
3943 class DeferredObjectLiteral: public DeferredCode { 3981 class DeferredObjectLiteral: public DeferredCode {
3944 public: 3982 public:
3945 explicit DeferredObjectLiteral(ObjectLiteral* node) : node_(node) { 3983 DeferredObjectLiteral(Register boilerplate,
3984 Register literals,
3985 ObjectLiteral* node)
3986 : boilerplate_(boilerplate), literals_(literals), node_(node) {
3946 set_comment("[ DeferredObjectLiteral"); 3987 set_comment("[ DeferredObjectLiteral");
3947 } 3988 }
3948 3989
3949 virtual void Generate(); 3990 void Generate();
3950 3991
3951 private: 3992 private:
3993 Register boilerplate_;
3994 Register literals_;
3952 ObjectLiteral* node_; 3995 ObjectLiteral* node_;
3953 }; 3996 };
3954 3997
3955 3998
3956 void DeferredObjectLiteral::Generate() { 3999 void DeferredObjectLiteral::Generate() {
3957 Result literals;
3958 enter()->Bind(&literals);
3959 // Since the entry is undefined we call the runtime system to 4000 // Since the entry is undefined we call the runtime system to
3960 // compute the literal. 4001 // compute the literal.
3961
3962 VirtualFrame* frame = cgen()->frame();
3963 // Literal array (0). 4002 // Literal array (0).
3964 frame->Push(&literals); 4003 __ push(literals_);
3965 // Literal index (1). 4004 // Literal index (1).
3966 frame->Push(Smi::FromInt(node_->literal_index())); 4005 __ push(Immediate(Smi::FromInt(node_->literal_index())));
3967 // Constant properties (2). 4006 // Constant properties (2).
3968 frame->Push(node_->constant_properties()); 4007 __ push(Immediate(node_->constant_properties()));
3969 Result boilerplate = 4008 __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
3970 frame->CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3); 4009 if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
3971 exit_.Jump(&boilerplate);
3972 } 4010 }
3973 4011
3974 4012
3975 void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) { 4013 void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
3976 Comment cmnt(masm_, "[ ObjectLiteral"); 4014 Comment cmnt(masm_, "[ ObjectLiteral");
3977 DeferredObjectLiteral* deferred = new DeferredObjectLiteral(node);
3978 4015
3979 // Retrieve the literals array and check the allocated entry. Begin 4016 // Retrieve the literals array and check the allocated entry. Begin
3980 // with a writable copy of the function of this activation in a 4017 // with a writable copy of the function of this activation in a
3981 // register. 4018 // register.
3982 frame_->PushFunction(); 4019 frame_->PushFunction();
3983 Result literals = frame_->Pop(); 4020 Result literals = frame_->Pop();
3984 literals.ToRegister(); 4021 literals.ToRegister();
3985 frame_->Spill(literals.reg()); 4022 frame_->Spill(literals.reg());
3986 4023
3987 // Load the literals array of the function. 4024 // Load the literals array of the function.
3988 __ mov(literals.reg(), 4025 __ mov(literals.reg(),
3989 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset)); 4026 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
3990 4027
3991 // Load the literal at the ast saved index. 4028 // Load the literal at the ast saved index.
4029 Result boilerplate = allocator_->Allocate();
4030 ASSERT(boilerplate.is_valid());
3992 int literal_offset = 4031 int literal_offset =
3993 FixedArray::kHeaderSize + node->literal_index() * kPointerSize; 4032 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
3994 Result boilerplate = allocator_->Allocate();
3995 ASSERT(boilerplate.is_valid());
3996 __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset)); 4033 __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
3997 4034
3998 // Check whether we need to materialize the object literal boilerplate. 4035 // Check whether we need to materialize the object literal boilerplate.
3999 // If so, jump to the deferred code passing the literals array. 4036 // If so, jump to the deferred code passing the literals array.
4037 DeferredObjectLiteral* deferred =
4038 new DeferredObjectLiteral(boilerplate.reg(), literals.reg(), node);
4000 __ cmp(boilerplate.reg(), Factory::undefined_value()); 4039 __ cmp(boilerplate.reg(), Factory::undefined_value());
4001 deferred->enter()->Branch(equal, &literals, not_taken); 4040 deferred->Branch(equal);
4002 4041 deferred->BindExit();
4003 literals.Unuse(); 4042 literals.Unuse();
4004 // The deferred code returns the boilerplate object.
4005 deferred->BindExit(&boilerplate);
4006 4043
4007 // Push the boilerplate object. 4044 // Push the boilerplate object.
4008 frame_->Push(&boilerplate); 4045 frame_->Push(&boilerplate);
4009 // Clone the boilerplate object. 4046 // Clone the boilerplate object.
4010 Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate; 4047 Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
4011 if (node->depth() == 1) { 4048 if (node->depth() == 1) {
4012 clone_function_id = Runtime::kCloneShallowLiteralBoilerplate; 4049 clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
4013 } 4050 }
4014 Result clone = frame_->CallRuntime(clone_function_id, 1); 4051 Result clone = frame_->CallRuntime(clone_function_id, 1);
4015 // Push the newly cloned literal object as the result. 4052 // Push the newly cloned literal object as the result.
(...skipping 49 matching lines...)
4065 Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4); 4102 Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
4066 // Ignore the result. 4103 // Ignore the result.
4067 break; 4104 break;
4068 } 4105 }
4069 default: UNREACHABLE(); 4106 default: UNREACHABLE();
4070 } 4107 }
4071 } 4108 }
4072 } 4109 }
4073 4110
4074 4111
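Unlike regexp literals, object (and array) literals clone the boilerplate on every evaluation; the depth check above merely picks the cheaper shallow clone when the literal contains no nested literals. The selection logic restated as a small C++ sketch with illustrative names:

enum CloneFunctionId {
  kCloneLiteralBoilerplate,        // deep clone, nested literals present
  kCloneShallowLiteralBoilerplate  // shallow clone, depth == 1
};

static CloneFunctionId SelectCloneFunction(int depth) {
  return depth == 1 ? kCloneShallowLiteralBoilerplate
                    : kCloneLiteralBoilerplate;
}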
4075 // This deferred code stub will be used for creating the boilerplate 4112 // Materialize the array literal 'node' in the literals array 'literals'
4076 // by calling Runtime_CreateArrayLiteralBoilerplate. 4113 // of the function. Leave the array boilerplate in 'boilerplate'.
4077 // Each created boilerplate is stored in the JSFunction and they are
4078 // therefore context dependent.
4079 class DeferredArrayLiteral: public DeferredCode { 4114 class DeferredArrayLiteral: public DeferredCode {
4080 public: 4115 public:
4081 explicit DeferredArrayLiteral(ArrayLiteral* node) : node_(node) { 4116 DeferredArrayLiteral(Register boilerplate,
4117 Register literals,
4118 ArrayLiteral* node)
4119 : boilerplate_(boilerplate), literals_(literals), node_(node) {
4082 set_comment("[ DeferredArrayLiteral"); 4120 set_comment("[ DeferredArrayLiteral");
4083 } 4121 }
4084 4122
4085 virtual void Generate(); 4123 void Generate();
4086 4124
4087 private: 4125 private:
4126 Register boilerplate_;
4127 Register literals_;
4088 ArrayLiteral* node_; 4128 ArrayLiteral* node_;
4089 }; 4129 };
4090 4130
4091 4131
4092 void DeferredArrayLiteral::Generate() { 4132 void DeferredArrayLiteral::Generate() {
4093 Result literals;
4094 enter()->Bind(&literals);
4095 // Since the entry is undefined we call the runtime system to 4133 // Since the entry is undefined we call the runtime system to
4096 // compute the literal. 4134 // compute the literal.
4097
4098 VirtualFrame* frame = cgen()->frame();
4099 // Literal array (0). 4135 // Literal array (0).
4100 frame->Push(&literals); 4136 __ push(literals_);
4101 // Literal index (1). 4137 // Literal index (1).
4102 frame->Push(Smi::FromInt(node_->literal_index())); 4138 __ push(Immediate(Smi::FromInt(node_->literal_index())));
4103 // Constant properties (2). 4139 // Constant properties (2).
4104 frame->Push(node_->literals()); 4140 __ push(Immediate(node_->literals()));
4105 Result boilerplate = 4141 __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
4106 frame->CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3); 4142 if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
4107 exit_.Jump(&boilerplate);
4108 } 4143 }
4109 4144
4110 4145
4111 void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { 4146 void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
4112 Comment cmnt(masm_, "[ ArrayLiteral"); 4147 Comment cmnt(masm_, "[ ArrayLiteral");
4113 DeferredArrayLiteral* deferred = new DeferredArrayLiteral(node);
4114 4148
4115 // Retrieve the literals array and check the allocated entry. Begin 4149 // Retrieve the literals array and check the allocated entry. Begin
4116 // with a writable copy of the function of this activation in a 4150 // with a writable copy of the function of this activation in a
4117 // register. 4151 // register.
4118 frame_->PushFunction(); 4152 frame_->PushFunction();
4119 Result literals = frame_->Pop(); 4153 Result literals = frame_->Pop();
4120 literals.ToRegister(); 4154 literals.ToRegister();
4121 frame_->Spill(literals.reg()); 4155 frame_->Spill(literals.reg());
4122 4156
4123 // Load the literals array of the function. 4157 // Load the literals array of the function.
4124 __ mov(literals.reg(), 4158 __ mov(literals.reg(),
4125 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset)); 4159 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
4126 4160
4127 // Load the literal at the ast saved index. 4161 // Load the literal at the ast saved index.
4162 Result boilerplate = allocator_->Allocate();
4163 ASSERT(boilerplate.is_valid());
4128 int literal_offset = 4164 int literal_offset =
4129 FixedArray::kHeaderSize + node->literal_index() * kPointerSize; 4165 FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
4130 Result boilerplate = allocator_->Allocate();
4131 ASSERT(boilerplate.is_valid());
4132 __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset)); 4166 __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
4133 4167
4134 // Check whether we need to materialize the array literal boilerplate. 4168 // Check whether we need to materialize the array literal boilerplate.
4135 // If so, jump to the deferred code passing the literals array. 4169 // If so, jump to the deferred code passing the literals array.
4170 DeferredArrayLiteral* deferred =
4171 new DeferredArrayLiteral(boilerplate.reg(), literals.reg(), node);
4136 __ cmp(boilerplate.reg(), Factory::undefined_value()); 4172 __ cmp(boilerplate.reg(), Factory::undefined_value());
4137 deferred->enter()->Branch(equal, &literals, not_taken); 4173 deferred->Branch(equal);
4174 deferred->BindExit();
4175 literals.Unuse();
4138 4176
4139 literals.Unuse(); 4177 // Push the resulting array literal boilerplate on the stack.
4140 // The deferred code returns the boilerplate object.
4141 deferred->BindExit(&boilerplate);
4142
4143 // Push the resulting array literal on the stack.
4144 frame_->Push(&boilerplate); 4178 frame_->Push(&boilerplate);
4145
4146 // Clone the boilerplate object. 4179 // Clone the boilerplate object.
4147 Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate; 4180 Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
4148 if (node->depth() == 1) { 4181 if (node->depth() == 1) {
4149 clone_function_id = Runtime::kCloneShallowLiteralBoilerplate; 4182 clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
4150 } 4183 }
4151 Result clone = frame_->CallRuntime(clone_function_id, 1); 4184 Result clone = frame_->CallRuntime(clone_function_id, 1);
4152 // Push the newly cloned literal object as the result. 4185 // Push the newly cloned literal object as the result.
4153 frame_->Push(&clone); 4186 frame_->Push(&clone);
4154 4187
4155 // Generate code to set the elements in the array that are not 4188 // Generate code to set the elements in the array that are not
(...skipping 900 matching lines...)
5056 break; 5089 break;
5057 } 5090 }
5058 5091
5059 default: 5092 default:
5060 UNREACHABLE(); 5093 UNREACHABLE();
5061 } 5094 }
5062 } 5095 }
5063 } 5096 }
5064 5097
5065 5098
5066 class DeferredCountOperation: public DeferredCode { 5099 // The value in dst was optimistically incremented or decremented. The
5100 // result overflowed or was not smi tagged. Undo the operation, call
5101 // into the runtime to convert the argument to a number, and call the
5102 // specialized add or subtract stub. The result is left in dst.
5103 class DeferredPrefixCountOperation: public DeferredCode {
5067 public: 5104 public:
5068 DeferredCountOperation(bool is_postfix, 5105 DeferredPrefixCountOperation(Register dst, bool is_increment)
5069 bool is_increment, 5106 : dst_(dst), is_increment_(is_increment) {
5070 int target_size)
5071 : is_postfix_(is_postfix),
5072 is_increment_(is_increment),
5073 target_size_(target_size) {
5074 set_comment("[ DeferredCountOperation"); 5107 set_comment("[ DeferredCountOperation");
5075 } 5108 }
5076 5109
5077 virtual void Generate(); 5110 virtual void Generate();
5078 5111
5079 private: 5112 private:
5080 bool is_postfix_; 5113 Register dst_;
5081 bool is_increment_; 5114 bool is_increment_;
5082 int target_size_;
5083 }; 5115 };
5084 5116
5085 5117
5086 #undef __ 5118 void DeferredPrefixCountOperation::Generate() {
5087 #define __ ACCESS_MASM(cgen()->masm())
5088
5089
5090 void DeferredCountOperation::Generate() {
5091 Result value;
5092 enter()->Bind(&value);
5093 VirtualFrame* frame = cgen()->frame();
5094 // Undo the optimistic smi operation. 5119 // Undo the optimistic smi operation.
5095 value.ToRegister();
5096 frame->Spill(value.reg());
5097 if (is_increment_) { 5120 if (is_increment_) {
5098 __ sub(Operand(value.reg()), Immediate(Smi::FromInt(1))); 5121 __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
5099 } else { 5122 } else {
5100 __ add(Operand(value.reg()), Immediate(Smi::FromInt(1))); 5123 __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
5101 } 5124 }
5102 frame->Push(&value); 5125 __ push(dst_);
5103 value = frame->InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION, 1); 5126 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
5104 frame->Push(&value); 5127 __ push(eax);
5105 if (is_postfix_) { // Fix up copy of old value with ToNumber(value). 5128 __ push(Immediate(Smi::FromInt(1)));
5106 // This is only safe because VisitCountOperation makes this frame slot 5129 if (is_increment_) {
5107 // beneath the reference a register, which is spilled at the above call. 5130 __ CallRuntime(Runtime::kNumberAdd, 2);
5108 // We cannot safely write to constants or copies below the water line. 5131 } else {
5109 frame->StoreToElementAt(target_size_ + 1); 5132 __ CallRuntime(Runtime::kNumberSub, 2);
5110 } 5133 }
5111 frame->Push(Smi::FromInt(1)); 5134 if (!dst_.is(eax)) __ mov(dst_, eax);
5112 if (is_increment_) {
5113 value = frame->CallRuntime(Runtime::kNumberAdd, 2);
5114 } else {
5115 value = frame->CallRuntime(Runtime::kNumberSub, 2);
5116 }
5117 exit_.Jump(&value);
5118 } 5135 }
5119 5136
5120 5137
5121 #undef __ 5138 // The value in dst was optimistically incremented or decremented. The
5122 #define __ ACCESS_MASM(masm_) 5139 // result overflowed or was not smi tagged. Undo the operation and call
5140 // into the runtime to convert the argument to a number. Update the
5141 // original value in old. Call the specialized add or subtract stub.
5142 // The result is left in dst.
5143 class DeferredPostfixCountOperation: public DeferredCode {
5144 public:
5145 DeferredPostfixCountOperation(Register dst, Register old, bool is_increment)
5146 : dst_(dst), old_(old), is_increment_(is_increment) {
5147 set_comment("[ DeferredCountOperation");
5148 }
5149
5150 virtual void Generate();
5151
5152 private:
5153 Register dst_;
5154 Register old_;
5155 bool is_increment_;
5156 };
5157
5158
5159 void DeferredPostfixCountOperation::Generate() {
5160 // Undo the optimistic smi operation.
5161 if (is_increment_) {
5162 __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
5163 } else {
5164 __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
5165 }
5166 __ push(dst_);
5167 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
5168
5169 // Save the result of ToNumber to use as the old value.
5170 __ push(eax);
5171
5172 // Call the runtime for the addition or subtraction.
5173 __ push(eax);
5174 __ push(Immediate(Smi::FromInt(1)));
5175 if (is_increment_) {
5176 __ CallRuntime(Runtime::kNumberAdd, 2);
5177 } else {
5178 __ CallRuntime(Runtime::kNumberSub, 2);
5179 }
5180 if (!dst_.is(eax)) __ mov(dst_, eax);
5181 __ pop(old_);
5182 }
5123 5183
5124 5184
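The two deferred count classes differ only in their contract: the prefix version leaves just the new value in dst, while the postfix version must also hand back the ToNumber-converted old value in old. A sketch of those contracts in plain C++ (ToNumber modeled as an identity on doubles, purely for illustration):

#include <utility>

static double ToNumberSketch(double v) { return v; }  // placeholder

// DeferredPrefixCountOperation: the result is just the new value.
static double PrefixCount(double value, bool is_increment) {
  double n = ToNumberSketch(value);
  return is_increment ? n + 1 : n - 1;
}

// DeferredPostfixCountOperation: returns {new value, old value}; the
// old value is the ToNumber result saved before the add or subtract.
static std::pair<double, double> PostfixCount(double value,
                                              bool is_increment) {
  double old_value = ToNumberSketch(value);
  double new_value = is_increment ? old_value + 1 : old_value - 1;
  return {new_value, old_value};
}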
5125 void CodeGenerator::VisitCountOperation(CountOperation* node) { 5185 void CodeGenerator::VisitCountOperation(CountOperation* node) {
5126 Comment cmnt(masm_, "[ CountOperation"); 5186 Comment cmnt(masm_, "[ CountOperation");
5127 5187
5128 bool is_postfix = node->is_postfix(); 5188 bool is_postfix = node->is_postfix();
5129 bool is_increment = node->op() == Token::INC; 5189 bool is_increment = node->op() == Token::INC;
5130 5190
5131 Variable* var = node->expression()->AsVariableProxy()->AsVariable(); 5191 Variable* var = node->expression()->AsVariableProxy()->AsVariable();
5132 bool is_const = (var != NULL && var->mode() == Variable::CONST); 5192 bool is_const = (var != NULL && var->mode() == Variable::CONST);
5133 5193
5134 // Postfix operators need a stack slot under the reference to hold 5194 // Postfix operations need a stack slot under the reference to hold
5135 // the old value while the new one is being stored. 5195 // the old value while the new value is being stored. This is so that
5136 if (is_postfix) { 5196 // in the case that storing the new value requires a call, the old
5137 frame_->Push(Smi::FromInt(0)); 5197 // value will be in the frame to be spilled.
5138 } 5198 if (is_postfix) frame_->Push(Smi::FromInt(0));
5139 5199
5140 { Reference target(this, node->expression()); 5200 { Reference target(this, node->expression());
5141 if (target.is_illegal()) { 5201 if (target.is_illegal()) {
5142 // Spoof the virtual frame to have the expected height (one higher 5202 // Spoof the virtual frame to have the expected height (one higher
5143 // than on entry). 5203 // than on entry).
5144 if (!is_postfix) { 5204 if (!is_postfix) frame_->Push(Smi::FromInt(0));
5145 frame_->Push(Smi::FromInt(0));
5146 }
5147 return; 5205 return;
5148 } 5206 }
5149 target.TakeValue(NOT_INSIDE_TYPEOF); 5207 target.TakeValue(NOT_INSIDE_TYPEOF);
5150 5208
5151 DeferredCountOperation* deferred = 5209 Result new_value = frame_->Pop();
5152 new DeferredCountOperation(is_postfix, is_increment, target.size()); 5210 new_value.ToRegister();
5153 5211
5154 Result value = frame_->Pop(); 5212 Result old_value; // Only allocated in the postfix case.
5155 value.ToRegister(); 5213 if (is_postfix) {
5214 // Allocate a temporary to preserve the old value.
5215 old_value = allocator_->Allocate();
5216 ASSERT(old_value.is_valid());
5217 __ mov(old_value.reg(), new_value.reg());
5218 }
5219 // Ensure the new value is writable.
5220 frame_->Spill(new_value.reg());
5156 5221
5157 // Postfix: Store the old value as the result. 5222 // In order to combine the overflow and the smi tag check, we need
5158 if (is_postfix) { 5223 // to be able to allocate a byte register. We attempt to do so
5159 // Explicitly back the slot for the old value with a new register. 5224 // without spilling. If we fail, we will generate separate overflow
5160 // This improves performance in some cases. 5225 // and smi tag checks.
5161 Result old_value = allocator_->Allocate();
5162 ASSERT(old_value.is_valid());
5163 __ mov(old_value.reg(), value.reg());
5164 // SetElement must not create a constant element or a copy in this slot,
5165 // since we will write to it, below the waterline, in deferred code.
5166 frame_->SetElementAt(target.size(), &old_value);
5167 }
5168
5169 // Perform optimistic increment/decrement. Ensure the value is
5170 // writable.
5171 frame_->Spill(value.reg());
5172 ASSERT(allocator_->count(value.reg()) == 1);
5173
5174 // In order to combine the overflow and the smi check, we need to
5175 // be able to allocate a byte register. We attempt to do so
5176 // without spilling. If we fail, we will generate separate
5177 // overflow and smi checks.
5178 // 5226 //
5179 // We need to allocate and clear the temporary byte register 5227 // We allocate and clear the temporary byte register before
5180 // before performing the count operation since clearing the 5228 // performing the count operation since clearing the register using
5181 // register using xor will clear the overflow flag. 5229 // xor will clear the overflow flag.
5182 Result tmp = allocator_->AllocateByteRegisterWithoutSpilling(); 5230 Result tmp = allocator_->AllocateByteRegisterWithoutSpilling();
5183 if (tmp.is_valid()) { 5231 if (tmp.is_valid()) {
5184 __ Set(tmp.reg(), Immediate(0)); 5232 __ Set(tmp.reg(), Immediate(0));
5185 } 5233 }
5186 5234
5187 if (is_increment) { 5235 DeferredCode* deferred = NULL;
5188 __ add(Operand(value.reg()), Immediate(Smi::FromInt(1))); 5236 if (is_postfix) {
5237 deferred = new DeferredPostfixCountOperation(new_value.reg(),
5238 old_value.reg(),
5239 is_increment);
5189 } else { 5240 } else {
5190 __ sub(Operand(value.reg()), Immediate(Smi::FromInt(1))); 5241 deferred = new DeferredPrefixCountOperation(new_value.reg(),
5242 is_increment);
5191 } 5243 }
5192 5244
5193 // If the count operation didn't overflow and the result is a 5245 if (is_increment) {
5194 // valid smi, we're done. Otherwise, we jump to the deferred 5246 __ add(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
5195 // slow-case code. 5247 } else {
5196 // 5248 __ sub(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
5197 // We combine the overflow and the smi check if we could 5249 }
5198 // successfully allocate a temporary byte register. 5250
5251 // If the count operation didn't overflow and the result is a valid
5252 // smi, we're done. Otherwise, we jump to the deferred slow-case
5253 // code.
5199 if (tmp.is_valid()) { 5254 if (tmp.is_valid()) {
5255 // We combine the overflow and the smi tag check if we could
5256 // successfully allocate a temporary byte register.
5200 __ setcc(overflow, tmp.reg()); 5257 __ setcc(overflow, tmp.reg());
5201 __ or_(Operand(tmp.reg()), value.reg()); 5258 __ or_(Operand(tmp.reg()), new_value.reg());
5202 __ test(tmp.reg(), Immediate(kSmiTagMask)); 5259 __ test(tmp.reg(), Immediate(kSmiTagMask));
5203 tmp.Unuse(); 5260 tmp.Unuse();
5204 deferred->enter()->Branch(not_zero, &value, not_taken); 5261 deferred->Branch(not_zero);
5205 } else { // Otherwise we test separately for overflow and smi check. 5262 } else {
5206 deferred->SetEntryFrame(&value); 5263 // Otherwise we test separately for overflow and smi tag.
5207 deferred->enter()->Branch(overflow, &value, not_taken); 5264 deferred->Branch(overflow);
5208 __ test(value.reg(), Immediate(kSmiTagMask)); 5265 __ test(new_value.reg(), Immediate(kSmiTagMask));
5209 deferred->enter()->Branch(not_zero, &value, not_taken); 5266 deferred->Branch(not_zero);
5210 } 5267 }
5268 deferred->BindExit();
5211 5269
5212 // Store the new value in the target if not const. 5270 // Postfix: store the old value in the allocated slot under the
5213 deferred->BindExit(&value); 5271 // reference.
5214 frame_->Push(&value); 5272 if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
5215 if (!is_const) { 5273
5216 target.SetValue(NOT_CONST_INIT); 5274 frame_->Push(&new_value);
5217 } 5275 // Non-constant: update the reference.
5276 if (!is_const) target.SetValue(NOT_CONST_INIT);
5218 } 5277 }
5219 5278
5220 // Postfix: Discard the new value and use the old. 5279 // Postfix: drop the new value and use the old.
5221 if (is_postfix) { 5280 if (is_postfix) frame_->Drop();
5222 frame_->Drop();
5223 }
5224 } 5281 }
5225 5282
5226 5283
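The byte-register trick in VisitCountOperation folds two failure conditions into one branch: setcc materializes the overflow flag as 0 or 1, or-ing in the result merges in its smi tag bit, and a single test against kSmiTagMask then catches both overflow and a non-smi operand. A standalone C++ model of the check, assuming the usual ia32 smi encoding (tag bit 0 clear, so Smi::FromInt(1) is the integer 2); __builtin_add_overflow is a GCC/Clang builtin:

#include <cstdint>
#include <cassert>

static const int32_t kSmiTagMaskSketch = 1;

// True when the optimistic "add smi 1" must take the deferred slow
// path: the add overflowed, or the operand was not smi tagged.
static bool IncrementNeedsSlowPath(int32_t tagged_value, int32_t* result) {
  int32_t sum = 0;
  // setcc(overflow, tmp): tmp becomes 1 on overflow, 0 otherwise.
  int32_t tmp = __builtin_add_overflow(tagged_value, 2, &sum) ? 1 : 0;
  *result = sum;
  // or_(tmp, new_value); test(tmp, kSmiTagMask): one branch covers both.
  return ((tmp | sum) & kSmiTagMaskSketch) != 0;
}

int main() {
  int32_t r;
  assert(!IncrementNeedsSlowPath(4, &r));             // smi 2 -> smi 3, fast
  assert(IncrementNeedsSlowPath(5, &r));              // not smi tagged, slow
  assert(IncrementNeedsSlowPath(INT32_MAX - 1, &r));  // overflow, slow
  return 0;
}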
5227 void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { 5284 void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
5228 // Note that due to an optimization in comparison operations (typeof 5285 // Note that due to an optimization in comparison operations (typeof
5229 // compared to a string literal), we can evaluate a binary expression such 5286 // compared to a string literal), we can evaluate a binary expression such
5230 // as AND or OR and not leave a value on the frame or in the cc register. 5287 // as AND or OR and not leave a value on the frame or in the cc register.
5231 Comment cmnt(masm_, "[ BinaryOperation"); 5288 Comment cmnt(masm_, "[ BinaryOperation");
5232 Token::Value op = node->op(); 5289 Token::Value op = node->op();
5233 5290
(...skipping 330 matching lines...)
5564 bool CodeGenerator::HasValidEntryRegisters() { 5621 bool CodeGenerator::HasValidEntryRegisters() {
5565 return (allocator()->count(eax) == (frame()->is_used(eax) ? 1 : 0)) 5622 return (allocator()->count(eax) == (frame()->is_used(eax) ? 1 : 0))
5566 && (allocator()->count(ebx) == (frame()->is_used(ebx) ? 1 : 0)) 5623 && (allocator()->count(ebx) == (frame()->is_used(ebx) ? 1 : 0))
5567 && (allocator()->count(ecx) == (frame()->is_used(ecx) ? 1 : 0)) 5624 && (allocator()->count(ecx) == (frame()->is_used(ecx) ? 1 : 0))
5568 && (allocator()->count(edx) == (frame()->is_used(edx) ? 1 : 0)) 5625 && (allocator()->count(edx) == (frame()->is_used(edx) ? 1 : 0))
5569 && (allocator()->count(edi) == (frame()->is_used(edi) ? 1 : 0)); 5626 && (allocator()->count(edi) == (frame()->is_used(edi) ? 1 : 0));
5570 } 5627 }
5571 #endif 5628 #endif
5572 5629
5573 5630
5631 // Emit a LoadIC call to get the value from receiver and leave it in
5632 // dst. The receiver register is restored after the call.
5574 class DeferredReferenceGetNamedValue: public DeferredCode { 5633 class DeferredReferenceGetNamedValue: public DeferredCode {
5575 public: 5634 public:
5576 explicit DeferredReferenceGetNamedValue(Handle<String> name) : name_(name) { 5635 DeferredReferenceGetNamedValue(Register dst,
5636 Register receiver,
5637 Handle<String> name)
5638 : dst_(dst), receiver_(receiver), name_(name) {
5577 set_comment("[ DeferredReferenceGetNamedValue"); 5639 set_comment("[ DeferredReferenceGetNamedValue");
5578 } 5640 }
5579 5641
5580 virtual void Generate(); 5642 virtual void Generate();
5581 5643
5582 Label* patch_site() { return &patch_site_; } 5644 Label* patch_site() { return &patch_site_; }
5583 5645
5584 private: 5646 private:
5585 Label patch_site_; 5647 Label patch_site_;
5648 Register dst_;
5649 Register receiver_;
5586 Handle<String> name_; 5650 Handle<String> name_;
5587 }; 5651 };
5588 5652
5589 5653
5654 void DeferredReferenceGetNamedValue::Generate() {
5655 __ push(receiver_);
5656 __ Set(ecx, Immediate(name_));
5657 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
5658 __ call(ic, RelocInfo::CODE_TARGET);
5659 // The call must be followed by a test eax instruction to indicate
5660 // that the inobject property case was inlined.
5661 //
5662 // Store the delta to the map check instruction here in the test
5663 // instruction. Use masm_-> instead of the __ macro since the
5664 // latter can't return a value.
5665 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
5666 // Here we use masm_-> instead of the __ macro because this is the
5667 // instruction that gets patched and coverage code gets in the way.
5668 masm_->test(eax, Immediate(-delta_to_patch_site));
5669 __ IncrementCounter(&Counters::named_load_inline_miss, 1);
5670
5671 if (!dst_.is(eax)) __ mov(dst_, eax);
5672 __ pop(receiver_);
5673 }
5674
5675
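The negative immediate in the test instruction after the IC call is pure bookkeeping: it records how far back the patchable map-check cmp sits so the inline cache miss handler can find and patch it. A sketch of that arithmetic with addresses as plain integers (illustrative only, not the actual IC code):

#include <cstdint>
#include <cassert>

// The generator emits test(eax, Immediate(-delta_to_patch_site)), where
// delta_to_patch_site = SizeOfCodeGeneratedSince(patch_site()).
static uintptr_t LocatePatchSite(uintptr_t delta_origin,
                                 int32_t test_immediate) {
  int32_t delta_to_patch_site = -test_immediate;
  return delta_origin - delta_to_patch_site;
}

int main() {
  uintptr_t patch_site = 0x1000;    // address of the map-check cmp
  uintptr_t delta_origin = 0x1040;  // point where the delta was measured
  int32_t immediate =
      -static_cast<int32_t>(delta_origin - patch_site);  // encodes -64
  assert(LocatePatchSite(delta_origin, immediate) == patch_site);
  return 0;
}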
5590 class DeferredReferenceGetKeyedValue: public DeferredCode { 5676 class DeferredReferenceGetKeyedValue: public DeferredCode {
5591 public: 5677 public:
5592 explicit DeferredReferenceGetKeyedValue(bool is_global) 5678 explicit DeferredReferenceGetKeyedValue(Register dst,
5593 : is_global_(is_global) { 5679 Register receiver,
5680 Register key,
5681 bool is_global)
5682 : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
5594 set_comment("[ DeferredReferenceGetKeyedValue"); 5683 set_comment("[ DeferredReferenceGetKeyedValue");
5595 } 5684 }
5596 5685
5597 virtual void Generate(); 5686 virtual void Generate();
5598 5687
5599 Label* patch_site() { return &patch_site_; } 5688 Label* patch_site() { return &patch_site_; }
5600 5689
5601 private: 5690 private:
5602 Label patch_site_; 5691 Label patch_site_;
5692 Register dst_;
5693 Register receiver_;
5694 Register key_;
5603 bool is_global_; 5695 bool is_global_;
5604 }; 5696 };
5605 5697
5606 5698
5607 #undef __
5608 #define __ ACCESS_MASM(cgen()->masm())
5609
5610
5611 void DeferredReferenceGetNamedValue::Generate() {
5612 Result receiver;
5613 enter()->Bind(&receiver);
5614
5615 cgen()->frame()->Push(&receiver);
5616 cgen()->frame()->Push(name_);
5617 Result answer = cgen()->frame()->CallLoadIC(RelocInfo::CODE_TARGET);
5618 // The call must be followed by a test eax instruction to indicate
5619 // that the inobject property case was inlined.
5620 ASSERT(answer.is_register() && answer.reg().is(eax));
5621 // Store the delta to the map check instruction here in the test
5622 // instruction. Use cgen()->masm()-> instead of the __ macro since
5623 // the latter can't return a value.
5624 int delta_to_patch_site =
5625 cgen()->masm()->SizeOfCodeGeneratedSince(patch_site());
5626 // Here we use cgen()->masm()-> instead of the __ macro because this
5627 // is the instruction that gets patched and coverage code gets in the
5628 // way.
5629 cgen()->masm()->test(answer.reg(), Immediate(-delta_to_patch_site));
5630 __ IncrementCounter(&Counters::named_load_inline_miss, 1);
5631 receiver = cgen()->frame()->Pop();
5632 exit_.Jump(&receiver, &answer);
5633 }
5634
5635
5636 void DeferredReferenceGetKeyedValue::Generate() { 5699 void DeferredReferenceGetKeyedValue::Generate() {
5637 Result receiver; 5700 __ push(receiver_); // First IC argument.
5638 Result key; 5701 __ push(key_); // Second IC argument.
5639 enter()->Bind(&receiver, &key);
5640 cgen()->frame()->Push(&receiver); // First IC argument.
5641 cgen()->frame()->Push(&key); // Second IC argument.
5642 5702
5643 // Calculate the delta from the IC call instruction to the map check 5703 // Calculate the delta from the IC call instruction to the map check
5644 // cmp instruction in the inlined version. This delta is stored in 5704 // cmp instruction in the inlined version. This delta is stored in
5645 // a test(eax, delta) instruction after the call so that we can find 5705 // a test(eax, delta) instruction after the call so that we can find
5646 // it in the IC initialization code and patch the cmp instruction. 5706 // it in the IC initialization code and patch the cmp instruction.
5647 // This means that we cannot allow test instructions after calls to 5707 // This means that we cannot allow test instructions after calls to
5648 // KeyedLoadIC stubs in other places. 5708 // KeyedLoadIC stubs in other places.
5709 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
5649 RelocInfo::Mode mode = is_global_ 5710 RelocInfo::Mode mode = is_global_
5650 ? RelocInfo::CODE_TARGET_CONTEXT 5711 ? RelocInfo::CODE_TARGET_CONTEXT
5651 : RelocInfo::CODE_TARGET; 5712 : RelocInfo::CODE_TARGET;
5652 Result value = cgen()->frame()->CallKeyedLoadIC(mode); 5713 __ call(ic, mode);
5653 // The result needs to be specifically the eax register because the 5714 // The delta from the start of the map-compare instruction to the
5654 // offset to the patch site will be expected in a test eax 5715 // test instruction. We use masm_-> directly here instead of the __
5655 // instruction. 5716 // macro because the macro sometimes uses macro expansion to turn
5656 ASSERT(value.is_register() && value.reg().is(eax)); 5717 // into something that can't return a value. This is encountered
5657 // The delta from the start of the map-compare instruction to the test 5718 // when doing generated code coverage tests.
5658 // instruction. We use cgen()->masm() directly here instead of the __ 5719 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
5659 // macro because the macro sometimes uses macro expansion to turn into 5720 // Here we use masm_-> instead of the __ macro because this is the
5660 // something that can't return a value. This is encountered when 5721 // instruction that gets patched and coverage code gets in the way.
5661 // doing generated code coverage tests. 5722 masm_->test(eax, Immediate(-delta_to_patch_site));
5662 int delta_to_patch_site =
5663 cgen()->masm()->SizeOfCodeGeneratedSince(patch_site());
5664 // Here we use cgen()->masm()-> instead of the __ macro because this
5665 // is the instruction that gets patched and coverage code gets in the
5666 // way.
5667 cgen()->masm()->test(value.reg(), Immediate(-delta_to_patch_site));
5668 __ IncrementCounter(&Counters::keyed_load_inline_miss, 1); 5723 __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
5669 5724
5670 // The receiver and key were spilled by the call, so their state as 5725 if (!dst_.is(eax)) __ mov(dst_, eax);
5671 // constants or copies has been changed. Thus, they need to be 5726 __ pop(key_);
5672 // "mergable" in the block at the exit label and are therefore 5727 __ pop(receiver_);
5673 // passed as return results here.
5674 key = cgen()->frame()->Pop();
5675 receiver = cgen()->frame()->Pop();
5676 exit_.Jump(&receiver, &key, &value);
5677 } 5728 }
5678 5729
5679 5730
5680 #undef __ 5731 #undef __
5681 #define __ ACCESS_MASM(masm) 5732 #define __ ACCESS_MASM(masm)
5682 5733
5683 Handle<String> Reference::GetName() { 5734 Handle<String> Reference::GetName() {
5684 ASSERT(type_ == NAMED); 5735 ASSERT(type_ == NAMED);
5685 Property* property = expression_->AsProperty(); 5736 Property* property = expression_->AsProperty();
5686 if (property == NULL) { 5737 if (property == NULL) {
(...skipping 50 matching lines...)
5737 : RelocInfo::CODE_TARGET; 5788 : RelocInfo::CODE_TARGET;
5738 Result answer = cgen_->frame()->CallLoadIC(mode); 5789 Result answer = cgen_->frame()->CallLoadIC(mode);
5739 // A test eax instruction following the call signals that the 5790 // A test eax instruction following the call signals that the
5740 // inobject property case was inlined. Ensure that there is not 5791 // inobject property case was inlined. Ensure that there is not
5741 // a test eax instruction here. 5792 // a test eax instruction here.
5742 __ nop(); 5793 __ nop();
5743 cgen_->frame()->Push(&answer); 5794 cgen_->frame()->Push(&answer);
5744 } else { 5795 } else {
5745 // Inline the inobject property case. 5796 // Inline the inobject property case.
5746 Comment cmnt(masm, "[ Inlined named property load"); 5797 Comment cmnt(masm, "[ Inlined named property load");
5747 DeferredReferenceGetNamedValue* deferred =
5748 new DeferredReferenceGetNamedValue(GetName());
5749 Result receiver = cgen_->frame()->Pop(); 5798 Result receiver = cgen_->frame()->Pop();
5750 receiver.ToRegister(); 5799 receiver.ToRegister();
5751 5800
5752 // Try to preallocate the value register so that all frames 5801 Result value = cgen_->allocator()->Allocate();
5753 // reaching the deferred code are identical. 5802 ASSERT(value.is_valid());
5754 Result value = cgen_->allocator()->AllocateWithoutSpilling(); 5803 DeferredReferenceGetNamedValue* deferred =
5755 if (value.is_valid()) { 5804 new DeferredReferenceGetNamedValue(value.reg(),
5756 deferred->SetEntryFrame(&receiver); 5805 receiver.reg(),
5757 } 5806 GetName());
5758 5807
5759 // Check that the receiver is a heap object. 5808 // Check that the receiver is a heap object.
5760 __ test(receiver.reg(), Immediate(kSmiTagMask)); 5809 __ test(receiver.reg(), Immediate(kSmiTagMask));
5761 deferred->enter()->Branch(zero, &receiver, not_taken); 5810 deferred->Branch(zero);
5762
5763 // Do not allocate the value register after binding the patch
5764 // site label. The distance from the patch site to the offset
5765 // must be constant.
5766 if (!value.is_valid()) {
5767 value = cgen_->allocator()->Allocate();
5768 ASSERT(value.is_valid());
5769 }
5770 5811
5771 __ bind(deferred->patch_site()); 5812 __ bind(deferred->patch_site());
5772 // This is the map check instruction that will be patched (so we can't 5813 // This is the map check instruction that will be patched (so we can't
5773 // use the double underscore macro that may insert instructions). 5814 // use the double underscore macro that may insert instructions).
5774 // Initially use an invalid map to force a failure. 5815 // Initially use an invalid map to force a failure.
5775 masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset), 5816 masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
5776 Immediate(Factory::null_value())); 5817 Immediate(Factory::null_value()));
5777 // This branch is always a forwards branch so it's always a fixed 5818 // This branch is always a forwards branch so it's always a fixed
5778 // size which allows the assert below to succeed and patching to work. 5819 // size which allows the assert below to succeed and patching to work.
5779 deferred->enter()->Branch(not_equal, &receiver, not_taken); 5820 deferred->Branch(not_equal);
5780 5821
5781 // The delta from the patch label to the load offset must be 5822 // The delta from the patch label to the load offset must be
5782 // statically known. 5823 // statically known.
5783 ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) == 5824 ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
5784 LoadIC::kOffsetToLoadInstruction); 5825 LoadIC::kOffsetToLoadInstruction);
5785 // The initial (invalid) offset has to be large enough to force 5826 // The initial (invalid) offset has to be large enough to force
5786 // a 32-bit instruction encoding to allow patching with an 5827 // a 32-bit instruction encoding to allow patching with an
5787 // arbitrary offset. Use kMaxInt (minus kHeapObjectTag). 5828 // arbitrary offset. Use kMaxInt (minus kHeapObjectTag).
5788 int offset = kMaxInt; 5829 int offset = kMaxInt;
5789 masm->mov(value.reg(), FieldOperand(receiver.reg(), offset)); 5830 masm->mov(value.reg(), FieldOperand(receiver.reg(), offset));
5790 5831
5791 __ IncrementCounter(&Counters::named_load_inline, 1); 5832 __ IncrementCounter(&Counters::named_load_inline, 1);
5792 deferred->BindExit(&receiver, &value); 5833 deferred->BindExit();
5793 cgen_->frame()->Push(&receiver); 5834 cgen_->frame()->Push(&receiver);
5794 cgen_->frame()->Push(&value); 5835 cgen_->frame()->Push(&value);
5795 } 5836 }
5796 break; 5837 break;
5797 } 5838 }
5798 5839
5799 case KEYED: { 5840 case KEYED: {
5800 // TODO(1241834): Make sure that it is safe to ignore the 5841 // TODO(1241834): Make sure that it is safe to ignore the
5801 // distinction between expressions in a typeof and not in a typeof. 5842 // distinction between expressions in a typeof and not in a typeof.
5802 Comment cmnt(masm, "[ Load from keyed Property"); 5843 Comment cmnt(masm, "[ Load from keyed Property");
5803 Variable* var = expression_->AsVariableProxy()->AsVariable(); 5844 Variable* var = expression_->AsVariableProxy()->AsVariable();
5804 bool is_global = var != NULL; 5845 bool is_global = var != NULL;
5805 ASSERT(!is_global || var->is_global()); 5846 ASSERT(!is_global || var->is_global());
5806 // Inline array load code if inside of a loop. We do not know 5847 // Inline array load code if inside of a loop. We do not know
5807 // the receiver map yet, so we initially generate the code with 5848 // the receiver map yet, so we initially generate the code with
5808 // a check against an invalid map. In the inline cache code, we 5849 // a check against an invalid map. In the inline cache code, we
5809 // patch the map check if appropriate. 5850 // patch the map check if appropriate.
5810 if (cgen_->loop_nesting() > 0) { 5851 if (cgen_->loop_nesting() > 0) {
5811 Comment cmnt(masm, "[ Inlined array index load"); 5852 Comment cmnt(masm, "[ Inlined array index load");
5812 DeferredReferenceGetKeyedValue* deferred =
5813 new DeferredReferenceGetKeyedValue(is_global);
5814 5853
5815 Result key = cgen_->frame()->Pop(); 5854 Result key = cgen_->frame()->Pop();
5816 Result receiver = cgen_->frame()->Pop(); 5855 Result receiver = cgen_->frame()->Pop();
5817 key.ToRegister(); 5856 key.ToRegister();
5818 receiver.ToRegister(); 5857 receiver.ToRegister();
5819 5858
5820 // Try to preallocate the elements and index scratch registers 5859 // Use a fresh temporary to load the elements without destroying
5821 // so that all frames reaching the deferred code are identical. 5860 // the receiver which is needed for the deferred slow case.
5822 Result elements = cgen_->allocator()->AllocateWithoutSpilling(); 5861 Result elements = cgen_->allocator()->Allocate();
5823 Result index = cgen_->allocator()->AllocateWithoutSpilling(); 5862 ASSERT(elements.is_valid());
5824 if (elements.is_valid() && index.is_valid()) { 5863
5825 deferred->SetEntryFrame(&receiver, &key); 5864 // Use a fresh temporary for the index and later the loaded
5826 } 5865 // value.
5866 Result index = cgen_->allocator()->Allocate();
5867 ASSERT(index.is_valid());
5868
5869 DeferredReferenceGetKeyedValue* deferred =
5870 new DeferredReferenceGetKeyedValue(index.reg(),
5871 receiver.reg(),
5872 key.reg(),
5873 is_global);
5827 5874
5828 // Check that the receiver is not a smi (only needed if this 5875 // Check that the receiver is not a smi (only needed if this
5829 // is not a load from the global context) and that it has the 5876 // is not a load from the global context) and that it has the
5830 // expected map. 5877 // expected map.
5831 if (!is_global) { 5878 if (!is_global) {
5832 __ test(receiver.reg(), Immediate(kSmiTagMask)); 5879 __ test(receiver.reg(), Immediate(kSmiTagMask));
5833 deferred->enter()->Branch(zero, &receiver, &key, not_taken); 5880 deferred->Branch(zero);
5834 } 5881 }
5835 5882
5836 // Initially, use an invalid map. The map is patched in the IC 5883 // Initially, use an invalid map. The map is patched in the IC
5837 // initialization code. 5884 // initialization code.
5838 __ bind(deferred->patch_site()); 5885 __ bind(deferred->patch_site());
5839 // Use masm-> here instead of the double underscore macro since extra 5886 // Use masm-> here instead of the double underscore macro since extra
5840 // coverage code can interfere with the patching. 5887 // coverage code can interfere with the patching.
5841 masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset), 5888 masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
5842 Immediate(Factory::null_value())); 5889 Immediate(Factory::null_value()));
5843 deferred->enter()->Branch(not_equal, &receiver, &key, not_taken); 5890 deferred->Branch(not_equal);
5844 5891
5845 // Check that the key is a smi. 5892 // Check that the key is a smi.
5846 __ test(key.reg(), Immediate(kSmiTagMask)); 5893 __ test(key.reg(), Immediate(kSmiTagMask));
5847 deferred->enter()->Branch(not_zero, &receiver, &key, not_taken); 5894 deferred->Branch(not_zero);
5848 5895
5849 // Get the elements array from the receiver and check that it 5896 // Get the elements array from the receiver and check that it
5850 // is not a dictionary. 5897 // is not a dictionary.
5851 if (!elements.is_valid()) {
5852 elements = cgen_->allocator()->Allocate();
5853 ASSERT(elements.is_valid());
5854 }
5855 __ mov(elements.reg(), 5898 __ mov(elements.reg(),
5856 FieldOperand(receiver.reg(), JSObject::kElementsOffset)); 5899 FieldOperand(receiver.reg(), JSObject::kElementsOffset));
5857 __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset), 5900 __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
5858 Immediate(Factory::hash_table_map())); 5901 Immediate(Factory::hash_table_map()));
5859 deferred->enter()->Branch(equal, &receiver, &key, not_taken); 5902 deferred->Branch(equal);
5860 5903
5861 // Shift the key to get the actual index value and check that 5904 // Shift the key to get the actual index value and check that
5862 // it is within bounds. 5905 // it is within bounds.
5863 if (!index.is_valid()) {
5864 index = cgen_->allocator()->Allocate();
5865 ASSERT(index.is_valid());
5866 }
5867 __ mov(index.reg(), key.reg()); 5906 __ mov(index.reg(), key.reg());
5868 __ sar(index.reg(), kSmiTagSize); 5907 __ sar(index.reg(), kSmiTagSize);
5869 __ cmp(index.reg(), 5908 __ cmp(index.reg(),
5870 FieldOperand(elements.reg(), Array::kLengthOffset)); 5909 FieldOperand(elements.reg(), Array::kLengthOffset));
5871 deferred->enter()->Branch(above_equal, &receiver, &key, not_taken); 5910 deferred->Branch(above_equal);
5872 5911
5873 // Load and check that the result is not the hole. We could 5912 // Load and check that the result is not the hole. We could
5874 // reuse the index or elements register for the value. 5913 // reuse the index or elements register for the value.
5875 // 5914 //
5876 // TODO(206): Consider whether it makes sense to try some 5915 // TODO(206): Consider whether it makes sense to try some
5877 // heuristic about which register to reuse. For example, if 5916 // heuristic about which register to reuse. For example, if
5878 // one is eax, then we can reuse that one because the value 5917 // one is eax, then we can reuse that one because the value
5879 // coming from the deferred code will be in eax. 5918 // coming from the deferred code will be in eax.
5880 Result value = index; 5919 Result value = index;
5881 __ mov(value.reg(), Operand(elements.reg(), 5920 __ mov(value.reg(), Operand(elements.reg(),
5882 index.reg(), 5921 index.reg(),
5883 times_4, 5922 times_4,
5884 Array::kHeaderSize - kHeapObjectTag)); 5923 Array::kHeaderSize - kHeapObjectTag));
5885 elements.Unuse(); 5924 elements.Unuse();
5886 index.Unuse(); 5925 index.Unuse();
5887 __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value())); 5926 __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
5888 deferred->enter()->Branch(equal, &receiver, &key, not_taken); 5927 deferred->Branch(equal);
5889 __ IncrementCounter(&Counters::keyed_load_inline, 1); 5928 __ IncrementCounter(&Counters::keyed_load_inline, 1);
5890 5929
5930 deferred->BindExit();
5891 // Restore the receiver and key to the frame and push the 5931 // Restore the receiver and key to the frame and push the
5892 // result on top of it. 5932 // result on top of it.
5893 deferred->BindExit(&receiver, &key, &value);
5894 cgen_->frame()->Push(&receiver); 5933 cgen_->frame()->Push(&receiver);
5895 cgen_->frame()->Push(&key); 5934 cgen_->frame()->Push(&key);
5896 cgen_->frame()->Push(&value); 5935 cgen_->frame()->Push(&value);
5897 5936
5898 } else { 5937 } else {
5899 Comment cmnt(masm, "[ Load from keyed Property"); 5938 Comment cmnt(masm, "[ Load from keyed Property");
5900 RelocInfo::Mode mode = is_global 5939 RelocInfo::Mode mode = is_global
5901 ? RelocInfo::CODE_TARGET_CONTEXT 5940 ? RelocInfo::CODE_TARGET_CONTEXT
5902 : RelocInfo::CODE_TARGET; 5941 : RelocInfo::CODE_TARGET;
5903 Result answer = cgen_->frame()->CallKeyedLoadIC(mode); 5942 Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
(...skipping 1368 matching lines...)
7272 7311
7273 // Slow-case: Go through the JavaScript implementation. 7312 // Slow-case: Go through the JavaScript implementation.
7274 __ bind(&slow); 7313 __ bind(&slow);
7275 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); 7314 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
7276 } 7315 }
7277 7316
7278 7317
7279 #undef __ 7318 #undef __
7280 7319
7281 } } // namespace v8::internal 7320 } } // namespace v8::internal