| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 19 matching lines...) Expand all Loading... |
| 30 #if defined(V8_TARGET_ARCH_X64) | 30 #if defined(V8_TARGET_ARCH_X64) |
| 31 | 31 |
| 32 #include "bootstrapper.h" | 32 #include "bootstrapper.h" |
| 33 #include "code-stubs.h" | 33 #include "code-stubs.h" |
| 34 #include "regexp-macro-assembler.h" | 34 #include "regexp-macro-assembler.h" |
| 35 | 35 |
| 36 namespace v8 { | 36 namespace v8 { |
| 37 namespace internal { | 37 namespace internal { |
| 38 | 38 |
| 39 #define __ ACCESS_MASM(masm) | 39 #define __ ACCESS_MASM(masm) |
| 40 |
| 41 void ToNumberStub::Generate(MacroAssembler* masm) { |
| 42 // The ToNumber stub takes one argument in rax. |
| 43 NearLabel check_heap_number, call_builtin; |
| 44 __ SmiTest(rax); |
| 45 __ j(not_zero, &check_heap_number); |
| 46 __ Ret(); |
| 47 |
| 48 __ bind(&check_heap_number); |
| 49 __ Move(rbx, FACTORY->heap_number_map()); |
| 50 __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset)); |
| 51 __ j(not_equal, &call_builtin); |
| 52 __ Ret(); |
| 53 |
| 54 __ bind(&call_builtin); |
| 55 __ pop(rcx); // Pop return address. |
| 56 __ push(rax); |
| 57 __ push(rcx); // Push return address. |
| 58 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION); |
| 59 } |
| 60 |
| 61 |
| 40 void FastNewClosureStub::Generate(MacroAssembler* masm) { | 62 void FastNewClosureStub::Generate(MacroAssembler* masm) { |
| 41 // Create a new closure from the given function info in new | 63 // Create a new closure from the given function info in new |
| 42 // space. Set the context to the current context in rsi. | 64 // space. Set the context to the current context in rsi. |
| 43 Label gc; | 65 Label gc; |
| 44 __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT); | 66 __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT); |
| 45 | 67 |
| 46 // Get the function info from the stack. | 68 // Get the function info from the stack. |
| 47 __ movq(rdx, Operand(rsp, 1 * kPointerSize)); | 69 __ movq(rdx, Operand(rsp, 1 * kPointerSize)); |
| 48 | 70 |
| 49 // Compute the function map in the current global context and set that | 71 // Compute the function map in the current global context and set that |
| (...skipping 959 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1009 | 1031 |
| 1010 // Patch the caller to an appropriate specialized stub and return the | 1032 // Patch the caller to an appropriate specialized stub and return the |
| 1011 // operation result to the caller of the stub. | 1033 // operation result to the caller of the stub. |
| 1012 __ TailCallExternalReference( | 1034 __ TailCallExternalReference( |
| 1013 ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)), | 1035 ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)), |
| 1014 5, | 1036 5, |
| 1015 1); | 1037 1); |
| 1016 } | 1038 } |
| 1017 | 1039 |
| 1018 | 1040 |
| 1019 // Prepare for a type transition runtime call when the args are already on | |
| 1020 // the stack, under the return address. | |
| 1021 void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs( | |
| 1022 MacroAssembler* masm) { | |
| 1023 __ pop(rcx); // Save return address. | |
| 1024 // Left and right arguments are already on top of the stack. | |
| 1025 // Push this stub's key. Although the operation and the type info are | |
| 1026 // encoded into the key, the encoding is opaque, so push them too. | |
| 1027 __ Push(Smi::FromInt(MinorKey())); | |
| 1028 __ Push(Smi::FromInt(op_)); | |
| 1029 __ Push(Smi::FromInt(operands_type_)); | |
| 1030 | |
| 1031 __ push(rcx); // Push return address. | |
| 1032 | |
| 1033 // Patch the caller to an appropriate specialized stub and return the | |
| 1034 // operation result to the caller of the stub. | |
| 1035 __ TailCallExternalReference( | |
| 1036 ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)), | |
| 1037 5, | |
| 1038 1); | |
| 1039 } | |
| 1040 | |
| 1041 | |
| 1042 void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) { | 1041 void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) { |
| 1043 switch (operands_type_) { | 1042 switch (operands_type_) { |
| 1044 case TRBinaryOpIC::UNINITIALIZED: | 1043 case TRBinaryOpIC::UNINITIALIZED: |
| 1045 GenerateTypeTransition(masm); | 1044 GenerateTypeTransition(masm); |
| 1046 break; | 1045 break; |
| 1047 case TRBinaryOpIC::SMI: | 1046 case TRBinaryOpIC::SMI: |
| 1048 GenerateSmiStub(masm); | 1047 GenerateSmiStub(masm); |
| 1049 break; | 1048 break; |
| 1050 case TRBinaryOpIC::INT32: | 1049 case TRBinaryOpIC::INT32: |
| 1051 GenerateInt32Stub(masm); | 1050 UNREACHABLE(); |
| 1051 // The int32 case is identical to the Smi case. We avoid creating this |
| 1052 // ic state on x64. |
| 1052 break; | 1053 break; |
| 1053 case TRBinaryOpIC::HEAP_NUMBER: | 1054 case TRBinaryOpIC::HEAP_NUMBER: |
| 1054 GenerateHeapNumberStub(masm); | 1055 GenerateHeapNumberStub(masm); |
| 1055 break; | 1056 break; |
| 1056 case TRBinaryOpIC::STRING: | 1057 case TRBinaryOpIC::STRING: |
| 1057 GenerateStringStub(masm); | 1058 GenerateStringStub(masm); |
| 1058 break; | 1059 break; |
| 1059 case TRBinaryOpIC::GENERIC: | 1060 case TRBinaryOpIC::GENERIC: |
| 1060 GenerateGeneric(masm); | 1061 GenerateGeneric(masm); |
| 1061 break; | 1062 break; |
| (...skipping 23 matching lines...) Expand all Loading... |
| 1085 op_name, | 1086 op_name, |
| 1086 overwrite_name, | 1087 overwrite_name, |
| 1087 TRBinaryOpIC::GetName(operands_type_)); | 1088 TRBinaryOpIC::GetName(operands_type_)); |
| 1088 return name_; | 1089 return name_; |
| 1089 } | 1090 } |
| 1090 | 1091 |
| 1091 | 1092 |
| 1092 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, | 1093 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, |
| 1093 Label* slow, | 1094 Label* slow, |
| 1094 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { | 1095 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { |
| 1095 UNIMPLEMENTED(); | 1096 |
| 1097 // We only generate heapnumber answers for overflowing calculations |
| 1098 // for the four basic arithmetic operations. |
| 1099 bool generate_inline_heapnumber_results = |
| 1100 (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) && |
| 1101 (op_ == Token::ADD || op_ == Token::SUB || |
| 1102 op_ == Token::MUL || op_ == Token::DIV); |
| 1103 |
| 1104 // Arguments to TypeRecordingBinaryOpStub are in rdx and rax. |
| 1105 Register left = rdx; |
| 1106 Register right = rax; |
| 1107 |
| 1108 |
| 1109 // Smi check of both operands. If op is BIT_OR, the check is delayed |
| 1110 // until after the OR operation. |
| 1111 Label not_smis; |
| 1112 Label use_fp_on_smis; |
| 1113 Label restore_MOD_registers; // Only used if op_ == Token::MOD. |
| 1114 |
| 1115 if (op_ != Token::BIT_OR) { |
| 1116 Comment smi_check_comment(masm, "-- Smi check arguments"); |
| 1117 __ JumpIfNotBothSmi(left, right, ¬_smis); |
| 1118 } |
| 1119 |
| 1120 // Perform the operation. |
| 1121 Comment perform_smi(masm, "-- Perform smi operation"); |
| 1122 switch (op_) { |
| 1123 case Token::ADD: |
| 1124 ASSERT(right.is(rax)); |
| 1125 __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative. |
| 1126 break; |
| 1127 |
| 1128 case Token::SUB: |
| 1129 __ SmiSub(left, left, right, &use_fp_on_smis); |
| 1130 __ movq(rax, left); |
| 1131 break; |
| 1132 |
| 1133 case Token::MUL: |
| 1134 ASSERT(right.is(rax)); |
| 1135 __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative. |
| 1136 break; |
| 1137 |
| 1138 case Token::DIV: |
| 1139 // SmiDiv will not accept left in rdx or right in rax. |
| 1140 left = rcx; |
| 1141 right = rbx; |
| 1142 __ movq(rbx, rax); |
| 1143 __ movq(rcx, rdx); |
| 1144 __ SmiDiv(rax, left, right, &use_fp_on_smis); |
| 1145 break; |
| 1146 |
| 1147 case Token::MOD: |
| 1148 // SmiMod will not accept left in rdx or right in rax. |
| 1149 left = rcx; |
| 1150 right = rbx; |
| 1151 __ movq(rbx, rax); |
| 1152 __ movq(rcx, rdx); |
| 1153 __ SmiMod(rax, left, right, &use_fp_on_smis); |
| 1154 break; |
| 1155 |
| 1156 case Token::BIT_OR: { |
| 1157 ASSERT(right.is(rax)); |
| 1158 __ movq(rcx, right); // Save the right operand. |
| 1159 __ SmiOr(right, right, left); // BIT_OR is commutative. |
| 1160 __ JumpIfNotSmi(right, ¬_smis); // Test delayed until after BIT_OR. |
| 1161 break; |
| 1162 } |
| 1163 case Token::BIT_XOR: |
| 1164 ASSERT(right.is(rax)); |
| 1165 __ SmiXor(right, right, left); // BIT_XOR is commutative. |
| 1166 break; |
| 1167 |
| 1168 case Token::BIT_AND: |
| 1169 ASSERT(right.is(rax)); |
| 1170 __ SmiAnd(right, right, left); // BIT_AND is commutative. |
| 1171 break; |
| 1172 |
| 1173 case Token::SHL: |
| 1174 __ SmiShiftLeft(left, left, right); |
| 1175 __ movq(rax, left); |
| 1176 break; |
| 1177 |
| 1178 case Token::SAR: |
| 1179 __ SmiShiftArithmeticRight(left, left, right); |
| 1180 __ movq(rax, left); |
| 1181 break; |
| 1182 |
| 1183 case Token::SHR: |
| 1184 __ SmiShiftLogicalRight(left, left, right, ¬_smis); |
| 1185 __ movq(rax, left); |
| 1186 break; |
| 1187 |
| 1188 default: |
| 1189 UNREACHABLE(); |
| 1190 } |
| 1191 |
| 1192 // 5. Emit return of result in rax. Some operations have registers pushed. |
| 1193 __ ret(0); |
| 1194 |
| 1195 // 6. For some operations emit inline code to perform floating point |
| 1196 // operations on known smis (e.g., if the result of the operation |
| 1197 // overflowed the smi range). |
| 1198 __ bind(&use_fp_on_smis); |
| 1199 if (op_ == Token::DIV || op_ == Token::MOD) { |
| 1200 // Restore left and right to rdx and rax. |
| 1201 __ movq(rdx, rcx); |
| 1202 __ movq(rax, rbx); |
| 1203 } |
| 1204 |
| 1205 |
| 1206 if (generate_inline_heapnumber_results) { |
| 1207 __ AllocateHeapNumber(rcx, rbx, slow); |
| 1208 Comment perform_float(masm, "-- Perform float operation on smis"); |
| 1209 FloatingPointHelper::LoadSSE2SmiOperands(masm); |
| 1210 switch (op_) { |
| 1211 case Token::ADD: __ addsd(xmm0, xmm1); break; |
| 1212 case Token::SUB: __ subsd(xmm0, xmm1); break; |
| 1213 case Token::MUL: __ mulsd(xmm0, xmm1); break; |
| 1214 case Token::DIV: __ divsd(xmm0, xmm1); break; |
| 1215 default: UNREACHABLE(); |
| 1216 } |
| 1217 __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); |
| 1218 __ movq(rax, rcx); |
| 1219 __ ret(0); |
| 1220 } |
| 1221 |
| 1222 // 7. Non-smi operands reach the end of the code generated by |
| 1223 // GenerateSmiCode, and fall through to subsequent code, |
| 1224 // with the operands in rdx and rax. |
| 1225 Comment done_comment(masm, "-- Enter non-smi code"); |
| 1226 __ bind(¬_smis); |
| 1227 if (op_ == Token::BIT_OR) { |
| 1228 __ movq(right, rcx); |
| 1229 } |
| 1096 } | 1230 } |
| 1097 | 1231 |
| 1098 | 1232 |
| 1099 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { | 1233 void TypeRecordingBinaryOpStub::GenerateFloatingPointCode( |
| 1100 Label call_runtime; | 1234 MacroAssembler* masm, |
| 1101 | 1235 Label* allocation_failure, |
| 1236 Label* non_numeric_failure) { |
| 1102 switch (op_) { | 1237 switch (op_) { |
| 1103 case Token::ADD: | 1238 case Token::ADD: |
| 1104 case Token::SUB: | 1239 case Token::SUB: |
| 1105 case Token::MUL: | 1240 case Token::MUL: |
| 1106 case Token::DIV: | 1241 case Token::DIV: { |
| 1107 break; | 1242 FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure); |
| 1108 case Token::MOD: | 1243 |
| 1244 switch (op_) { |
| 1245 case Token::ADD: __ addsd(xmm0, xmm1); break; |
| 1246 case Token::SUB: __ subsd(xmm0, xmm1); break; |
| 1247 case Token::MUL: __ mulsd(xmm0, xmm1); break; |
| 1248 case Token::DIV: __ divsd(xmm0, xmm1); break; |
| 1249 default: UNREACHABLE(); |
| 1250 } |
| 1251 GenerateHeapResultAllocation(masm, allocation_failure); |
| 1252 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
| 1253 __ ret(0); |
| 1254 break; |
| 1255 } |
| 1256 case Token::MOD: { |
| 1257 // For MOD we jump to the allocation_failure label, to call runtime. |
| 1258 __ jmp(allocation_failure); |
| 1259 break; |
| 1260 } |
| 1109 case Token::BIT_OR: | 1261 case Token::BIT_OR: |
| 1110 case Token::BIT_AND: | 1262 case Token::BIT_AND: |
| 1111 case Token::BIT_XOR: | 1263 case Token::BIT_XOR: |
| 1112 case Token::SAR: | 1264 case Token::SAR: |
| 1113 case Token::SHL: | 1265 case Token::SHL: |
| 1266 case Token::SHR: { |
| 1267 Label non_smi_shr_result; |
| 1268 Register heap_number_map = r9; |
| 1269 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 1270 FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure, |
| 1271 heap_number_map); |
| 1272 switch (op_) { |
| 1273 case Token::BIT_OR: __ orl(rax, rcx); break; |
| 1274 case Token::BIT_AND: __ andl(rax, rcx); break; |
| 1275 case Token::BIT_XOR: __ xorl(rax, rcx); break; |
| 1276 case Token::SAR: __ sarl_cl(rax); break; |
| 1277 case Token::SHL: __ shll_cl(rax); break; |
| 1278 case Token::SHR: { |
| 1279 __ shrl_cl(rax); |
| 1280 // Check if result is negative. This can only happen for a shift |
| 1281 // by zero. |
| 1282 __ testl(rax, rax); |
| 1283 __ j(negative, &non_smi_shr_result); |
| 1284 break; |
| 1285 } |
| 1286 default: UNREACHABLE(); |
| 1287 } |
| 1288 STATIC_ASSERT(kSmiValueSize == 32); |
| 1289 // Tag smi result and return. |
| 1290 __ Integer32ToSmi(rax, rax); |
| 1291 __ Ret(); |
| 1292 |
| 1293 // Logical shift right can produce an unsigned int32 that is not |
| 1294 // an int32, and so is not in the smi range. Allocate a heap number |
| 1295 // in that case. |
| 1296 if (op_ == Token::SHR) { |
| 1297 __ bind(&non_smi_shr_result); |
| 1298 Label allocation_failed; |
| 1299 __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). |
| 1300 // Allocate heap number in new space. |
| 1301 // Not using AllocateHeapNumber macro in order to reuse |
| 1302 // already loaded heap_number_map. |
| 1303 __ AllocateInNewSpace(HeapNumber::kSize, |
| 1304 rax, |
| 1305 rcx, |
| 1306 no_reg, |
| 1307 &allocation_failed, |
| 1308 TAG_OBJECT); |
| 1309 // Set the map. |
| 1310 if (FLAG_debug_code) { |
| 1311 __ AbortIfNotRootValue(heap_number_map, |
| 1312 Heap::kHeapNumberMapRootIndex, |
| 1313 "HeapNumberMap register clobbered."); |
| 1314 } |
| 1315 __ movq(FieldOperand(rax, HeapObject::kMapOffset), |
| 1316 heap_number_map); |
| 1317 __ cvtqsi2sd(xmm0, rbx); |
| 1318 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
| 1319 __ Ret(); |
| 1320 |
| 1321 __ bind(&allocation_failed); |
| 1322 // We need tagged values in rdx and rax for the following code, |
| 1323 // not int32 in rax and rcx. |
| // NOTE(review): the two Integer32ToSmi calls below look reversed -- |
| // the first overwrites rax with the smi of rcx, so the second tags an |
| // already-tagged value into rdx. Verify the intended operand order. |
| 1324 __ Integer32ToSmi(rax, rcx); |
| 1325 __ Integer32ToSmi(rdx, rax); |
| 1326 __ jmp(allocation_failure); |
| 1327 } |
| 1328 break; |
| 1329 } |
| 1330 default: UNREACHABLE(); break; |
| 1331 } |
| 1332 // No fall-through from this generated code. |
| 1333 if (FLAG_debug_code) { |
| 1334 __ Abort("Unexpected fall-through in " |
| 1335 "TypeRecordingBinaryStub::GenerateFloatingPointCode."); |
| 1336 } |
| 1337 } |
| 1338 |
| 1339 |
| 1340 void TypeRecordingBinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) { |
| 1341 GenerateRegisterArgsPush(masm); |
| 1342 // Registers containing left and right operands respectively. |
| 1343 Register lhs = rdx; |
| 1344 Register rhs = rax; |
| 1345 |
| 1346 // Test for string arguments before calling runtime. |
| 1347 Label not_strings, both_strings, not_string1, string1, string1_smi2; |
| 1348 |
| 1349 __ JumpIfNotString(lhs, r8, ¬_string1); |
| 1350 |
| 1351 // First argument is a string, test second. |
| 1352 __ JumpIfSmi(rhs, &string1_smi2); |
| 1353 __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9); |
| 1354 __ j(above_equal, &string1); |
| 1355 |
| 1356 // First and second argument are strings. |
| 1357 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); |
| 1358 __ TailCallStub(&string_add_stub); |
| 1359 |
| 1360 __ bind(&string1_smi2); |
| 1361 // First argument is a string, second is a smi. Try to lookup the number |
| 1362 // string for the smi in the number string cache. |
| 1363 NumberToStringStub::GenerateLookupNumberStringCache( |
| 1364 masm, rhs, rbx, rcx, r8, true, &string1); |
| 1365 |
| 1366 // Replace second argument on stack and tailcall string add stub to make |
| 1367 // the result. |
| 1368 __ movq(Operand(rsp, 1 * kPointerSize), rbx); |
| 1369 __ TailCallStub(&string_add_stub); |
| 1370 |
| 1371 // Only first argument is a string. |
| 1372 __ bind(&string1); |
| 1373 __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION); |
| 1374 |
| 1375 // First argument was not a string, test second. |
| 1376 __ bind(¬_string1); |
| 1377 __ JumpIfNotString(rhs, rhs, ¬_strings); |
| 1378 |
| 1379 // Only second argument is a string. |
| 1380 __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION); |
| 1381 |
| 1382 __ bind(¬_strings); |
| 1383 // Neither argument is a string. |
| 1384 // Pop arguments, because CallRuntimeCode wants to push them again. |
| 1385 __ pop(rcx); |
| 1386 __ pop(rax); |
| 1387 __ pop(rdx); |
| 1388 __ push(rcx); |
| 1389 } |
| 1390 |
| 1391 |
| 1392 void TypeRecordingBinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) { |
| 1393 GenerateRegisterArgsPush(masm); |
| 1394 switch (op_) { |
| 1395 case Token::ADD: |
| 1396 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); |
| 1397 break; |
| 1398 case Token::SUB: |
| 1399 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); |
| 1400 break; |
| 1401 case Token::MUL: |
| 1402 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); |
| 1403 break; |
| 1404 case Token::DIV: |
| 1405 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); |
| 1406 break; |
| 1407 case Token::MOD: |
| 1408 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); |
| 1409 break; |
| 1410 case Token::BIT_OR: |
| 1411 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); |
| 1412 break; |
| 1413 case Token::BIT_AND: |
| 1414 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); |
| 1415 break; |
| 1416 case Token::BIT_XOR: |
| 1417 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); |
| 1418 break; |
| 1419 case Token::SAR: |
| 1420 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); |
| 1421 break; |
| 1422 case Token::SHL: |
| 1423 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); |
| 1424 break; |
| 1114 case Token::SHR: | 1425 case Token::SHR: |
| 1115 GenerateRegisterArgsPush(masm); | 1426 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); |
| 1116 break; | 1427 break; |
| 1117 default: | 1428 default: |
| 1118 UNREACHABLE(); | 1429 UNREACHABLE(); |
| 1119 } | |
| 1120 | |
| 1121 if (result_type_ == TRBinaryOpIC::UNINITIALIZED || | |
| 1122 result_type_ == TRBinaryOpIC::SMI) { | |
| 1123 GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS); | |
| 1124 } else { | |
| 1125 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); | |
| 1126 } | |
| 1127 __ bind(&call_runtime); | |
| 1128 switch (op_) { | |
| 1129 case Token::ADD: | |
| 1130 case Token::SUB: | |
| 1131 case Token::MUL: | |
| 1132 case Token::DIV: | |
| 1133 GenerateTypeTransition(masm); | |
| 1134 break; | |
| 1135 case Token::MOD: | |
| 1136 case Token::BIT_OR: | |
| 1137 case Token::BIT_AND: | |
| 1138 case Token::BIT_XOR: | |
| 1139 case Token::SAR: | |
| 1140 case Token::SHL: | |
| 1141 case Token::SHR: | |
| 1142 GenerateTypeTransitionWithSavedArgs(masm); | |
| 1143 break; | |
| 1144 default: | |
| 1145 UNREACHABLE(); | |
| 1146 } | 1430 } |
| 1147 } | 1431 } |
| 1148 | 1432 |
| 1149 | 1433 |
| 1434 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
| 1435 Label not_smi; |
| 1436 |
| 1437 GenerateSmiCode(masm, ¬_smi, NO_HEAPNUMBER_RESULTS); |
| 1438 |
| 1439 __ bind(¬_smi); |
| 1440 GenerateTypeTransition(masm); |
| 1441 } |
| 1442 |
| 1443 |
| 1150 void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) { | 1444 void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) { |
| 1151 UNIMPLEMENTED(); | 1445 ASSERT(op_ == Token::ADD); |
| 1152 } | 1446 GenerateStringAddCode(masm); |
| 1153 | 1447 |
| 1154 | 1448 GenerateTypeTransition(masm); |
| 1155 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { | |
| 1156 UNIMPLEMENTED(); | |
| 1157 } | 1449 } |
| 1158 | 1450 |
| 1159 | 1451 |
| 1160 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { | 1452 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
| 1161 UNIMPLEMENTED(); | 1453 Label gc_required, not_number; |
| 1454 GenerateFloatingPointCode(masm, &gc_required, ¬_number); |
| 1455 |
| 1456 __ bind(¬_number); |
| 1457 GenerateTypeTransition(masm); |
| 1458 |
| 1459 __ bind(&gc_required); |
| 1460 GenerateCallRuntimeCode(masm); |
| 1162 } | 1461 } |
| 1163 | 1462 |
| 1164 | 1463 |
| 1165 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { | 1464 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
| 1166 UNIMPLEMENTED(); | 1465 Label call_runtime, call_string_add_or_runtime; |
| 1466 |
| 1467 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); |
| 1468 |
| 1469 GenerateFloatingPointCode(masm, &call_runtime, &call_string_add_or_runtime); |
| 1470 |
| 1471 __ bind(&call_string_add_or_runtime); |
| 1472 if (op_ == Token::ADD) { |
| 1473 GenerateStringAddCode(masm); |
| 1474 } |
| 1475 |
| 1476 __ bind(&call_runtime); |
| 1477 GenerateCallRuntimeCode(masm); |
| 1167 } | 1478 } |
| 1168 | 1479 |
| 1169 | 1480 |
| 1170 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation( | 1481 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation( |
| 1171 MacroAssembler* masm, | 1482 MacroAssembler* masm, |
| 1172 Label* alloc_failure) { | 1483 Label* alloc_failure) { |
| 1173 UNIMPLEMENTED(); | 1484 Label skip_allocation; |
| 1485 OverwriteMode mode = mode_; |
| 1486 switch (mode) { |
| 1487 case OVERWRITE_LEFT: { |
| 1488 // If the argument in rdx is already an object, we skip the |
| 1489 // allocation of a heap number. |
| 1490 __ JumpIfNotSmi(rdx, &skip_allocation); |
| 1491 // Allocate a heap number for the result. Keep rax and rdx intact |
| 1492 // for the possible runtime call. |
| 1493 __ AllocateHeapNumber(rbx, rcx, alloc_failure); |
| 1494 // Now rdx can be overwritten losing one of the arguments as we are |
| 1495 // now done and will not need it any more. |
| 1496 __ movq(rdx, rbx); |
| 1497 __ bind(&skip_allocation); |
| 1498 // Use object in rdx as a result holder |
| 1499 __ movq(rax, rdx); |
| 1500 break; |
| 1501 } |
| 1502 case OVERWRITE_RIGHT: |
| 1503 // If the argument in rax is already an object, we skip the |
| 1504 // allocation of a heap number. |
| 1505 __ JumpIfNotSmi(rax, &skip_allocation); |
| 1506 // Fall through! |
| 1507 case NO_OVERWRITE: |
| 1508 // Allocate a heap number for the result. Keep rax and rdx intact |
| 1509 // for the possible runtime call. |
| 1510 __ AllocateHeapNumber(rbx, rcx, alloc_failure); |
| 1511 // Now rax can be overwritten losing one of the arguments as we are |
| 1512 // now done and will not need it any more. |
| 1513 __ movq(rax, rbx); |
| 1514 __ bind(&skip_allocation); |
| 1515 break; |
| 1516 default: UNREACHABLE(); |
| 1517 } |
| 1174 } | 1518 } |
| 1175 | 1519 |
| 1176 | 1520 |
| 1177 void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { | 1521 void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { |
| 1178 __ pop(rcx); | 1522 __ pop(rcx); |
| 1179 __ push(rdx); | 1523 __ push(rdx); |
| 1180 __ push(rax); | 1524 __ push(rax); |
| 1181 __ push(rcx); | 1525 __ push(rcx); |
| 1182 } | 1526 } |
| 1183 | 1527 |
| (...skipping 302 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1486 __ bind(&rax_is_smi); | 1830 __ bind(&rax_is_smi); |
| 1487 __ SmiToInteger32(rcx, rax); | 1831 __ SmiToInteger32(rcx, rax); |
| 1488 | 1832 |
| 1489 __ bind(&done); | 1833 __ bind(&done); |
| 1490 __ movl(rax, rdx); | 1834 __ movl(rax, rdx); |
| 1491 } | 1835 } |
| 1492 | 1836 |
| 1493 | 1837 |
| 1494 // Input: rdx, rax are the left and right objects of a bit op. | 1838 // Input: rdx, rax are the left and right objects of a bit op. |
| 1495 // Output: rax, rcx are left and right integers for a bit op. | 1839 // Output: rax, rcx are left and right integers for a bit op. |
| 1840 // Jump to conversion_failure: rdx and rax are unchanged. |
| 1496 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, | 1841 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, |
| 1497 Label* conversion_failure, | 1842 Label* conversion_failure, |
| 1498 Register heap_number_map) { | 1843 Register heap_number_map) { |
| 1499 // Check float operands. | 1844 // Check float operands. |
| 1500 Label arg1_is_object, check_undefined_arg1; | 1845 Label arg1_is_object, check_undefined_arg1; |
| 1501 Label arg2_is_object, check_undefined_arg2; | 1846 Label arg2_is_object, check_undefined_arg2; |
| 1502 Label load_arg2, done; | 1847 Label load_arg2, done; |
| 1503 | 1848 |
| 1504 __ JumpIfNotSmi(rdx, &arg1_is_object); | 1849 __ JumpIfNotSmi(rdx, &arg1_is_object); |
| 1505 __ SmiToInteger32(rdx, rdx); | 1850 __ SmiToInteger32(r8, rdx); |
| 1506 __ jmp(&load_arg2); | 1851 __ jmp(&load_arg2); |
| 1507 | 1852 |
| 1508 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). | 1853 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). |
| 1509 __ bind(&check_undefined_arg1); | 1854 __ bind(&check_undefined_arg1); |
| 1510 __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex); | 1855 __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex); |
| 1511 __ j(not_equal, conversion_failure); | 1856 __ j(not_equal, conversion_failure); |
| 1512 __ movl(rdx, Immediate(0)); | 1857 __ movl(r8, Immediate(0)); |
| 1513 __ jmp(&load_arg2); | 1858 __ jmp(&load_arg2); |
| 1514 | 1859 |
| 1515 __ bind(&arg1_is_object); | 1860 __ bind(&arg1_is_object); |
| 1516 __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map); | 1861 __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map); |
| 1517 __ j(not_equal, &check_undefined_arg1); | 1862 __ j(not_equal, &check_undefined_arg1); |
| 1518 // Get the untagged integer version of the edx heap number in rcx. | 1863 // Get the untagged integer version of the rdx heap number in r8. |
| 1519 IntegerConvert(masm, rdx, rdx); | 1864 IntegerConvert(masm, r8, rdx); |
| 1520 | 1865 |
| 1521 // Here rdx has the untagged integer, rax has a Smi or a heap number. | 1866 // Here r8 has the untagged integer, rax has a Smi or a heap number. |
| 1522 __ bind(&load_arg2); | 1867 __ bind(&load_arg2); |
| 1523 // Test if arg2 is a Smi. | 1868 // Test if arg2 is a Smi. |
| 1524 __ JumpIfNotSmi(rax, &arg2_is_object); | 1869 __ JumpIfNotSmi(rax, &arg2_is_object); |
| 1525 __ SmiToInteger32(rax, rax); | 1870 __ SmiToInteger32(rcx, rax); |
| 1526 __ movl(rcx, rax); | |
| 1527 __ jmp(&done); | 1871 __ jmp(&done); |
| 1528 | 1872 |
| 1529 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). | 1873 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). |
| 1530 __ bind(&check_undefined_arg2); | 1874 __ bind(&check_undefined_arg2); |
| 1531 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); | 1875 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); |
| 1532 __ j(not_equal, conversion_failure); | 1876 __ j(not_equal, conversion_failure); |
| 1533 __ movl(rcx, Immediate(0)); | 1877 __ movl(rcx, Immediate(0)); |
| 1534 __ jmp(&done); | 1878 __ jmp(&done); |
| 1535 | 1879 |
| 1536 __ bind(&arg2_is_object); | 1880 __ bind(&arg2_is_object); |
| 1537 __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map); | 1881 __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map); |
| 1538 __ j(not_equal, &check_undefined_arg2); | 1882 __ j(not_equal, &check_undefined_arg2); |
| 1539 // Get the untagged integer version of the rax heap number in rcx. | 1883 // Get the untagged integer version of the rax heap number in rcx. |
| 1540 IntegerConvert(masm, rcx, rax); | 1884 IntegerConvert(masm, rcx, rax); |
| 1541 __ bind(&done); | 1885 __ bind(&done); |
| 1542 __ movl(rax, rdx); | 1886 __ movl(rax, r8); |
| 1543 } | 1887 } |
| 1544 | 1888 |
| 1545 | 1889 |
| 1546 void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) { | 1890 void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) { |
| 1547 __ SmiToInteger32(kScratchRegister, rdx); | 1891 __ SmiToInteger32(kScratchRegister, rdx); |
| 1548 __ cvtlsi2sd(xmm0, kScratchRegister); | 1892 __ cvtlsi2sd(xmm0, kScratchRegister); |
| 1549 __ SmiToInteger32(kScratchRegister, rax); | 1893 __ SmiToInteger32(kScratchRegister, rax); |
| 1550 __ cvtlsi2sd(xmm1, kScratchRegister); | 1894 __ cvtlsi2sd(xmm1, kScratchRegister); |
| 1551 } | 1895 } |
| 1552 | 1896 |
| (...skipping 309 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1862 // at compilation. | 2206 // at compilation. |
| 1863 #ifdef V8_INTERPRETED_REGEXP | 2207 #ifdef V8_INTERPRETED_REGEXP |
| 1864 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); | 2208 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); |
| 1865 #else // V8_INTERPRETED_REGEXP | 2209 #else // V8_INTERPRETED_REGEXP |
| 1866 if (!FLAG_regexp_entry_native) { | 2210 if (!FLAG_regexp_entry_native) { |
| 1867 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); | 2211 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); |
| 1868 return; | 2212 return; |
| 1869 } | 2213 } |
| 1870 | 2214 |
| 1871 // Stack frame on entry. | 2215 // Stack frame on entry. |
| 1872 // esp[0]: return address | 2216 // rsp[0]: return address |
| 1873 // esp[8]: last_match_info (expected JSArray) | 2217 // rsp[8]: last_match_info (expected JSArray) |
| 1874 // esp[16]: previous index | 2218 // rsp[16]: previous index |
| 1875 // esp[24]: subject string | 2219 // rsp[24]: subject string |
| 1876 // esp[32]: JSRegExp object | 2220 // rsp[32]: JSRegExp object |
| 1877 | 2221 |
| 1878 static const int kLastMatchInfoOffset = 1 * kPointerSize; | 2222 static const int kLastMatchInfoOffset = 1 * kPointerSize; |
| 1879 static const int kPreviousIndexOffset = 2 * kPointerSize; | 2223 static const int kPreviousIndexOffset = 2 * kPointerSize; |
| 1880 static const int kSubjectOffset = 3 * kPointerSize; | 2224 static const int kSubjectOffset = 3 * kPointerSize; |
| 1881 static const int kJSRegExpOffset = 4 * kPointerSize; | 2225 static const int kJSRegExpOffset = 4 * kPointerSize; |
| 1882 | 2226 |
| 1883 Label runtime; | 2227 Label runtime; |
| 1884 | 2228 |
| 1885 // Ensure that a RegExp stack is allocated. | 2229 // Ensure that a RegExp stack is allocated. |
| 1886 ExternalReference address_of_regexp_stack_memory_address = | 2230 ExternalReference address_of_regexp_stack_memory_address = |
| (...skipping 322 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2209 Label slowcase; | 2553 Label slowcase; |
| 2210 Label done; | 2554 Label done; |
| 2211 __ movq(r8, Operand(rsp, kPointerSize * 3)); | 2555 __ movq(r8, Operand(rsp, kPointerSize * 3)); |
| 2212 __ JumpIfNotSmi(r8, &slowcase); | 2556 __ JumpIfNotSmi(r8, &slowcase); |
| 2213 __ SmiToInteger32(rbx, r8); | 2557 __ SmiToInteger32(rbx, r8); |
| 2214 __ cmpl(rbx, Immediate(kMaxInlineLength)); | 2558 __ cmpl(rbx, Immediate(kMaxInlineLength)); |
| 2215 __ j(above, &slowcase); | 2559 __ j(above, &slowcase); |
| 2216 // Smi-tagging is equivalent to multiplying by 2. | 2560 // Smi-tagging is equivalent to multiplying by 2. |
| 2217 STATIC_ASSERT(kSmiTag == 0); | 2561 STATIC_ASSERT(kSmiTag == 0); |
| 2218 STATIC_ASSERT(kSmiTagSize == 1); | 2562 STATIC_ASSERT(kSmiTagSize == 1); |
| 2219 // Allocate RegExpResult followed by FixedArray with size in ebx. | 2563 // Allocate RegExpResult followed by FixedArray with size in rbx. |
| 2220 // JSArray: [Map][empty properties][Elements][Length-smi][index][input] | 2564 // JSArray: [Map][empty properties][Elements][Length-smi][index][input] |
| 2221 // Elements: [Map][Length][..elements..] | 2565 // Elements: [Map][Length][..elements..] |
| 2222 __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize, | 2566 __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize, |
| 2223 times_pointer_size, | 2567 times_pointer_size, |
| 2224 rbx, // In: Number of elements. | 2568 rbx, // In: Number of elements. |
| 2225 rax, // Out: Start of allocation (tagged). | 2569 rax, // Out: Start of allocation (tagged). |
| 2226 rcx, // Out: End of allocation. | 2570 rcx, // Out: End of allocation. |
| 2227 rdx, // Scratch register | 2571 rdx, // Scratch register |
| 2228 &slowcase, | 2572 &slowcase, |
| 2229 TAG_OBJECT); | 2573 TAG_OBJECT); |
| (...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2268 __ Move(rdx, FACTORY->the_hole_value()); | 2612 __ Move(rdx, FACTORY->the_hole_value()); |
| 2269 __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize)); | 2613 __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize)); |
| 2270 // Fill fixed array elements with hole. | 2614 // Fill fixed array elements with hole. |
| 2271 // rax: JSArray. | 2615 // rax: JSArray. |
| 2272 // rbx: Number of elements in array that remains to be filled, as int32. | 2616 // rbx: Number of elements in array that remains to be filled, as int32. |
| 2273 // rcx: Start of elements in FixedArray. | 2617 // rcx: Start of elements in FixedArray. |
| 2274 // rdx: the hole. | 2618 // rdx: the hole. |
| 2275 Label loop; | 2619 Label loop; |
| 2276 __ testl(rbx, rbx); | 2620 __ testl(rbx, rbx); |
| 2277 __ bind(&loop); | 2621 __ bind(&loop); |
| 2278 __ j(less_equal, &done); // Jump if ecx is negative or zero. | 2622 __ j(less_equal, &done); // Jump if rcx is negative or zero. |
| 2279 __ subl(rbx, Immediate(1)); | 2623 __ subl(rbx, Immediate(1)); |
| 2280 __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx); | 2624 __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx); |
| 2281 __ jmp(&loop); | 2625 __ jmp(&loop); |
| 2282 | 2626 |
| 2283 __ bind(&done); | 2627 __ bind(&done); |
| 2284 __ ret(3 * kPointerSize); | 2628 __ ret(3 * kPointerSize); |
| 2285 | 2629 |
| 2286 __ bind(&slowcase); | 2630 __ bind(&slowcase); |
| 2287 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1); | 2631 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1); |
| 2288 } | 2632 } |
| (...skipping 342 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2631 __ testb(FieldOperand(rbx, Map::kBitFieldOffset), | 2975 __ testb(FieldOperand(rbx, Map::kBitFieldOffset), |
| 2632 Immediate(1 << Map::kIsUndetectable)); | 2976 Immediate(1 << Map::kIsUndetectable)); |
| 2633 __ j(zero, &return_unequal); | 2977 __ j(zero, &return_unequal); |
| 2634 __ testb(FieldOperand(rcx, Map::kBitFieldOffset), | 2978 __ testb(FieldOperand(rcx, Map::kBitFieldOffset), |
| 2635 Immediate(1 << Map::kIsUndetectable)); | 2979 Immediate(1 << Map::kIsUndetectable)); |
| 2636 __ j(zero, &return_unequal); | 2980 __ j(zero, &return_unequal); |
| 2637 // The objects are both undetectable, so they both compare as the value | 2981 // The objects are both undetectable, so they both compare as the value |
| 2638 // undefined, and are equal. | 2982 // undefined, and are equal. |
| 2639 __ Set(rax, EQUAL); | 2983 __ Set(rax, EQUAL); |
| 2640 __ bind(&return_unequal); | 2984 __ bind(&return_unequal); |
| 2641 // Return non-equal by returning the non-zero object pointer in eax, | 2985 // Return non-equal by returning the non-zero object pointer in rax, |
| 2642 // or return equal if we fell through to here. | 2986 // or return equal if we fell through to here. |
| 2643 __ ret(0); | 2987 __ ret(0); |
| 2644 __ bind(¬_both_objects); | 2988 __ bind(¬_both_objects); |
| 2645 } | 2989 } |
| 2646 | 2990 |
| 2647 // Push arguments below the return address to prepare jump to builtin. | 2991 // Push arguments below the return address to prepare jump to builtin. |
| 2648 __ pop(rcx); | 2992 __ pop(rcx); |
| 2649 __ push(rdx); | 2993 __ push(rdx); |
| 2650 __ push(rax); | 2994 __ push(rax); |
| 2651 | 2995 |
| (...skipping 480 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3132 __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize)); | 3476 __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize)); |
| 3133 __ call(kScratchRegister); | 3477 __ call(kScratchRegister); |
| 3134 | 3478 |
| 3135 // Unlink this frame from the handler chain. | 3479 // Unlink this frame from the handler chain. |
| 3136 __ movq(kScratchRegister, ExternalReference(Isolate::k_handler_address)); | 3480 __ movq(kScratchRegister, ExternalReference(Isolate::k_handler_address)); |
| 3137 __ pop(Operand(kScratchRegister, 0)); | 3481 __ pop(Operand(kScratchRegister, 0)); |
| 3138 // Pop next_sp. | 3482 // Pop next_sp. |
| 3139 __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize)); | 3483 __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize)); |
| 3140 | 3484 |
| 3141 #ifdef ENABLE_LOGGING_AND_PROFILING | 3485 #ifdef ENABLE_LOGGING_AND_PROFILING |
| 3142 // If current EBP value is the same as js_entry_sp value, it means that | 3486 // If current RBP value is the same as js_entry_sp value, it means that |
| 3143 // the current function is the outermost. | 3487 // the current function is the outermost. |
| 3144 __ movq(kScratchRegister, js_entry_sp); | 3488 __ movq(kScratchRegister, js_entry_sp); |
| 3145 __ cmpq(rbp, Operand(kScratchRegister, 0)); | 3489 __ cmpq(rbp, Operand(kScratchRegister, 0)); |
| 3146 __ j(not_equal, ¬_outermost_js_2); | 3490 __ j(not_equal, ¬_outermost_js_2); |
| 3147 __ movq(Operand(kScratchRegister, 0), Immediate(0)); | 3491 __ movq(Operand(kScratchRegister, 0), Immediate(0)); |
| 3148 __ bind(¬_outermost_js_2); | 3492 __ bind(¬_outermost_js_2); |
| 3149 #endif | 3493 #endif |
| 3150 | 3494 |
| 3151 // Restore the top frame descriptor from the stack. | 3495 // Restore the top frame descriptor from the stack. |
| 3152 __ bind(&exit); | 3496 __ bind(&exit); |
| (...skipping 1134 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4287 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); | 4631 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); |
| 4288 } | 4632 } |
| 4289 | 4633 |
| 4290 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { | 4634 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { |
| 4291 ASSERT(state_ == CompareIC::SMIS); | 4635 ASSERT(state_ == CompareIC::SMIS); |
| 4292 NearLabel miss; | 4636 NearLabel miss; |
| 4293 __ JumpIfNotBothSmi(rdx, rax, &miss); | 4637 __ JumpIfNotBothSmi(rdx, rax, &miss); |
| 4294 | 4638 |
| 4295 if (GetCondition() == equal) { | 4639 if (GetCondition() == equal) { |
| 4296 // For equality we do not care about the sign of the result. | 4640 // For equality we do not care about the sign of the result. |
| 4297 __ SmiSub(rax, rax, rdx); | 4641 __ subq(rax, rdx); |
| 4298 } else { | 4642 } else { |
| 4299 NearLabel done; | 4643 NearLabel done; |
| 4300 __ SmiSub(rdx, rdx, rax); | 4644 __ subq(rdx, rax); |
| 4301 __ j(no_overflow, &done); | 4645 __ j(no_overflow, &done); |
| 4302 // Correct sign of result in case of overflow. | 4646 // Correct sign of result in case of overflow. |
| 4303 __ SmiNot(rdx, rdx); | 4647 __ SmiNot(rdx, rdx); |
| 4304 __ bind(&done); | 4648 __ bind(&done); |
| 4305 __ movq(rax, rdx); | 4649 __ movq(rax, rdx); |
| 4306 } | 4650 } |
| 4307 __ ret(0); | 4651 __ ret(0); |
| 4308 | 4652 |
| 4309 __ bind(&miss); | 4653 __ bind(&miss); |
| 4310 GenerateMiss(masm); | 4654 GenerateMiss(masm); |
| (...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4396 // Restore registers. | 4740 // Restore registers. |
| 4397 __ pop(rcx); | 4741 __ pop(rcx); |
| 4398 __ pop(rax); | 4742 __ pop(rax); |
| 4399 __ pop(rdx); | 4743 __ pop(rdx); |
| 4400 __ push(rcx); | 4744 __ push(rcx); |
| 4401 | 4745 |
| 4402 // Do a tail call to the rewritten stub. | 4746 // Do a tail call to the rewritten stub. |
| 4403 __ jmp(rdi); | 4747 __ jmp(rdi); |
| 4404 } | 4748 } |
| 4405 | 4749 |
| 4750 |
| 4751 void GenerateFastPixelArrayLoad(MacroAssembler* masm, |
| 4752 Register receiver, |
| 4753 Register key, |
| 4754 Register elements, |
| 4755 Register untagged_key, |
| 4756 Register result, |
| 4757 Label* not_pixel_array, |
| 4758 Label* key_not_smi, |
| 4759 Label* out_of_range) { |
| 4760 // Register use: |
| 4761 // receiver - holds the receiver and is unchanged. |
| 4762 // key - holds the key and is unchanged (must be a smi). |
| 4763 // elements - is set to the receiver's element if
| 4764 // the receiver doesn't have a pixel array or the |
| 4765 // key is not a smi, otherwise it's the elements' |
| 4766 // external pointer. |
| 4767 // untagged_key - is set to the untagged key |
| 4768 |
| 4769 // Some callers already have verified that the key is a smi. key_not_smi is |
| 4770 // set to NULL as a sentinel for that case. Otherwise, an explicit check
| 4771 // to ensure the key is a smi must be added.
| 4772 if (key_not_smi != NULL) { |
| 4773 __ JumpIfNotSmi(key, key_not_smi); |
| 4774 } else { |
| 4775 if (FLAG_debug_code) { |
| 4776 __ AbortIfNotSmi(key); |
| 4777 } |
| 4778 } |
| 4779 __ SmiToInteger32(untagged_key, key); |
| 4780 |
| 4781 // Verify that the receiver has pixel array elements. |
| 4782 __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset)); |
| 4783 __ CheckMap(elements, FACTORY->pixel_array_map(), not_pixel_array, true); |
| 4784 |
| 4785 // Check that the smi is in range. |
| 4786 __ cmpl(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset)); |
| 4787 __ j(above_equal, out_of_range); // unsigned check handles negative keys. |
| 4788 |
| 4789 // Load and tag the element as a smi. |
| 4790 __ movq(elements, FieldOperand(elements, PixelArray::kExternalPointerOffset)); |
| 4791 __ movzxbq(result, Operand(elements, untagged_key, times_1, 0)); |
| 4792 __ Integer32ToSmi(result, result); |
| 4793 __ ret(0); |
| 4794 } |
| 4795 |
| 4796 |
| 4406 #undef __ | 4797 #undef __ |
| 4407 | 4798 |
| 4408 } } // namespace v8::internal | 4799 } } // namespace v8::internal |
| 4409 | 4800 |
| 4410 #endif // V8_TARGET_ARCH_X64 | 4801 #endif // V8_TARGET_ARCH_X64 |
| OLD | NEW |