OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1019 matching lines...)
1030 | 1030 |
1031 // Patch the caller to an appropriate specialized stub and return the | 1031 // Patch the caller to an appropriate specialized stub and return the |
1032 // operation result to the caller of the stub. | 1032 // operation result to the caller of the stub. |
1033 __ TailCallExternalReference( | 1033 __ TailCallExternalReference( |
1034 ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)), | 1034 ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)), |
1035 5, | 1035 5, |
1036 1); | 1036 1); |
1037 } | 1037 } |
1038 | 1038 |
1039 | 1039 |
1040 // Prepare for a type transition runtime call when the args are already on | |
1041 // the stack, under the return address. | |
1042 void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs( | |
1043 MacroAssembler* masm) { | |
1044 __ pop(rcx); // Save return address. | |
1045 // Left and right arguments are already on top of the stack. | |
1046 // Push this stub's key. Although the operation and the type info are | |
1047 // encoded into the key, the encoding is opaque, so push them too. | |
1048 __ Push(Smi::FromInt(MinorKey())); | |
1049 __ Push(Smi::FromInt(op_)); | |
1050 __ Push(Smi::FromInt(operands_type_)); | |
1051 | |
1052 __ push(rcx); // Push return address. | |
1053 | |
1054 // Patch the caller to an appropriate specialized stub and return the | |
1055 // operation result to the caller of the stub. | |
1056 __ TailCallExternalReference( | |
1057 ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)), | |
1058 5, | |
1059 1); | |
1060 } | |
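For reference, a sketch of the stack as the patch routine sees it after these pushes (derived directly from the push sequence above; five arguments, one result):

    // rsp[0]  : return address
    // rsp[8]  : operands_type_ (as Smi)
    // rsp[16] : op_ (as Smi)
    // rsp[24] : MinorKey() (as Smi)
    // rsp[32] : right operand
    // rsp[40] : left operand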
1061 | |
1062 | |
1063 void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) { | 1040 void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) { |
1064 switch (operands_type_) { | 1041 switch (operands_type_) { |
1065 case TRBinaryOpIC::UNINITIALIZED: | 1042 case TRBinaryOpIC::UNINITIALIZED: |
1066 GenerateTypeTransition(masm); | 1043 GenerateTypeTransition(masm); |
1067 break; | 1044 break; |
1068 case TRBinaryOpIC::SMI: | 1045 case TRBinaryOpIC::SMI: |
1069 GenerateSmiStub(masm); | 1046 GenerateSmiStub(masm); |
1070 break; | 1047 break; |
1071 case TRBinaryOpIC::INT32: | 1048 case TRBinaryOpIC::INT32: |
1072 GenerateInt32Stub(masm); | 1049 UNREACHABLE(); |
| 1050 // The int32 case is identical to the smi case, because on x64 every |
| 1051 // int32 fits in a smi (kSmiValueSize == 32); we avoid creating this IC state. |
1073 break; | 1052 break; |
1074 case TRBinaryOpIC::HEAP_NUMBER: | 1053 case TRBinaryOpIC::HEAP_NUMBER: |
1075 GenerateHeapNumberStub(masm); | 1054 GenerateHeapNumberStub(masm); |
1076 break; | 1055 break; |
1077 case TRBinaryOpIC::STRING: | 1056 case TRBinaryOpIC::STRING: |
1078 GenerateStringStub(masm); | 1057 GenerateStringStub(masm); |
1079 break; | 1058 break; |
1080 case TRBinaryOpIC::GENERIC: | 1059 case TRBinaryOpIC::GENERIC: |
1081 GenerateGeneric(masm); | 1060 GenerateGeneric(masm); |
1082 break; | 1061 break; |
(...skipping 22 matching lines...)
1105 op_name, | 1084 op_name, |
1106 overwrite_name, | 1085 overwrite_name, |
1107 TRBinaryOpIC::GetName(operands_type_)); | 1086 TRBinaryOpIC::GetName(operands_type_)); |
1108 return name_; | 1087 return name_; |
1109 } | 1088 } |
1110 | 1089 |
1111 | 1090 |
1112 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, | 1091 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, |
1113 Label* slow, | 1092 Label* slow, |
1114 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { | 1093 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { |
1115 UNIMPLEMENTED(); | 1094 |
| 1095 // We only generate heapnumber answers for overflowing calculations |
| 1096 // for the four basic arithmetic operations. |
| 1097 bool generate_inline_heapnumber_results = |
| 1098 (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) && |
| 1099 (op_ == Token::ADD || op_ == Token::SUB || |
| 1100 op_ == Token::MUL || op_ == Token::DIV); |
| 1101 |
| 1102 // Arguments to TypeRecordingBinaryOpStub are in rdx and rax. |
| 1103 Register left = rdx; |
| 1104 Register right = rax; |
| 1105 |
| 1106 |
| 1107 // Smi check of both operands. If op is BIT_OR, the check is delayed |
| 1108 // until after the OR operation. |
| 1109 Label not_smis; |
| 1110 Label use_fp_on_smis; |
| 1111 Label restore_MOD_registers; // Only used if op_ == Token::MOD. |
| 1112 |
| 1113 if (op_ != Token::BIT_OR) { |
| 1114 Comment smi_check_comment(masm, "-- Smi check arguments"); |
| 1115 __ JumpIfNotBothSmi(left, right, ¬_smis); |
| 1116 } |
| 1117 |
| 1118 // Perform the operation. |
| 1119 Comment perform_smi(masm, "-- Perform smi operation"); |
| 1120 switch (op_) { |
| 1121 case Token::ADD: |
| 1122 ASSERT(right.is(rax)); |
| 1123 __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative. |
| 1124 break; |
| 1125 |
| 1126 case Token::SUB: |
| 1127 __ SmiSub(left, left, right, &use_fp_on_smis); |
| 1128 __ movq(rax, left); |
| 1129 break; |
| 1130 |
| 1131 case Token::MUL: |
| 1132 ASSERT(right.is(rax)); |
| 1133 __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative. |
| 1134 break; |
| 1135 |
| 1136 case Token::DIV: |
| 1137 // SmiDiv will not accept left in rdx or right in rax. |
| 1138 left = rcx; |
| 1139 right = rbx; |
| 1140 __ movq(rbx, rax); |
| 1141 __ movq(rcx, rdx); |
| 1142 __ SmiDiv(rax, left, right, &use_fp_on_smis); |
| 1143 break; |
| 1144 |
| 1145 case Token::MOD: |
| 1146 // SmiMod will not accept left in rdx or right in rax. |
| 1147 left = rcx; |
| 1148 right = rbx; |
| 1149 __ movq(rbx, rax); |
| 1150 __ movq(rcx, rdx); |
| 1151 __ SmiMod(rax, left, right, &use_fp_on_smis); |
| 1152 break; |
| 1153 |
| 1154 case Token::BIT_OR: { |
| 1155 ASSERT(right.is(rax)); |
| 1156 __ movq(rcx, right); // Save the right operand. |
| 1157 __ SmiOr(right, right, left); // BIT_OR is commutative. |
| 1158 __ JumpIfNotSmi(right, ¬_smis); // Test delayed until after BIT_OR. |
| 1159 break; |
| 1160 } |
| 1161 case Token::BIT_XOR: |
| 1162 ASSERT(right.is(rax)); |
| 1163 __ SmiXor(right, right, left); // BIT_XOR is commutative. |
| 1164 break; |
| 1165 |
| 1166 case Token::BIT_AND: |
| 1167 ASSERT(right.is(rax)); |
| 1168 __ SmiAnd(right, right, left); // BIT_AND is commutative. |
| 1169 break; |
| 1170 |
| 1171 case Token::SHL: |
| 1172 __ SmiShiftLeft(left, left, right); |
| 1173 __ movq(rax, left); |
| 1174 break; |
| 1175 |
| 1176 case Token::SAR: |
| 1177 __ SmiShiftArithmeticRight(left, left, right); |
| 1178 __ movq(rax, left); |
| 1179 break; |
| 1180 |
| 1181 case Token::SHR: |
| 1182 __ SmiShiftLogicalRight(left, left, right, ¬_smis); |
| 1183 __ movq(rax, left); |
| 1184 break; |
| 1185 |
| 1186 default: |
| 1187 UNREACHABLE(); |
| 1188 } |
| 1189 |
| 1190 // Emit return of result in rax. |
| 1191 __ ret(0); |
| 1192 |
| 1193 // For some operations, emit inline code to perform floating point |
| 1194 // operations on known smis (e.g. when the result of the operation |
| 1195 // overflowed the smi range). |
| 1196 __ bind(&use_fp_on_smis); |
| 1197 if (op_ == Token::DIV || op_ == Token::MOD) { |
| 1198 // Restore left and right to rdx and rax. |
| 1199 __ movq(rdx, rcx); |
| 1200 __ movq(rax, rbx); |
| 1201 } |
| 1202 |
| 1203 |
| 1204 if (generate_inline_heapnumber_results) { |
| 1205 __ AllocateHeapNumber(rcx, rbx, slow); |
| 1206 Comment perform_float(masm, "-- Perform float operation on smis"); |
| 1207 FloatingPointHelper::LoadSSE2SmiOperands(masm); |
| 1208 switch (op_) { |
| 1209 case Token::ADD: __ addsd(xmm0, xmm1); break; |
| 1210 case Token::SUB: __ subsd(xmm0, xmm1); break; |
| 1211 case Token::MUL: __ mulsd(xmm0, xmm1); break; |
| 1212 case Token::DIV: __ divsd(xmm0, xmm1); break; |
| 1213 default: UNREACHABLE(); |
| 1214 } |
| 1215 __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); |
| 1216 __ movq(rax, rcx); |
| 1217 __ ret(0); |
| 1218 } |
| 1219 |
| 1220 // Non-smi operands reach the end of the code generated by |
| 1221 // GenerateSmiCode and fall through to subsequent code, |
| 1222 // with the operands in rdx and rax. |
| 1223 Comment done_comment(masm, "-- Enter non-smi code"); |
| 1224 __ bind(¬_smis); |
| 1225 if (op_ == Token::BIT_OR) { |
| 1226 __ movq(right, rcx); |
| 1227 } |
1116 } | 1228 } |
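A note on the delayed smi check for BIT_OR above: on x64 a smi has tag bit 0 clear (kSmiTag == 0) while heap object pointers always carry the tag bit set, so the tag bit of the OR is the OR of the tag bits, and for two smis the OR of the tagged words is already the correctly tagged result. A minimal sketch of the invariant, assuming only the standard tagging scheme:

    #include <cstdint>
    // Checking (left | right) once is equivalent to checking both words.
    static inline bool BothSmi(intptr_t left, intptr_t right) {
      const intptr_t kSmiTagMask = 1;  // bit 0: 0 for smis, 1 for heap pointers
      return ((left | right) & kSmiTagMask) == 0;
    }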
1117 | 1229 |
1118 | 1230 |
1119 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { | 1231 void TypeRecordingBinaryOpStub::GenerateFloatingPointCode( |
1120 Label call_runtime; | 1232 MacroAssembler* masm, |
1121 | 1233 Label* allocation_failure, |
| 1234 Label* non_numeric_failure) { |
1122 switch (op_) { | 1235 switch (op_) { |
1123 case Token::ADD: | 1236 case Token::ADD: |
1124 case Token::SUB: | 1237 case Token::SUB: |
1125 case Token::MUL: | 1238 case Token::MUL: |
1126 case Token::DIV: | 1239 case Token::DIV: { |
1127 break; | 1240 FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure); |
1128 case Token::MOD: | 1241 |
| 1242 switch (op_) { |
| 1243 case Token::ADD: __ addsd(xmm0, xmm1); break; |
| 1244 case Token::SUB: __ subsd(xmm0, xmm1); break; |
| 1245 case Token::MUL: __ mulsd(xmm0, xmm1); break; |
| 1246 case Token::DIV: __ divsd(xmm0, xmm1); break; |
| 1247 default: UNREACHABLE(); |
| 1248 } |
| 1249 GenerateHeapResultAllocation(masm, allocation_failure); |
| 1250 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
| 1251 __ ret(0); |
| 1252 break; |
| 1253 } |
| 1254 case Token::MOD: { |
| 1255 // For MOD, jump to the allocation_failure label, which calls into the runtime. |
| 1256 __ jmp(allocation_failure); |
| 1257 break; |
| 1258 } |
1129 case Token::BIT_OR: | 1259 case Token::BIT_OR: |
1130 case Token::BIT_AND: | 1260 case Token::BIT_AND: |
1131 case Token::BIT_XOR: | 1261 case Token::BIT_XOR: |
1132 case Token::SAR: | 1262 case Token::SAR: |
1133 case Token::SHL: | 1263 case Token::SHL: |
| 1264 case Token::SHR: { |
| 1265 Label non_smi_shr_result; |
| 1266 Register heap_number_map = r9; |
| 1267 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 1268 FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure, |
| 1269 heap_number_map); |
| 1270 switch (op_) { |
| 1271 case Token::BIT_OR: __ orl(rax, rcx); break; |
| 1272 case Token::BIT_AND: __ andl(rax, rcx); break; |
| 1273 case Token::BIT_XOR: __ xorl(rax, rcx); break; |
| 1274 case Token::SAR: __ sarl_cl(rax); break; |
| 1275 case Token::SHL: __ shll_cl(rax); break; |
| 1276 case Token::SHR: { |
| 1277 __ shrl_cl(rax); |
| 1278 // Check if result is negative. This can only happen for a shift |
| 1279 // by zero. |
| 1280 __ testl(rax, rax); |
| 1281 __ j(negative, &non_smi_shr_result); |
| 1282 break; |
| 1283 } |
| 1284 default: UNREACHABLE(); |
| 1285 } |
| 1286 STATIC_ASSERT(kSmiValueSize == 32); |
| 1287 // Tag smi result and return. |
| 1288 __ Integer32ToSmi(rax, rax); |
| 1289 __ Ret(); |
| 1290 |
| 1291 // Logical shift right can produce an unsigned 32-bit value that does |
| 1292 // not fit in a signed int32, and so is not in the smi range. Allocate |
| 1293 // a heap number in that case. |
| 1294 if (op_ == Token::SHR) { |
| 1295 __ bind(&non_smi_shr_result); |
| 1296 Label allocation_failed; |
| 1297 __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). |
| 1298 // Allocate heap number in new space. |
| 1299 // Not using AllocateHeapNumber macro in order to reuse |
| 1300 // already loaded heap_number_map. |
| 1301 __ AllocateInNewSpace(HeapNumber::kSize, |
| 1302 rax, |
| 1303 rcx, |
| 1304 no_reg, |
| 1305 &allocation_failed, |
| 1306 TAG_OBJECT); |
| 1307 // Set the map. |
| 1308 if (FLAG_debug_code) { |
| 1309 __ AbortIfNotRootValue(heap_number_map, |
| 1310 Heap::kHeapNumberMapRootIndex, |
| 1311 "HeapNumberMap register clobbered."); |
| 1312 } |
| 1313 __ movq(FieldOperand(rax, HeapObject::kMapOffset), |
| 1314 heap_number_map); |
| 1315 __ cvtqsi2sd(xmm0, rbx); |
| 1316 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
| 1317 __ Ret(); |
| 1318 |
| 1319 __ bind(&allocation_failed); |
| 1320 // We need tagged values in rdx and rax for the following code, |
| 1321 // not int32 in rax and rcx. |
| 1322 __ Integer32ToSmi(rdx, rax); // Tag rdx first, while rax still holds the int32. |
| 1323 __ Integer32ToSmi(rax, rcx); |
| 1324 __ jmp(allocation_failure); |
| 1325 } |
| 1326 break; |
| 1327 } |
| 1328 default: UNREACHABLE(); break; |
| 1329 } |
| 1330 // No fall-through from this generated code. |
| 1331 if (FLAG_debug_code) { |
| 1332 __ Abort("Unexpected fall-through in " |
| 1333 "TypeRecordingBinaryOpStub::GenerateFloatingPointCode."); |
| 1334 } |
| 1335 } |
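The SHR special case exists because JavaScript's >>> produces an unsigned 32-bit result, and even with 32-bit smi payloads on x64 a value above INT32_MAX does not fit in a smi. A small worked example of the one shape that escapes to a heap number:

    #include <cstdint>
    // The only escape to a heap number: a logical shift whose 32-bit result
    // has the sign bit set (possible only for an effective shift count of 0).
    static double ShrToDouble(uint32_t value, int shift) {
      uint32_t shifted = value >> (shift & 31);  // JS masks the count to 5 bits
      // Mirrors movl(rbx, rax) + cvtqsi2sd(xmm0, rbx): zero-extend, then
      // convert the non-negative 64-bit value to a double.
      return static_cast<double>(static_cast<int64_t>(shifted));
    }
    // ShrToDouble(0xFFFFFFFFu, 0) == 4294967295.0, which is > INT32_MAX.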
| 1336 |
| 1337 |
| 1338 void TypeRecordingBinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) { |
| 1339 GenerateRegisterArgsPush(masm); |
| 1340 // Registers containing left and right operands respectively. |
| 1341 Register lhs = rdx; |
| 1342 Register rhs = rax; |
| 1343 |
| 1344 // Test for string arguments before calling runtime. |
| 1345 Label not_strings, both_strings, not_string1, string1, string1_smi2; |
| 1346 |
| 1347 __ JumpIfNotString(lhs, r8, ¬_string1); |
| 1348 |
| 1349 // First argument is a string, test second. |
| 1350 __ JumpIfSmi(rhs, &string1_smi2); |
| 1351 __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9); |
| 1352 __ j(above_equal, &string1); |
| 1353 |
| 1354 // First and second argument are strings. |
| 1355 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); |
| 1356 __ TailCallStub(&string_add_stub); |
| 1357 |
| 1358 __ bind(&string1_smi2); |
| 1359 // First argument is a string, second is a smi. Try to lookup the number |
| 1360 // string for the smi in the number string cache. |
| 1361 NumberToStringStub::GenerateLookupNumberStringCache( |
| 1362 masm, rhs, rbx, rcx, r8, true, &string1); |
| 1363 |
| 1364 // Replace the second argument on the stack and tail-call the string add |
| 1365 // stub to compute the result. |
| 1366 __ movq(Operand(rsp, 1 * kPointerSize), rbx); |
| 1367 __ TailCallStub(&string_add_stub); |
| 1368 |
| 1369 // Only first argument is a string. |
| 1370 __ bind(&string1); |
| 1371 __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION); |
| 1372 |
| 1373 // First argument was not a string, test second. |
| 1374 __ bind(¬_string1); |
| 1375 __ JumpIfNotString(rhs, rhs, ¬_strings); |
| 1376 |
| 1377 // Only second argument is a string. |
| 1378 __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION); |
| 1379 |
| 1380 __ bind(¬_strings); |
| 1381 // Neither argument is a string. |
| 1382 // Pop the arguments, because GenerateCallRuntimeCode will push them again. |
| 1383 __ pop(rcx); |
| 1384 __ pop(rax); |
| 1385 __ pop(rdx); |
| 1386 __ push(rcx); |
| 1387 } |
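To make the paths above concrete, a few illustrative inputs (assuming ordinary JavaScript addition semantics):

    // "a" + "b"  -> both strings: tail-call StringAddStub directly.
    // "a" + 1    -> string1_smi2: look up "1" in the number string cache,
    //               then tail-call StringAddStub on ("a", "1").
    // "a" + {}   -> string1: Builtins::STRING_ADD_LEFT converts the right side.
    // 1 + "b"    -> not_string1: Builtins::STRING_ADD_RIGHT converts the left.
    // 1 + 2      -> not_strings: pop the pushed args and fall through.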
| 1388 |
| 1389 |
| 1390 void TypeRecordingBinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) { |
| 1391 GenerateRegisterArgsPush(masm); |
| 1392 switch (op_) { |
| 1393 case Token::ADD: |
| 1394 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); |
| 1395 break; |
| 1396 case Token::SUB: |
| 1397 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); |
| 1398 break; |
| 1399 case Token::MUL: |
| 1400 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); |
| 1401 break; |
| 1402 case Token::DIV: |
| 1403 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); |
| 1404 break; |
| 1405 case Token::MOD: |
| 1406 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); |
| 1407 break; |
| 1408 case Token::BIT_OR: |
| 1409 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); |
| 1410 break; |
| 1411 case Token::BIT_AND: |
| 1412 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); |
| 1413 break; |
| 1414 case Token::BIT_XOR: |
| 1415 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); |
| 1416 break; |
| 1417 case Token::SAR: |
| 1418 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); |
| 1419 break; |
| 1420 case Token::SHL: |
| 1421 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); |
| 1422 break; |
1134 case Token::SHR: | 1423 case Token::SHR: |
1135 GenerateRegisterArgsPush(masm); | 1424 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); |
1136 break; | 1425 break; |
1137 default: | 1426 default: |
1138 UNREACHABLE(); | 1427 UNREACHABLE(); |
1139 } | |
1140 | |
1141 if (result_type_ == TRBinaryOpIC::UNINITIALIZED || | |
1142 result_type_ == TRBinaryOpIC::SMI) { | |
1143 GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS); | |
1144 } else { | |
1145 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); | |
1146 } | |
1147 __ bind(&call_runtime); | |
1148 switch (op_) { | |
1149 case Token::ADD: | |
1150 case Token::SUB: | |
1151 case Token::MUL: | |
1152 case Token::DIV: | |
1153 GenerateTypeTransition(masm); | |
1154 break; | |
1155 case Token::MOD: | |
1156 case Token::BIT_OR: | |
1157 case Token::BIT_AND: | |
1158 case Token::BIT_XOR: | |
1159 case Token::SAR: | |
1160 case Token::SHL: | |
1161 case Token::SHR: | |
1162 GenerateTypeTransitionWithSavedArgs(masm); | |
1163 break; | |
1164 default: | |
1165 UNREACHABLE(); | |
1166 } | 1428 } |
1167 } | 1429 } |
1168 | 1430 |
1169 | 1431 |
| 1432 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
| 1433 Label not_smi; |
| 1434 |
| 1435 GenerateSmiCode(masm, ¬_smi, NO_HEAPNUMBER_RESULTS); |
| 1436 |
| 1437 __ bind(¬_smi); |
| 1438 GenerateTypeTransition(masm); |
| 1439 } |
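Note the NO_HEAPNUMBER_RESULTS here: while the stub is in the SMI state, an overflowing result is not boxed inline; it jumps to not_smi and triggers a type transition, so the call site is patched to a more general stub and later calls take the inline float path. A sketch of the progression (example values assumed):

    // SMI state:   2147483647 + 1 overflows int32 -> GenerateTypeTransition.
    // After patch: the HEAP_NUMBER stub handles the same inputs inline.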
| 1440 |
| 1441 |
1170 void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) { | 1442 void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) { |
1171 UNIMPLEMENTED(); | 1443 ASSERT(op_ == Token::ADD); |
1172 } | 1444 GenerateStringAddCode(masm); |
1173 | 1445 |
1174 | 1446 GenerateTypeTransition(masm); |
1175 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { | |
1176 UNIMPLEMENTED(); | |
1177 } | 1447 } |
1178 | 1448 |
1179 | 1449 |
1180 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { | 1450 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
1181 UNIMPLEMENTED(); | 1451 Label gc_required, not_number; |
| 1452 GenerateFloatingPointCode(masm, &gc_required, ¬_number); |
| 1453 |
| 1454 __ bind(¬_number); |
| 1455 GenerateTypeTransition(masm); |
| 1456 |
| 1457 __ bind(&gc_required); |
| 1458 GenerateCallRuntimeCode(masm); |
1182 } | 1459 } |
1183 | 1460 |
1184 | 1461 |
1185 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { | 1462 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
1186 UNIMPLEMENTED(); | 1463 Label call_runtime, call_string_add_or_runtime; |
| 1464 |
| 1465 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); |
| 1466 |
| 1467 GenerateFloatingPointCode(masm, &call_runtime, &call_string_add_or_runtime); |
| 1468 |
| 1469 __ bind(&call_string_add_or_runtime); |
| 1470 if (op_ == Token::ADD) { |
| 1471 GenerateStringAddCode(masm); |
| 1472 } |
| 1473 |
| 1474 __ bind(&call_runtime); |
| 1475 GenerateCallRuntimeCode(masm); |
1187 } | 1476 } |
1188 | 1477 |
1189 | 1478 |
1190 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation( | 1479 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation( |
1191 MacroAssembler* masm, | 1480 MacroAssembler* masm, |
1192 Label* alloc_failure) { | 1481 Label* alloc_failure) { |
1193 UNIMPLEMENTED(); | 1482 Label skip_allocation; |
| 1483 OverwriteMode mode = mode_; |
| 1484 switch (mode) { |
| 1485 case OVERWRITE_LEFT: { |
| 1486 // If the argument in rdx is already an object, we skip the |
| 1487 // allocation of a heap number. |
| 1488 __ JumpIfNotSmi(rdx, &skip_allocation); |
| 1489 // Allocate a heap number for the result. Keep rax and rdx intact |
| 1490 // for the possible runtime call. |
| 1491 __ AllocateHeapNumber(rbx, rcx, alloc_failure); |
| 1492 // Now rdx can be overwritten, losing one of the arguments, as we |
| 1493 // are done with it and will not need it any more. |
| 1494 __ movq(rdx, rbx); |
| 1495 __ bind(&skip_allocation); |
| 1496 // Use the object in rdx as the result holder. |
| 1497 __ movq(rax, rdx); |
| 1498 break; |
| 1499 } |
| 1500 case OVERWRITE_RIGHT: |
| 1501 // If the argument in rax is already an object, we skip the |
| 1502 // allocation of a heap number. |
| 1503 __ JumpIfNotSmi(rax, &skip_allocation); |
| 1504 // Fall through! |
| 1505 case NO_OVERWRITE: |
| 1506 // Allocate a heap number for the result. Keep rax and rdx intact |
| 1507 // for the possible runtime call. |
| 1508 __ AllocateHeapNumber(rbx, rcx, alloc_failure); |
| 1509 // Now rax can be overwritten, losing one of the arguments, as we |
| 1510 // are done with it and will not need it any more. |
| 1511 __ movq(rax, rbx); |
| 1512 __ bind(&skip_allocation); |
| 1513 break; |
| 1514 default: UNREACHABLE(); |
| 1515 } |
1194 } | 1516 } |
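The overwrite modes are an allocation optimization: when one operand is a heap number that is dead after this operation, its storage can hold the result, and only a smi operand forces a fresh allocation (smis are immediates, not heap objects, hence the JumpIfNotSmi guards). A hypothetical illustration:

    // For (a * 1.5) + b, the temporary heap number produced by (a * 1.5)
    // is never used again, so the add can be compiled with OVERWRITE_LEFT
    // and write its double result into that same object.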
1195 | 1517 |
1196 | 1518 |
1197 void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { | 1519 void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { |
1198 __ pop(rcx); | 1520 __ pop(rcx); |
1199 __ push(rdx); | 1521 __ push(rdx); |
1200 __ push(rax); | 1522 __ push(rax); |
1201 __ push(rcx); | 1523 __ push(rcx); |
1202 } | 1524 } |
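Stack effect of the pop/push sequence, as a sketch:

    // before:  rsp[0]  = return address
    // after:   rsp[0]  = return address
    //          rsp[8]  = rax (right operand)
    //          rsp[16] = rdx (left operand)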
1203 | 1525 |
(...skipping 301 matching lines...)
1505 __ bind(&rax_is_smi); | 1827 __ bind(&rax_is_smi); |
1506 __ SmiToInteger32(rcx, rax); | 1828 __ SmiToInteger32(rcx, rax); |
1507 | 1829 |
1508 __ bind(&done); | 1830 __ bind(&done); |
1509 __ movl(rax, rdx); | 1831 __ movl(rax, rdx); |
1510 } | 1832 } |
1511 | 1833 |
1512 | 1834 |
1513 // Input: rdx, rax are the left and right objects of a bit op. | 1835 // Input: rdx, rax are the left and right objects of a bit op. |
1514 // Output: rax, rcx are left and right integers for a bit op. | 1836 // Output: rax, rcx are left and right integers for a bit op. |
| 1837 // On a jump to conversion_failure, rdx and rax are unchanged. |
1515 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, | 1838 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, |
1516 Label* conversion_failure, | 1839 Label* conversion_failure, |
1517 Register heap_number_map) { | 1840 Register heap_number_map) { |
1518 // Check float operands. | 1841 // Check float operands. |
1519 Label arg1_is_object, check_undefined_arg1; | 1842 Label arg1_is_object, check_undefined_arg1; |
1520 Label arg2_is_object, check_undefined_arg2; | 1843 Label arg2_is_object, check_undefined_arg2; |
1521 Label load_arg2, done; | 1844 Label load_arg2, done; |
1522 | 1845 |
1523 __ JumpIfNotSmi(rdx, &arg1_is_object); | 1846 __ JumpIfNotSmi(rdx, &arg1_is_object); |
1524 __ SmiToInteger32(rdx, rdx); | 1847 __ SmiToInteger32(r8, rdx); |
1525 __ jmp(&load_arg2); | 1848 __ jmp(&load_arg2); |
1526 | 1849 |
1527 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). | 1850 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). |
1528 __ bind(&check_undefined_arg1); | 1851 __ bind(&check_undefined_arg1); |
1529 __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex); | 1852 __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex); |
1530 __ j(not_equal, conversion_failure); | 1853 __ j(not_equal, conversion_failure); |
1531 __ movl(rdx, Immediate(0)); | 1854 __ movl(r8, Immediate(0)); |
1532 __ jmp(&load_arg2); | 1855 __ jmp(&load_arg2); |
1533 | 1856 |
1534 __ bind(&arg1_is_object); | 1857 __ bind(&arg1_is_object); |
1535 __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map); | 1858 __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map); |
1536 __ j(not_equal, &check_undefined_arg1); | 1859 __ j(not_equal, &check_undefined_arg1); |
1537 // Get the untagged integer version of the edx heap number in rcx. | 1860 // Get the untagged integer version of the rdx heap number in r8. |
1538 IntegerConvert(masm, rdx, rdx); | 1861 IntegerConvert(masm, r8, rdx); |
1539 | 1862 |
1540 // Here rdx has the untagged integer, rax has a Smi or a heap number. | 1863 // Here r8 has the untagged integer, rax has a Smi or a heap number. |
1541 __ bind(&load_arg2); | 1864 __ bind(&load_arg2); |
1542 // Test if arg2 is a Smi. | 1865 // Test if arg2 is a Smi. |
1543 __ JumpIfNotSmi(rax, &arg2_is_object); | 1866 __ JumpIfNotSmi(rax, &arg2_is_object); |
1544 __ SmiToInteger32(rax, rax); | 1867 __ SmiToInteger32(rcx, rax); |
1545 __ movl(rcx, rax); | |
1546 __ jmp(&done); | 1868 __ jmp(&done); |
1547 | 1869 |
1548 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). | 1870 // If the argument is undefined it converts to zero (ECMA-262, section 9.5). |
1549 __ bind(&check_undefined_arg2); | 1871 __ bind(&check_undefined_arg2); |
1550 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); | 1872 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); |
1551 __ j(not_equal, conversion_failure); | 1873 __ j(not_equal, conversion_failure); |
1552 __ movl(rcx, Immediate(0)); | 1874 __ movl(rcx, Immediate(0)); |
1553 __ jmp(&done); | 1875 __ jmp(&done); |
1554 | 1876 |
1555 __ bind(&arg2_is_object); | 1877 __ bind(&arg2_is_object); |
1556 __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map); | 1878 __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map); |
1557 __ j(not_equal, &check_undefined_arg2); | 1879 __ j(not_equal, &check_undefined_arg2); |
1558 // Get the untagged integer version of the rax heap number in rcx. | 1880 // Get the untagged integer version of the rax heap number in rcx. |
1559 IntegerConvert(masm, rcx, rax); | 1881 IntegerConvert(masm, rcx, rax); |
1560 __ bind(&done); | 1882 __ bind(&done); |
1561 __ movl(rax, rdx); | 1883 __ movl(rax, r8); |
1562 } | 1884 } |
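Summarizing the register discipline of the rewritten helper, and why r8 replaced the in-place clobbering of rdx:

    // in:   rdx = left (tagged), rax = right (tagged)
    // temp: r8  = left as int32, so rdx and rax survive every smi/map check
    // out:  rax = left as int32, rcx = right as int32
    // On a jump to conversion_failure, rdx and rax still hold the original
    // tagged operands, so the caller can fall back to the runtime.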
1563 | 1885 |
1564 | 1886 |
1565 void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) { | 1887 void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) { |
1566 __ SmiToInteger32(kScratchRegister, rdx); | 1888 __ SmiToInteger32(kScratchRegister, rdx); |
1567 __ cvtlsi2sd(xmm0, kScratchRegister); | 1889 __ cvtlsi2sd(xmm0, kScratchRegister); |
1568 __ SmiToInteger32(kScratchRegister, rax); | 1890 __ SmiToInteger32(kScratchRegister, rax); |
1569 __ cvtlsi2sd(xmm1, kScratchRegister); | 1891 __ cvtlsi2sd(xmm1, kScratchRegister); |
1570 } | 1892 } |
1571 | 1893 |
(...skipping 309 matching lines...)
1881 // at compilation. | 2203 // at compilation. |
1882 #ifdef V8_INTERPRETED_REGEXP | 2204 #ifdef V8_INTERPRETED_REGEXP |
1883 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); | 2205 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); |
1884 #else // V8_INTERPRETED_REGEXP | 2206 #else // V8_INTERPRETED_REGEXP |
1885 if (!FLAG_regexp_entry_native) { | 2207 if (!FLAG_regexp_entry_native) { |
1886 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); | 2208 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); |
1887 return; | 2209 return; |
1888 } | 2210 } |
1889 | 2211 |
1890 // Stack frame on entry. | 2212 // Stack frame on entry. |
1891 // esp[0]: return address | 2213 // rsp[0]: return address |
1892 // esp[8]: last_match_info (expected JSArray) | 2214 // rsp[8]: last_match_info (expected JSArray) |
1893 // esp[16]: previous index | 2215 // rsp[16]: previous index |
1894 // esp[24]: subject string | 2216 // rsp[24]: subject string |
1895 // esp[32]: JSRegExp object | 2217 // rsp[32]: JSRegExp object |
1896 | 2218 |
1897 static const int kLastMatchInfoOffset = 1 * kPointerSize; | 2219 static const int kLastMatchInfoOffset = 1 * kPointerSize; |
1898 static const int kPreviousIndexOffset = 2 * kPointerSize; | 2220 static const int kPreviousIndexOffset = 2 * kPointerSize; |
1899 static const int kSubjectOffset = 3 * kPointerSize; | 2221 static const int kSubjectOffset = 3 * kPointerSize; |
1900 static const int kJSRegExpOffset = 4 * kPointerSize; | 2222 static const int kJSRegExpOffset = 4 * kPointerSize; |
1901 | 2223 |
1902 Label runtime; | 2224 Label runtime; |
1903 | 2225 |
1904 // Ensure that a RegExp stack is allocated. | 2226 // Ensure that a RegExp stack is allocated. |
1905 ExternalReference address_of_regexp_stack_memory_address = | 2227 ExternalReference address_of_regexp_stack_memory_address = |
(...skipping 321 matching lines...)
2227 Label slowcase; | 2549 Label slowcase; |
2228 Label done; | 2550 Label done; |
2229 __ movq(r8, Operand(rsp, kPointerSize * 3)); | 2551 __ movq(r8, Operand(rsp, kPointerSize * 3)); |
2230 __ JumpIfNotSmi(r8, &slowcase); | 2552 __ JumpIfNotSmi(r8, &slowcase); |
2231 __ SmiToInteger32(rbx, r8); | 2553 __ SmiToInteger32(rbx, r8); |
2232 __ cmpl(rbx, Immediate(kMaxInlineLength)); | 2554 __ cmpl(rbx, Immediate(kMaxInlineLength)); |
2233 __ j(above, &slowcase); | 2555 __ j(above, &slowcase); |
2234 // Smi-tagging is equivalent to multiplying by 2. | 2556 // Smi-tagging is equivalent to multiplying by 2. |
2235 STATIC_ASSERT(kSmiTag == 0); | 2557 STATIC_ASSERT(kSmiTag == 0); |
2236 STATIC_ASSERT(kSmiTagSize == 1); | 2558 STATIC_ASSERT(kSmiTagSize == 1); |
2237 // Allocate RegExpResult followed by FixedArray with size in ebx. | 2559 // Allocate RegExpResult followed by FixedArray with size in rbx. |
2238 // JSArray: [Map][empty properties][Elements][Length-smi][index][input] | 2560 // JSArray: [Map][empty properties][Elements][Length-smi][index][input] |
2239 // Elements: [Map][Length][..elements..] | 2561 // Elements: [Map][Length][..elements..] |
2240 __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize, | 2562 __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize, |
2241 times_pointer_size, | 2563 times_pointer_size, |
2242 rbx, // In: Number of elements. | 2564 rbx, // In: Number of elements. |
2243 rax, // Out: Start of allocation (tagged). | 2565 rax, // Out: Start of allocation (tagged). |
2244 rcx, // Out: End of allocation. | 2566 rcx, // Out: End of allocation. |
2245 rdx, // Scratch register | 2567 rdx, // Scratch register |
2246 &slowcase, | 2568 &slowcase, |
2247 TAG_OBJECT); | 2569 TAG_OBJECT); |
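// The scaled allocation requests both objects in one shot; with the
// constants used here, the size works out to:
//   size = JSRegExpResult::kSize + FixedArray::kHeaderSize
//        + rbx * kPointerSize     (rbx = number of elements)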
(...skipping 38 matching lines...)
2286 __ Move(rdx, Factory::the_hole_value()); | 2608 __ Move(rdx, Factory::the_hole_value()); |
2287 __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize)); | 2609 __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize)); |
2288 // Fill fixed array elements with hole. | 2610 // Fill fixed array elements with hole. |
2289 // rax: JSArray. | 2611 // rax: JSArray. |
2290 // rbx: Number of elements in array that remains to be filled, as int32. | 2612 // rbx: Number of elements in array that remains to be filled, as int32. |
2291 // rcx: Start of elements in FixedArray. | 2613 // rcx: Start of elements in FixedArray. |
2292 // rdx: the hole. | 2614 // rdx: the hole. |
2293 Label loop; | 2615 Label loop; |
2294 __ testl(rbx, rbx); | 2616 __ testl(rbx, rbx); |
2295 __ bind(&loop); | 2617 __ bind(&loop); |
2296 __ j(less_equal, &done); // Jump if ecx is negative or zero. | 2618 __ j(less_equal, &done); // Jump if rbx is negative or zero. |
2297 __ subl(rbx, Immediate(1)); | 2619 __ subl(rbx, Immediate(1)); |
2298 __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx); | 2620 __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx); |
2299 __ jmp(&loop); | 2621 __ jmp(&loop); |
2300 | 2622 |
2301 __ bind(&done); | 2623 __ bind(&done); |
2302 __ ret(3 * kPointerSize); | 2624 __ ret(3 * kPointerSize); |
2303 | 2625 |
2304 __ bind(&slowcase); | 2626 __ bind(&slowcase); |
2305 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1); | 2627 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1); |
2306 } | 2628 } |
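The hole-filling loop is a count-down fill; the flags for the initial j(less_equal) come from the testl before the loop, and from the subl on every later iteration. An equivalent sketch in C terms:

    // int32_t n = rbx;                          // elements left to fill
    // while (n > 0) { elements[--n] = the_hole; }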
(...skipping 342 matching lines...)
2649 __ testb(FieldOperand(rbx, Map::kBitFieldOffset), | 2971 __ testb(FieldOperand(rbx, Map::kBitFieldOffset), |
2650 Immediate(1 << Map::kIsUndetectable)); | 2972 Immediate(1 << Map::kIsUndetectable)); |
2651 __ j(zero, &return_unequal); | 2973 __ j(zero, &return_unequal); |
2652 __ testb(FieldOperand(rcx, Map::kBitFieldOffset), | 2974 __ testb(FieldOperand(rcx, Map::kBitFieldOffset), |
2653 Immediate(1 << Map::kIsUndetectable)); | 2975 Immediate(1 << Map::kIsUndetectable)); |
2654 __ j(zero, &return_unequal); | 2976 __ j(zero, &return_unequal); |
2655 // The objects are both undetectable, so they both compare as the value | 2977 // The objects are both undetectable, so they both compare as the value |
2656 // undefined, and are equal. | 2978 // undefined, and are equal. |
2657 __ Set(rax, EQUAL); | 2979 __ Set(rax, EQUAL); |
2658 __ bind(&return_unequal); | 2980 __ bind(&return_unequal); |
2659 // Return non-equal by returning the non-zero object pointer in eax, | 2981 // Return non-equal by returning the non-zero object pointer in rax, |
2660 // or return equal if we fell through to here. | 2982 // or return equal if we fell through to here. |
2661 __ ret(0); | 2983 __ ret(0); |
2662 __ bind(¬_both_objects); | 2984 __ bind(¬_both_objects); |
2663 } | 2985 } |
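Undetectable objects exist to emulate host quirks such as document.all, which masquerades as undefined; since both operands then behave as the value undefined, two undetectable objects must compare equal here:

    // With o undetectable (e.g. document.all):
    //   o == undefined  -> true
    //   o == o2         -> true for two undetectable objects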
2664 | 2986 |
2665 // Push arguments below the return address to prepare jump to builtin. | 2987 // Push arguments below the return address to prepare jump to builtin. |
2666 __ pop(rcx); | 2988 __ pop(rcx); |
2667 __ push(rdx); | 2989 __ push(rdx); |
2668 __ push(rax); | 2990 __ push(rax); |
2669 | 2991 |
(...skipping 474 matching lines...)
3144 __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize)); | 3466 __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize)); |
3145 __ call(kScratchRegister); | 3467 __ call(kScratchRegister); |
3146 | 3468 |
3147 // Unlink this frame from the handler chain. | 3469 // Unlink this frame from the handler chain. |
3148 __ movq(kScratchRegister, ExternalReference(Top::k_handler_address)); | 3470 __ movq(kScratchRegister, ExternalReference(Top::k_handler_address)); |
3149 __ pop(Operand(kScratchRegister, 0)); | 3471 __ pop(Operand(kScratchRegister, 0)); |
3150 // Pop next_sp. | 3472 // Pop next_sp. |
3151 __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize)); | 3473 __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize)); |
3152 | 3474 |
3153 #ifdef ENABLE_LOGGING_AND_PROFILING | 3475 #ifdef ENABLE_LOGGING_AND_PROFILING |
3154 // If current EBP value is the same as js_entry_sp value, it means that | 3476 // If current RBP value is the same as js_entry_sp value, it means that |
3155 // the current function is the outermost. | 3477 // the current function is the outermost. |
3156 __ movq(kScratchRegister, js_entry_sp); | 3478 __ movq(kScratchRegister, js_entry_sp); |
3157 __ cmpq(rbp, Operand(kScratchRegister, 0)); | 3479 __ cmpq(rbp, Operand(kScratchRegister, 0)); |
3158 __ j(not_equal, ¬_outermost_js_2); | 3480 __ j(not_equal, ¬_outermost_js_2); |
3159 __ movq(Operand(kScratchRegister, 0), Immediate(0)); | 3481 __ movq(Operand(kScratchRegister, 0), Immediate(0)); |
3160 __ bind(¬_outermost_js_2); | 3482 __ bind(¬_outermost_js_2); |
3161 #endif | 3483 #endif |
3162 | 3484 |
3163 // Restore the top frame descriptor from the stack. | 3485 // Restore the top frame descriptor from the stack. |
3164 __ bind(&exit); | 3486 __ bind(&exit); |
(...skipping 1294 matching lines...)
4459 __ Integer32ToSmi(result, result); | 4781 __ Integer32ToSmi(result, result); |
4460 __ ret(0); | 4782 __ ret(0); |
4461 } | 4783 } |
4462 | 4784 |
4463 | 4785 |
4464 #undef __ | 4786 #undef __ |
4465 | 4787 |
4466 } } // namespace v8::internal | 4788 } } // namespace v8::internal |
4467 | 4789 |
4468 #endif // V8_TARGET_ARCH_X64 | 4790 #endif // V8_TARGET_ARCH_X64 |