Chromium Code Reviews

Unified Diff: src/x64/code-stubs-x64.cc

Issue 6366028: X64 Crankshaft: Add TypeRecordingBinaryStub to X64 (Closed)
Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' (created 9 years, 10 months ago)
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 1019 matching lines...)

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
      5,
      1);
}


+// Prepare for a type transition runtime call when the args are already on
+// the stack, under the return address.
+void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
+    MacroAssembler* masm) {
+  __ pop(rcx);  // Save return address.
+  // Left and right arguments are already on top of the stack.
+  // Push this stub's key. Although the operation and the type info are
+  // encoded into the key, the encoding is opaque, so push them too.
+  __ Push(Smi::FromInt(MinorKey()));
+  __ Push(Smi::FromInt(op_));
+  __ Push(Smi::FromInt(operands_type_));
+
+  __ push(rcx);  // Push return address.
+
+  // Patch the caller to an appropriate specialized stub and return the
+  // operation result to the caller of the stub.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
+      5,
+      1);
+}
+
+
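For orientation, the stack this sets up for the kTypeRecordingBinaryOp_Patch call can be written out as follows (derived from the pushes above and from GenerateRegisterArgsPush further down; a sketch, not part of the patch, assuming 8-byte stack slots):

  // rsp[0]  : return address
  // rsp[8]  : operands_type_ (as Smi)
  // rsp[16] : op_ (as Smi)
  // rsp[24] : MinorKey() (as Smi)
  // rsp[32] : right operand
  // rsp[40] : left operand
  // Five arguments under the return address, matching the "5" passed to
  // TailCallExternalReference.
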
void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
  switch (operands_type_) {
    case TRBinaryOpIC::UNINITIALIZED:
      GenerateTypeTransition(masm);
      break;
    case TRBinaryOpIC::SMI:
      GenerateSmiStub(masm);
      break;
    case TRBinaryOpIC::INT32:
-      GenerateInt32Stub(masm);
+      UNREACHABLE();
+      // The int32 case is identical to the Smi case. We avoid creating this
+      // ic state on x64.
      break;
    case TRBinaryOpIC::HEAP_NUMBER:
      GenerateHeapNumberStub(masm);
      break;
    case TRBinaryOpIC::STRING:
      GenerateStringStub(masm);
      break;
    case TRBinaryOpIC::GENERIC:
      GenerateGeneric(masm);
      break;
(...skipping 22 matching lines...)
               op_name,
               overwrite_name,
               TRBinaryOpIC::GetName(operands_type_));
  return name_;
}


void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
    Label* slow,
    SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
-  UNIMPLEMENTED();
+
+  // We only generate heapnumber answers for overflowing calculations
+  // for the four basic arithmetic operations.
+  bool generate_inline_heapnumber_results =
+      (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
+      (op_ == Token::ADD || op_ == Token::SUB ||
+       op_ == Token::MUL || op_ == Token::DIV);
+
+  // Arguments to TypeRecordingBinaryOpStub are in rdx and rax.
+  Register left = rdx;
+  Register right = rax;
+
+  // Smi check of both operands. If op is BIT_OR, the check is delayed
+  // until after the OR operation.
+  Label not_smis;
+  Label use_fp_on_smis;
+  Label restore_MOD_registers;  // Only used if op_ == Token::MOD.
+
+  if (op_ != Token::BIT_OR) {
+    Comment smi_check_comment(masm, "-- Smi check arguments");
+    __ JumpIfNotBothSmi(left, right, &not_smis);
+  }
+
+  // Perform the operation.
+  Comment perform_smi(masm, "-- Perform smi operation");
+  switch (op_) {
+    case Token::ADD:
+      ASSERT(right.is(rax));
+      __ SmiAdd(right, right, left, &use_fp_on_smis);  // ADD is commutative.
+      break;
+
+    case Token::SUB:
+      __ SmiSub(left, left, right, &use_fp_on_smis);
+      __ movq(rax, left);
+      break;
+
+    case Token::MUL:
+      ASSERT(right.is(rax));
+      __ SmiMul(right, right, left, &use_fp_on_smis);  // MUL is commutative.
+      break;
+
+    case Token::DIV:
+      // SmiDiv will not accept left in rdx or right in rax.
+      left = rcx;
+      right = rbx;
+      __ movq(rbx, rax);
+      __ movq(rcx, rdx);
+      __ SmiDiv(rax, left, right, &use_fp_on_smis);
+      break;
+
+    case Token::MOD:
+      // SmiMod will not accept left in rdx or right in rax.
+      left = rcx;
+      right = rbx;
+      __ movq(rbx, rax);
+      __ movq(rcx, rdx);
+      __ SmiMod(rax, left, right, &restore_MOD_registers);
Søren Thygesen Gjesse 2011/02/03 08:00:43 Do you need the label "restore_MOD_registers"? It
William Hesse 2011/02/04 14:37:25 No. Eliminated. On 2011/02/03 08:00:43, Søren Gj
+      break;
+
+    case Token::BIT_OR: {
+      ASSERT(right.is(rax));
+      __ movq(rcx, right);  // Save the right operand.
+      __ SmiOr(right, right, left);  // BIT_OR is commutative.
+      __ JumpIfNotSmi(right, &not_smis);  // Test delayed until after BIT_OR.
+      break;
+    }
+    case Token::BIT_XOR:
+      ASSERT(right.is(rax));
+      __ SmiXor(right, right, left);  // BIT_XOR is commutative.
+      break;
+
+    case Token::BIT_AND:
+      ASSERT(right.is(rax));
+      __ SmiAnd(right, right, left);  // BIT_AND is commutative.
+      break;
+
+    case Token::SHL:
+      __ SmiShiftLeft(left, left, right);
+      __ movq(rax, left);
+      break;
+
+    case Token::SAR:
+      __ SmiShiftArithmeticRight(left, left, right);
+      __ movq(rax, left);
+      break;
+
+    case Token::SHR:
+      __ SmiShiftLogicalRight(left, left, right, &not_smis);
+      __ movq(rax, left);
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+
+  // 5. Emit return of result in rax. Some operations have registers pushed.
+  __ ret(0);
+
+  // 6. For some operations emit inline code to perform floating point
+  // operations on known smis (e.g., if the result of the operation
+  // overflowed the smi range).
+  __ bind(&use_fp_on_smis);
+  __ bind(&restore_MOD_registers);
+  if (op_ == Token::DIV || op_ == Token::MOD) {
+    // Restore left and right to rdx and rax.
+    __ movq(rdx, rcx);
+    __ movq(rax, rbx);
+  }
+
+  if (generate_inline_heapnumber_results) {
Søren Thygesen Gjesse 2011/02/03 08:00:43 Can't you take mode_ into account here and reuse a
William Hesse 2011/02/04 14:37:25 We only reach here if the inputs were both Smis.
+    __ AllocateHeapNumber(rcx, rbx, slow);
+    Comment perform_float(masm, "-- Perform float operation on smis");
+    FloatingPointHelper::LoadSSE2SmiOperands(masm);
+    switch (op_) {
+      case Token::ADD: __ addsd(xmm0, xmm1); break;
+      case Token::SUB: __ subsd(xmm0, xmm1); break;
+      case Token::MUL: __ mulsd(xmm0, xmm1); break;
+      case Token::DIV: __ divsd(xmm0, xmm1); break;
+      default: UNREACHABLE();
+    }
+    __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+    __ movq(rax, rcx);
+    __ ret(0);
+  }
+
+  // 7. Non-smi operands reach the end of the code generated by
+  // GenerateSmiCode, and fall through to subsequent code,
+  // with the operands in rdx and rax.
+  Comment done_comment(masm, "-- Enter non-smi code");
+  __ bind(&not_smis);
+  if (op_ == Token::BIT_OR) {
Søren Thygesen Gjesse 2011/02/03 08:00:43 rax -> right?
William Hesse 2011/02/04 14:37:25 Done.
+    __ movq(rax, rcx);
+  }
}


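The use_fp_on_smis path exists because two smi operands can produce a result outside the 32-bit smi payload even when both inputs fit. A minimal standalone C++ sketch of that bailout condition (illustrative only, not V8 code; the stub detects overflow with the CPU overflow flag rather than by widening):

#include <cstdint>

// Returns true and stores the sum when a + b still fits a 32-bit smi
// payload; returns false where the stub would jump to use_fp_on_smis.
bool SmiAddFits(int32_t a, int32_t b, int32_t* result) {
  int64_t wide = static_cast<int64_t>(a) + b;  // Widen so overflow is visible.
  if (wide < INT32_MIN || wide > INT32_MAX) return false;
  *result = static_cast<int32_t>(wide);
  return true;
}
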
-void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
-  Label call_runtime;
-
+void TypeRecordingBinaryOpStub::GenerateFloatingPointCode(
+    MacroAssembler* masm,
+    Label* allocation_failure,
+    Label* non_numeric_failure) {
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
-    case Token::DIV:
-      break;
-    case Token::MOD:
+    case Token::DIV: {
+      FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
+
+      switch (op_) {
+        case Token::ADD: __ addsd(xmm0, xmm1); break;
+        case Token::SUB: __ subsd(xmm0, xmm1); break;
+        case Token::MUL: __ mulsd(xmm0, xmm1); break;
+        case Token::DIV: __ divsd(xmm0, xmm1); break;
+        default: UNREACHABLE();
+      }
+      GenerateHeapResultAllocation(masm, allocation_failure);
+      __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+      __ ret(0);
+      break;
+    }
+    case Token::MOD: {
+      // For MOD we jump to the allocation_failure label, to call runtime.
+      __ jmp(allocation_failure);
+      break;
+    }
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
+    case Token::SHR: {
+      Label non_smi_shr_result;
+      Register heap_number_map = r9;
+      __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+      FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
+                                          heap_number_map);
+      switch (op_) {
+        case Token::BIT_OR:  __ orl(rax, rcx); break;
+        case Token::BIT_AND: __ andl(rax, rcx); break;
+        case Token::BIT_XOR: __ xorl(rax, rcx); break;
+        case Token::SAR: __ sarl_cl(rax); break;
+        case Token::SHL: __ shll_cl(rax); break;
+        case Token::SHR: {
+          __ shrl_cl(rax);
+          // Check if result is negative. This can only happen for a shift
+          // by zero.
+          __ testl(rax, rax);
+          __ j(negative, &non_smi_shr_result);
+          break;
+        }
+        default: UNREACHABLE();
+      }
+      STATIC_ASSERT(kSmiValueSize == 32);
+      // Tag smi result and return.
+      __ Integer32ToSmi(rax, rax);
+      __ Ret();
+
+      // Logical shift right can produce an unsigned int32 that is not
+      // an int32, and so is not in the smi range. Allocate a heap number
+      // in that case.
+      if (op_ == Token::SHR) {
+        __ bind(&non_smi_shr_result);
+        Label allocation_failed;
+        __ movl(rbx, rax);  // rbx holds result value (uint32 value as int64).
+        // Allocate heap number in new space.
+        // Not using AllocateHeapNumber macro in order to reuse the
Søren Thygesen Gjesse 2011/02/03 08:00:43 How about creating an AllocateHeapNumber in macro
William Hesse 2011/02/04 14:37:25 It is only used this one place, and it is really j
+        // already loaded heap_number_map.
+        __ AllocateInNewSpace(HeapNumber::kSize,
+                              rax,
+                              rcx,
+                              no_reg,
+                              &allocation_failed,
+                              TAG_OBJECT);
+        // Set the map.
+        if (FLAG_debug_code) {
+          __ AbortIfNotRootValue(heap_number_map,
+                                 Heap::kHeapNumberMapRootIndex,
+                                 "HeapNumberMap register clobbered.");
+        }
+        __ movq(FieldOperand(rax, HeapObject::kMapOffset),
+                heap_number_map);
+        __ cvtqsi2sd(xmm0, rbx);
+        __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+        __ Ret();
+
+        __ bind(&allocation_failed);
+        // We need tagged values in rdx and rax for the following code,
+        // not int32 in rax and rcx.
+        __ Integer32ToSmi(rax, rcx);
+        __ Integer32ToSmi(rdx, rbx);
+        __ jmp(allocation_failure);
+      }
+      break;
+    }
+    default: UNREACHABLE(); break;
+  }
+  // No fall-through from this generated code.
+  if (FLAG_debug_code) {
+    __ Abort("Unexpected fall-through in "
+             "TypeRecordingBinaryStub::GenerateFloatingPointCode.");
+  }
+}
+
+
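SHR is the one bit operation whose result can escape the signed 32-bit range, which is why it alone checks the sign bit after shifting. A short C++ illustration of that check (not V8 code; it mirrors the testl/j(negative) sequence above):

#include <cstdint>

// A set sign bit after a logical shift means the uint32 result exceeds
// INT32_MAX and cannot be stored as a 32-bit smi payload.
bool FitsInSmi(uint32_t shr_result) {
  return static_cast<int32_t>(shr_result) >= 0;
}

// Example: in JS, (-1) >>> 0 is 4294967295; FitsInSmi(0xFFFFFFFFu) is
// false, so the stub allocates a heap number for the result.
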
+void TypeRecordingBinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
+  GenerateRegisterArgsPush(masm);
+  // Registers containing left and right operands respectively.
+  Register lhs = rdx;
+  Register rhs = rax;
+
+  // Test for string arguments before calling runtime.
+  Label not_strings, both_strings, not_string1, string1, string1_smi2;
+  Condition is_smi;
Søren Thygesen Gjesse 2011/02/03 08:00:43 Please remove this debugging code.
William Hesse 2011/02/04 14:37:25 Done.
+  __ jmp(&not_strings);  // Debugging jump.
+
+  is_smi = masm->CheckSmi(lhs);
Søren Thygesen Gjesse 2011/02/03 08:00:43 The code pattern for checking for a string is repe
William Hesse 2011/02/04 14:37:25 In one of the three places, the smi check jumps to
+  __ j(is_smi, &not_string1);
+  __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
+  __ j(above_equal, &not_string1);
+
+  // First argument is a string, test second.
+  is_smi = masm->CheckSmi(rhs);
+  __ j(is_smi, &string1_smi2);
+  __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
+  __ j(above_equal, &string1);
+
+  // First and second argument are strings.
+  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+  __ TailCallStub(&string_add_stub);
+
+  __ bind(&string1_smi2);
+  // First argument is a string, second is a smi. Try to look up the number
+  // string for the smi in the number string cache.
+  NumberToStringStub::GenerateLookupNumberStringCache(
+      masm, rhs, rbx, rcx, r8, true, &string1);
+
+  // Replace second argument on stack and tail call string add stub to make
+  // the result.
+  __ movq(Operand(rsp, 1 * kPointerSize), rbx);
+  __ TailCallStub(&string_add_stub);
+
+  // Only first argument is a string.
+  __ bind(&string1);
+  __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+
+  // First argument was not a string, test second.
+  __ bind(&not_string1);
+  is_smi = masm->CheckSmi(rhs);
+  __ j(is_smi, &not_strings);
+  __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
+  __ j(above_equal, &not_strings);
+
+  // Only second argument is a string.
+  __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+
+  __ bind(&not_strings);
+  // Neither argument is a string.
+  // Pop arguments, because CallRuntimeCode wants to push them again.
+  __ pop(rcx);
+  __ pop(rax);
+  __ pop(rdx);
+  __ push(rcx);
+}
+
+
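Setting aside the temporary debugging jump flagged in the review, the type dispatch this function performs can be restated as a small C++ table (a hypothetical helper for illustration; these names are not from the patch):

// Which path GenerateStringAddCode takes for Token::ADD, by operand type.
enum StringAddPath {
  kStringAddStub,        // both operands are strings
  kNumberCacheThenStub,  // string + smi: smi goes through the number cache
  kBuiltinAddLeft,       // only the left operand is a string
  kBuiltinAddRight,      // only the right operand is a string
  kRuntime               // neither is a string: pop args and fall through
};

StringAddPath Classify(bool left_is_string, bool right_is_string,
                       bool right_is_smi) {
  if (left_is_string) {
    if (right_is_string) return kStringAddStub;
    return right_is_smi ? kNumberCacheThenStub : kBuiltinAddLeft;
  }
  return right_is_string ? kBuiltinAddRight : kRuntime;
}
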
+void TypeRecordingBinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
+  GenerateRegisterArgsPush(masm);
+  switch (op_) {
+    case Token::ADD:
+      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+      break;
+    case Token::SUB:
+      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+      break;
+    case Token::MUL:
+      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+      break;
+    case Token::DIV:
+      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+      break;
+    case Token::MOD:
+      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+      break;
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+      break;
+    case Token::SAR:
+      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+      break;
+    case Token::SHL:
+      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+      break;
    case Token::SHR:
-      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
-  }
-
-  if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
-      result_type_ == TRBinaryOpIC::SMI) {
-    GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
-  } else {
-    GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
-  }
-  __ bind(&call_runtime);
-  switch (op_) {
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV:
-      GenerateTypeTransition(masm);
-      break;
-    case Token::MOD:
-    case Token::BIT_OR:
-    case Token::BIT_AND:
-    case Token::BIT_XOR:
-    case Token::SAR:
-    case Token::SHL:
-    case Token::SHR:
-      GenerateTypeTransitionWithSavedArgs(masm);
-      break;
-    default:
-      UNREACHABLE();
  }
}


+void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+  Label call_runtime;
+
+  if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
+      result_type_ == TRBinaryOpIC::SMI) {
+    GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
+  } else {
+    GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+  }
+  __ bind(&call_runtime);
Søren Thygesen Gjesse 2011/02/03 08:00:43 Should there be a separate exit for failed heap al
William Hesse 2011/02/04 14:37:25 There should be no ALLOW_HEAPNUMBER_RESULTS case o
+  GenerateTypeTransition(masm);
+}
+
+
void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
-  UNIMPLEMENTED();
-}
-
-
-void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
-  UNIMPLEMENTED();
+  ASSERT(op_ == Token::ADD);
+  GenerateStringAddCode(masm);
+
+  GenerateTypeTransition(masm);
}


void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
-  UNIMPLEMENTED();
+  Label call_runtime, type_transition;
Søren Thygesen Gjesse 2011/02/03 08:00:43 Maybe rename the labels call_runtime -> gc_requ
William Hesse 2011/02/04 14:37:25 Done.
+  GenerateFloatingPointCode(masm, &call_runtime, &type_transition);
+
+  __ bind(&type_transition);
+  GenerateTypeTransition(masm);
+
+  __ bind(&call_runtime);
+  GenerateCallRuntimeCode(masm);
}


void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
-  UNIMPLEMENTED();
+  Label call_runtime, call_string_add_or_runtime;
+
+  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+
+  GenerateFloatingPointCode(masm, &call_runtime, &call_string_add_or_runtime);
+
+  __ bind(&call_string_add_or_runtime);
+  if (op_ == Token::ADD) {
+    GenerateStringAddCode(masm);
+  }
+
+  __ bind(&call_runtime);
+  GenerateCallRuntimeCode(masm);
}


void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
    MacroAssembler* masm,
    Label* alloc_failure) {
-  UNIMPLEMENTED();
+  Label skip_allocation;
+  OverwriteMode mode = mode_;
+  switch (mode) {
+    case OVERWRITE_LEFT: {
+      // If the argument in rdx is already an object, we skip the
+      // allocation of a heap number.
+      __ JumpIfNotSmi(rdx, &skip_allocation);
+      // Allocate a heap number for the result. Keep rax and rdx intact
+      // for the possible runtime call.
+      __ AllocateHeapNumber(rbx, rcx, alloc_failure);
+      // Now rdx can be overwritten losing one of the arguments as we are
+      // now done and will not need it any more.
+      __ movq(rdx, rbx);
+      __ bind(&skip_allocation);
+      // Use object in rdx as a result holder.
+      __ movq(rax, rdx);
+      break;
+    }
+    case OVERWRITE_RIGHT:
+      // If the argument in rax is already an object, we skip the
+      // allocation of a heap number.
+      __ JumpIfNotSmi(rax, &skip_allocation);
+      // Fall through!
+    case NO_OVERWRITE:
+      // Allocate a heap number for the result. Keep rax and rdx intact
+      // for the possible runtime call.
+      __ AllocateHeapNumber(rbx, rcx, alloc_failure);
+      // Now rax can be overwritten losing one of the arguments as we are
+      // now done and will not need it any more.
+      __ movq(rax, rbx);
+      __ bind(&skip_allocation);
+      break;
+    default: UNREACHABLE();
+  }
}


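The overwrite mode decides whether an operand's heap number may hold the result instead of a fresh allocation. A compact C++ restatement of the decision above (an illustrative sketch; the enum values mirror V8's OverwriteMode, the helper name is invented):

enum OverwriteModeSketch { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

// Describes where the result object comes from; a smi operand can never
// be overwritten in place, since it is not a heap object.
const char* ResultObject(OverwriteModeSketch mode,
                         bool left_is_smi, bool right_is_smi) {
  if (mode == OVERWRITE_LEFT && !left_is_smi) return "reuse left (rdx)";
  if (mode == OVERWRITE_RIGHT && !right_is_smi) return "reuse right (rax)";
  return "freshly allocated heap number";
}
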
void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
  __ pop(rcx);
  __ push(rdx);
  __ push(rax);
  __ push(rcx);
}

(...skipping 301 matching lines...)
  __ bind(&rax_is_smi);
  __ SmiToInteger32(rcx, rax);

  __ bind(&done);
  __ movl(rax, rdx);
}


// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
+// Jump to conversion_failure: rdx and rax are unchanged.
void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
                                         Label* conversion_failure,
                                         Register heap_number_map) {
  // Check float operands.
  Label arg1_is_object, check_undefined_arg1;
  Label arg2_is_object, check_undefined_arg2;
  Label load_arg2, done;

  __ JumpIfNotSmi(rdx, &arg1_is_object);
-  __ SmiToInteger32(rdx, rdx);
+  __ SmiToInteger32(r8, rdx);
  __ jmp(&load_arg2);

  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
  __ bind(&check_undefined_arg1);
  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, conversion_failure);
-  __ movl(rdx, Immediate(0));
+  __ movl(r8, Immediate(0));
  __ jmp(&load_arg2);

  __ bind(&arg1_is_object);
  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
  __ j(not_equal, &check_undefined_arg1);
-  // Get the untagged integer version of the edx heap number in rcx.
-  IntegerConvert(masm, rdx, rdx);
+  // Get the untagged integer version of the rdx heap number in r8.
+  IntegerConvert(masm, r8, rdx);

-  // Here rdx has the untagged integer, rax has a Smi or a heap number.
+  // Here r8 has the untagged integer, rax has a Smi or a heap number.
  __ bind(&load_arg2);
  // Test if arg2 is a Smi.
  __ JumpIfNotSmi(rax, &arg2_is_object);
-  __ SmiToInteger32(rax, rax);
-  __ movl(rcx, rax);
+  __ SmiToInteger32(rcx, rax);
  __ jmp(&done);

  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
  __ bind(&check_undefined_arg2);
  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, conversion_failure);
  __ movl(rcx, Immediate(0));
  __ jmp(&done);

  __ bind(&arg2_is_object);
  __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
  __ j(not_equal, &check_undefined_arg2);
  // Get the untagged integer version of the rax heap number in rcx.
  IntegerConvert(masm, rcx, rax);
  __ bind(&done);
-  __ movl(rax, rdx);
+  __ movl(rax, r8);
}


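The undefined-to-zero and heap-number paths above implement the ToInt32 conversion of ECMA-262 section 9.5 cited in the comments. A C++ approximation of that semantics (illustrative only; the stub does this with IntegerConvert, not library calls):

#include <cmath>
#include <cstdint>

// ToInt32 per ECMA-262 9.5: NaN and infinities become 0; other values are
// truncated toward zero and reduced modulo 2^32 into the signed range.
int32_t ToInt32(double d) {
  if (std::isnan(d) || std::isinf(d)) return 0;
  const double kTwoPow32 = 4294967296.0;
  double m = std::fmod(std::trunc(d), kTwoPow32);
  if (m < 0) m += kTwoPow32;  // Map into [0, 2^32).
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}
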
void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
  __ SmiToInteger32(kScratchRegister, rdx);
  __ cvtlsi2sd(xmm0, kScratchRegister);
  __ SmiToInteger32(kScratchRegister, rax);
  __ cvtlsi2sd(xmm1, kScratchRegister);
}

(...skipping 309 matching lines...)
  // at compilation.
#ifdef V8_INTERPRETED_REGEXP
  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else  // V8_INTERPRETED_REGEXP
  if (!FLAG_regexp_entry_native) {
    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
    return;
  }

  // Stack frame on entry.
-  //  esp[0]: return address
-  //  esp[8]: last_match_info (expected JSArray)
-  //  esp[16]: previous index
-  //  esp[24]: subject string
-  //  esp[32]: JSRegExp object
+  //  rsp[0]: return address
+  //  rsp[8]: last_match_info (expected JSArray)
+  //  rsp[16]: previous index
+  //  rsp[24]: subject string
+  //  rsp[32]: JSRegExp object

  static const int kLastMatchInfoOffset = 1 * kPointerSize;
  static const int kPreviousIndexOffset = 2 * kPointerSize;
  static const int kSubjectOffset = 3 * kPointerSize;
  static const int kJSRegExpOffset = 4 * kPointerSize;

  Label runtime;

  // Ensure that a RegExp stack is allocated.
  ExternalReference address_of_regexp_stack_memory_address =
(...skipping 321 matching lines...)
  Label slowcase;
  Label done;
  __ movq(r8, Operand(rsp, kPointerSize * 3));
  __ JumpIfNotSmi(r8, &slowcase);
  __ SmiToInteger32(rbx, r8);
  __ cmpl(rbx, Immediate(kMaxInlineLength));
  __ j(above, &slowcase);
  // Smi-tagging is equivalent to multiplying by 2.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);
-  // Allocate RegExpResult followed by FixedArray with size in ebx.
+  // Allocate RegExpResult followed by FixedArray with size in rbx.
  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
  // Elements:  [Map][Length][..elements..]
  __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
                        times_pointer_size,
                        rbx,  // In: Number of elements.
                        rax,  // Out: Start of allocation (tagged).
                        rcx,  // Out: End of allocation.
                        rdx,  // Scratch register
                        &slowcase,
                        TAG_OBJECT);
(...skipping 38 matching lines...)
  __ Move(rdx, Factory::the_hole_value());
  __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
  // Fill fixed array elements with hole.
  // rax: JSArray.
  // rbx: Number of elements in array that remains to be filled, as int32.
  // rcx: Start of elements in FixedArray.
  // rdx: the hole.
  Label loop;
  __ testl(rbx, rbx);
  __ bind(&loop);
-  __ j(less_equal, &done);  // Jump if ecx is negative or zero.
+  __ j(less_equal, &done);  // Jump if rbx is negative or zero.
  __ subl(rbx, Immediate(1));
  __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
  __ jmp(&loop);

  __ bind(&done);
  __ ret(3 * kPointerSize);

  __ bind(&slowcase);
  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
}
(...skipping 342 matching lines...)
    __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    __ j(zero, &return_unequal);
    __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    __ j(zero, &return_unequal);
    // The objects are both undetectable, so they both compare as the value
    // undefined, and are equal.
    __ Set(rax, EQUAL);
    __ bind(&return_unequal);
-    // Return non-equal by returning the non-zero object pointer in eax,
+    // Return non-equal by returning the non-zero object pointer in rax,
    // or return equal if we fell through to here.
    __ ret(0);
    __ bind(&not_both_objects);
  }

  // Push arguments below the return address to prepare jump to builtin.
  __ pop(rcx);
  __ push(rdx);
  __ push(rax);

(...skipping 474 matching lines...)
  __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
  __ call(kScratchRegister);

  // Unlink this frame from the handler chain.
  __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
  __ pop(Operand(kScratchRegister, 0));
  // Pop next_sp.
  __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));

#ifdef ENABLE_LOGGING_AND_PROFILING
-  // If current EBP value is the same as js_entry_sp value, it means that
+  // If current RBP value is the same as js_entry_sp value, it means that
  // the current function is the outermost.
  __ movq(kScratchRegister, js_entry_sp);
  __ cmpq(rbp, Operand(kScratchRegister, 0));
  __ j(not_equal, &not_outermost_js_2);
  __ movq(Operand(kScratchRegister, 0), Immediate(0));
  __ bind(&not_outermost_js_2);
#endif

  // Restore the top frame descriptor from the stack.
  __ bind(&exit);
(...skipping 1247 matching lines...)

  // Do a tail call to the rewritten stub.
  __ jmp(rdi);
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64