Chromium Code Reviews

Unified Diff: src/arm/code-stubs-arm.cc

Issue 25571002: Revert "Hydrogenisation of binops" (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 2 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
(...skipping 150 matching lines...)
161 static Register registers[] = { r0 };
162 descriptor->register_param_count_ = 1;
163 descriptor->register_params_ = registers;
164 descriptor->deoptimization_handler_ =
165 FUNCTION_ADDR(CompareNilIC_Miss);
166 descriptor->SetMissHandler(
167 ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
168 }
169
170
- 171 void BinaryOpStub::InitializeInterfaceDescriptor(
- 172 Isolate* isolate,
- 173 CodeStubInterfaceDescriptor* descriptor) {
- 174 static Register registers[] = { r1, r0 };
- 175 descriptor->register_param_count_ = 2;
- 176 descriptor->register_params_ = registers;
- 177 descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
- 178 descriptor->SetMissHandler(
- 179 ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
- 180 }
- 181
- 182
171 static void InitializeArrayConstructorDescriptor(
172 Isolate* isolate,
173 CodeStubInterfaceDescriptor* descriptor,
174 int constant_stack_parameter_count) {
175 // register state
176 // r0 -- number of arguments
177 // r1 -- function
178 // r2 -- type info cell with elements kind
179 static Register registers[] = { r1, r2 };
180 descriptor->register_param_count_ = 2;
(...skipping 997 matching lines...)
1178 __ CallCFunction(
1179 ExternalReference::store_buffer_overflow_function(masm->isolate()),
1180 argument_count);
1181 if (save_doubles_ == kSaveFPRegs) {
1182 __ RestoreFPRegs(sp, scratch);
1183 }
1184 __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
1185 }
1186
1187
1188 // Generates code to call a C function to do a double operation.
1189 // This code never falls through, but returns with a heap number containing
1190 // the result in r0.
1191 // Register heap_number_result must contain the heap number in which the
1192 // result of the operation will be stored.
1193 // Requires the following layout on entry:
1194 // d0: Left value.
1195 // d1: Right value.
1196 // With a soft-float ABI, the values are also passed in r0, r1, r2, r3.
1197 static void CallCCodeForDoubleOperation(MacroAssembler* masm,
1198 Token::Value op,
1199 Register heap_number_result,
1200 Register scratch) {
1201 // Assert that heap_number_result is callee-saved.
1202 // We currently always use r5 to pass it.
1203 ASSERT(heap_number_result.is(r5));
1204
1205 // Push the current return address before the C call. Return will be
1206 // through pop(pc) below.
1207 __ push(lr);
1208 __ PrepareCallCFunction(0, 2, scratch);
1209 if (!masm->use_eabi_hardfloat()) {
1210 __ vmov(r0, r1, d0);
1211 __ vmov(r2, r3, d1);
1212 }
1213 {
1214 AllowExternalCallThatCantCauseGC scope(masm);
1215 __ CallCFunction(
1216 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
1217 }
1218 // Store answer in the overwritable heap number. Double returned in
1219 // registers r0 and r1 or in d0.
1220 if (masm->use_eabi_hardfloat()) {
1221 __ vstr(d0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
1222 } else {
1223 __ Strd(r0, r1,
1224 FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
1225 }
1226 // Place heap_number_result in r0 and return to the pushed return address.
1227 __ mov(r0, Operand(heap_number_result));
1228 __ pop(pc);
1229 }
1230
1231
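The vmov pair above implements the soft-float calling convention: each double argument travels in a pair of core registers (d0 in r0:r1, d1 in r2:r3), while a hard-float ABI leaves the values in VFP registers. A minimal standalone sketch of the word split vmov performs, assuming little-endian ARM word order; the names are illustrative, not V8's:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double d0 = 1.5;  // the left operand, as loaded into VFP register d0
  uint32_t lo, hi;  // the words that would be moved into r0 and r1
  std::memcpy(&lo, &d0, 4);                                     // low word
  std::memcpy(&hi, reinterpret_cast<const char*>(&d0) + 4, 4);  // high word
  std::printf("d0=%f -> r0=0x%08x r1=0x%08x\n", d0, lo, hi);
}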
1232 void BinaryOpStub::Initialize() {
1233 platform_specific_bit_ = true; // VFP2 is a base requirement for V8
1234 }
1235
1236
1237 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
1238 Label get_result;
1239
1240 __ Push(r1, r0);
1241
1242 __ mov(r2, Operand(Smi::FromInt(MinorKey())));
1243 __ push(r2);
1244
1245 __ TailCallExternalReference(
1246 ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
1247 masm->isolate()),
1248 3,
1249 1);
1250 }
1251
1252
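GenerateTypeTransition above packages the stub's minor key as a smi before tail-calling the BinaryOp_Patch runtime entry. A minimal sketch of the 32-bit smi encoding that Smi::FromInt relies on (kSmiTag == 0 with one tag bit, so a smi is the integer shifted left by one); this is a simplified model, not V8's actual Smi class:

#include <cassert>
#include <cstdint>

int32_t SmiFromInt(int32_t value) {
  assert(value >= -(1 << 30) && value < (1 << 30));  // must fit in 31 bits
  return value * 2;  // same as << 1; tag bit 0 stays clear
}

int main() {
  assert(SmiFromInt(5) == 10);       // 5, tagged
  assert((SmiFromInt(5) & 1) == 0);  // low bit clear marks it as a smi
}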
1253 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
1254 MacroAssembler* masm) {
1255 UNIMPLEMENTED();
1256 }
1257
1258
1259 void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
1260 Token::Value op,
1261 Register scratch1,
1262 Register scratch2) {
1263 Register left = r1;
1264 Register right = r0;
1265
1266 ASSERT(right.is(r0));
1267 ASSERT(!AreAliased(left, right, scratch1, scratch2, ip));
1268 STATIC_ASSERT(kSmiTag == 0);
1269
1270 Label not_smi_result;
1271 switch (op) {
1272 case Token::ADD:
1273 __ add(right, left, Operand(right), SetCC); // Add optimistically.
1274 __ Ret(vc);
1275 __ sub(right, right, Operand(left)); // Revert optimistic add.
1276 break;
1277 case Token::SUB:
1278 __ sub(right, left, Operand(right), SetCC); // Subtract optimistically.
1279 __ Ret(vc);
1280 __ sub(right, left, Operand(right)); // Revert optimistic subtract.
1281 break;
1282 case Token::MUL:
1283 // Remove tag from one of the operands. This way the multiplication result
1284 // will be a smi if it fits the smi range.
1285 __ SmiUntag(ip, right);
1286 // Do multiplication
1287 // scratch1 = lower 32 bits of ip * left.
1288 // scratch2 = higher 32 bits of ip * left.
1289 __ smull(scratch1, scratch2, left, ip);
1290 // Check for overflowing the smi range - no overflow if higher 33 bits of
1291 // the result are identical.
1292 __ mov(ip, Operand(scratch1, ASR, 31));
1293 __ cmp(ip, Operand(scratch2));
1294 __ b(ne, &not_smi_result);
1295 // Go slow on zero result to handle -0.
1296 __ cmp(scratch1, Operand::Zero());
1297 __ mov(right, Operand(scratch1), LeaveCC, ne);
1298 __ Ret(ne);
1299 // We need to return -0 if a negative number was multiplied by 0 to get 0.
1300 // We know one of the operands was zero.
1301 __ add(scratch2, right, Operand(left), SetCC);
1302 __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
1303 __ Ret(pl); // Return smi 0 if the non-zero one was positive.
1304 // We fall through here if we multiplied a negative number by 0, because
1305 // that would mean we should produce -0.
1306 break;
1307 case Token::DIV: {
1308 Label div_with_sdiv;
1309
1310 // Check for 0 divisor.
1311 __ cmp(right, Operand::Zero());
1312 __ b(eq, &not_smi_result);
1313
1314 // Check for power of two on the right hand side.
1315 __ sub(scratch1, right, Operand(1));
1316 __ tst(scratch1, right);
1317 if (CpuFeatures::IsSupported(SUDIV)) {
1318 __ b(ne, &div_with_sdiv);
1319 // Check for no remainder.
1320 __ tst(left, scratch1);
1321 __ b(ne, &not_smi_result);
1322 // Check for positive left hand side.
1323 __ cmp(left, Operand::Zero());
1324 __ b(mi, &div_with_sdiv);
1325 } else {
1326 __ b(ne, &not_smi_result);
1327 // Check for positive and no remainder.
1328 __ orr(scratch2, scratch1, Operand(0x80000000u));
1329 __ tst(left, scratch2);
1330 __ b(ne, &not_smi_result);
1331 }
1332
1333 // Perform division by shifting.
1334 __ clz(scratch1, scratch1);
1335 __ rsb(scratch1, scratch1, Operand(31));
1336 __ mov(right, Operand(left, LSR, scratch1));
1337 __ Ret();
1338
1339 if (CpuFeatures::IsSupported(SUDIV)) {
1340 CpuFeatureScope scope(masm, SUDIV);
1341 Label result_not_zero;
1342
1343 __ bind(&div_with_sdiv);
1344 // Do division.
1345 __ sdiv(scratch1, left, right);
1346 // Check that the remainder is zero.
1347 __ mls(scratch2, scratch1, right, left);
1348 __ cmp(scratch2, Operand::Zero());
1349 __ b(ne, &not_smi_result);
1350 // Check for negative zero result.
1351 __ cmp(scratch1, Operand::Zero());
1352 __ b(ne, &result_not_zero);
1353 __ cmp(right, Operand::Zero());
1354 __ b(lt, &not_smi_result);
1355 __ bind(&result_not_zero);
1356 // Check for the corner case of dividing the most negative smi by -1.
1357 __ cmp(scratch1, Operand(0x40000000));
1358 __ b(eq, &not_smi_result);
1359 // Tag and return the result.
1360 __ SmiTag(right, scratch1);
1361 __ Ret();
1362 }
1363 break;
1364 }
1365 case Token::MOD: {
1366 Label modulo_with_sdiv;
1367
1368 if (CpuFeatures::IsSupported(SUDIV)) {
1369 // Check for x % 0.
1370 __ cmp(right, Operand::Zero());
1371 __ b(eq, &not_smi_result);
1372
1373 // Check for two positive smis.
1374 __ orr(scratch1, left, Operand(right));
1375 __ tst(scratch1, Operand(0x80000000u));
1376 __ b(ne, &modulo_with_sdiv);
1377
1378 // Check for power of two on the right hand side.
1379 __ sub(scratch1, right, Operand(1));
1380 __ tst(scratch1, right);
1381 __ b(ne, &modulo_with_sdiv);
1382 } else {
1383 // Check for two positive smis.
1384 __ orr(scratch1, left, Operand(right));
1385 __ tst(scratch1, Operand(0x80000000u));
1386 __ b(ne, &not_smi_result);
1387
1388 // Check for power of two on the right hand side.
1389 __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
1390 }
1391
1392 // Perform modulus by masking (scratch1 contains right - 1).
1393 __ and_(right, left, Operand(scratch1));
1394 __ Ret();
1395
1396 if (CpuFeatures::IsSupported(SUDIV)) {
1397 CpuFeatureScope scope(masm, SUDIV);
1398 __ bind(&modulo_with_sdiv);
1399 __ mov(scratch2, right);
1400 // Perform modulus with sdiv and mls.
1401 __ sdiv(scratch1, left, right);
1402 __ mls(right, scratch1, right, left);
1403 // Return if the result is not 0.
1404 __ cmp(right, Operand::Zero());
1405 __ Ret(ne);
1406 // The result is 0, check for -0 case.
1407 __ cmp(left, Operand::Zero());
1408 __ Ret(pl);
1409 // This is a -0 case, restore the value of right.
1410 __ mov(right, scratch2);
1411 // We fall through here to not_smi_result to produce -0.
1412 }
1413 break;
1414 }
1415 case Token::BIT_OR:
1416 __ orr(right, left, Operand(right));
1417 __ Ret();
1418 break;
1419 case Token::BIT_AND:
1420 __ and_(right, left, Operand(right));
1421 __ Ret();
1422 break;
1423 case Token::BIT_XOR:
1424 __ eor(right, left, Operand(right));
1425 __ Ret();
1426 break;
1427 case Token::SAR:
1428 // Remove tags from right operand.
1429 __ GetLeastBitsFromSmi(scratch1, right, 5);
1430 __ mov(right, Operand(left, ASR, scratch1));
1431 // Smi tag result.
1432 __ bic(right, right, Operand(kSmiTagMask));
1433 __ Ret();
1434 break;
1435 case Token::SHR:
1436 // Remove tags from operands. We can't do this on a 31 bit number
1437 // because then the 0s get shifted into bit 30 instead of bit 31.
1438 __ SmiUntag(scratch1, left);
1439 __ GetLeastBitsFromSmi(scratch2, right, 5);
1440 __ mov(scratch1, Operand(scratch1, LSR, scratch2));
1441 // Unsigned shift is not allowed to produce a negative number, so
1442 // check the sign bit and the sign bit after Smi tagging.
1443 __ tst(scratch1, Operand(0xc0000000));
1444 __ b(ne, &not_smi_result);
1445 // Smi tag result.
1446 __ SmiTag(right, scratch1);
1447 __ Ret();
1448 break;
1449 case Token::SHL:
1450 // Remove tags from operands.
1451 __ SmiUntag(scratch1, left);
1452 __ GetLeastBitsFromSmi(scratch2, right, 5);
1453 __ mov(scratch1, Operand(scratch1, LSL, scratch2));
1454 // Check that the signed result fits in a Smi.
1455 __ TrySmiTag(right, scratch1, &not_smi_result);
1456 __ Ret();
1457 break;
1458 default:
1459 UNREACHABLE();
1460 }
1461 __ bind(&not_smi_result);
1462 }
1463
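In the Token::MUL case above, smull leaves the low word of the 64-bit product in scratch1 and the high word in scratch2; the product fits the 32-bit range exactly when the high word equals the low word's sign extension, i.e. the top 33 bits agree. A standalone sketch of that check (the narrowing casts mirror what smull produces):

#include <cstdint>
#include <cstdio>

bool MulFitsInt32(int32_t left, int32_t right, int32_t* out) {
  int64_t product = static_cast<int64_t>(left) * right;
  int32_t lo = static_cast<int32_t>(product);        // scratch1 after smull
  int32_t hi = static_cast<int32_t>(product >> 32);  // scratch2 after smull
  if (hi != (lo >> 31)) return false;  // mov ip, lo ASR #31; cmp ip, hi
  *out = lo;
  return true;
}

int main() {
  int32_t result;
  std::printf("%d\n", MulFitsInt32(0x10000, 0x10000, &result));  // 0: overflows
  std::printf("%d\n", MulFitsInt32(-3, 7, &result));             // 1: fits
}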
1464
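The Token::DIV and Token::MOD fast paths above exploit two identities on tagged smis: a value is a power of two iff x & (x - 1) == 0, and division by 2^k is a logical shift right by k, with k recovered by the clz/rsb pair. Because a 32-bit smi is the integer times two, the same identities hold directly on the tagged operands. A sketch using GCC/Clang's __builtin_clz:

#include <cassert>

unsigned Tag(unsigned v) { return v << 1; }  // 32-bit smi tagging

int main() {
  unsigned left = Tag(40), right = Tag(8);    // 40 / 8 on tagged smis
  unsigned mask = right - 1;                  // sub scratch1, right, #1
  assert((right & mask) == 0);                // tst: divisor is a power of two
  assert((left & mask) == 0);                 // tst: remainder is zero
  unsigned shift = 31 - __builtin_clz(mask);  // clz + rsb #31 => 3
  assert((left >> shift) == Tag(5));          // LSR yields the tagged quotient
  assert((Tag(43) & mask) == Tag(3));         // Token::MOD: 43 % 8 by masking
}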
1465 void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
1466 Register result,
1467 Register heap_number_map,
1468 Register scratch1,
1469 Register scratch2,
1470 Label* gc_required,
1471 OverwriteMode mode);
1472
1473
1474 void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
1475 BinaryOpIC::TypeInfo left_type,
1476 BinaryOpIC::TypeInfo right_type,
1477 bool smi_operands,
1478 Label* not_numbers,
1479 Label* gc_required,
1480 Label* miss,
1481 Token::Value op,
1482 OverwriteMode mode,
1483 Register scratch1,
1484 Register scratch2,
1485 Register scratch3,
1486 Register scratch4) {
1487 Register left = r1;
1488 Register right = r0;
1489 Register result = scratch3;
1490 ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
1491
1492 ASSERT(smi_operands || (not_numbers != NULL));
1493 if (smi_operands) {
1494 __ AssertSmi(left);
1495 __ AssertSmi(right);
1496 }
1497 if (left_type == BinaryOpIC::SMI) {
1498 __ JumpIfNotSmi(left, miss);
1499 }
1500 if (right_type == BinaryOpIC::SMI) {
1501 __ JumpIfNotSmi(right, miss);
1502 }
1503
1504 Register heap_number_map = scratch4;
1505 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1506
1507 switch (op) {
1508 case Token::ADD:
1509 case Token::SUB:
1510 case Token::MUL:
1511 case Token::DIV:
1512 case Token::MOD: {
1513 // Allocate new heap number for result.
1514 BinaryOpStub_GenerateHeapResultAllocation(
1515 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
1516
1517 // Load left and right operands into d0 and d1.
1518 if (smi_operands) {
1519 __ SmiToDouble(d1, right);
1520 __ SmiToDouble(d0, left);
1521 } else {
1522 // Load right operand into d1.
1523 if (right_type == BinaryOpIC::INT32) {
1524 __ LoadNumberAsInt32Double(
1525 right, d1, heap_number_map, scratch1, d8, miss);
1526 } else {
1527 Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
1528 __ LoadNumber(right, d1, heap_number_map, scratch1, fail);
1529 }
1530 // Load left operand into d0.
1531 if (left_type == BinaryOpIC::INT32) {
1532 __ LoadNumberAsInt32Double(
1533 left, d0, heap_number_map, scratch1, d8, miss);
1534 } else {
1535 Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
1536 __ LoadNumber(
1537 left, d0, heap_number_map, scratch1, fail);
1538 }
1539 }
1540
1541 // Calculate the result.
1542 if (op != Token::MOD) {
1543 // Using VFP registers:
1544 // d0: Left value
1545 // d1: Right value
1546 switch (op) {
1547 case Token::ADD:
1548 __ vadd(d5, d0, d1);
1549 break;
1550 case Token::SUB:
1551 __ vsub(d5, d0, d1);
1552 break;
1553 case Token::MUL:
1554 __ vmul(d5, d0, d1);
1555 break;
1556 case Token::DIV:
1557 __ vdiv(d5, d0, d1);
1558 break;
1559 default:
1560 UNREACHABLE();
1561 }
1562
1563 __ sub(r0, result, Operand(kHeapObjectTag));
1564 __ vstr(d5, r0, HeapNumber::kValueOffset);
1565 __ add(r0, r0, Operand(kHeapObjectTag));
1566 __ Ret();
1567 } else {
1568 // Call the C function to handle the double operation.
1569 CallCCodeForDoubleOperation(masm, op, result, scratch1);
1570 if (FLAG_debug_code) {
1571 __ stop("Unreachable code.");
1572 }
1573 }
1574 break;
1575 }
1576 case Token::BIT_OR:
1577 case Token::BIT_XOR:
1578 case Token::BIT_AND:
1579 case Token::SAR:
1580 case Token::SHR:
1581 case Token::SHL: {
1582 if (smi_operands) {
1583 __ SmiUntag(r3, left);
1584 __ SmiUntag(r2, right);
1585 } else {
1586 // Convert operands to 32-bit integers. Right in r2 and left in r3.
1587 __ TruncateNumberToI(left, r3, heap_number_map, scratch1, not_numbers);
1588 __ TruncateNumberToI(right, r2, heap_number_map, scratch1, not_numbers);
1589 }
1590
1591 Label result_not_a_smi;
1592 switch (op) {
1593 case Token::BIT_OR:
1594 __ orr(r2, r3, Operand(r2));
1595 break;
1596 case Token::BIT_XOR:
1597 __ eor(r2, r3, Operand(r2));
1598 break;
1599 case Token::BIT_AND:
1600 __ and_(r2, r3, Operand(r2));
1601 break;
1602 case Token::SAR:
1603 // Use only the 5 least significant bits of the shift count.
1604 __ GetLeastBitsFromInt32(r2, r2, 5);
1605 __ mov(r2, Operand(r3, ASR, r2));
1606 break;
1607 case Token::SHR:
1608 // Use only the 5 least significant bits of the shift count.
1609 __ GetLeastBitsFromInt32(r2, r2, 5);
1610 __ mov(r2, Operand(r3, LSR, r2), SetCC);
1611 // SHR is special because it is required to produce a positive answer.
1612 // The code below for writing into heap numbers isn't capable of
1613 // writing the register as an unsigned int so we go to slow case if we
1614 // hit this case.
1615 __ b(mi, &result_not_a_smi);
1616 break;
1617 case Token::SHL:
1618 // Use only the 5 least significant bits of the shift count.
1619 __ GetLeastBitsFromInt32(r2, r2, 5);
1620 __ mov(r2, Operand(r3, LSL, r2));
1621 break;
1622 default:
1623 UNREACHABLE();
1624 }
1625
1626 // Check that the *signed* result fits in a smi.
1627 __ TrySmiTag(r0, r2, &result_not_a_smi);
1628 __ Ret();
1629
1630 // Allocate new heap number for result.
1631 __ bind(&result_not_a_smi);
1632 if (smi_operands) {
1633 __ AllocateHeapNumber(
1634 result, scratch1, scratch2, heap_number_map, gc_required);
1635 } else {
1636 BinaryOpStub_GenerateHeapResultAllocation(
1637 masm, result, heap_number_map, scratch1, scratch2, gc_required,
1638 mode);
1639 }
1640
1641 // r2: Answer as signed int32.
1642 // result: Heap number to write answer into.
1643
1644 // Nothing can go wrong now, so move the heap number to r0, which is the
1645 // result.
1646 __ mov(r0, Operand(result));
1647
1648 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
1649 // mentioned above SHR needs to always produce a positive result.
1650 __ vmov(s0, r2);
1651 if (op == Token::SHR) {
1652 __ vcvt_f64_u32(d0, s0);
1653 } else {
1654 __ vcvt_f64_s32(d0, s0);
1655 }
1656 __ sub(r3, r0, Operand(kHeapObjectTag));
1657 __ vstr(d0, r3, HeapNumber::kValueOffset);
1658 __ Ret();
1659 break;
1660 }
1661 default:
1662 UNREACHABLE();
1663 }
1664 }
1665
1666
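The store sequence above (sub r0, result, #kHeapObjectTag; vstr d5 at kValueOffset; add the tag back) works because V8 heap pointers carry tag 1 in the low bit, so untagging is a subtraction and field accesses go through tag-adjusted addresses. A sketch of that pointer arithmetic with a stand-in object layout (FakeHeapNumber and its fields are illustrative, not V8's real declarations):

#include <cstdint>
#include <cstdio>

constexpr intptr_t kHeapObjectTag = 1;

struct FakeHeapNumber {  // stand-in for a map word plus a double payload
  void* map;
  double value;
};

int main() {
  FakeHeapNumber object{nullptr, 0.0};
  intptr_t tagged = reinterpret_cast<intptr_t>(&object) | kHeapObjectTag;
  // Untag, then store through the raw address -- what sub + vstr do above.
  auto* raw = reinterpret_cast<FakeHeapNumber*>(tagged - kHeapObjectTag);
  raw->value = 3.25;
  std::printf("%f\n", object.value);  // 3.25
}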
1667 // Generates the smi code. If the operation on smis is successful, a return
1668 // is generated. If the result is not a smi and heap number allocation is not
1669 // requested, the code falls through. If number allocation is requested but a
1670 // heap number cannot be allocated, the code jumps to the label gc_required.
1671 void BinaryOpStub_GenerateSmiCode(
1672 MacroAssembler* masm,
1673 Label* use_runtime,
1674 Label* gc_required,
1675 Token::Value op,
1676 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
1677 OverwriteMode mode,
1678 Register scratch1,
1679 Register scratch2,
1680 Register scratch3,
1681 Register scratch4) {
1682 Label not_smis;
1683
1684 Register left = r1;
1685 Register right = r0;
1686 ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
1687
1688 // Perform combined smi check on both operands.
1689 __ orr(scratch1, left, Operand(right));
1690 __ JumpIfNotSmi(scratch1, &not_smis);
1691
1692 // If the smi-smi operation results in a smi, a return is generated.
1693 BinaryOpStub_GenerateSmiSmiOperation(masm, op, scratch1, scratch2);
1694
1695 // If heap number results are possible, generate the result in an allocated
1696 // heap number.
1697 if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
1698 BinaryOpStub_GenerateFPOperation(
1699 masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
1700 use_runtime, gc_required, &not_smis, op, mode, scratch2, scratch3,
1701 scratch1, scratch4);
1702 }
1703 __ bind(&not_smis);
1704 }
1705
1706
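The combined smi check above costs one orr plus one test: with kSmiTag == 0 in the low bit, (left | right) has its low bit clear exactly when both operands are smis. A minimal sketch:

#include <cassert>
#include <cstdint>

bool BothSmi(int32_t left, int32_t right) {
  return ((left | right) & 1) == 0;  // orr scratch1, left, right; then test
}

int main() {
  int32_t smi_a = 4 << 1, smi_b = 7 << 1;  // tagged smis
  int32_t heap_ref = 0x1001;               // heap pointer: low bit set
  assert(BothSmi(smi_a, smi_b));
  assert(!BothSmi(smi_a, heap_ref));
}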
1707 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
1708 Label right_arg_changed, call_runtime;
1709
1710 if (op_ == Token::MOD && encoded_right_arg_.has_value) {
1711 // It is guaranteed that the value will fit into a Smi, because if it
1712 // didn't, we wouldn't be here, see BinaryOp_Patch.
1713 __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value())));
1714 __ b(ne, &right_arg_changed);
1715 }
1716
1717 if (result_type_ == BinaryOpIC::UNINITIALIZED ||
1718 result_type_ == BinaryOpIC::SMI) {
1719 // Only allow smi results.
1720 BinaryOpStub_GenerateSmiCode(masm, &call_runtime, NULL, op_,
1721 NO_HEAPNUMBER_RESULTS, mode_, r5, r6, r4, r9);
1722 } else {
1723 // Allow heap number result and don't make a transition if a heap number
1724 // cannot be allocated.
1725 BinaryOpStub_GenerateSmiCode(masm, &call_runtime, &call_runtime, op_,
1726 ALLOW_HEAPNUMBER_RESULTS, mode_, r5, r6, r4, r9);
1727 }
1728
1729 // Code falls through if the result is not returned as either a smi or heap
1730 // number.
1731 __ bind(&right_arg_changed);
1732 GenerateTypeTransition(masm);
1733
1734 __ bind(&call_runtime);
1735 {
1736 FrameScope scope(masm, StackFrame::INTERNAL);
1737 GenerateRegisterArgsPush(masm);
1738 GenerateCallRuntime(masm);
1739 }
1740 __ Ret();
1741 }
1742
1743
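The stubs in this file compare type-feedback states with <= (for example result_type_ <= BinaryOpIC::INT32), which works because the states form an ordered lattice from most specific to most generic, and patching only ever moves toward the generic end. A sketch of that ordering, with enumerators assumed from the names used in this file rather than taken from V8's headers:

#include <cassert>

enum TypeInfo { UNINITIALIZED, SMI, INT32, NUMBER, ODDBALL, STRING, GENERIC };

int main() {
  TypeInfo result_type = SMI;
  assert(result_type <= INT32);  // a smi result also satisfies the int32 stub
  assert(!(NUMBER <= INT32));    // a heap-number result forces a transition
}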
1744 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
1745 Label call_runtime;
1746 ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
1747 ASSERT(op_ == Token::ADD);
1748 // If both arguments are strings, call the string add stub.
1749 // Otherwise, do a transition.
1750
1751 // Registers containing left and right operands respectively.
1752 Register left = r1;
1753 Register right = r0;
1754
1755 // Test if left operand is a string.
1756 __ JumpIfSmi(left, &call_runtime);
1757 __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
1758 __ b(ge, &call_runtime);
1759
1760 // Test if right operand is a string.
1761 __ JumpIfSmi(right, &call_runtime);
1762 __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
1763 __ b(ge, &call_runtime);
1764
1765 StringAddStub string_add_stub(
1766 (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
1767 GenerateRegisterArgsPush(masm);
1768 __ TailCallStub(&string_add_stub);
1769
1770 __ bind(&call_runtime);
1771 GenerateTypeTransition(masm);
1772 }
1773
1774
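The CompareObjectType / b(ge) pairs above rely on V8 ordering its instance types so that all string types come before FIRST_NONSTRING_TYPE; an object is a string iff its instance type compares below that bound. A sketch with made-up numeric values:

#include <cassert>

enum InstanceType { SEQ_STRING = 0, CONS_STRING = 1,
                    FIRST_NONSTRING_TYPE = 64, HEAP_NUMBER_TYPE = 65 };

bool IsString(InstanceType type) {
  return type < FIRST_NONSTRING_TYPE;  // b(ge, ...) takes the non-string path
}

int main() {
  assert(IsString(CONS_STRING));
  assert(!IsString(HEAP_NUMBER_TYPE));
}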
1775 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
1776 ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
1777
1778 Register left = r1;
1779 Register right = r0;
1780 Register scratch1 = r4;
1781 Register scratch2 = r9;
1782 Register scratch3 = r5;
1783 LowDwVfpRegister double_scratch = d0;
1784
1785 Register heap_number_result = no_reg;
1786 Register heap_number_map = r6;
1787 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1788
1789 Label call_runtime;
1790 // Labels for type transition, used for wrong input or output types.
1791 // Both labels are currently bound to the same position. We use two
1792 // different labels to differentiate the causes leading to a type transition.
1793 Label transition;
1794
1795 // Smi-smi fast case.
1796 Label skip;
1797 __ orr(scratch1, left, right);
1798 __ JumpIfNotSmi(scratch1, &skip);
1799 BinaryOpStub_GenerateSmiSmiOperation(masm, op_, scratch2, scratch3);
1800 // Fall through if the result is not a smi.
1801 __ bind(&skip);
1802
1803 switch (op_) {
1804 case Token::ADD:
1805 case Token::SUB:
1806 case Token::MUL:
1807 case Token::DIV:
1808 case Token::MOD: {
1809 // It could be that only SMIs have been seen at either the left
1810 // or the right operand. For precise type feedback, patch the IC
1811 // again if this changes.
1812 if (left_type_ == BinaryOpIC::SMI) {
1813 __ JumpIfNotSmi(left, &transition);
1814 }
1815 if (right_type_ == BinaryOpIC::SMI) {
1816 __ JumpIfNotSmi(right, &transition);
1817 }
1818 // Load both operands and check that they are 32-bit integers.
1819 // Jump to type transition if they are not. The registers r0 and r1 (right
1820 // and left) are preserved for the runtime call.
1821 __ LoadNumberAsInt32Double(
1822 right, d1, heap_number_map, scratch1, d8, &transition);
1823 __ LoadNumberAsInt32Double(
1824 left, d0, heap_number_map, scratch1, d8, &transition);
1825
1826 if (op_ != Token::MOD) {
1827 Label return_heap_number;
1828 switch (op_) {
1829 case Token::ADD:
1830 __ vadd(d5, d0, d1);
1831 break;
1832 case Token::SUB:
1833 __ vsub(d5, d0, d1);
1834 break;
1835 case Token::MUL:
1836 __ vmul(d5, d0, d1);
1837 break;
1838 case Token::DIV:
1839 __ vdiv(d5, d0, d1);
1840 break;
1841 default:
1842 UNREACHABLE();
1843 }
1844
1845 if (result_type_ <= BinaryOpIC::INT32) {
1846 __ TryDoubleToInt32Exact(scratch1, d5, d8);
1847 // If the ne condition is set, result does
1848 // not fit in a 32-bit integer.
1849 __ b(ne, &transition);
1850 // Try to tag the result as a Smi, return heap number on overflow.
1851 __ SmiTag(scratch1, SetCC);
1852 __ b(vs, &return_heap_number);
1853 // Check for minus zero, transition in that case (because we need
1854 // to return a heap number).
1855 Label not_zero;
1856 ASSERT(kSmiTag == 0);
1857 __ b(ne, &not_zero);
1858 __ VmovHigh(scratch2, d5);
1859 __ tst(scratch2, Operand(HeapNumber::kSignMask));
1860 __ b(ne, &transition);
1861 __ bind(&not_zero);
1862 __ mov(r0, scratch1);
1863 __ Ret();
1864 }
1865
1866 __ bind(&return_heap_number);
1867 // Return a heap number, or fall through to type transition or runtime
1868 // call if we can't.
1869 // We are using vfp registers so r5 is available.
1870 heap_number_result = r5;
1871 BinaryOpStub_GenerateHeapResultAllocation(masm,
1872 heap_number_result,
1873 heap_number_map,
1874 scratch1,
1875 scratch2,
1876 &call_runtime,
1877 mode_);
1878 __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
1879 __ vstr(d5, r0, HeapNumber::kValueOffset);
1880 __ mov(r0, heap_number_result);
1881 __ Ret();
1882
1883 // A DIV operation expecting an integer result falls through
1884 // to type transition.
1885
1886 } else {
1887 if (encoded_right_arg_.has_value) {
1888 __ Vmov(d8, fixed_right_arg_value(), scratch1);
1889 __ VFPCompareAndSetFlags(d1, d8);
1890 __ b(ne, &transition);
1891 }
1892
1893 // Allocate a heap number to store the result.
1894 heap_number_result = r5;
1895 BinaryOpStub_GenerateHeapResultAllocation(masm,
1896 heap_number_result,
1897 heap_number_map,
1898 scratch1,
1899 scratch2,
1900 &call_runtime,
1901 mode_);
1902
1903 // Call the C function to handle the double operation.
1904 CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1);
1905 if (FLAG_debug_code) {
1906 __ stop("Unreachable code.");
1907 }
1908
1909 __ b(&call_runtime);
1910 }
1911
1912 break;
1913 }
1914
1915 case Token::BIT_OR:
1916 case Token::BIT_XOR:
1917 case Token::BIT_AND:
1918 case Token::SAR:
1919 case Token::SHR:
1920 case Token::SHL: {
1921 Label return_heap_number;
1922 // Convert operands to 32-bit integers. Right in r2 and left in r3. The
1923 // registers r0 and r1 (right and left) are preserved for the runtime
1924 // call.
1925 __ LoadNumberAsInt32(left, r3, heap_number_map,
1926 scratch1, d0, d1, &transition);
1927 __ LoadNumberAsInt32(right, r2, heap_number_map,
1928 scratch1, d0, d1, &transition);
1929
1930 // The ECMA-262 standard specifies that, for shift operations, only the
1931 // 5 least significant bits of the shift value should be used.
1932 switch (op_) {
1933 case Token::BIT_OR:
1934 __ orr(r2, r3, Operand(r2));
1935 break;
1936 case Token::BIT_XOR:
1937 __ eor(r2, r3, Operand(r2));
1938 break;
1939 case Token::BIT_AND:
1940 __ and_(r2, r3, Operand(r2));
1941 break;
1942 case Token::SAR:
1943 __ and_(r2, r2, Operand(0x1f));
1944 __ mov(r2, Operand(r3, ASR, r2));
1945 break;
1946 case Token::SHR:
1947 __ and_(r2, r2, Operand(0x1f));
1948 __ mov(r2, Operand(r3, LSR, r2), SetCC);
1949 // SHR is special because it is required to produce a positive answer.
1950 // We only get a negative result if the shift value (r2) is 0.
1951 // This result cannot be represented as a signed 32-bit integer, try
1952 // to return a heap number if we can.
1953 __ b(mi, (result_type_ <= BinaryOpIC::INT32)
1954 ? &transition
1955 : &return_heap_number);
1956 break;
1957 case Token::SHL:
1958 __ and_(r2, r2, Operand(0x1f));
1959 __ mov(r2, Operand(r3, LSL, r2));
1960 break;
1961 default:
1962 UNREACHABLE();
1963 }
1964
1965 // Check if the result fits in a smi. If not try to return a heap number.
1966 // (We know the result is an int32).
1967 __ TrySmiTag(r0, r2, &return_heap_number);
1968 __ Ret();
1969
1970 __ bind(&return_heap_number);
1971 heap_number_result = r5;
1972 BinaryOpStub_GenerateHeapResultAllocation(masm,
1973 heap_number_result,
1974 heap_number_map,
1975 scratch1,
1976 scratch2,
1977 &call_runtime,
1978 mode_);
1979
1980 if (op_ != Token::SHR) {
1981 // Convert the result to a floating point value.
1982 __ vmov(double_scratch.low(), r2);
1983 __ vcvt_f64_s32(double_scratch, double_scratch.low());
1984 } else {
1985 // The result must be interpreted as an unsigned 32-bit integer.
1986 __ vmov(double_scratch.low(), r2);
1987 __ vcvt_f64_u32(double_scratch, double_scratch.low());
1988 }
1989
1990 // Store the result.
1991 __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
1992 __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
1993 __ mov(r0, heap_number_result);
1994 __ Ret();
1995
1996 break;
1997 }
1998
1999 default:
2000 UNREACHABLE();
2001 }
2002
2003 // We never expect DIV to yield an integer result, so we always generate
2004 // type transition code for DIV operations expecting an integer result: the
2005 // code will fall through to this type transition.
2006 if (transition.is_linked() ||
2007 ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
2008 __ bind(&transition);
2009 GenerateTypeTransition(masm);
2010 }
2011
2012 __ bind(&call_runtime);
2013 {
2014 FrameScope scope(masm, StackFrame::INTERNAL);
2015 GenerateRegisterArgsPush(masm);
2016 GenerateCallRuntime(masm);
2017 }
2018 __ Ret();
2019 }
2020
2021
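GenerateInt32Stub above leans on TryDoubleToInt32Exact: a double may stay on the int32 path only if truncating it to int32 and converting back reproduces the original value. A sketch of that round-trip test (this simplified version leaves out the -0 case, which the stub checks separately via the sign bit):

#include <cstdio>

bool DoubleIsInt32(double value, int* out) {
  if (!(value >= -2147483648.0 && value <= 2147483647.0)) return false;
  int as_int = static_cast<int>(value);                     // truncate (vcvt)
  if (static_cast<double>(as_int) != value) return false;   // convert back, compare
  *out = as_int;
  return true;
}

int main() {
  int x;
  std::printf("%d\n", DoubleIsInt32(42.0, &x));  // 1
  std::printf("%d\n", DoubleIsInt32(42.5, &x));  // 0: not exact
}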
2022 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
2023 Label call_runtime;
2024
2025 if (op_ == Token::ADD) {
2026 // Handle string addition here, because it is the only operation
2027 // that does not do a ToNumber conversion on the operands.
2028 GenerateAddStrings(masm);
2029 }
2030
2031 // Convert oddball arguments to numbers.
2032 Label check, done;
2033 __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
2034 __ b(ne, &check);
2035 if (Token::IsBitOp(op_)) {
2036 __ mov(r1, Operand(Smi::FromInt(0)));
2037 } else {
2038 __ LoadRoot(r1, Heap::kNanValueRootIndex);
2039 }
2040 __ jmp(&done);
2041 __ bind(&check);
2042 __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
2043 __ b(ne, &done);
2044 if (Token::IsBitOp(op_)) {
2045 __ mov(r0, Operand(Smi::FromInt(0)));
2046 } else {
2047 __ LoadRoot(r0, Heap::kNanValueRootIndex);
2048 }
2049 __ bind(&done);
2050
2051 GenerateNumberStub(masm);
2052 }
2053
2054
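The undefined handling above follows JavaScript's coercion rules: for arithmetic, undefined converts to NaN, while for bitwise and shift operators ToInt32(NaN) is 0, so the stub loads either the NaN root or smi zero depending on Token::IsBitOp. A sketch of that choice:

#include <cmath>
#include <cstdio>

enum Op { ADD, BIT_OR };  // arithmetic vs. bitwise, illustrative subset

double CoerceUndefined(Op op) {
  return (op == BIT_OR) ? 0.0            // the Smi::FromInt(0) path
                        : std::nan("");  // the kNanValueRootIndex path
}

int main() {
  std::printf("undefined + 1 uses lhs %f\n", CoerceUndefined(ADD));     // nan
  std::printf("undefined | 1 uses lhs %f\n", CoerceUndefined(BIT_OR));  // 0
}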
2055 void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
2056 Label call_runtime, transition;
2057 BinaryOpStub_GenerateFPOperation(
2058 masm, left_type_, right_type_, false,
2059 &transition, &call_runtime, &transition, op_, mode_, r6, r4, r5, r9);
2060
2061 __ bind(&transition);
2062 GenerateTypeTransition(masm);
2063
2064 __ bind(&call_runtime);
2065 {
2066 FrameScope scope(masm, StackFrame::INTERNAL);
2067 GenerateRegisterArgsPush(masm);
2068 GenerateCallRuntime(masm);
2069 }
2070 __ Ret();
2071 }
2072
2073
2074 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
2075 Label call_runtime, call_string_add_or_runtime, transition;
2076
2077 BinaryOpStub_GenerateSmiCode(
2078 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_,
2079 r5, r6, r4, r9);
2080
2081 BinaryOpStub_GenerateFPOperation(
2082 masm, left_type_, right_type_, false,
2083 &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_, r6,
2084 r4, r5, r9);
2085
2086 __ bind(&transition);
2087 GenerateTypeTransition(masm);
2088
2089 __ bind(&call_string_add_or_runtime);
2090 if (op_ == Token::ADD) {
2091 GenerateAddStrings(masm);
2092 }
2093
2094 __ bind(&call_runtime);
2095 {
2096 FrameScope scope(masm, StackFrame::INTERNAL);
2097 GenerateRegisterArgsPush(masm);
2098 GenerateCallRuntime(masm);
2099 }
2100 __ Ret();
2101 }
2102
2103
2104 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
2105 ASSERT(op_ == Token::ADD);
2106 Label left_not_string, call_runtime;
2107
2108 Register left = r1;
2109 Register right = r0;
2110
2111 // Check if left argument is a string.
2112 __ JumpIfSmi(left, &left_not_string);
2113 __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
2114 __ b(ge, &left_not_string);
2115
2116 StringAddStub string_add_left_stub(
2117 (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
2118 GenerateRegisterArgsPush(masm);
2119 __ TailCallStub(&string_add_left_stub);
2120
2121 // Left operand is not a string, test right.
2122 __ bind(&left_not_string);
2123 __ JumpIfSmi(right, &call_runtime);
2124 __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
2125 __ b(ge, &call_runtime);
2126
2127 StringAddStub string_add_right_stub(
2128 (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
2129 GenerateRegisterArgsPush(masm);
2130 __ TailCallStub(&string_add_right_stub);
2131
2132 // At least one argument is not a string.
2133 __ bind(&call_runtime);
2134 }
2135
2136
2137 void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
2138 Register result,
2139 Register heap_number_map,
2140 Register scratch1,
2141 Register scratch2,
2142 Label* gc_required,
2143 OverwriteMode mode) {
2144 // Code below will scratch result if allocation fails. To keep both arguments
2145 // intact for the runtime call, result cannot be one of these.
2146 ASSERT(!result.is(r0) && !result.is(r1));
2147
2148 if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
2149 Label skip_allocation, allocated;
2150 Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0;
2151 // If the overwritable operand is already an object, we skip the
2152 // allocation of a heap number.
2153 __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
2154 // Allocate a heap number for the result.
2155 __ AllocateHeapNumber(
2156 result, scratch1, scratch2, heap_number_map, gc_required);
2157 __ b(&allocated);
2158 __ bind(&skip_allocation);
2159 // Use object holding the overwritable operand for result.
2160 __ mov(result, Operand(overwritable_operand));
2161 __ bind(&allocated);
2162 } else {
2163 ASSERT(mode == NO_OVERWRITE);
2164 __ AllocateHeapNumber(
2165 result, scratch1, scratch2, heap_number_map, gc_required);
2166 }
2167 }
2168
2169
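BinaryOpStub_GenerateHeapResultAllocation above avoids allocating when it may overwrite an operand that is already a heap number; smis are immediates with no storage to reuse, so they still force an allocation. A sketch of that decision with simplified stand-in types, not V8's:

#include <cstdio>

enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

struct Value { bool is_smi; double* storage; };

double* ResultSlot(Value left, Value right, OverwriteMode mode,
                   double* fresh_allocation) {
  Value* reusable = (mode == OVERWRITE_LEFT)  ? &left
                  : (mode == OVERWRITE_RIGHT) ? &right : nullptr;
  if (reusable != nullptr && !reusable->is_smi) return reusable->storage;
  return fresh_allocation;  // the AllocateHeapNumber path
}

int main() {
  double box = 0.0, fresh = 0.0;
  Value heap_number{false, &box}, smi{true, nullptr};
  std::printf("%d\n", ResultSlot(heap_number, smi, OVERWRITE_LEFT, &fresh) == &box);  // 1
}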
2170 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
2171 __ Push(r1, r0);
2172 }
2173
2174
2175 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
2176 // Untagged case: double input in d2, double result goes
2177 // into d2.
2178 // Tagged case: tagged input on top of stack and in r0,
2179 // tagged result (heap number) goes into r0.
2180
2181 Label input_not_smi;
2182 Label loaded;
2183 Label calculate;
2184 Label invalid_cache;
(...skipping 422 matching lines...)
2607
2608
2609 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
2610 CEntryStub::GenerateAheadOfTime(isolate);
2611 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
2612 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
2613 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
2614 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
2615 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
2616 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
- 1642 BinaryOpStub::GenerateAheadOfTime(isolate);
2617 }
2618
2619
2620 void CodeStub::GenerateFPStubs(Isolate* isolate) {
2621 SaveFPRegsMode mode = kSaveFPRegs;
2622 CEntryStub save_doubles(1, mode);
2623 StoreBufferOverflowStub stub(mode);
2624 // These stubs might already be in the snapshot, detect that and don't
2625 // regenerate, which would lead to code stub initialization state being messed
2626 // up.
(...skipping 4436 matching lines...)
7063 __ bind(&fast_elements_case);
7064 GenerateCase(masm, FAST_ELEMENTS);
7065 }
7066
7067
7068 #undef __
7069
7070 } } // namespace v8::internal
7071
7072 #endif // V8_TARGET_ARCH_ARM