OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1209 matching lines...)
1220 argument_count); | 1220 argument_count); |
1221 if (save_doubles_ == kSaveFPRegs) { | 1221 if (save_doubles_ == kSaveFPRegs) { |
1222 __ MultiPopFPU(kCallerSavedFPU); | 1222 __ MultiPopFPU(kCallerSavedFPU); |
1223 } | 1223 } |
1224 | 1224 |
1225 __ MultiPop(kJSCallerSaved | ra.bit()); | 1225 __ MultiPop(kJSCallerSaved | ra.bit()); |
1226 __ Ret(); | 1226 __ Ret(); |
1227 } | 1227 } |
1228 | 1228 |
1229 | 1229 |
1230 // Generates code to call a C function to do a double operation. | 1230 void BinaryOpStub::InitializeInterfaceDescriptor( |
1231 // This code never falls through, but returns with a heap number containing | 1231 Isolate* isolate, |
1232 // the result in v0. | 1232 CodeStubInterfaceDescriptor* descriptor) { |
1233 // Register heap_number_result must be a heap number in which the | 1233 static Register registers[] = { a1, a0 }; |
1234 // result of the operation will be stored. | 1234 descriptor->register_param_count_ = 2; |
1235 // Requires the following layout on entry: | 1235 descriptor->register_params_ = registers; |
1236 // a0: Left value (least significant part of mantissa). | 1236 descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss); |
1237 // a1: Left value (sign, exponent, top of mantissa). | 1237 descriptor->SetMissHandler( |
1238 // a2: Right value (least significant part of mantissa). | 1238 ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate)); |
1239 // a3: Right value (sign, exponent, top of mantissa). | |
1240 static void CallCCodeForDoubleOperation(MacroAssembler* masm, | |
1241 Token::Value op, | |
1242 Register heap_number_result, | |
1243 Register scratch) { | |
1244 // Assert that heap_number_result is saved. | |
1245 // We currently always use s0 to pass it. | |
1246 ASSERT(heap_number_result.is(s0)); | |
1247 | |
1248 // Push the current return address before the C call. | |
1249 __ push(ra); | |
1250 __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments. | |
1251 { | |
1252 AllowExternalCallThatCantCauseGC scope(masm); | |
1253 __ CallCFunction( | |
1254 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); | |
1255 } | |
1256 // Store answer in the overwritable heap number. | |
1257 // Double returned in register f0. | |
1258 __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); | |
1259 // Place heap_number_result in v0 and return to the pushed return address. | |
1260 __ pop(ra); | |
1261 __ Ret(USE_DELAY_SLOT); | |
1262 __ mov(v0, heap_number_result); | |
1263 } | 1239 } |
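A note on the deleted helper above: it passed two doubles to C code under the MIPS O32 ABI, where each 64-bit double occupies two 32-bit argument slots, which is why PrepareCallCFunction(4, ...) reserves four slots and CallCFunction(..., 0, 2) declares zero integer and two double arguments. Below is a minimal sketch of the register split described in the old header comment (a0/a1 and a2/a3 each holding one double), assuming a little-endian target; DoubleHalves and Split are illustrative names, not V8 API.

    #include <cstdint>
    #include <cstring>

    // How one IEEE-754 double maps onto two 32-bit registers on
    // little-endian MIPS O32.
    struct DoubleHalves {
      uint32_t lo;  // least significant part of the mantissa (a0 / a2)
      uint32_t hi;  // sign, exponent, top of the mantissa    (a1 / a3)
    };

    DoubleHalves Split(double value) {
      DoubleHalves halves;
      std::memcpy(&halves, &value, sizeof halves);  // bit-exact copy
      return halves;
    }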
1264 | 1240 |
1265 | 1241 |
1266 void BinaryOpStub::Initialize() { | |
1267 platform_specific_bit_ = true; // FPU is a base requirement for V8. | |
1268 } | |
1269 | |
1270 | |
1271 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | |
1272 Label get_result; | |
1273 | |
1274 __ Push(a1, a0); | |
1275 | |
1276 __ li(a2, Operand(Smi::FromInt(MinorKey()))); | |
1277 __ push(a2); | |
1278 | |
1279 __ TailCallExternalReference( | |
1280 ExternalReference(IC_Utility(IC::kBinaryOp_Patch), | |
1281 masm->isolate()), | |
1282 3, | |
1283 1); | |
1284 } | |
1285 | |
1286 | |
1287 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( | |
1288 MacroAssembler* masm) { | |
1289 UNIMPLEMENTED(); | |
1290 } | |
1291 | |
1292 | |
1293 void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, | |
1294 Token::Value op) { | |
1295 Register left = a1; | |
1296 Register right = a0; | |
1297 | |
1298 Register scratch1 = t0; | |
1299 Register scratch2 = t1; | |
1300 | |
1301 ASSERT(right.is(a0)); | |
1302 STATIC_ASSERT(kSmiTag == 0); | |
1303 | |
1304 Label not_smi_result; | |
1305 switch (op) { | |
1306 case Token::ADD: | |
1307 __ AdduAndCheckForOverflow(v0, left, right, scratch1); | |
1308 __ RetOnNoOverflow(scratch1); | |
1309 // No need to revert anything - right and left are intact. | |
1310 break; | |
1311 case Token::SUB: | |
1312 __ SubuAndCheckForOverflow(v0, left, right, scratch1); | |
1313 __ RetOnNoOverflow(scratch1); | |
1314 // No need to revert anything - right and left are intact. | |
1315 break; | |
1316 case Token::MUL: { | |
1317 // Remove tag from one of the operands. This way the multiplication result | |
1318 // will be a smi if it fits the smi range. | |
1319 __ SmiUntag(scratch1, right); | |
1320 // Do multiplication. | |
1321 // lo = lower 32 bits of scratch1 * left. | |
1322 // hi = higher 32 bits of scratch1 * left. | |
1323 __ Mult(left, scratch1); | |
1324 // Check for overflowing the smi range - no overflow if higher 33 bits of | |
1325 // the result are identical. | |
1326 __ mflo(scratch1); | |
1327 __ mfhi(scratch2); | |
1328 __ sra(scratch1, scratch1, 31); | |
1329 __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2)); |
1330 // Go slow on zero result to handle -0. | |
1331 __ mflo(v0); | |
1332 __ Ret(ne, v0, Operand(zero_reg)); | |
1333 // We need -0 if we were multiplying a negative number with 0 to get 0. | |
1334 // We know one of them was zero. | |
1335 __ Addu(scratch2, right, left); | |
1336 Label skip; | |
1337 // ARM uses the 'pl' condition, which is 'ge'. | |
1338 // Negating it results in 'lt'. | |
1339 __ Branch(&skip, lt, scratch2, Operand(zero_reg)); | |
1340 ASSERT(Smi::FromInt(0) == 0); | |
1341 __ Ret(USE_DELAY_SLOT); | |
1342 __ mov(v0, zero_reg); // Return smi 0 if the non-zero one was positive. | |
1343 __ bind(&skip); | |
1344 // We fall through here if we multiplied a negative number with 0, because | |
1345 // that would mean we should produce -0. | |
1346 } | |
1347 break; | |
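A note on the MUL case above: the 64-bit product delivered by Mult fits a signed 32-bit value exactly when the high word (mfhi) equals the sign extension of the low word (sra by 31), and a zero product goes to the slow path because a smi cannot encode the -0 that the spec requires for 0 times a negative number. A hedged sketch of the fit test, not V8 code:

    #include <cstdint>

    // Mirrors mflo/mfhi plus the sra-and-compare: the product fits in
    // 32 signed bits iff hi equals lo's sign extension.
    bool ProductFitsInt32(int32_t tagged_left, int32_t untagged_right) {
      int64_t product =
          static_cast<int64_t>(tagged_left) * untagged_right;  // Mult
      int32_t lo = static_cast<int32_t>(product);              // mflo
      int32_t hi = static_cast<int32_t>(product >> 32);        // mfhi
      return (lo >> 31) == hi;  // sra(scratch1, scratch1, 31) vs. mfhi
    }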
1348 case Token::DIV: { | |
1349 Label done; | |
1350 __ SmiUntag(scratch2, right); | |
1351 __ SmiUntag(scratch1, left); | |
1352 __ Div(scratch1, scratch2); | |
1353 // A minor optimization: div may be calculated asynchronously, so we check | |
1354 // for division by zero before getting the result. | |
1355 __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg)); |
1356 // If the result is 0, we need to make sure the divisor (right) is |
1357 // positive, otherwise it is a -0 case. |
1358 // Quotient is in 'lo', remainder is in 'hi'. | |
1359 // Check for no remainder first. | |
1360 __ mfhi(scratch1); | |
1361 __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg)); |
1362 __ mflo(scratch1); | |
1363 __ Branch(&done, ne, scratch1, Operand(zero_reg)); | |
1364 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg)); |
1365 __ bind(&done); | |
1366 // Check that the signed result fits in a Smi. | |
1367 __ Addu(scratch2, scratch1, Operand(0x40000000)); | |
1368 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg)); |
1369 __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot. | |
1370 __ SmiTag(v0, scratch1); | |
1371 } | |
1372 break; | |
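A note on the Addu(..., 0x40000000) idiom used here and again in the MOD, SHR, and SHL cases below: a 32-bit value can be smi-tagged (the tagging shift must not lose the sign) exactly when it lies in [-2^30, 2^30), and adding 2^30 leaves the sign bit clear precisely for that range. A sketch under those assumptions, using unsigned arithmetic to keep the wraparound well defined:

    #include <cstdint>

    // True iff v survives SmiTag (a left shift by one) without losing
    // its sign, i.e. v is in [-2^30, 2^30).
    bool FitsSmi(int32_t v) {
      uint32_t shifted = static_cast<uint32_t>(v) + 0x40000000u;
      return static_cast<int32_t>(shifted) >= 0;  // sign bit still clear
    }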
1373 case Token::MOD: { | |
1374 Label done; | |
1375 __ SmiUntag(scratch2, right); | |
1376 __ SmiUntag(scratch1, left); | |
1377 __ Div(scratch1, scratch2); | |
1378 // A minor optimization: div may be calculated asynchronously, so we check | |
1379 // for division by 0 before calling mfhi. | |
1380 // Check for zero on the right hand side. | |
1381 __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg)); |
1382 // If the result is 0, we need to make sure the dividend (left) is | |
1383 // positive (or 0), otherwise it is a -0 case. | |
1384 // Remainder is in 'hi'. | |
1385 __ mfhi(scratch2); | |
1386 __ Branch(&done, ne, scratch2, Operand(zero_reg)); | |
1387 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg)); |
1388 __ bind(&done); | |
1389 // Check that the signed result fits in a Smi. | |
1390 __ Addu(scratch1, scratch2, Operand(0x40000000)); | |
1391 __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg)); |
1392 __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot. | |
1393 __ SmiTag(v0, scratch2); | |
1394 } | |
1395 break; | |
1396 case Token::BIT_OR: | |
1397 __ Ret(USE_DELAY_SLOT); | |
1398 __ or_(v0, left, right); | |
1399 break; | |
1400 case Token::BIT_AND: | |
1401 __ Ret(USE_DELAY_SLOT); | |
1402 __ and_(v0, left, right); | |
1403 break; | |
1404 case Token::BIT_XOR: | |
1405 __ Ret(USE_DELAY_SLOT); | |
1406 __ xor_(v0, left, right); | |
1407 break; | |
1408 case Token::SAR: | |
1409 // Remove tags from right operand. | |
1410 __ GetLeastBitsFromSmi(scratch1, right, 5); | |
1411 __ srav(scratch1, left, scratch1); | |
1412 // Smi tag result. | |
1413 __ And(v0, scratch1, ~kSmiTagMask); | |
1414 __ Ret(); | |
1415 break; | |
1416 case Token::SHR: | |
1417 // Remove tags from operands. We can't do this on a 31 bit number | |
1418 // because then the 0s get shifted into bit 30 instead of bit 31. | |
1419 __ SmiUntag(scratch1, left); | |
1420 __ GetLeastBitsFromSmi(scratch2, right, 5); | |
1421 __ srlv(v0, scratch1, scratch2); | |
1422 // Unsigned shift is not allowed to produce a negative number, so | |
1423 // check the sign bit and the sign bit after Smi tagging. | |
1424 __ And(scratch1, v0, Operand(0xc0000000)); | |
1425 __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg)); |
1426 // Smi tag result. | |
1427 __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot. | |
1428 __ SmiTag(v0); | |
1429 break; | |
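A note on the 0xc0000000 mask in the SHR case above: a logical shift can yield any unsigned 32-bit value, and anything with either of the top two bits set is at least 2^30, so it can be neither smi-tagged nor read back as non-negative. A minimal sketch:

    #include <cstdint>

    // True iff an unsigned shift result can be smi-tagged and still
    // read back as non-negative, i.e. result < 2^30.
    bool ShrResultFitsSmi(uint32_t result) {
      return (result & 0xc0000000u) == 0;
    }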
1430 case Token::SHL: | |
1431 // Remove tags from operands. | |
1432 __ SmiUntag(scratch1, left); | |
1433 __ GetLeastBitsFromSmi(scratch2, right, 5); | |
1434 __ sllv(scratch1, scratch1, scratch2); | |
1435 // Check that the signed result fits in a Smi. | |
1436 __ Addu(scratch2, scratch1, Operand(0x40000000)); | |
1437 __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg)); |
1438 __ Ret(USE_DELAY_SLOT); | |
1439 __ SmiTag(v0, scratch1); // SmiTag emits one instruction in delay slot. | |
1440 break; | |
1441 default: | |
1442 UNREACHABLE(); | |
1443 } | |
1444 __ bind(&not_smi_result); |
1445 } | |
1446 | |
1447 | |
1448 void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, | |
1449 Register result, | |
1450 Register heap_number_map, | |
1451 Register scratch1, | |
1452 Register scratch2, | |
1453 Label* gc_required, | |
1454 OverwriteMode mode); | |
1455 | |
1456 | |
1457 void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, | |
1458 BinaryOpIC::TypeInfo left_type, | |
1459 BinaryOpIC::TypeInfo right_type, | |
1460 bool smi_operands, | |
1461 Label* not_numbers, | |
1462 Label* gc_required, | |
1463 Label* miss, | |
1464 Token::Value op, | |
1465 OverwriteMode mode) { | |
1466 Register left = a1; | |
1467 Register right = a0; | |
1468 Register scratch1 = t3; | |
1469 Register scratch2 = t5; | |
1470 | |
1471 ASSERT(smi_operands || (not_numbers != NULL)); | |
1472 if (smi_operands) { | |
1473 __ AssertSmi(left); | |
1474 __ AssertSmi(right); | |
1475 } | |
1476 if (left_type == BinaryOpIC::SMI) { | |
1477 __ JumpIfNotSmi(left, miss); | |
1478 } | |
1479 if (right_type == BinaryOpIC::SMI) { | |
1480 __ JumpIfNotSmi(right, miss); | |
1481 } | |
1482 | |
1483 Register heap_number_map = t2; | |
1484 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
1485 | |
1486 switch (op) { | |
1487 case Token::ADD: | |
1488 case Token::SUB: | |
1489 case Token::MUL: | |
1490 case Token::DIV: | |
1491 case Token::MOD: { | |
1492 // Allocate new heap number for result. | |
1493 Register result = s0; | |
1494 BinaryOpStub_GenerateHeapResultAllocation( | |
1495 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); | |
1496 | |
1497 // Load left and right operands into f12 and f14. | |
1498 if (smi_operands) { | |
1499 __ SmiUntag(scratch1, a0); | |
1500 __ mtc1(scratch1, f14); | |
1501 __ cvt_d_w(f14, f14); | |
1502 __ SmiUntag(scratch1, a1); | |
1503 __ mtc1(scratch1, f12); | |
1504 __ cvt_d_w(f12, f12); | |
1505 } else { | |
1506 // Load right operand to f14. | |
1507 if (right_type == BinaryOpIC::INT32) { | |
1508 __ LoadNumberAsInt32Double( | |
1509 right, f14, heap_number_map, scratch1, scratch2, f2, miss); | |
1510 } else { | |
1511 Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers; | |
1512 __ LoadNumber(right, f14, heap_number_map, scratch1, fail); | |
1513 } | |
1514 // Load left operand to f12 or a0/a1. This keeps a0/a1 intact if it | |
1515 // jumps to |miss|. | |
1516 if (left_type == BinaryOpIC::INT32) { | |
1517 __ LoadNumberAsInt32Double( | |
1518 left, f12, heap_number_map, scratch1, scratch2, f2, miss); | |
1519 } else { | |
1520 Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers; | |
1521 __ LoadNumber(left, f12, heap_number_map, scratch1, fail); | |
1522 } | |
1523 } | |
1524 | |
1525 // Calculate the result. | |
1526 if (op != Token::MOD) { | |
1527 // Using FPU registers: | |
1528 // f12: Left value. | |
1529 // f14: Right value. | |
1530 switch (op) { | |
1531 case Token::ADD: | |
1532 __ add_d(f10, f12, f14); | |
1533 break; | |
1534 case Token::SUB: | |
1535 __ sub_d(f10, f12, f14); | |
1536 break; | |
1537 case Token::MUL: | |
1538 __ mul_d(f10, f12, f14); | |
1539 break; | |
1540 case Token::DIV: | |
1541 __ div_d(f10, f12, f14); | |
1542 break; | |
1543 default: | |
1544 UNREACHABLE(); | |
1545 } | |
1546 | |
1547 // ARM uses a workaround here because of the unaligned HeapNumber | |
1548 // kValueOffset. On MIPS this workaround is built into sdc1 so | |
1549 // there's no point in generating even more instructions. | |
1550 __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset)); | |
1551 __ Ret(USE_DELAY_SLOT); | |
1552 __ mov(v0, result); | |
1553 } else { | |
1554 // Call the C function to handle the double operation. | |
1555 CallCCodeForDoubleOperation(masm, op, result, scratch1); | |
1556 if (FLAG_debug_code) { | |
1557 __ stop("Unreachable code."); | |
1558 } | |
1559 } | |
1560 break; | |
1561 } | |
1562 case Token::BIT_OR: | |
1563 case Token::BIT_XOR: | |
1564 case Token::BIT_AND: | |
1565 case Token::SAR: | |
1566 case Token::SHR: | |
1567 case Token::SHL: { | |
1568 if (smi_operands) { | |
1569 __ SmiUntag(a3, left); | |
1570 __ SmiUntag(a2, right); | |
1571 } else { | |
1572 // Convert operands to 32-bit integers. Right in a2 and left in a3. | |
1573 __ TruncateNumberToI(left, a3, heap_number_map, scratch1, not_numbers); | |
1574 __ TruncateNumberToI(right, a2, heap_number_map, scratch1, not_numbers); | |
1575 } | |
1576 Label result_not_a_smi; | |
1577 switch (op) { | |
1578 case Token::BIT_OR: | |
1579 __ Or(a2, a3, Operand(a2)); | |
1580 break; | |
1581 case Token::BIT_XOR: | |
1582 __ Xor(a2, a3, Operand(a2)); | |
1583 break; | |
1584 case Token::BIT_AND: | |
1585 __ And(a2, a3, Operand(a2)); | |
1586 break; | |
1587 case Token::SAR: | |
1588 // Use only the 5 least significant bits of the shift count. | |
1589 __ GetLeastBitsFromInt32(a2, a2, 5); | |
1590 __ srav(a2, a3, a2); | |
1591 break; | |
1592 case Token::SHR: | |
1593 // Use only the 5 least significant bits of the shift count. | |
1594 __ GetLeastBitsFromInt32(a2, a2, 5); | |
1595 __ srlv(a2, a3, a2); | |
1596 // SHR is special because it is required to produce a positive answer. | |
1597 // The code below for writing into heap numbers isn't capable of | |
1598 // writing the register as an unsigned int so we go to slow case if we | |
1599 // hit this case. | |
1600 __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg)); | |
1601 break; | |
1602 case Token::SHL: | |
1603 // Use only the 5 least significant bits of the shift count. | |
1604 __ GetLeastBitsFromInt32(a2, a2, 5); | |
1605 __ sllv(a2, a3, a2); | |
1606 break; | |
1607 default: | |
1608 UNREACHABLE(); | |
1609 } | |
1610 // Check that the *signed* result fits in a smi. | |
1611 __ Addu(a3, a2, Operand(0x40000000)); | |
1612 __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg)); | |
1613 __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot. | |
1614 __ SmiTag(v0, a2); | |
1615 | |
1616 // Allocate new heap number for result. | |
1617 __ bind(&result_not_a_smi); | |
1618 Register result = t1; | |
1619 if (smi_operands) { | |
1620 __ AllocateHeapNumber( | |
1621 result, scratch1, scratch2, heap_number_map, gc_required); | |
1622 } else { | |
1623 BinaryOpStub_GenerateHeapResultAllocation( | |
1624 masm, result, heap_number_map, scratch1, scratch2, gc_required, | |
1625 mode); | |
1626 } | |
1627 | |
1628 // a2: Answer as signed int32. | |
1629 // t1: Heap number to write answer into. | |
1630 | |
1631 // Nothing can go wrong now, so move the heap number to v0, which is the | |
1632 // result. | |
1633 __ mov(v0, t1); | |
1634 // Convert the int32 in a2 to the heap number in v0. As |
1635 // mentioned above SHR needs to always produce a positive result. | |
1636 __ mtc1(a2, f0); | |
1637 if (op == Token::SHR) { | |
1638 __ Cvt_d_uw(f0, f0, f22); | |
1639 } else { | |
1640 __ cvt_d_w(f0, f0); | |
1641 } | |
1642 // ARM uses a workaround here because of the unaligned HeapNumber | |
1643 // kValueOffset. On MIPS this workaround is built into sdc1 so | |
1644 // there's no point in generating even more instructions. | |
1645 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); | |
1646 __ Ret(); | |
1647 break; | |
1648 } | |
1649 default: | |
1650 UNREACHABLE(); | |
1651 } | |
1652 } | |
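A note on the Cvt_d_uw branch in the SHR path above: the shift result is defined on the unsigned interpretation of the 32-bit word, so converting the raw bits as signed would turn 0xffffffff into -1 instead of 4294967295. A sketch of the distinction in plain C++, not the macro assembler:

    #include <cstdint>

    // cvt_d_w-style conversion: the word is taken as signed.
    double SignedWordToDouble(int32_t raw) {
      return static_cast<double>(raw);
    }

    // Cvt_d_uw-style conversion: the same bits taken as unsigned,
    // which is what SHR's result requires.
    double UnsignedWordToDouble(int32_t raw) {
      return static_cast<double>(static_cast<uint32_t>(raw));
    }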
1653 | |
1654 | |
1655 // Generate the smi code. If the operation on smis is successful this return is |
1656 // generated. If the result is not a smi and heap number allocation is not | |
1657 // requested the code falls through. If number allocation is requested but a | |
1658 // heap number cannot be allocated the code jumps to the label gc_required. | |
1659 void BinaryOpStub_GenerateSmiCode( | |
1660 MacroAssembler* masm, | |
1661 Label* use_runtime, | |
1662 Label* gc_required, | |
1663 Token::Value op, | |
1664 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, | |
1665 OverwriteMode mode) { | |
1666 Label not_smis; | |
1667 | |
1668 Register left = a1; | |
1669 Register right = a0; | |
1670 Register scratch1 = t3; | |
1671 | |
1672 // Perform combined smi check on both operands. | |
1673 __ Or(scratch1, left, Operand(right)); | |
1674 STATIC_ASSERT(kSmiTag == 0); | |
1675 __ JumpIfNotSmi(scratch1, &not_smis); |
1676 | |
1677 // If the smi-smi operation results in a smi, a return is generated. |
1678 BinaryOpStub_GenerateSmiSmiOperation(masm, op); | |
1679 | |
1680 // If heap number results are possible, generate the result in an allocated |
1681 // heap number. | |
1682 if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { | |
1683 BinaryOpStub_GenerateFPOperation( | |
1684 masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true, | |
1685 use_runtime, gc_required, &not_smis, op, mode); |
1686 } | |
1687 __ bind(&not_smis); |
1688 } | |
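A note on the combined smi check at the top of this helper: since kSmiTag == 0, ORing the operands preserves a set tag bit from either one, so a single JumpIfNotSmi on the OR covers both operands. Sketch, assuming the 32-bit smi encoding where the tag is the low bit:

    #include <cstdint>

    // Both values are smis iff neither has the low tag bit set.
    bool BothSmis(uint32_t left, uint32_t right) {
      const uint32_t kSmiTagMask = 1;  // assumption: 32-bit V8 encoding
      return ((left | right) & kSmiTagMask) == 0;
    }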
1689 | |
1690 | |
1691 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { | |
1692 Label right_arg_changed, call_runtime; | |
1693 | |
1694 if (op_ == Token::MOD && encoded_right_arg_.has_value) { | |
1695 // It is guaranteed that the value will fit into a Smi, because if it | |
1696 // didn't, we wouldn't be here, see BinaryOp_Patch. | |
1697 __ Branch(&right_arg_changed, | |
1698 ne, | |
1699 a0, | |
1700 Operand(Smi::FromInt(fixed_right_arg_value()))); | |
1701 } | |
1702 | |
1703 if (result_type_ == BinaryOpIC::UNINITIALIZED || | |
1704 result_type_ == BinaryOpIC::SMI) { | |
1705 // Only allow smi results. | |
1706 BinaryOpStub_GenerateSmiCode( | |
1707 masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_); | |
1708 } else { | |
1709 // Allow heap number result and don't make a transition if a heap number | |
1710 // cannot be allocated. | |
1711 BinaryOpStub_GenerateSmiCode( | |
1712 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, | |
1713 mode_); | |
1714 } | |
1715 | |
1716 // Code falls through if the result is not returned as either a smi or heap | |
1717 // number. | |
1718 __ bind(&right_arg_changed); | |
1719 GenerateTypeTransition(masm); | |
1720 | |
1721 __ bind(&call_runtime); | |
1722 { | |
1723 FrameScope scope(masm, StackFrame::INTERNAL); | |
1724 GenerateRegisterArgsPush(masm); | |
1725 GenerateCallRuntime(masm); | |
1726 } | |
1727 __ Ret(); | |
1728 } | |
1729 | |
1730 | |
1731 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { | |
1732 Label call_runtime; | |
1733 ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); | |
1734 ASSERT(op_ == Token::ADD); | |
1735 // If both arguments are strings, call the string add stub. | |
1736 // Otherwise, do a transition. | |
1737 | |
1738 // Registers containing left and right operands respectively. | |
1739 Register left = a1; | |
1740 Register right = a0; | |
1741 | |
1742 // Test if left operand is a string. | |
1743 __ JumpIfSmi(left, &call_runtime); | |
1744 __ GetObjectType(left, a2, a2); | |
1745 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE)); | |
1746 | |
1747 // Test if right operand is a string. | |
1748 __ JumpIfSmi(right, &call_runtime); | |
1749 __ GetObjectType(right, a2, a2); | |
1750 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE)); | |
1751 | |
1752 StringAddStub string_add_stub( | |
1753 (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME)); | |
1754 GenerateRegisterArgsPush(masm); | |
1755 __ TailCallStub(&string_add_stub); | |
1756 | |
1757 __ bind(&call_runtime); | |
1758 GenerateTypeTransition(masm); | |
1759 } | |
1760 | |
1761 | |
1762 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { | |
1763 ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); | |
1764 | |
1765 Register left = a1; | |
1766 Register right = a0; | |
1767 Register scratch1 = t3; | |
1768 Register scratch2 = t5; | |
1769 FPURegister double_scratch = f0; | |
1770 FPURegister single_scratch = f6; | |
1771 | |
1772 Register heap_number_result = no_reg; | |
1773 Register heap_number_map = t2; | |
1774 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
1775 | |
1776 Label call_runtime; | |
1777 // Labels for type transition, used for wrong input or output types. | |
1778 // Both labels are currently bound to the same position. We use two |
1779 // different labels to differentiate the cause leading to the type transition. |
1780 Label transition; | |
1781 | |
1782 // Smi-smi fast case. | |
1783 Label skip; | |
1784 __ Or(scratch1, left, right); | |
1785 __ JumpIfNotSmi(scratch1, &skip); | |
1786 BinaryOpStub_GenerateSmiSmiOperation(masm, op_); | |
1787 // Fall through if the result is not a smi. | |
1788 __ bind(&skip); | |
1789 | |
1790 switch (op_) { | |
1791 case Token::ADD: | |
1792 case Token::SUB: | |
1793 case Token::MUL: | |
1794 case Token::DIV: | |
1795 case Token::MOD: { | |
1796 // It could be that only SMIs have been seen at either the left | |
1797 // or the right operand. For precise type feedback, patch the IC | |
1798 // again if this changes. | |
1799 if (left_type_ == BinaryOpIC::SMI) { | |
1800 __ JumpIfNotSmi(left, &transition); | |
1801 } | |
1802 if (right_type_ == BinaryOpIC::SMI) { | |
1803 __ JumpIfNotSmi(right, &transition); | |
1804 } | |
1805 // Load both operands and check that they are 32-bit integers. |
1806 // Jump to type transition if they are not. The registers a0 and a1 (right | |
1807 // and left) are preserved for the runtime call. | |
1808 | |
1809 __ LoadNumberAsInt32Double( | |
1810 right, f14, heap_number_map, scratch1, scratch2, f2, &transition); | |
1811 __ LoadNumberAsInt32Double( | |
1812 left, f12, heap_number_map, scratch1, scratch2, f2, &transition); | |
1813 | |
1814 if (op_ != Token::MOD) { | |
1815 Label return_heap_number; | |
1816 switch (op_) { | |
1817 case Token::ADD: | |
1818 __ add_d(f10, f12, f14); | |
1819 break; | |
1820 case Token::SUB: | |
1821 __ sub_d(f10, f12, f14); | |
1822 break; | |
1823 case Token::MUL: | |
1824 __ mul_d(f10, f12, f14); | |
1825 break; | |
1826 case Token::DIV: | |
1827 __ div_d(f10, f12, f14); | |
1828 break; | |
1829 default: | |
1830 UNREACHABLE(); | |
1831 } | |
1832 | |
1833 if (result_type_ <= BinaryOpIC::INT32) { | |
1834 Register except_flag = scratch2; | |
1835 const FPURoundingMode kRoundingMode = op_ == Token::DIV ? | |
1836 kRoundToMinusInf : kRoundToZero; | |
1837 const CheckForInexactConversion kConversion = op_ == Token::DIV ? | |
1838 kCheckForInexactConversion : kDontCheckForInexactConversion; | |
1839 __ EmitFPUTruncate(kRoundingMode, | |
1840 scratch1, | |
1841 f10, | |
1842 at, | |
1843 f16, | |
1844 except_flag, | |
1845 kConversion); | |
1846 // If except_flag != 0, result does not fit in a 32-bit integer. | |
1847 __ Branch(&transition, ne, except_flag, Operand(zero_reg)); | |
1848 // Try to tag the result as a Smi, return heap number on overflow. | |
1849 __ SmiTagCheckOverflow(scratch1, scratch1, scratch2); | |
1850 __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg)); | |
1851 // Check for minus zero, transition in that case (because we need | |
1852 // to return a heap number). | |
1853 Label not_zero; | |
1854 ASSERT(kSmiTag == 0); | |
1855 __ Branch(&not_zero, ne, scratch1, Operand(zero_reg)); |
1856 __ mfc1(scratch2, f11); | |
1857 __ And(scratch2, scratch2, HeapNumber::kSignMask); | |
1858 __ Branch(&transition, ne, scratch2, Operand(zero_reg)); | |
1859 __ bind(&not_zero); |
1860 | |
1861 __ Ret(USE_DELAY_SLOT); | |
1862 __ mov(v0, scratch1); | |
1863 } | |
1864 | |
1865 __ bind(&return_heap_number); | |
1866 // Return a heap number, or fall through to type transition or runtime | |
1867 // call if we can't. | |
1868 // We are using FPU registers so s0 is available. | |
1869 heap_number_result = s0; | |
1870 BinaryOpStub_GenerateHeapResultAllocation(masm, | |
1871 heap_number_result, | |
1872 heap_number_map, | |
1873 scratch1, | |
1874 scratch2, | |
1875 &call_runtime, | |
1876 mode_); | |
1877 __ sdc1(f10, | |
1878 FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); | |
1879 __ Ret(USE_DELAY_SLOT); | |
1880 __ mov(v0, heap_number_result); | |
1881 | |
1882 // A DIV operation expecting an integer result falls through | |
1883 // to type transition. | |
1884 | |
1885 } else { | |
1886 if (encoded_right_arg_.has_value) { | |
1887 __ Move(f16, fixed_right_arg_value()); | |
1888 __ BranchF(&transition, NULL, ne, f14, f16); | |
1889 } | |
1890 | |
1891 Label pop_and_call_runtime; | |
1892 | |
1893 // Allocate a heap number to store the result. | |
1894 heap_number_result = s0; | |
1895 BinaryOpStub_GenerateHeapResultAllocation(masm, | |
1896 heap_number_result, | |
1897 heap_number_map, | |
1898 scratch1, | |
1899 scratch2, | |
1900 &pop_and_call_runtime, | |
1901 mode_); | |
1902 | |
1903 // Call the C function to handle the double operation. | |
1904 CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1); | |
1905 if (FLAG_debug_code) { | |
1906 __ stop("Unreachable code."); | |
1907 } | |
1908 | |
1909 __ bind(&pop_and_call_runtime); | |
1910 __ Drop(2); | |
1911 __ Branch(&call_runtime); | |
1912 } | |
1913 | |
1914 break; | |
1915 } | |
1916 | |
1917 case Token::BIT_OR: | |
1918 case Token::BIT_XOR: | |
1919 case Token::BIT_AND: | |
1920 case Token::SAR: | |
1921 case Token::SHR: | |
1922 case Token::SHL: { | |
1923 Label return_heap_number; | |
1924 // Convert operands to 32-bit integers. Right in a2 and left in a3. The | |
1925 // registers a0 and a1 (right and left) are preserved for the runtime | |
1926 // call. | |
1927 __ LoadNumberAsInt32( | |
1928 left, a3, heap_number_map, scratch1, scratch2, f0, f2, &transition); | |
1929 __ LoadNumberAsInt32( | |
1930 right, a2, heap_number_map, scratch1, scratch2, f0, f2, &transition); | |
1931 | |
1932 // The ECMA-262 standard specifies that, for shift operations, only the | |
1933 // 5 least significant bits of the shift value should be used. | |
1934 switch (op_) { | |
1935 case Token::BIT_OR: | |
1936 __ Or(a2, a3, Operand(a2)); | |
1937 break; | |
1938 case Token::BIT_XOR: | |
1939 __ Xor(a2, a3, Operand(a2)); | |
1940 break; | |
1941 case Token::BIT_AND: | |
1942 __ And(a2, a3, Operand(a2)); | |
1943 break; | |
1944 case Token::SAR: | |
1945 __ And(a2, a2, Operand(0x1f)); | |
1946 __ srav(a2, a3, a2); | |
1947 break; | |
1948 case Token::SHR: | |
1949 __ And(a2, a2, Operand(0x1f)); | |
1950 __ srlv(a2, a3, a2); | |
1951 // SHR is special because it is required to produce a positive answer. | |
1952 // We only get a negative result if the shift value (a2) is 0. | |
1953 // This result cannot be represented as a signed 32-bit integer, try |
1954 // to return a heap number if we can. | |
1955 __ Branch((result_type_ <= BinaryOpIC::INT32) | |
1956 ? &transition | |
1957 : &return_heap_number, | |
1958 lt, | |
1959 a2, | |
1960 Operand(zero_reg)); | |
1961 break; | |
1962 case Token::SHL: | |
1963 __ And(a2, a2, Operand(0x1f)); | |
1964 __ sllv(a2, a3, a2); | |
1965 break; | |
1966 default: | |
1967 UNREACHABLE(); | |
1968 } | |
1969 | |
1970 // Check if the result fits in a smi. | |
1971 __ Addu(scratch1, a2, Operand(0x40000000)); | |
1972 // If not try to return a heap number. (We know the result is an int32.) | |
1973 __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg)); | |
1974 // Tag the result and return. | |
1975 __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot. | |
1976 __ SmiTag(v0, a2); | |
1977 | |
1978 __ bind(&return_heap_number); | |
1979 heap_number_result = t1; | |
1980 BinaryOpStub_GenerateHeapResultAllocation(masm, | |
1981 heap_number_result, | |
1982 heap_number_map, | |
1983 scratch1, | |
1984 scratch2, | |
1985 &call_runtime, | |
1986 mode_); | |
1987 | |
1988 if (op_ != Token::SHR) { | |
1989 // Convert the result to a floating point value. | |
1990 __ mtc1(a2, double_scratch); | |
1991 __ cvt_d_w(double_scratch, double_scratch); | |
1992 } else { | |
1993 // The result must be interpreted as an unsigned 32-bit integer. | |
1994 __ mtc1(a2, double_scratch); | |
1995 __ Cvt_d_uw(double_scratch, double_scratch, single_scratch); | |
1996 } | |
1997 | |
1998 // Store the result. | |
1999 __ sdc1(double_scratch, | |
2000 FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); | |
2001 __ Ret(USE_DELAY_SLOT); | |
2002 __ mov(v0, heap_number_result); | |
2003 | |
2004 break; | |
2005 } | |
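A note on the And(a2, a2, Operand(0x1f)) before each variable shift above: it implements the ECMA-262 rule cited in the comment, namely that only the five least significant bits of the shift count are observable, so shifting by 33 behaves like shifting by 1. The same semantics in plain C++:

    #include <cstdint>

    // JavaScript `<<`: the count is taken modulo 32, matching
    // And(a2, a2, Operand(0x1f)) followed by sllv.
    int32_t JsShiftLeft(int32_t value, uint32_t count) {
      return static_cast<int32_t>(static_cast<uint32_t>(value)
                                  << (count & 0x1f));
    }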
2006 | |
2007 default: | |
2008 UNREACHABLE(); | |
2009 } | |
2010 | |
2011 // We never expect DIV to yield an integer result, so we always generate | |
2012 // type transition code for DIV operations expecting an integer result: the | |
2013 // code will fall through to this type transition. | |
2014 if (transition.is_linked() || | |
2015 ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) { | |
2016 __ bind(&transition); | |
2017 GenerateTypeTransition(masm); | |
2018 } | |
2019 | |
2020 __ bind(&call_runtime); | |
2021 { | |
2022 FrameScope scope(masm, StackFrame::INTERNAL); | |
2023 GenerateRegisterArgsPush(masm); | |
2024 GenerateCallRuntime(masm); | |
2025 } | |
2026 __ Ret(); | |
2027 } | |
2028 | |
2029 | |
2030 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { | |
2031 Label call_runtime; | |
2032 | |
2033 if (op_ == Token::ADD) { | |
2034 // Handle string addition here, because it is the only operation | |
2035 // that does not do a ToNumber conversion on the operands. | |
2036 GenerateAddStrings(masm); | |
2037 } | |
2038 | |
2039 // Convert oddball arguments to numbers. | |
2040 Label check, done; | |
2041 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); | |
2042 __ Branch(&check, ne, a1, Operand(t0)); | |
2043 if (Token::IsBitOp(op_)) { | |
2044 __ li(a1, Operand(Smi::FromInt(0))); | |
2045 } else { | |
2046 __ LoadRoot(a1, Heap::kNanValueRootIndex); | |
2047 } | |
2048 __ jmp(&done); | |
2049 __ bind(&check); | |
2050 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); | |
2051 __ Branch(&done, ne, a0, Operand(t0)); | |
2052 if (Token::IsBitOp(op_)) { | |
2053 __ li(a0, Operand(Smi::FromInt(0))); | |
2054 } else { | |
2055 __ LoadRoot(a0, Heap::kNanValueRootIndex); | |
2056 } | |
2057 __ bind(&done); | |
2058 | |
2059 GenerateNumberStub(masm); | |
2060 } | |
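A note on the oddball substitution above: bitwise operators truncate their operands through ToInt32, where NaN becomes 0, so undefined may be replaced by smi 0 directly; every other operator must see ToNumber(undefined), which is NaN. A hedged sketch of that decision (UndefinedOperandValue is an illustrative name, not a V8 function):

    #include <limits>

    // undefined | 0 evaluates to 0, while undefined + 1 is NaN.
    double UndefinedOperandValue(bool is_bit_op) {
      return is_bit_op ? 0.0
                       : std::numeric_limits<double>::quiet_NaN();
    }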
2061 | |
2062 | |
2063 void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { | |
2064 Label call_runtime, transition; | |
2065 BinaryOpStub_GenerateFPOperation( | |
2066 masm, left_type_, right_type_, false, | |
2067 &transition, &call_runtime, &transition, op_, mode_); | |
2068 | |
2069 __ bind(&transition); | |
2070 GenerateTypeTransition(masm); | |
2071 | |
2072 __ bind(&call_runtime); | |
2073 { | |
2074 FrameScope scope(masm, StackFrame::INTERNAL); | |
2075 GenerateRegisterArgsPush(masm); | |
2076 GenerateCallRuntime(masm); | |
2077 } | |
2078 __ Ret(); | |
2079 } | |
2080 | |
2081 | |
2082 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { | |
2083 Label call_runtime, call_string_add_or_runtime, transition; | |
2084 | |
2085 BinaryOpStub_GenerateSmiCode( | |
2086 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_); | |
2087 | |
2088 BinaryOpStub_GenerateFPOperation( | |
2089 masm, left_type_, right_type_, false, | |
2090 &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_); | |
2091 | |
2092 __ bind(&transition); | |
2093 GenerateTypeTransition(masm); | |
2094 | |
2095 __ bind(&call_string_add_or_runtime); | |
2096 if (op_ == Token::ADD) { | |
2097 GenerateAddStrings(masm); | |
2098 } | |
2099 | |
2100 __ bind(&call_runtime); | |
2101 { | |
2102 FrameScope scope(masm, StackFrame::INTERNAL); | |
2103 GenerateRegisterArgsPush(masm); | |
2104 GenerateCallRuntime(masm); | |
2105 } | |
2106 __ Ret(); | |
2107 } | |
2108 | |
2109 | |
2110 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { | |
2111 ASSERT(op_ == Token::ADD); | |
2112 Label left_not_string, call_runtime; | |
2113 | |
2114 Register left = a1; | |
2115 Register right = a0; | |
2116 | |
2117 // Check if left argument is a string. | |
2118 __ JumpIfSmi(left, &left_not_string); | |
2119 __ GetObjectType(left, a2, a2); | |
2120 __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE)); | |
2121 | |
2122 StringAddStub string_add_left_stub( | |
2123 (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME)); | |
2124 GenerateRegisterArgsPush(masm); | |
2125 __ TailCallStub(&string_add_left_stub); | |
2126 | |
2127 // Left operand is not a string, test right. | |
2128 __ bind(&left_not_string); | |
2129 __ JumpIfSmi(right, &call_runtime); | |
2130 __ GetObjectType(right, a2, a2); | |
2131 __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE)); | |
2132 | |
2133 StringAddStub string_add_right_stub( | |
2134 (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME)); | |
2135 GenerateRegisterArgsPush(masm); | |
2136 __ TailCallStub(&string_add_right_stub); | |
2137 | |
2138 // At least one argument is not a string. | |
2139 __ bind(&call_runtime); | |
2140 } | |
2141 | |
2142 | |
2143 void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, | |
2144 Register result, | |
2145 Register heap_number_map, | |
2146 Register scratch1, | |
2147 Register scratch2, | |
2148 Label* gc_required, | |
2149 OverwriteMode mode) { | |
2150 // Code below will scratch result if allocation fails. To keep both arguments | |
2151 // intact for the runtime call, result cannot be one of these. |
2152 ASSERT(!result.is(a0) && !result.is(a1)); | |
2153 | |
2154 if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) { | |
2155 Label skip_allocation, allocated; | |
2156 Register overwritable_operand = mode == OVERWRITE_LEFT ? a1 : a0; | |
2157 // If the overwritable operand is already an object, we skip the | |
2158 // allocation of a heap number. | |
2159 __ JumpIfNotSmi(overwritable_operand, &skip_allocation); | |
2160 // Allocate a heap number for the result. | |
2161 __ AllocateHeapNumber( | |
2162 result, scratch1, scratch2, heap_number_map, gc_required); | |
2163 __ Branch(&allocated); | |
2164 __ bind(&skip_allocation); | |
2165 // Use object holding the overwritable operand for result. | |
2166 __ mov(result, overwritable_operand); | |
2167 __ bind(&allocated); | |
2168 } else { | |
2169 ASSERT(mode == NO_OVERWRITE); | |
2170 __ AllocateHeapNumber( | |
2171 result, scratch1, scratch2, heap_number_map, gc_required); | |
2172 } | |
2173 } | |
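A note on the overwrite logic above: storage is reused only when the overwritable operand is itself a heap object; a smi is an immediate with no backing HeapNumber, so a fresh one must be allocated. A sketch of the decision with standalone names, not V8's declarations:

    // Mirrors OVERWRITE_LEFT / OVERWRITE_RIGHT / NO_OVERWRITE above.
    enum class Overwrite { kNone, kLeft, kRight };

    // True when a fresh HeapNumber must be allocated for the result.
    bool NeedsFreshHeapNumber(Overwrite mode, bool operand_is_smi) {
      if (mode == Overwrite::kNone) return true;  // operands stay intact
      return operand_is_smi;  // a smi has no heap storage to reuse
    }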
2174 | |
2175 | |
2176 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { | |
2177 __ Push(a1, a0); | |
2178 } | |
2179 | |
2180 | |
2181 | |
2182 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { | 1242 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
2183 // Untagged case: double input in f4, double result goes | 1243 // Untagged case: double input in f4, double result goes |
2184 // into f4. | 1244 // into f4. |
2185 // Tagged case: tagged input on top of stack and in a0, | 1245 // Tagged case: tagged input on top of stack and in a0, |
2186 // tagged result (heap number) goes into v0. | 1246 // tagged result (heap number) goes into v0. |
2187 | 1247 |
2188 Label input_not_smi; | 1248 Label input_not_smi; |
2189 Label loaded; | 1249 Label loaded; |
2190 Label calculate; | 1250 Label calculate; |
2191 Label invalid_cache; | 1251 Label invalid_cache; |
(...skipping 449 matching lines...)
2641 | 1701 |
2642 | 1702 |
2643 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { | 1703 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { |
2644 CEntryStub::GenerateAheadOfTime(isolate); | 1704 CEntryStub::GenerateAheadOfTime(isolate); |
2645 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate); | 1705 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate); |
2646 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); | 1706 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); |
2647 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); | 1707 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); |
2648 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); | 1708 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); |
2649 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); | 1709 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); |
2650 CreateAllocationSiteStub::GenerateAheadOfTime(isolate); | 1710 CreateAllocationSiteStub::GenerateAheadOfTime(isolate); |
| 1711 BinaryOpStub::GenerateAheadOfTime(isolate); |
2651 } | 1712 } |
2652 | 1713 |
2653 | 1714 |
2654 void CodeStub::GenerateFPStubs(Isolate* isolate) { | 1715 void CodeStub::GenerateFPStubs(Isolate* isolate) { |
2655 SaveFPRegsMode mode = kSaveFPRegs; | 1716 SaveFPRegsMode mode = kSaveFPRegs; |
2656 CEntryStub save_doubles(1, mode); | 1717 CEntryStub save_doubles(1, mode); |
2657 StoreBufferOverflowStub stub(mode); | 1718 StoreBufferOverflowStub stub(mode); |
2658 // These stubs might already be in the snapshot, detect that and don't | 1719 // These stubs might already be in the snapshot, detect that and don't |
2659 // regenerate, which would lead to code stub initialization state being messed | 1720 // regenerate, which would lead to code stub initialization state being messed |
2660 // up. | 1721 // up. |
(...skipping 4509 matching lines...)
7170 __ bind(&fast_elements_case); | 6231 __ bind(&fast_elements_case); |
7171 GenerateCase(masm, FAST_ELEMENTS); | 6232 GenerateCase(masm, FAST_ELEMENTS); |
7172 } | 6233 } |
7173 | 6234 |
7174 | 6235 |
7175 #undef __ | 6236 #undef __ |
7176 | 6237 |
7177 } } // namespace v8::internal | 6238 } } // namespace v8::internal |
7178 | 6239 |
7179 #endif // V8_TARGET_ARCH_MIPS | 6240 #endif // V8_TARGET_ARCH_MIPS |