| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 140 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 151 static Register registers[] = { r0 }; | 151 static Register registers[] = { r0 }; |
| 152 descriptor->register_param_count_ = 1; | 152 descriptor->register_param_count_ = 1; |
| 153 descriptor->register_params_ = registers; | 153 descriptor->register_params_ = registers; |
| 154 descriptor->deoptimization_handler_ = | 154 descriptor->deoptimization_handler_ = |
| 155 FUNCTION_ADDR(CompareNilIC_Miss); | 155 FUNCTION_ADDR(CompareNilIC_Miss); |
| 156 descriptor->SetMissHandler( | 156 descriptor->SetMissHandler( |
| 157 ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate)); | 157 ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate)); |
| 158 } | 158 } |
| 159 | 159 |
| 160 | 160 |
| 161 void BinaryOpStub::InitializeInterfaceDescriptor( |
| 162 Isolate* isolate, |
| 163 CodeStubInterfaceDescriptor* descriptor) { |
| 164 static Register registers[] = { r1, r0 }; |
| 165 descriptor->register_param_count_ = 2; |
| 166 descriptor->register_params_ = registers; |
| 167 descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss); |
| 168 descriptor->SetMissHandler( |
| 169 ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate)); |
| 170 } |
| 171 |
| 172 |
| 161 static void InitializeArrayConstructorDescriptor( | 173 static void InitializeArrayConstructorDescriptor( |
| 162 Isolate* isolate, | 174 Isolate* isolate, |
| 163 CodeStubInterfaceDescriptor* descriptor, | 175 CodeStubInterfaceDescriptor* descriptor, |
| 164 int constant_stack_parameter_count) { | 176 int constant_stack_parameter_count) { |
| 165 // register state | 177 // register state |
| 166 // r0 -- number of arguments | 178 // r0 -- number of arguments |
| 167 // r1 -- function | 179 // r1 -- function |
| 168 // r2 -- type info cell with elements kind | 180 // r2 -- type info cell with elements kind |
| 169 static Register registers[] = { r1, r2 }; | 181 static Register registers[] = { r1, r2 }; |
| 170 descriptor->register_param_count_ = 2; | 182 descriptor->register_param_count_ = 2; |
| (...skipping 1103 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1274 __ CallCFunction( | 1286 __ CallCFunction( |
| 1275 ExternalReference::store_buffer_overflow_function(masm->isolate()), | 1287 ExternalReference::store_buffer_overflow_function(masm->isolate()), |
| 1276 argument_count); | 1288 argument_count); |
| 1277 if (save_doubles_ == kSaveFPRegs) { | 1289 if (save_doubles_ == kSaveFPRegs) { |
| 1278 __ RestoreFPRegs(sp, scratch); | 1290 __ RestoreFPRegs(sp, scratch); |
| 1279 } | 1291 } |
| 1280 __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0). | 1292 __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0). |
| 1281 } | 1293 } |
| 1282 | 1294 |
| 1283 | 1295 |
| 1284 // Generates code to call a C function to do a double operation. | |
| 1285 // This code never falls through, but returns with a heap number containing | |
| 1286 // the result in r0. | |
| 1287 // Register heapnumber_result must be a heap number in which the | |
| 1288 // result of the operation will be stored. | |
| 1289 // Requires the following layout on entry: | |
| 1290 // d0: Left value. | |
| 1291 // d1: Right value. | |
| 1292 // If soft float ABI, use also r0, r1, r2, r3. | |
| 1293 static void CallCCodeForDoubleOperation(MacroAssembler* masm, | |
| 1294 Token::Value op, | |
| 1295 Register heap_number_result, | |
| 1296 Register scratch) { | |
| 1297 // Assert that heap_number_result is callee-saved. | |
| 1298 // We currently always use r5 to pass it. | |
| 1299 ASSERT(heap_number_result.is(r5)); | |
| 1300 | |
| 1301 // Push the current return address before the C call. Return will be | |
| 1302 // through pop(pc) below. | |
| 1303 __ push(lr); | |
| 1304 __ PrepareCallCFunction(0, 2, scratch); | |
| 1305 if (!masm->use_eabi_hardfloat()) { | |
| 1306 __ vmov(r0, r1, d0); | |
| 1307 __ vmov(r2, r3, d1); | |
| 1308 } | |
| 1309 { | |
| 1310 AllowExternalCallThatCantCauseGC scope(masm); | |
| 1311 __ CallCFunction( | |
| 1312 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); | |
| 1313 } | |
| 1314 // Store answer in the overwritable heap number. Double returned in | |
| 1315 // registers r0 and r1 or in d0. | |
| 1316 if (masm->use_eabi_hardfloat()) { | |
| 1317 __ vstr(d0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); | |
| 1318 } else { | |
| 1319 __ Strd(r0, r1, | |
| 1320 FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); | |
| 1321 } | |
| 1322 // Place heap_number_result in r0 and return to the pushed return address. | |
| 1323 __ mov(r0, Operand(heap_number_result)); | |
| 1324 __ pop(pc); | |
| 1325 } | |
| 1326 | |
| 1327 | |
| 1328 void BinaryOpStub::Initialize() { | |
| 1329 platform_specific_bit_ = true; // VFP2 is a base requirement for V8 | |
| 1330 } | |
| 1331 | |
| 1332 | |
| 1333 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | |
| 1334 Label get_result; | |
| 1335 | |
| 1336 __ Push(r1, r0); | |
| 1337 | |
| 1338 __ mov(r2, Operand(Smi::FromInt(MinorKey()))); | |
| 1339 __ push(r2); | |
| 1340 | |
| 1341 __ TailCallExternalReference( | |
| 1342 ExternalReference(IC_Utility(IC::kBinaryOp_Patch), | |
| 1343 masm->isolate()), | |
| 1344 3, | |
| 1345 1); | |
| 1346 } | |
| 1347 | |
| 1348 | |
| 1349 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( | |
| 1350 MacroAssembler* masm) { | |
| 1351 UNIMPLEMENTED(); | |
| 1352 } | |
| 1353 | |
| 1354 | |
| 1355 void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, | |
| 1356 Token::Value op) { | |
| 1357 Register left = r1; | |
| 1358 Register right = r0; | |
| 1359 Register scratch1 = r7; | |
| 1360 Register scratch2 = r9; | |
| 1361 | |
| 1362 ASSERT(right.is(r0)); | |
| 1363 STATIC_ASSERT(kSmiTag == 0); | |
| 1364 | |
| 1365 Label not_smi_result; | |
| 1366 switch (op) { | |
| 1367 case Token::ADD: | |
| 1368 __ add(right, left, Operand(right), SetCC); // Add optimistically. | |
| 1369 __ Ret(vc); | |
| 1370 __ sub(right, right, Operand(left)); // Revert optimistic add. | |
| 1371 break; | |
| 1372 case Token::SUB: | |
| 1373 __ sub(right, left, Operand(right), SetCC); // Subtract optimistically. | |
| 1374 __ Ret(vc); | |
| 1375 __ sub(right, left, Operand(right)); // Revert optimistic subtract. | |
| 1376 break; | |
| 1377 case Token::MUL: | |
| 1378 // Remove tag from one of the operands. This way the multiplication result | |
| 1379 // will be a smi if it fits the smi range. | |
| 1380 __ SmiUntag(ip, right); | |
| 1381 // Do multiplication | |
| 1382 // scratch1 = lower 32 bits of ip * left. | |
| 1383 // scratch2 = higher 32 bits of ip * left. | |
| 1384 __ smull(scratch1, scratch2, left, ip); | |
| 1385 // Check for overflowing the smi range - no overflow if higher 33 bits of | |
| 1386 // the result are identical. | |
| 1387 __ mov(ip, Operand(scratch1, ASR, 31)); | |
| 1388 __ cmp(ip, Operand(scratch2)); | |
| 1389 __ b(ne, ¬_smi_result); | |
| 1390 // Go slow on zero result to handle -0. | |
| 1391 __ cmp(scratch1, Operand::Zero()); | |
| 1392 __ mov(right, Operand(scratch1), LeaveCC, ne); | |
| 1393 __ Ret(ne); | |
| 1394 // We need -0 if we were multiplying a negative number with 0 to get 0. | |
| 1395 // We know one of them was zero. | |
| 1396 __ add(scratch2, right, Operand(left), SetCC); | |
| 1397 __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl); | |
| 1398 __ Ret(pl); // Return smi 0 if the non-zero one was positive. | |
| 1399 // We fall through here if we multiplied a negative number with 0, because | |
| 1400 // that would mean we should produce -0. | |
| 1401 break; | |
| 1402 case Token::DIV: { | |
| 1403 Label div_with_sdiv; | |
| 1404 | |
| 1405 // Check for 0 divisor. | |
| 1406 __ cmp(right, Operand::Zero()); | |
| 1407 __ b(eq, ¬_smi_result); | |
| 1408 | |
| 1409 // Check for power of two on the right hand side. | |
| 1410 __ sub(scratch1, right, Operand(1)); | |
| 1411 __ tst(scratch1, right); | |
| 1412 if (CpuFeatures::IsSupported(SUDIV)) { | |
| 1413 __ b(ne, &div_with_sdiv); | |
| 1414 // Check for no remainder. | |
| 1415 __ tst(left, scratch1); | |
| 1416 __ b(ne, ¬_smi_result); | |
| 1417 // Check for positive left hand side. | |
| 1418 __ cmp(left, Operand::Zero()); | |
| 1419 __ b(mi, &div_with_sdiv); | |
| 1420 } else { | |
| 1421 __ b(ne, ¬_smi_result); | |
| 1422 // Check for positive and no remainder. | |
| 1423 __ orr(scratch2, scratch1, Operand(0x80000000u)); | |
| 1424 __ tst(left, scratch2); | |
| 1425 __ b(ne, ¬_smi_result); | |
| 1426 } | |
| 1427 | |
| 1428 // Perform division by shifting. | |
| 1429 __ clz(scratch1, scratch1); | |
| 1430 __ rsb(scratch1, scratch1, Operand(31)); | |
| 1431 __ mov(right, Operand(left, LSR, scratch1)); | |
| 1432 __ Ret(); | |
| 1433 | |
| 1434 if (CpuFeatures::IsSupported(SUDIV)) { | |
| 1435 CpuFeatureScope scope(masm, SUDIV); | |
| 1436 Label result_not_zero; | |
| 1437 | |
| 1438 __ bind(&div_with_sdiv); | |
| 1439 // Do division. | |
| 1440 __ sdiv(scratch1, left, right); | |
| 1441 // Check that the remainder is zero. | |
| 1442 __ mls(scratch2, scratch1, right, left); | |
| 1443 __ cmp(scratch2, Operand::Zero()); | |
| 1444 __ b(ne, ¬_smi_result); | |
| 1445 // Check for negative zero result. | |
| 1446 __ cmp(scratch1, Operand::Zero()); | |
| 1447 __ b(ne, &result_not_zero); | |
| 1448 __ cmp(right, Operand::Zero()); | |
| 1449 __ b(lt, ¬_smi_result); | |
| 1450 __ bind(&result_not_zero); | |
| 1451 // Check for the corner case of dividing the most negative smi by -1. | |
| 1452 __ cmp(scratch1, Operand(0x40000000)); | |
| 1453 __ b(eq, ¬_smi_result); | |
| 1454 // Tag and return the result. | |
| 1455 __ SmiTag(right, scratch1); | |
| 1456 __ Ret(); | |
| 1457 } | |
| 1458 break; | |
| 1459 } | |
| 1460 case Token::MOD: { | |
| 1461 Label modulo_with_sdiv; | |
| 1462 | |
| 1463 if (CpuFeatures::IsSupported(SUDIV)) { | |
| 1464 // Check for x % 0. | |
| 1465 __ cmp(right, Operand::Zero()); | |
| 1466 __ b(eq, ¬_smi_result); | |
| 1467 | |
| 1468 // Check for two positive smis. | |
| 1469 __ orr(scratch1, left, Operand(right)); | |
| 1470 __ tst(scratch1, Operand(0x80000000u)); | |
| 1471 __ b(ne, &modulo_with_sdiv); | |
| 1472 | |
| 1473 // Check for power of two on the right hand side. | |
| 1474 __ sub(scratch1, right, Operand(1)); | |
| 1475 __ tst(scratch1, right); | |
| 1476 __ b(ne, &modulo_with_sdiv); | |
| 1477 } else { | |
| 1478 // Check for two positive smis. | |
| 1479 __ orr(scratch1, left, Operand(right)); | |
| 1480 __ tst(scratch1, Operand(0x80000000u)); | |
| 1481 __ b(ne, ¬_smi_result); | |
| 1482 | |
| 1483 // Check for power of two on the right hand side. | |
| 1484 __ JumpIfNotPowerOfTwoOrZero(right, scratch1, ¬_smi_result); | |
| 1485 } | |
| 1486 | |
| 1487 // Perform modulus by masking (scratch1 contains right - 1). | |
| 1488 __ and_(right, left, Operand(scratch1)); | |
| 1489 __ Ret(); | |
| 1490 | |
| 1491 if (CpuFeatures::IsSupported(SUDIV)) { | |
| 1492 CpuFeatureScope scope(masm, SUDIV); | |
| 1493 __ bind(&modulo_with_sdiv); | |
| 1494 __ mov(scratch2, right); | |
| 1495 // Perform modulus with sdiv and mls. | |
| 1496 __ sdiv(scratch1, left, right); | |
| 1497 __ mls(right, scratch1, right, left); | |
| 1498 // Return if the result is not 0. | |
| 1499 __ cmp(right, Operand::Zero()); | |
| 1500 __ Ret(ne); | |
| 1501 // The result is 0, check for -0 case. | |
| 1502 __ cmp(left, Operand::Zero()); | |
| 1503 __ Ret(pl); | |
| 1504 // This is a -0 case, restore the value of right. | |
| 1505 __ mov(right, scratch2); | |
| 1506 // We fall through here to not_smi_result to produce -0. | |
| 1507 } | |
| 1508 break; | |
| 1509 } | |
| 1510 case Token::BIT_OR: | |
| 1511 __ orr(right, left, Operand(right)); | |
| 1512 __ Ret(); | |
| 1513 break; | |
| 1514 case Token::BIT_AND: | |
| 1515 __ and_(right, left, Operand(right)); | |
| 1516 __ Ret(); | |
| 1517 break; | |
| 1518 case Token::BIT_XOR: | |
| 1519 __ eor(right, left, Operand(right)); | |
| 1520 __ Ret(); | |
| 1521 break; | |
| 1522 case Token::SAR: | |
| 1523 // Remove tags from right operand. | |
| 1524 __ GetLeastBitsFromSmi(scratch1, right, 5); | |
| 1525 __ mov(right, Operand(left, ASR, scratch1)); | |
| 1526 // Smi tag result. | |
| 1527 __ bic(right, right, Operand(kSmiTagMask)); | |
| 1528 __ Ret(); | |
| 1529 break; | |
| 1530 case Token::SHR: | |
| 1531 // Remove tags from operands. We can't do this on a 31 bit number | |
| 1532 // because then the 0s get shifted into bit 30 instead of bit 31. | |
| 1533 __ SmiUntag(scratch1, left); | |
| 1534 __ GetLeastBitsFromSmi(scratch2, right, 5); | |
| 1535 __ mov(scratch1, Operand(scratch1, LSR, scratch2)); | |
| 1536 // Unsigned shift is not allowed to produce a negative number, so | |
| 1537 // check the sign bit and the sign bit after Smi tagging. | |
| 1538 __ tst(scratch1, Operand(0xc0000000)); | |
| 1539 __ b(ne, ¬_smi_result); | |
| 1540 // Smi tag result. | |
| 1541 __ SmiTag(right, scratch1); | |
| 1542 __ Ret(); | |
| 1543 break; | |
| 1544 case Token::SHL: | |
| 1545 // Remove tags from operands. | |
| 1546 __ SmiUntag(scratch1, left); | |
| 1547 __ GetLeastBitsFromSmi(scratch2, right, 5); | |
| 1548 __ mov(scratch1, Operand(scratch1, LSL, scratch2)); | |
| 1549 // Check that the signed result fits in a Smi. | |
| 1550 __ TrySmiTag(right, scratch1, ¬_smi_result); | |
| 1551 __ Ret(); | |
| 1552 break; | |
| 1553 default: | |
| 1554 UNREACHABLE(); | |
| 1555 } | |
| 1556 __ bind(¬_smi_result); | |
| 1557 } | |
| 1558 | |
| 1559 | |
| 1560 void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, | |
| 1561 Register result, | |
| 1562 Register heap_number_map, | |
| 1563 Register scratch1, | |
| 1564 Register scratch2, | |
| 1565 Label* gc_required, | |
| 1566 OverwriteMode mode); | |
| 1567 | |
| 1568 | |
| 1569 void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, | |
| 1570 BinaryOpIC::TypeInfo left_type, | |
| 1571 BinaryOpIC::TypeInfo right_type, | |
| 1572 bool smi_operands, | |
| 1573 Label* not_numbers, | |
| 1574 Label* gc_required, | |
| 1575 Label* miss, | |
| 1576 Token::Value op, | |
| 1577 OverwriteMode mode) { | |
| 1578 Register left = r1; | |
| 1579 Register right = r0; | |
| 1580 Register scratch1 = r6; | |
| 1581 Register scratch2 = r7; | |
| 1582 | |
| 1583 ASSERT(smi_operands || (not_numbers != NULL)); | |
| 1584 if (smi_operands) { | |
| 1585 __ AssertSmi(left); | |
| 1586 __ AssertSmi(right); | |
| 1587 } | |
| 1588 if (left_type == BinaryOpIC::SMI) { | |
| 1589 __ JumpIfNotSmi(left, miss); | |
| 1590 } | |
| 1591 if (right_type == BinaryOpIC::SMI) { | |
| 1592 __ JumpIfNotSmi(right, miss); | |
| 1593 } | |
| 1594 | |
| 1595 Register heap_number_map = r9; | |
| 1596 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
| 1597 | |
| 1598 switch (op) { | |
| 1599 case Token::ADD: | |
| 1600 case Token::SUB: | |
| 1601 case Token::MUL: | |
| 1602 case Token::DIV: | |
| 1603 case Token::MOD: { | |
| 1604 // Allocate new heap number for result. | |
| 1605 Register result = r5; | |
| 1606 BinaryOpStub_GenerateHeapResultAllocation( | |
| 1607 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); | |
| 1608 | |
| 1609 // Load left and right operands into d0 and d1. | |
| 1610 if (smi_operands) { | |
| 1611 __ SmiToDouble(d1, right); | |
| 1612 __ SmiToDouble(d0, left); | |
| 1613 } else { | |
| 1614 // Load right operand into d1. | |
| 1615 if (right_type == BinaryOpIC::INT32) { | |
| 1616 __ LoadNumberAsInt32Double( | |
| 1617 right, d1, heap_number_map, scratch1, d8, miss); | |
| 1618 } else { | |
| 1619 Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers; | |
| 1620 __ LoadNumber(right, d1, heap_number_map, scratch1, fail); | |
| 1621 } | |
| 1622 // Load left operand into d0. | |
| 1623 if (left_type == BinaryOpIC::INT32) { | |
| 1624 __ LoadNumberAsInt32Double( | |
| 1625 left, d0, heap_number_map, scratch1, d8, miss); | |
| 1626 } else { | |
| 1627 Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers; | |
| 1628 __ LoadNumber( | |
| 1629 left, d0, heap_number_map, scratch1, fail); | |
| 1630 } | |
| 1631 } | |
| 1632 | |
| 1633 // Calculate the result. | |
| 1634 if (op != Token::MOD) { | |
| 1635 // Using VFP registers: | |
| 1636 // d0: Left value | |
| 1637 // d1: Right value | |
| 1638 switch (op) { | |
| 1639 case Token::ADD: | |
| 1640 __ vadd(d5, d0, d1); | |
| 1641 break; | |
| 1642 case Token::SUB: | |
| 1643 __ vsub(d5, d0, d1); | |
| 1644 break; | |
| 1645 case Token::MUL: | |
| 1646 __ vmul(d5, d0, d1); | |
| 1647 break; | |
| 1648 case Token::DIV: | |
| 1649 __ vdiv(d5, d0, d1); | |
| 1650 break; | |
| 1651 default: | |
| 1652 UNREACHABLE(); | |
| 1653 } | |
| 1654 | |
| 1655 __ sub(r0, result, Operand(kHeapObjectTag)); | |
| 1656 __ vstr(d5, r0, HeapNumber::kValueOffset); | |
| 1657 __ add(r0, r0, Operand(kHeapObjectTag)); | |
| 1658 __ Ret(); | |
| 1659 } else { | |
| 1660 // Call the C function to handle the double operation. | |
| 1661 CallCCodeForDoubleOperation(masm, op, result, scratch1); | |
| 1662 if (FLAG_debug_code) { | |
| 1663 __ stop("Unreachable code."); | |
| 1664 } | |
| 1665 } | |
| 1666 break; | |
| 1667 } | |
| 1668 case Token::BIT_OR: | |
| 1669 case Token::BIT_XOR: | |
| 1670 case Token::BIT_AND: | |
| 1671 case Token::SAR: | |
| 1672 case Token::SHR: | |
| 1673 case Token::SHL: { | |
| 1674 if (smi_operands) { | |
| 1675 __ SmiUntag(r3, left); | |
| 1676 __ SmiUntag(r2, right); | |
| 1677 } else { | |
| 1678 // Convert operands to 32-bit integers. Right in r2 and left in r3. | |
| 1679 __ TruncateNumberToI(left, r3, heap_number_map, scratch1, not_numbers); | |
| 1680 __ TruncateNumberToI(right, r2, heap_number_map, scratch1, not_numbers); | |
| 1681 } | |
| 1682 | |
| 1683 Label result_not_a_smi; | |
| 1684 switch (op) { | |
| 1685 case Token::BIT_OR: | |
| 1686 __ orr(r2, r3, Operand(r2)); | |
| 1687 break; | |
| 1688 case Token::BIT_XOR: | |
| 1689 __ eor(r2, r3, Operand(r2)); | |
| 1690 break; | |
| 1691 case Token::BIT_AND: | |
| 1692 __ and_(r2, r3, Operand(r2)); | |
| 1693 break; | |
| 1694 case Token::SAR: | |
| 1695 // Use only the 5 least significant bits of the shift count. | |
| 1696 __ GetLeastBitsFromInt32(r2, r2, 5); | |
| 1697 __ mov(r2, Operand(r3, ASR, r2)); | |
| 1698 break; | |
| 1699 case Token::SHR: | |
| 1700 // Use only the 5 least significant bits of the shift count. | |
| 1701 __ GetLeastBitsFromInt32(r2, r2, 5); | |
| 1702 __ mov(r2, Operand(r3, LSR, r2), SetCC); | |
| 1703 // SHR is special because it is required to produce a positive answer. | |
| 1704 // The code below for writing into heap numbers isn't capable of | |
| 1705 // writing the register as an unsigned int so we go to slow case if we | |
| 1706 // hit this case. | |
| 1707 __ b(mi, &result_not_a_smi); | |
| 1708 break; | |
| 1709 case Token::SHL: | |
| 1710 // Use only the 5 least significant bits of the shift count. | |
| 1711 __ GetLeastBitsFromInt32(r2, r2, 5); | |
| 1712 __ mov(r2, Operand(r3, LSL, r2)); | |
| 1713 break; | |
| 1714 default: | |
| 1715 UNREACHABLE(); | |
| 1716 } | |
| 1717 | |
| 1718 // Check that the *signed* result fits in a smi. | |
| 1719 __ TrySmiTag(r0, r2, &result_not_a_smi); | |
| 1720 __ Ret(); | |
| 1721 | |
| 1722 // Allocate new heap number for result. | |
| 1723 __ bind(&result_not_a_smi); | |
| 1724 Register result = r5; | |
| 1725 if (smi_operands) { | |
| 1726 __ AllocateHeapNumber( | |
| 1727 result, scratch1, scratch2, heap_number_map, gc_required); | |
| 1728 } else { | |
| 1729 BinaryOpStub_GenerateHeapResultAllocation( | |
| 1730 masm, result, heap_number_map, scratch1, scratch2, gc_required, | |
| 1731 mode); | |
| 1732 } | |
| 1733 | |
| 1734 // r2: Answer as signed int32. | |
| 1735 // r5: Heap number to write answer into. | |
| 1736 | |
| 1737 // Nothing can go wrong now, so move the heap number to r0, which is the | |
| 1738 // result. | |
| 1739 __ mov(r0, Operand(r5)); | |
| 1740 | |
| 1741 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As | |
| 1742 // mentioned above SHR needs to always produce a positive result. | |
| 1743 __ vmov(s0, r2); | |
| 1744 if (op == Token::SHR) { | |
| 1745 __ vcvt_f64_u32(d0, s0); | |
| 1746 } else { | |
| 1747 __ vcvt_f64_s32(d0, s0); | |
| 1748 } | |
| 1749 __ sub(r3, r0, Operand(kHeapObjectTag)); | |
| 1750 __ vstr(d0, r3, HeapNumber::kValueOffset); | |
| 1751 __ Ret(); | |
| 1752 break; | |
| 1753 } | |
| 1754 default: | |
| 1755 UNREACHABLE(); | |
| 1756 } | |
| 1757 } | |
| 1758 | |
| 1759 | |
| 1760 // Generate the smi code. If the operation on smis are successful this return is | |
| 1761 // generated. If the result is not a smi and heap number allocation is not | |
| 1762 // requested the code falls through. If number allocation is requested but a | |
| 1763 // heap number cannot be allocated the code jumps to the label gc_required. | |
| 1764 void BinaryOpStub_GenerateSmiCode( | |
| 1765 MacroAssembler* masm, | |
| 1766 Label* use_runtime, | |
| 1767 Label* gc_required, | |
| 1768 Token::Value op, | |
| 1769 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, | |
| 1770 OverwriteMode mode) { | |
| 1771 Label not_smis; | |
| 1772 | |
| 1773 Register left = r1; | |
| 1774 Register right = r0; | |
| 1775 Register scratch1 = r7; | |
| 1776 | |
| 1777 // Perform combined smi check on both operands. | |
| 1778 __ orr(scratch1, left, Operand(right)); | |
| 1779 __ JumpIfNotSmi(scratch1, ¬_smis); | |
| 1780 | |
| 1781 // If the smi-smi operation results in a smi return is generated. | |
| 1782 BinaryOpStub_GenerateSmiSmiOperation(masm, op); | |
| 1783 | |
| 1784 // If heap number results are possible generate the result in an allocated | |
| 1785 // heap number. | |
| 1786 if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { | |
| 1787 BinaryOpStub_GenerateFPOperation( | |
| 1788 masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true, | |
| 1789 use_runtime, gc_required, ¬_smis, op, mode); | |
| 1790 } | |
| 1791 __ bind(¬_smis); | |
| 1792 } | |
| 1793 | |
| 1794 | |
| 1795 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { | |
| 1796 Label right_arg_changed, call_runtime; | |
| 1797 | |
| 1798 if (op_ == Token::MOD && encoded_right_arg_.has_value) { | |
| 1799 // It is guaranteed that the value will fit into a Smi, because if it | |
| 1800 // didn't, we wouldn't be here, see BinaryOp_Patch. | |
| 1801 __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value()))); | |
| 1802 __ b(ne, &right_arg_changed); | |
| 1803 } | |
| 1804 | |
| 1805 if (result_type_ == BinaryOpIC::UNINITIALIZED || | |
| 1806 result_type_ == BinaryOpIC::SMI) { | |
| 1807 // Only allow smi results. | |
| 1808 BinaryOpStub_GenerateSmiCode( | |
| 1809 masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_); | |
| 1810 } else { | |
| 1811 // Allow heap number result and don't make a transition if a heap number | |
| 1812 // cannot be allocated. | |
| 1813 BinaryOpStub_GenerateSmiCode( | |
| 1814 masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, | |
| 1815 mode_); | |
| 1816 } | |
| 1817 | |
| 1818 // Code falls through if the result is not returned as either a smi or heap | |
| 1819 // number. | |
| 1820 __ bind(&right_arg_changed); | |
| 1821 GenerateTypeTransition(masm); | |
| 1822 | |
| 1823 __ bind(&call_runtime); | |
| 1824 { | |
| 1825 FrameScope scope(masm, StackFrame::INTERNAL); | |
| 1826 GenerateRegisterArgsPush(masm); | |
| 1827 GenerateCallRuntime(masm); | |
| 1828 } | |
| 1829 __ Ret(); | |
| 1830 } | |
| 1831 | |
| 1832 | |
| 1833 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { | |
| 1834 Label call_runtime; | |
| 1835 ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); | |
| 1836 ASSERT(op_ == Token::ADD); | |
| 1837 // If both arguments are strings, call the string add stub. | |
| 1838 // Otherwise, do a transition. | |
| 1839 | |
| 1840 // Registers containing left and right operands respectively. | |
| 1841 Register left = r1; | |
| 1842 Register right = r0; | |
| 1843 | |
| 1844 // Test if left operand is a string. | |
| 1845 __ JumpIfSmi(left, &call_runtime); | |
| 1846 __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE); | |
| 1847 __ b(ge, &call_runtime); | |
| 1848 | |
| 1849 // Test if right operand is a string. | |
| 1850 __ JumpIfSmi(right, &call_runtime); | |
| 1851 __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE); | |
| 1852 __ b(ge, &call_runtime); | |
| 1853 | |
| 1854 StringAddStub string_add_stub( | |
| 1855 (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME)); | |
| 1856 GenerateRegisterArgsPush(masm); | |
| 1857 __ TailCallStub(&string_add_stub); | |
| 1858 | |
| 1859 __ bind(&call_runtime); | |
| 1860 GenerateTypeTransition(masm); | |
| 1861 } | |
| 1862 | |
| 1863 | |
| 1864 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { | |
| 1865 ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); | |
| 1866 | |
| 1867 Register left = r1; | |
| 1868 Register right = r0; | |
| 1869 Register scratch1 = r7; | |
| 1870 Register scratch2 = r9; | |
| 1871 LowDwVfpRegister double_scratch = d0; | |
| 1872 | |
| 1873 Register heap_number_result = no_reg; | |
| 1874 Register heap_number_map = r6; | |
| 1875 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
| 1876 | |
| 1877 Label call_runtime; | |
| 1878 // Labels for type transition, used for wrong input or output types. | |
| 1879 // Both label are currently actually bound to the same position. We use two | |
| 1880 // different label to differentiate the cause leading to type transition. | |
| 1881 Label transition; | |
| 1882 | |
| 1883 // Smi-smi fast case. | |
| 1884 Label skip; | |
| 1885 __ orr(scratch1, left, right); | |
| 1886 __ JumpIfNotSmi(scratch1, &skip); | |
| 1887 BinaryOpStub_GenerateSmiSmiOperation(masm, op_); | |
| 1888 // Fall through if the result is not a smi. | |
| 1889 __ bind(&skip); | |
| 1890 | |
| 1891 switch (op_) { | |
| 1892 case Token::ADD: | |
| 1893 case Token::SUB: | |
| 1894 case Token::MUL: | |
| 1895 case Token::DIV: | |
| 1896 case Token::MOD: { | |
| 1897 // It could be that only SMIs have been seen at either the left | |
| 1898 // or the right operand. For precise type feedback, patch the IC | |
| 1899 // again if this changes. | |
| 1900 if (left_type_ == BinaryOpIC::SMI) { | |
| 1901 __ JumpIfNotSmi(left, &transition); | |
| 1902 } | |
| 1903 if (right_type_ == BinaryOpIC::SMI) { | |
| 1904 __ JumpIfNotSmi(right, &transition); | |
| 1905 } | |
| 1906 // Load both operands and check that they are 32-bit integer. | |
| 1907 // Jump to type transition if they are not. The registers r0 and r1 (right | |
| 1908 // and left) are preserved for the runtime call. | |
| 1909 __ LoadNumberAsInt32Double( | |
| 1910 right, d1, heap_number_map, scratch1, d8, &transition); | |
| 1911 __ LoadNumberAsInt32Double( | |
| 1912 left, d0, heap_number_map, scratch1, d8, &transition); | |
| 1913 | |
| 1914 if (op_ != Token::MOD) { | |
| 1915 Label return_heap_number; | |
| 1916 switch (op_) { | |
| 1917 case Token::ADD: | |
| 1918 __ vadd(d5, d0, d1); | |
| 1919 break; | |
| 1920 case Token::SUB: | |
| 1921 __ vsub(d5, d0, d1); | |
| 1922 break; | |
| 1923 case Token::MUL: | |
| 1924 __ vmul(d5, d0, d1); | |
| 1925 break; | |
| 1926 case Token::DIV: | |
| 1927 __ vdiv(d5, d0, d1); | |
| 1928 break; | |
| 1929 default: | |
| 1930 UNREACHABLE(); | |
| 1931 } | |
| 1932 | |
| 1933 if (result_type_ <= BinaryOpIC::INT32) { | |
| 1934 __ TryDoubleToInt32Exact(scratch1, d5, d8); | |
| 1935 // If the ne condition is set, result does | |
| 1936 // not fit in a 32-bit integer. | |
| 1937 __ b(ne, &transition); | |
| 1938 // Try to tag the result as a Smi, return heap number on overflow. | |
| 1939 __ SmiTag(scratch1, SetCC); | |
| 1940 __ b(vs, &return_heap_number); | |
| 1941 // Check for minus zero, transition in that case (because we need | |
| 1942 // to return a heap number). | |
| 1943 Label not_zero; | |
| 1944 ASSERT(kSmiTag == 0); | |
| 1945 __ b(ne, ¬_zero); | |
| 1946 __ VmovHigh(scratch2, d5); | |
| 1947 __ tst(scratch2, Operand(HeapNumber::kSignMask)); | |
| 1948 __ b(ne, &transition); | |
| 1949 __ bind(¬_zero); | |
| 1950 __ mov(r0, scratch1); | |
| 1951 __ Ret(); | |
| 1952 } | |
| 1953 | |
| 1954 __ bind(&return_heap_number); | |
| 1955 // Return a heap number, or fall through to type transition or runtime | |
| 1956 // call if we can't. | |
| 1957 // We are using vfp registers so r5 is available. | |
| 1958 heap_number_result = r5; | |
| 1959 BinaryOpStub_GenerateHeapResultAllocation(masm, | |
| 1960 heap_number_result, | |
| 1961 heap_number_map, | |
| 1962 scratch1, | |
| 1963 scratch2, | |
| 1964 &call_runtime, | |
| 1965 mode_); | |
| 1966 __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); | |
| 1967 __ vstr(d5, r0, HeapNumber::kValueOffset); | |
| 1968 __ mov(r0, heap_number_result); | |
| 1969 __ Ret(); | |
| 1970 | |
| 1971 // A DIV operation expecting an integer result falls through | |
| 1972 // to type transition. | |
| 1973 | |
| 1974 } else { | |
| 1975 if (encoded_right_arg_.has_value) { | |
| 1976 __ Vmov(d8, fixed_right_arg_value(), scratch1); | |
| 1977 __ VFPCompareAndSetFlags(d1, d8); | |
| 1978 __ b(ne, &transition); | |
| 1979 } | |
| 1980 | |
| 1981 // We preserved r0 and r1 to be able to call runtime. | |
| 1982 // Save the left value on the stack. | |
| 1983 __ Push(r5, r4); | |
| 1984 | |
| 1985 Label pop_and_call_runtime; | |
| 1986 | |
| 1987 // Allocate a heap number to store the result. | |
| 1988 heap_number_result = r5; | |
| 1989 BinaryOpStub_GenerateHeapResultAllocation(masm, | |
| 1990 heap_number_result, | |
| 1991 heap_number_map, | |
| 1992 scratch1, | |
| 1993 scratch2, | |
| 1994 &pop_and_call_runtime, | |
| 1995 mode_); | |
| 1996 | |
| 1997 // Load the left value from the value saved on the stack. | |
| 1998 __ Pop(r1, r0); | |
| 1999 | |
| 2000 // Call the C function to handle the double operation. | |
| 2001 CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1); | |
| 2002 if (FLAG_debug_code) { | |
| 2003 __ stop("Unreachable code."); | |
| 2004 } | |
| 2005 | |
| 2006 __ bind(&pop_and_call_runtime); | |
| 2007 __ Drop(2); | |
| 2008 __ b(&call_runtime); | |
| 2009 } | |
| 2010 | |
| 2011 break; | |
| 2012 } | |
| 2013 | |
| 2014 case Token::BIT_OR: | |
| 2015 case Token::BIT_XOR: | |
| 2016 case Token::BIT_AND: | |
| 2017 case Token::SAR: | |
| 2018 case Token::SHR: | |
| 2019 case Token::SHL: { | |
| 2020 Label return_heap_number; | |
| 2021 // Convert operands to 32-bit integers. Right in r2 and left in r3. The | |
| 2022 // registers r0 and r1 (right and left) are preserved for the runtime | |
| 2023 // call. | |
| 2024 __ LoadNumberAsInt32(left, r3, heap_number_map, | |
| 2025 scratch1, d0, d1, &transition); | |
| 2026 __ LoadNumberAsInt32(right, r2, heap_number_map, | |
| 2027 scratch1, d0, d1, &transition); | |
| 2028 | |
| 2029 // The ECMA-262 standard specifies that, for shift operations, only the | |
| 2030 // 5 least significant bits of the shift value should be used. | |
| 2031 switch (op_) { | |
| 2032 case Token::BIT_OR: | |
| 2033 __ orr(r2, r3, Operand(r2)); | |
| 2034 break; | |
| 2035 case Token::BIT_XOR: | |
| 2036 __ eor(r2, r3, Operand(r2)); | |
| 2037 break; | |
| 2038 case Token::BIT_AND: | |
| 2039 __ and_(r2, r3, Operand(r2)); | |
| 2040 break; | |
| 2041 case Token::SAR: | |
| 2042 __ and_(r2, r2, Operand(0x1f)); | |
| 2043 __ mov(r2, Operand(r3, ASR, r2)); | |
| 2044 break; | |
| 2045 case Token::SHR: | |
| 2046 __ and_(r2, r2, Operand(0x1f)); | |
| 2047 __ mov(r2, Operand(r3, LSR, r2), SetCC); | |
| 2048 // SHR is special because it is required to produce a positive answer. | |
| 2049 // We only get a negative result if the shift value (r2) is 0. | |
| 2050 // This result cannot be respresented as a signed 32-bit integer, try | |
| 2051 // to return a heap number if we can. | |
| 2052 __ b(mi, (result_type_ <= BinaryOpIC::INT32) | |
| 2053 ? &transition | |
| 2054 : &return_heap_number); | |
| 2055 break; | |
| 2056 case Token::SHL: | |
| 2057 __ and_(r2, r2, Operand(0x1f)); | |
| 2058 __ mov(r2, Operand(r3, LSL, r2)); | |
| 2059 break; | |
| 2060 default: | |
| 2061 UNREACHABLE(); | |
| 2062 } | |
| 2063 | |
| 2064 // Check if the result fits in a smi. If not try to return a heap number. | |
| 2065 // (We know the result is an int32). | |
| 2066 __ TrySmiTag(r0, r2, &return_heap_number); | |
| 2067 __ Ret(); | |
| 2068 | |
| 2069 __ bind(&return_heap_number); | |
| 2070 heap_number_result = r5; | |
| 2071 BinaryOpStub_GenerateHeapResultAllocation(masm, | |
| 2072 heap_number_result, | |
| 2073 heap_number_map, | |
| 2074 scratch1, | |
| 2075 scratch2, | |
| 2076 &call_runtime, | |
| 2077 mode_); | |
| 2078 | |
| 2079 if (op_ != Token::SHR) { | |
| 2080 // Convert the result to a floating point value. | |
| 2081 __ vmov(double_scratch.low(), r2); | |
| 2082 __ vcvt_f64_s32(double_scratch, double_scratch.low()); | |
| 2083 } else { | |
| 2084 // The result must be interpreted as an unsigned 32-bit integer. | |
| 2085 __ vmov(double_scratch.low(), r2); | |
| 2086 __ vcvt_f64_u32(double_scratch, double_scratch.low()); | |
| 2087 } | |
| 2088 | |
| 2089 // Store the result. | |
| 2090 __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); | |
| 2091 __ vstr(double_scratch, r0, HeapNumber::kValueOffset); | |
| 2092 __ mov(r0, heap_number_result); | |
| 2093 __ Ret(); | |
| 2094 | |
| 2095 break; | |
| 2096 } | |
| 2097 | |
| 2098 default: | |
| 2099 UNREACHABLE(); | |
| 2100 } | |
| 2101 | |
| 2102 // We never expect DIV to yield an integer result, so we always generate | |
| 2103 // type transition code for DIV operations expecting an integer result: the | |
| 2104 // code will fall through to this type transition. | |
| 2105 if (transition.is_linked() || | |
| 2106 ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) { | |
| 2107 __ bind(&transition); | |
| 2108 GenerateTypeTransition(masm); | |
| 2109 } | |
| 2110 | |
| 2111 __ bind(&call_runtime); | |
| 2112 { | |
| 2113 FrameScope scope(masm, StackFrame::INTERNAL); | |
| 2114 GenerateRegisterArgsPush(masm); | |
| 2115 GenerateCallRuntime(masm); | |
| 2116 } | |
| 2117 __ Ret(); | |
| 2118 } | |
| 2119 | |
| 2120 | |
| 2121 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { | |
| 2122 Label call_runtime; | |
| 2123 | |
| 2124 if (op_ == Token::ADD) { | |
| 2125 // Handle string addition here, because it is the only operation | |
| 2126 // that does not do a ToNumber conversion on the operands. | |
| 2127 GenerateAddStrings(masm); | |
| 2128 } | |
| 2129 | |
| 2130 // Convert oddball arguments to numbers. | |
| 2131 Label check, done; | |
| 2132 __ CompareRoot(r1, Heap::kUndefinedValueRootIndex); | |
| 2133 __ b(ne, &check); | |
| 2134 if (Token::IsBitOp(op_)) { | |
| 2135 __ mov(r1, Operand(Smi::FromInt(0))); | |
| 2136 } else { | |
| 2137 __ LoadRoot(r1, Heap::kNanValueRootIndex); | |
| 2138 } | |
| 2139 __ jmp(&done); | |
| 2140 __ bind(&check); | |
| 2141 __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); | |
| 2142 __ b(ne, &done); | |
| 2143 if (Token::IsBitOp(op_)) { | |
| 2144 __ mov(r0, Operand(Smi::FromInt(0))); | |
| 2145 } else { | |
| 2146 __ LoadRoot(r0, Heap::kNanValueRootIndex); | |
| 2147 } | |
| 2148 __ bind(&done); | |
| 2149 | |
| 2150 GenerateNumberStub(masm); | |
| 2151 } | |
| 2152 | |
| 2153 | |
// Stub entry used when both operands have been observed to be numbers.
// Emits the floating-point fast path; inputs that do not match the recorded
// operand types jump to `transition` (patch the IC to a more general stub),
// while allocation failure falls back to `call_runtime`.
void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
  Label call_runtime, transition;
  BinaryOpStub_GenerateFPOperation(
      masm, left_type_, right_type_, false,
      &transition, &call_runtime, &transition, op_, mode_);

  __ bind(&transition);
  // Tail-calls out to the IC miss handler; does not fall through here.
  GenerateTypeTransition(masm);

  __ bind(&call_runtime);
  {
    // Arguments must be on the stack for the runtime call; the frame scope
    // makes the call GC-safe.
    FrameScope scope(masm, StackFrame::INTERNAL);
    GenerateRegisterArgsPush(masm);
    GenerateCallRuntime(masm);
  }
  __ Ret();
}
| 2171 | |
| 2172 | |
// Most general stub entry: handles smis, heap numbers, strings (for ADD),
// and finally defers to the runtime. No type transitions are generated from
// here except for inputs the FP path flags via `transition`.
void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
  Label call_runtime, call_string_add_or_runtime, transition;

  // Fast path for smi operands; falls through on non-smis or on results
  // that do not fit (heap number results are allowed here).
  BinaryOpStub_GenerateSmiCode(
      masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);

  // Heap-number path. Non-number operands go to
  // `call_string_add_or_runtime` so string ADD still gets a chance.
  BinaryOpStub_GenerateFPOperation(
      masm, left_type_, right_type_, false,
      &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);

  __ bind(&transition);
  // Tail-calls out to the IC miss handler; does not fall through here.
  GenerateTypeTransition(masm);

  __ bind(&call_string_add_or_runtime);
  if (op_ == Token::ADD) {
    // Handles string + anything / anything + string; falls through here
    // when neither operand is a string.
    GenerateAddStrings(masm);
  }

  __ bind(&call_runtime);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    GenerateRegisterArgsPush(masm);
    GenerateCallRuntime(masm);
  }
  __ Ret();
}
| 2199 | |
| 2200 | |
// Emits the string-addition fast path for Token::ADD. If either operand is
// a string, tail-calls the appropriate StringAddStub (which checks the other
// operand). If neither operand is a string, execution falls through past the
// bound `call_runtime` label into whatever code the caller emits next.
void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
  ASSERT(op_ == Token::ADD);
  Label left_not_string, call_runtime;

  // Calling convention: left operand in r1, right operand in r0.
  Register left = r1;
  Register right = r0;

  // Check if left argument is a string.
  __ JumpIfSmi(left, &left_not_string);
  __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
  __ b(ge, &left_not_string);

  // Left is a string: the stub only needs to type-check the right operand.
  StringAddStub string_add_left_stub(
      (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
  GenerateRegisterArgsPush(masm);
  __ TailCallStub(&string_add_left_stub);

  // Left operand is not a string, test right.
  __ bind(&left_not_string);
  __ JumpIfSmi(right, &call_runtime);
  __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
  __ b(ge, &call_runtime);

  // Right is a string: the stub only needs to type-check the left operand.
  StringAddStub string_add_right_stub(
      (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
  GenerateRegisterArgsPush(masm);
  __ TailCallStub(&string_add_right_stub);

  // At least one argument is not a string.
  __ bind(&call_runtime);
}
| 2232 | |
| 2233 | |
// Places a HeapNumber suitable for holding the stub's result into `result`.
// Depending on `mode`, either reuses the overwritable input operand's object
// (when that operand is already a heap object) or allocates a fresh
// HeapNumber, jumping to `gc_required` if allocation fails.
// `scratch1`/`scratch2` are clobbered; `heap_number_map` must hold the
// heap-number map root.
void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
                                               Register result,
                                               Register heap_number_map,
                                               Register scratch1,
                                               Register scratch2,
                                               Label* gc_required,
                                               OverwriteMode mode) {
  // Code below will scratch result if allocation fails. To keep both arguments
  // intact for the runtime call result cannot be one of these.
  ASSERT(!result.is(r0) && !result.is(r1));

  if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
    Label skip_allocation, allocated;
    // Left operand lives in r1, right operand in r0.
    Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0;
    // If the overwritable operand is already an object, we skip the
    // allocation of a heap number.
    __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
    // Overwritable operand is a smi: allocate a heap number for the result.
    __ AllocateHeapNumber(
        result, scratch1, scratch2, heap_number_map, gc_required);
    __ b(&allocated);
    __ bind(&skip_allocation);
    // Use object holding the overwritable operand for result.
    __ mov(result, Operand(overwritable_operand));
    __ bind(&allocated);
  } else {
    ASSERT(mode == NO_OVERWRITE);
    // Neither operand may be overwritten: always allocate.
    __ AllocateHeapNumber(
        result, scratch1, scratch2, heap_number_map, gc_required);
  }
}
| 2265 | |
| 2266 | |
// Pushes the stub's two operands onto the stack for a runtime or stub call:
// r1 (left) is pushed first, then r0 (right), so left ends up at the higher
// address.
void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
  __ Push(r1, r0);
}
| 2270 | |
| 2271 | |
| 2272 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { | 1296 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
| 2273 // Untagged case: double input in d2, double result goes | 1297 // Untagged case: double input in d2, double result goes |
| 2274 // into d2. | 1298 // into d2. |
| 2275 // Tagged case: tagged input on top of stack and in r0, | 1299 // Tagged case: tagged input on top of stack and in r0, |
| 2276 // tagged result (heap number) goes into r0. | 1300 // tagged result (heap number) goes into r0. |
| 2277 | 1301 |
| 2278 Label input_not_smi; | 1302 Label input_not_smi; |
| 2279 Label loaded; | 1303 Label loaded; |
| 2280 Label calculate; | 1304 Label calculate; |
| 2281 Label invalid_cache; | 1305 Label invalid_cache; |
| (...skipping 4872 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 7154 __ bind(&fast_elements_case); | 6178 __ bind(&fast_elements_case); |
| 7155 GenerateCase(masm, FAST_ELEMENTS); | 6179 GenerateCase(masm, FAST_ELEMENTS); |
| 7156 } | 6180 } |
| 7157 | 6181 |
| 7158 | 6182 |
| 7159 #undef __ | 6183 #undef __ |
| 7160 | 6184 |
| 7161 } } // namespace v8::internal | 6185 } } // namespace v8::internal |
| 7162 | 6186 |
| 7163 #endif // V8_TARGET_ARCH_ARM | 6187 #endif // V8_TARGET_ARCH_ARM |
| OLD | NEW |