| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 218 matching lines...) |
| 229 Label ok; | 229 Label ok; |
| 230 JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear); | 230 JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear); |
| 231 int3(); | 231 int3(); |
| 232 bind(&ok); | 232 bind(&ok); |
| 233 } | 233 } |
| 234 // Load store buffer top. | 234 // Load store buffer top. |
| 235 LoadRoot(scratch, Heap::kStoreBufferTopRootIndex); | 235 LoadRoot(scratch, Heap::kStoreBufferTopRootIndex); |
| 236 // Store pointer to buffer. | 236 // Store pointer to buffer. |
| 237 movp(Operand(scratch, 0), addr); | 237 movp(Operand(scratch, 0), addr); |
| 238 // Increment buffer top. | 238 // Increment buffer top. |
| 239 addq(scratch, Immediate(kPointerSize)); | 239 addp(scratch, Immediate(kPointerSize)); |
| 240 // Write back new top of buffer. | 240 // Write back new top of buffer. |
| 241 StoreRoot(scratch, Heap::kStoreBufferTopRootIndex); | 241 StoreRoot(scratch, Heap::kStoreBufferTopRootIndex); |
| 242 // Call stub on end of buffer. | 242 // Call stub on end of buffer. |
| 243 Label done; | 243 Label done; |
| 244 // Check for end of buffer. | 244 // Check for end of buffer. |
| 245 testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit)); | 245 testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit)); |
| 246 if (and_then == kReturnAtEnd) { | 246 if (and_then == kReturnAtEnd) { |
| 247 Label buffer_overflowed; | 247 Label buffer_overflowed; |
| 248 j(not_equal, &buffer_overflowed, Label::kNear); | 248 j(not_equal, &buffer_overflowed, Label::kNear); |
| 249 ret(0); | 249 ret(0); |
| (...skipping 34 matching lines...) |
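
The hunk above is the tail of RememberedSetHelper: the store-buffer top lives in the root array, the slot address is written at the top, the top is bumped by one pointer, and a single bit in the new top address doubles as the overflow flag. A minimal standalone model of that protocol, not V8 code; the buffer size, its 2 * kStoreBufferSize base alignment, and the numeric stand-in for the base address are illustrative assumptions:

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kStoreBufferSize = 1 << 16;           // assumed
    constexpr uintptr_t kStoreBufferOverflowBit = kStoreBufferSize;

    // One recorded write; returns true when the overflow stub is due.
    bool BumpTop(uintptr_t* top) {
      // movp(Operand(scratch, 0), addr) would store the slot address here.
      *top += sizeof(uintptr_t);             // addp(scratch, kPointerSize)
      // testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit)):
      return (*top & kStoreBufferOverflowBit) != 0;
    }

    int main() {
      // A base at a 2 * kStoreBufferSize boundary keeps the overflow bit
      // clear until top walks past the end of the buffer.
      uintptr_t top = 4 * kStoreBufferSize;  // stand-in for the real base
      int stub_calls = 0;
      for (unsigned i = 0; i < kStoreBufferSize / sizeof(uintptr_t); i++) {
        if (BumpTop(&top)) stub_calls++;
      }
      assert(stub_calls == 1);               // only the final bump overflows
    }
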
| 284 Move(kScratchRegister, ExternalReference::new_space_start(isolate())); | 284 Move(kScratchRegister, ExternalReference::new_space_start(isolate())); |
| 285 cmpq(scratch, kScratchRegister); | 285 cmpq(scratch, kScratchRegister); |
| 286 j(cc, branch, distance); | 286 j(cc, branch, distance); |
| 287 } else { | 287 } else { |
| 288 ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))); | 288 ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))); |
| 289 intptr_t new_space_start = | 289 intptr_t new_space_start = |
| 290 reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart()); | 290 reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart()); |
| 291 Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start), | 291 Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start), |
| 292 Assembler::RelocInfoNone()); | 292 Assembler::RelocInfoNone()); |
| 293 if (scratch.is(object)) { | 293 if (scratch.is(object)) { |
| 294 addq(scratch, kScratchRegister); | 294 addp(scratch, kScratchRegister); |
| 295 } else { | 295 } else { |
| 296 lea(scratch, Operand(object, kScratchRegister, times_1, 0)); | 296 lea(scratch, Operand(object, kScratchRegister, times_1, 0)); |
| 297 } | 297 } |
| 298 and_(scratch, | 298 and_(scratch, |
| 299 Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask()))); | 299 Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask()))); |
| 300 j(cc, branch, distance); | 300 j(cc, branch, distance); |
| 301 } | 301 } |
| 302 } | 302 } |
| 303 | 303 |
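
The else-branch above checks new-space membership with pure arithmetic: add the negated space start, then mask. Under the assumption that the new space is a power-of-two size and size-aligned, an address is inside it exactly when ((addr - start) & mask) == 0. A small self-contained sketch with made-up start and size values:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t kNewSpaceSize  = 1u << 20;              // assumed
      const uintptr_t kNewSpaceStart = 16u << 20;             // size-aligned
      const uintptr_t kNewSpaceMask  = ~(kNewSpaceSize - 1);  // NewSpaceMask()

      auto in_new_space = [&](uintptr_t addr) {
        // lea(scratch, Operand(object, kScratchRegister, times_1, 0)) with
        // kScratchRegister == -kNewSpaceStart, then and_ with the mask:
        return ((addr - kNewSpaceStart) & kNewSpaceMask) == 0;
      };

      assert(in_new_space(kNewSpaceStart));
      assert(in_new_space(kNewSpaceStart + kNewSpaceSize - 1));
      assert(!in_new_space(kNewSpaceStart + kNewSpaceSize));
      assert(!in_new_space(kNewSpaceStart - 1));
    }
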
| 304 | 304 |
| (...skipping 248 matching lines...) |
| 553 } | 553 } |
| 554 | 554 |
| 555 | 555 |
| 556 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { | 556 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { |
| 557 return has_frame_ || !stub->SometimesSetsUpAFrame(); | 557 return has_frame_ || !stub->SometimesSetsUpAFrame(); |
| 558 } | 558 } |
| 559 | 559 |
| 560 | 560 |
| 561 void MacroAssembler::IllegalOperation(int num_arguments) { | 561 void MacroAssembler::IllegalOperation(int num_arguments) { |
| 562 if (num_arguments > 0) { | 562 if (num_arguments > 0) { |
| 563 addq(rsp, Immediate(num_arguments * kPointerSize)); | 563 addp(rsp, Immediate(num_arguments * kPointerSize)); |
| 564 } | 564 } |
| 565 LoadRoot(rax, Heap::kUndefinedValueRootIndex); | 565 LoadRoot(rax, Heap::kUndefinedValueRootIndex); |
| 566 } | 566 } |
| 567 | 567 |
| 568 | 568 |
| 569 void MacroAssembler::IndexFromHash(Register hash, Register index) { | 569 void MacroAssembler::IndexFromHash(Register hash, Register index) { |
| 570 // The assert checks that the constants for the maximum number of digits | 570 // The assert checks that the constants for the maximum number of digits |
| 571 // for an array index cached in the hash field and the number of bits | 571 // for an array index cached in the hash field and the number of bits |
| 572 // reserved for it do not conflict. | 572 // reserved for it do not conflict. |
| 573 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < | 573 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < |
| (...skipping 305 matching lines...) |
| 879 // store the registers in any particular way, but we do have to store and | 879 // store the registers in any particular way, but we do have to store and |
| 880 // restore them. | 880 // restore them. |
| 881 for (int i = 0; i < kNumberOfSavedRegs; i++) { | 881 for (int i = 0; i < kNumberOfSavedRegs; i++) { |
| 882 Register reg = saved_regs[i]; | 882 Register reg = saved_regs[i]; |
| 883 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) { | 883 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) { |
| 884 pushq(reg); | 884 pushq(reg); |
| 885 } | 885 } |
| 886 } | 886 } |
| 887 // R12 to r15 are callee-saved on all platforms. | 887 // R12 to r15 are callee-saved on all platforms. |
| 888 if (fp_mode == kSaveFPRegs) { | 888 if (fp_mode == kSaveFPRegs) { |
| 889 subq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters)); | 889 subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters)); |
| 890 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) { | 890 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) { |
| 891 XMMRegister reg = XMMRegister::from_code(i); | 891 XMMRegister reg = XMMRegister::from_code(i); |
| 892 movsd(Operand(rsp, i * kDoubleSize), reg); | 892 movsd(Operand(rsp, i * kDoubleSize), reg); |
| 893 } | 893 } |
| 894 } | 894 } |
| 895 } | 895 } |
| 896 | 896 |
| 897 | 897 |
| 898 void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, | 898 void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, |
| 899 Register exclusion1, | 899 Register exclusion1, |
| 900 Register exclusion2, | 900 Register exclusion2, |
| 901 Register exclusion3) { | 901 Register exclusion3) { |
| 902 if (fp_mode == kSaveFPRegs) { | 902 if (fp_mode == kSaveFPRegs) { |
| 903 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) { | 903 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) { |
| 904 XMMRegister reg = XMMRegister::from_code(i); | 904 XMMRegister reg = XMMRegister::from_code(i); |
| 905 movsd(reg, Operand(rsp, i * kDoubleSize)); | 905 movsd(reg, Operand(rsp, i * kDoubleSize)); |
| 906 } | 906 } |
| 907 addq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters)); | 907 addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters)); |
| 908 } | 908 } |
| 909 for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) { | 909 for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) { |
| 910 Register reg = saved_regs[i]; | 910 Register reg = saved_regs[i]; |
| 911 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) { | 911 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) { |
| 912 popq(reg); | 912 popq(reg); |
| 913 } | 913 } |
| 914 } | 914 } |
| 915 } | 915 } |
| 916 | 916 |
| 917 | 917 |
| (...skipping 524 matching lines...) |
| 1442 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) { | 1442 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) { |
| 1443 if (constant->value() == 0) { | 1443 if (constant->value() == 0) { |
| 1444 if (!dst.is(src)) { | 1444 if (!dst.is(src)) { |
| 1445 movp(dst, src); | 1445 movp(dst, src); |
| 1446 } | 1446 } |
| 1447 return; | 1447 return; |
| 1448 } else if (dst.is(src)) { | 1448 } else if (dst.is(src)) { |
| 1449 ASSERT(!dst.is(kScratchRegister)); | 1449 ASSERT(!dst.is(kScratchRegister)); |
| 1450 switch (constant->value()) { | 1450 switch (constant->value()) { |
| 1451 case 1: | 1451 case 1: |
| 1452 addq(dst, kSmiConstantRegister); | 1452 addp(dst, kSmiConstantRegister); |
| 1453 return; | 1453 return; |
| 1454 case 2: | 1454 case 2: |
| 1455 lea(dst, Operand(src, kSmiConstantRegister, times_2, 0)); | 1455 lea(dst, Operand(src, kSmiConstantRegister, times_2, 0)); |
| 1456 return; | 1456 return; |
| 1457 case 4: | 1457 case 4: |
| 1458 lea(dst, Operand(src, kSmiConstantRegister, times_4, 0)); | 1458 lea(dst, Operand(src, kSmiConstantRegister, times_4, 0)); |
| 1459 return; | 1459 return; |
| 1460 case 8: | 1460 case 8: |
| 1461 lea(dst, Operand(src, kSmiConstantRegister, times_8, 0)); | 1461 lea(dst, Operand(src, kSmiConstantRegister, times_8, 0)); |
| 1462 return; | 1462 return; |
| 1463 default: | 1463 default: |
| 1464 Register constant_reg = GetSmiConstant(constant); | 1464 Register constant_reg = GetSmiConstant(constant); |
| 1465 addq(dst, constant_reg); | 1465 addp(dst, constant_reg); |
| 1466 return; | 1466 return; |
| 1467 } | 1467 } |
| 1468 } else { | 1468 } else { |
| 1469 switch (constant->value()) { | 1469 switch (constant->value()) { |
| 1470 case 1: | 1470 case 1: |
| 1471 lea(dst, Operand(src, kSmiConstantRegister, times_1, 0)); | 1471 lea(dst, Operand(src, kSmiConstantRegister, times_1, 0)); |
| 1472 return; | 1472 return; |
| 1473 case 2: | 1473 case 2: |
| 1474 lea(dst, Operand(src, kSmiConstantRegister, times_2, 0)); | 1474 lea(dst, Operand(src, kSmiConstantRegister, times_2, 0)); |
| 1475 return; | 1475 return; |
| 1476 case 4: | 1476 case 4: |
| 1477 lea(dst, Operand(src, kSmiConstantRegister, times_4, 0)); | 1477 lea(dst, Operand(src, kSmiConstantRegister, times_4, 0)); |
| 1478 return; | 1478 return; |
| 1479 case 8: | 1479 case 8: |
| 1480 lea(dst, Operand(src, kSmiConstantRegister, times_8, 0)); | 1480 lea(dst, Operand(src, kSmiConstantRegister, times_8, 0)); |
| 1481 return; | 1481 return; |
| 1482 default: | 1482 default: |
| 1483 LoadSmiConstant(dst, constant); | 1483 LoadSmiConstant(dst, constant); |
| 1484 addq(dst, src); | 1484 addp(dst, src); |
| 1485 return; | 1485 return; |
| 1486 } | 1486 } |
| 1487 } | 1487 } |
| 1488 } | 1488 } |
| 1489 | 1489 |
| 1490 | 1490 |
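
The scale-factor cases above lean on the smi encoding: with kSmiShift == 32 on this port, Smi(n) is just n * Smi(1), and kSmiConstantRegister is assumed to be kept loaded with Smi(1), so the constants 1, 2, 4 and 8 each cost a single addp or lea. A standalone sketch of the arithmetic (the encoding shift comes from this file; the rest is scaffolding):

    #include <cassert>
    #include <cstdint>

    int64_t SmiFromInt(int32_t v) { return static_cast<int64_t>(v) << 32; }

    int main() {
      const int64_t kSmiConstantReg = SmiFromInt(1);  // assumed always live
      int64_t src = SmiFromInt(40);
      // lea(dst, Operand(src, kSmiConstantRegister, times_2, 0)):
      int64_t dst = src + kSmiConstantReg * 2;
      assert(dst == SmiFromInt(42));
      // times_4 / times_8 add Smi(4) / Smi(8) the same way; other constants
      // fall back to GetSmiConstant plus a plain addp.
    }
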
| 1491 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) { | 1491 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) { |
| 1492 if (constant->value() != 0) { | 1492 if (constant->value() != 0) { |
| 1493 addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value())); | 1493 addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value())); |
| 1494 } | 1494 } |
| 1495 } | 1495 } |
| 1496 | 1496 |
| 1497 | 1497 |
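
SmiAddConstant on an Operand exploits the same encoding: the 32-bit payload occupies the upper half of the word, so a 32-bit addl at byte offset kSmiShift / kBitsPerByte == 4 adds Smi::FromInt(constant) without touching the tag half. A hedged sketch, assuming the little-endian layout of x64:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      int64_t slot = static_cast<int64_t>(7) << 32;    // Smi(7) in memory
      int32_t high;                                    // the payload dword
      std::memcpy(&high, reinterpret_cast<char*>(&slot) + 4, sizeof(high));
      high += 5;            // addl(Operand(dst, 4), Immediate(5))
      std::memcpy(reinterpret_cast<char*>(&slot) + 4, &high, sizeof(high));
      assert(slot == static_cast<int64_t>(12) << 32);  // Smi(12)
    }
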
| 1498 void MacroAssembler::SmiAddConstant(Register dst, | 1498 void MacroAssembler::SmiAddConstant(Register dst, |
| 1499 Register src, | 1499 Register src, |
| 1500 Smi* constant, | 1500 Smi* constant, |
| 1501 SmiOperationExecutionMode mode, | 1501 SmiOperationExecutionMode mode, |
| 1502 Label* bailout_label, | 1502 Label* bailout_label, |
| 1503 Label::Distance near_jump) { | 1503 Label::Distance near_jump) { |
| 1504 if (constant->value() == 0) { | 1504 if (constant->value() == 0) { |
| 1505 if (!dst.is(src)) { | 1505 if (!dst.is(src)) { |
| 1506 movp(dst, src); | 1506 movp(dst, src); |
| 1507 } | 1507 } |
| 1508 } else if (dst.is(src)) { | 1508 } else if (dst.is(src)) { |
| 1509 ASSERT(!dst.is(kScratchRegister)); | 1509 ASSERT(!dst.is(kScratchRegister)); |
| 1510 LoadSmiConstant(kScratchRegister, constant); | 1510 LoadSmiConstant(kScratchRegister, constant); |
| 1511 addq(dst, kScratchRegister); | 1511 addp(dst, kScratchRegister); |
| 1512 if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) { | 1512 if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) { |
| 1513 j(no_overflow, bailout_label, near_jump); | 1513 j(no_overflow, bailout_label, near_jump); |
| 1514 ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER)); | 1514 ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER)); |
| 1515 subq(dst, kScratchRegister); | 1515 subp(dst, kScratchRegister); |
| 1516 } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) { | 1516 } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) { |
| 1517 if (mode.Contains(PRESERVE_SOURCE_REGISTER)) { | 1517 if (mode.Contains(PRESERVE_SOURCE_REGISTER)) { |
| 1518 Label done; | 1518 Label done; |
| 1519 j(no_overflow, &done, Label::kNear); | 1519 j(no_overflow, &done, Label::kNear); |
| 1520 subq(dst, kScratchRegister); | 1520 subp(dst, kScratchRegister); |
| 1521 jmp(bailout_label, near_jump); | 1521 jmp(bailout_label, near_jump); |
| 1522 bind(&done); | 1522 bind(&done); |
| 1523 } else { | 1523 } else { |
| 1524 // Bail out on overflow without preserving src. | 1524 // Bail out on overflow without preserving src. |
| 1525 j(overflow, bailout_label, near_jump); | 1525 j(overflow, bailout_label, near_jump); |
| 1526 } | 1526 } |
| 1527 } else { | 1527 } else { |
| 1528 CHECK(mode.IsEmpty()); | 1528 CHECK(mode.IsEmpty()); |
| 1529 } | 1529 } |
| 1530 } else { | 1530 } else { |
| 1531 ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER)); | 1531 ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER)); |
| 1532 ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW)); | 1532 ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW)); |
| 1533 LoadSmiConstant(dst, constant); | 1533 LoadSmiConstant(dst, constant); |
| 1534 addq(dst, src); | 1534 addp(dst, src); |
| 1535 j(overflow, bailout_label, near_jump); | 1535 j(overflow, bailout_label, near_jump); |
| 1536 } | 1536 } |
| 1537 } | 1537 } |
| 1538 | 1538 |
| 1539 | 1539 |
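
The mode-driven variant above fixes a protocol: when dst aliases src, the add happens first and is undone on the bailout path so PRESERVE_SOURCE_REGISTER holds. A standalone model of that add-then-undo shape using the GCC/Clang overflow builtin (the function name is ours, not V8's):

    #include <cassert>
    #include <cstdint>

    // Returns false (bailing out) on overflow, leaving *dst unchanged,
    // mirroring the addp + j(no_overflow) + subp sequence above.
    bool AddPreservingOnOverflow(int64_t* dst, int64_t constant) {
      int64_t sum;
      if (__builtin_add_overflow(*dst, constant, &sum)) return false;
      *dst = sum;
      return true;
    }

    int main() {
      int64_t v = INT64_MAX;
      assert(!AddPreservingOnOverflow(&v, 1) && v == INT64_MAX);  // restored
      v = 1;
      assert(AddPreservingOnOverflow(&v, 2) && v == 3);
    }
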
| 1540 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) { | 1540 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) { |
| 1541 if (constant->value() == 0) { | 1541 if (constant->value() == 0) { |
| 1542 if (!dst.is(src)) { | 1542 if (!dst.is(src)) { |
| 1543 movp(dst, src); | 1543 movp(dst, src); |
| 1544 } | 1544 } |
| 1545 } else if (dst.is(src)) { | 1545 } else if (dst.is(src)) { |
| 1546 ASSERT(!dst.is(kScratchRegister)); | 1546 ASSERT(!dst.is(kScratchRegister)); |
| 1547 Register constant_reg = GetSmiConstant(constant); | 1547 Register constant_reg = GetSmiConstant(constant); |
| 1548 subq(dst, constant_reg); | 1548 subp(dst, constant_reg); |
| 1549 } else { | 1549 } else { |
| 1550 if (constant->value() == Smi::kMinValue) { | 1550 if (constant->value() == Smi::kMinValue) { |
| 1551 LoadSmiConstant(dst, constant); | 1551 LoadSmiConstant(dst, constant); |
| 1552 // Adding and subtracting the min-value give the same result; they | 1552 // Adding and subtracting the min-value give the same result; they |
| 1553 // differ only in the overflow flag, which we don't check here. | 1553 // differ only in the overflow flag, which we don't check here. |
| 1554 addq(dst, src); | 1554 addp(dst, src); |
| 1555 } else { | 1555 } else { |
| 1556 // Subtract by adding the negation. | 1556 // Subtract by adding the negation. |
| 1557 LoadSmiConstant(dst, Smi::FromInt(-constant->value())); | 1557 LoadSmiConstant(dst, Smi::FromInt(-constant->value())); |
| 1558 addq(dst, src); | 1558 addp(dst, src); |
| 1559 } | 1559 } |
| 1560 } | 1560 } |
| 1561 } | 1561 } |
| 1562 | 1562 |
| 1563 | 1563 |
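
The Smi::kMinValue branch above relies on two's-complement wraparound: kMinValue has no representable negation, but x + m and x - m produce the same bit pattern when m is the minimum value, differing only in the overflow flag, which this no-check variant ignores. A two-line demonstration:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t m = 0x8000000000000000ull;  // kMinValue's bit pattern
      const uint64_t x = 1234;
      assert(x - m == x + m);                    // identical mod 2^64
    }
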
| 1564 void MacroAssembler::SmiSubConstant(Register dst, | 1564 void MacroAssembler::SmiSubConstant(Register dst, |
| 1565 Register src, | 1565 Register src, |
| 1566 Smi* constant, | 1566 Smi* constant, |
| 1567 SmiOperationExecutionMode mode, | 1567 SmiOperationExecutionMode mode, |
| 1568 Label* bailout_label, | 1568 Label* bailout_label, |
| 1569 Label::Distance near_jump) { | 1569 Label::Distance near_jump) { |
| 1570 if (constant->value() == 0) { | 1570 if (constant->value() == 0) { |
| 1571 if (!dst.is(src)) { | 1571 if (!dst.is(src)) { |
| 1572 movp(dst, src); | 1572 movp(dst, src); |
| 1573 } | 1573 } |
| 1574 } else if (dst.is(src)) { | 1574 } else if (dst.is(src)) { |
| 1575 ASSERT(!dst.is(kScratchRegister)); | 1575 ASSERT(!dst.is(kScratchRegister)); |
| 1576 LoadSmiConstant(kScratchRegister, constant); | 1576 LoadSmiConstant(kScratchRegister, constant); |
| 1577 subq(dst, kScratchRegister); | 1577 subp(dst, kScratchRegister); |
| 1578 if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) { | 1578 if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) { |
| 1579 j(no_overflow, bailout_label, near_jump); | 1579 j(no_overflow, bailout_label, near_jump); |
| 1580 ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER)); | 1580 ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER)); |
| 1581 addq(dst, kScratchRegister); | 1581 addp(dst, kScratchRegister); |
| 1582 } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) { | 1582 } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) { |
| 1583 if (mode.Contains(PRESERVE_SOURCE_REGISTER)) { | 1583 if (mode.Contains(PRESERVE_SOURCE_REGISTER)) { |
| 1584 Label done; | 1584 Label done; |
| 1585 j(no_overflow, &done, Label::kNear); | 1585 j(no_overflow, &done, Label::kNear); |
| 1586 addq(dst, kScratchRegister); | 1586 addp(dst, kScratchRegister); |
| 1587 jmp(bailout_label, near_jump); | 1587 jmp(bailout_label, near_jump); |
| 1588 bind(&done); | 1588 bind(&done); |
| 1589 } else { | 1589 } else { |
| 1590 // Bail out on overflow without preserving src. | 1590 // Bail out on overflow without preserving src. |
| 1591 j(overflow, bailout_label, near_jump); | 1591 j(overflow, bailout_label, near_jump); |
| 1592 } | 1592 } |
| 1593 } else { | 1593 } else { |
| 1594 CHECK(mode.IsEmpty()); | 1594 CHECK(mode.IsEmpty()); |
| 1595 } | 1595 } |
| 1596 } else { | 1596 } else { |
| 1597 ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER)); | 1597 ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER)); |
| 1598 ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW)); | 1598 ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW)); |
| 1599 if (constant->value() == Smi::kMinValue) { | 1599 if (constant->value() == Smi::kMinValue) { |
| 1600 ASSERT(!dst.is(kScratchRegister)); | 1600 ASSERT(!dst.is(kScratchRegister)); |
| 1601 movp(dst, src); | 1601 movp(dst, src); |
| 1602 LoadSmiConstant(kScratchRegister, constant); | 1602 LoadSmiConstant(kScratchRegister, constant); |
| 1603 subq(dst, kScratchRegister); | 1603 subp(dst, kScratchRegister); |
| 1604 j(overflow, bailout_label, near_jump); | 1604 j(overflow, bailout_label, near_jump); |
| 1605 } else { | 1605 } else { |
| 1606 // Subtract by adding the negation. | 1606 // Subtract by adding the negation. |
| 1607 LoadSmiConstant(dst, Smi::FromInt(-(constant->value()))); | 1607 LoadSmiConstant(dst, Smi::FromInt(-(constant->value()))); |
| 1608 addq(dst, src); | 1608 addp(dst, src); |
| 1609 j(overflow, bailout_label, near_jump); | 1609 j(overflow, bailout_label, near_jump); |
| 1610 } | 1610 } |
| 1611 } | 1611 } |
| 1612 } | 1612 } |
| 1613 | 1613 |
| 1614 | 1614 |
| 1615 void MacroAssembler::SmiNeg(Register dst, | 1615 void MacroAssembler::SmiNeg(Register dst, |
| 1616 Register src, | 1616 Register src, |
| 1617 Label* on_smi_result, | 1617 Label* on_smi_result, |
| 1618 Label::Distance near_jump) { | 1618 Label::Distance near_jump) { |
| (...skipping 17 matching lines...) |
| 1636 | 1636 |
| 1637 template<class T> | 1637 template<class T> |
| 1638 static void SmiAddHelper(MacroAssembler* masm, | 1638 static void SmiAddHelper(MacroAssembler* masm, |
| 1639 Register dst, | 1639 Register dst, |
| 1640 Register src1, | 1640 Register src1, |
| 1641 T src2, | 1641 T src2, |
| 1642 Label* on_not_smi_result, | 1642 Label* on_not_smi_result, |
| 1643 Label::Distance near_jump) { | 1643 Label::Distance near_jump) { |
| 1644 if (dst.is(src1)) { | 1644 if (dst.is(src1)) { |
| 1645 Label done; | 1645 Label done; |
| 1646 masm->addq(dst, src2); | 1646 masm->addp(dst, src2); |
| 1647 masm->j(no_overflow, &done, Label::kNear); | 1647 masm->j(no_overflow, &done, Label::kNear); |
| 1648 // Restore src1. | 1648 // Restore src1. |
| 1649 masm->subq(dst, src2); | 1649 masm->subp(dst, src2); |
| 1650 masm->jmp(on_not_smi_result, near_jump); | 1650 masm->jmp(on_not_smi_result, near_jump); |
| 1651 masm->bind(&done); | 1651 masm->bind(&done); |
| 1652 } else { | 1652 } else { |
| 1653 masm->movp(dst, src1); | 1653 masm->movp(dst, src1); |
| 1654 masm->addq(dst, src2); | 1654 masm->addp(dst, src2); |
| 1655 masm->j(overflow, on_not_smi_result, near_jump); | 1655 masm->j(overflow, on_not_smi_result, near_jump); |
| 1656 } | 1656 } |
| 1657 } | 1657 } |
| 1658 | 1658 |
| 1659 | 1659 |
| 1660 void MacroAssembler::SmiAdd(Register dst, | 1660 void MacroAssembler::SmiAdd(Register dst, |
| 1661 Register src1, | 1661 Register src1, |
| 1662 Register src2, | 1662 Register src2, |
| 1663 Label* on_not_smi_result, | 1663 Label* on_not_smi_result, |
| 1664 Label::Distance near_jump) { | 1664 Label::Distance near_jump) { |
| (...skipping 15 matching lines...) |
| 1680 | 1680 |
| 1681 | 1681 |
| 1682 void MacroAssembler::SmiAdd(Register dst, | 1682 void MacroAssembler::SmiAdd(Register dst, |
| 1683 Register src1, | 1683 Register src1, |
| 1684 Register src2) { | 1684 Register src2) { |
| 1685 // No overflow checking. Use only when it's known that | 1685 // No overflow checking. Use only when it's known that |
| 1686 // overflowing is impossible. | 1686 // overflowing is impossible. |
| 1687 if (!dst.is(src1)) { | 1687 if (!dst.is(src1)) { |
| 1688 if (emit_debug_code()) { | 1688 if (emit_debug_code()) { |
| 1689 movp(kScratchRegister, src1); | 1689 movp(kScratchRegister, src1); |
| 1690 addq(kScratchRegister, src2); | 1690 addp(kScratchRegister, src2); |
| 1691 Check(no_overflow, kSmiAdditionOverflow); | 1691 Check(no_overflow, kSmiAdditionOverflow); |
| 1692 } | 1692 } |
| 1693 lea(dst, Operand(src1, src2, times_1, 0)); | 1693 lea(dst, Operand(src1, src2, times_1, 0)); |
| 1694 } else { | 1694 } else { |
| 1695 addq(dst, src2); | 1695 addp(dst, src2); |
| 1696 Assert(no_overflow, kSmiAdditionOverflow); | 1696 Assert(no_overflow, kSmiAdditionOverflow); |
| 1697 } | 1697 } |
| 1698 } | 1698 } |
| 1699 | 1699 |
| 1700 | 1700 |
| 1701 template<class T> | 1701 template<class T> |
| 1702 static void SmiSubHelper(MacroAssembler* masm, | 1702 static void SmiSubHelper(MacroAssembler* masm, |
| 1703 Register dst, | 1703 Register dst, |
| 1704 Register src1, | 1704 Register src1, |
| 1705 T src2, | 1705 T src2, |
| 1706 Label* on_not_smi_result, | 1706 Label* on_not_smi_result, |
| 1707 Label::Distance near_jump) { | 1707 Label::Distance near_jump) { |
| 1708 if (dst.is(src1)) { | 1708 if (dst.is(src1)) { |
| 1709 Label done; | 1709 Label done; |
| 1710 masm->subq(dst, src2); | 1710 masm->subp(dst, src2); |
| 1711 masm->j(no_overflow, &done, Label::kNear); | 1711 masm->j(no_overflow, &done, Label::kNear); |
| 1712 // Restore src1. | 1712 // Restore src1. |
| 1713 masm->addq(dst, src2); | 1713 masm->addp(dst, src2); |
| 1714 masm->jmp(on_not_smi_result, near_jump); | 1714 masm->jmp(on_not_smi_result, near_jump); |
| 1715 masm->bind(&done); | 1715 masm->bind(&done); |
| 1716 } else { | 1716 } else { |
| 1717 masm->movp(dst, src1); | 1717 masm->movp(dst, src1); |
| 1718 masm->subq(dst, src2); | 1718 masm->subp(dst, src2); |
| 1719 masm->j(overflow, on_not_smi_result, near_jump); | 1719 masm->j(overflow, on_not_smi_result, near_jump); |
| 1720 } | 1720 } |
| 1721 } | 1721 } |
| 1722 | 1722 |
| 1723 | 1723 |
| 1724 void MacroAssembler::SmiSub(Register dst, | 1724 void MacroAssembler::SmiSub(Register dst, |
| 1725 Register src1, | 1725 Register src1, |
| 1726 Register src2, | 1726 Register src2, |
| 1727 Label* on_not_smi_result, | 1727 Label* on_not_smi_result, |
| 1728 Label::Distance near_jump) { | 1728 Label::Distance near_jump) { |
| (...skipping 17 matching lines...) |
| 1746 template<class T> | 1746 template<class T> |
| 1747 static void SmiSubNoOverflowHelper(MacroAssembler* masm, | 1747 static void SmiSubNoOverflowHelper(MacroAssembler* masm, |
| 1748 Register dst, | 1748 Register dst, |
| 1749 Register src1, | 1749 Register src1, |
| 1750 T src2) { | 1750 T src2) { |
| 1751 // No overflow checking. Use only when it's known that | 1751 // No overflow checking. Use only when it's known that |
| 1752 // overflowing is impossible (e.g., subtracting two positive smis). | 1752 // overflowing is impossible (e.g., subtracting two positive smis). |
| 1753 if (!dst.is(src1)) { | 1753 if (!dst.is(src1)) { |
| 1754 masm->movp(dst, src1); | 1754 masm->movp(dst, src1); |
| 1755 } | 1755 } |
| 1756 masm->subq(dst, src2); | 1756 masm->subp(dst, src2); |
| 1757 masm->Assert(no_overflow, kSmiSubtractionOverflow); | 1757 masm->Assert(no_overflow, kSmiSubtractionOverflow); |
| 1758 } | 1758 } |
| 1759 | 1759 |
| 1760 | 1760 |
| 1761 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) { | 1761 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) { |
| 1762 ASSERT(!dst.is(src2)); | 1762 ASSERT(!dst.is(src2)); |
| 1763 SmiSubNoOverflowHelper<Register>(this, dst, src1, src2); | 1763 SmiSubNoOverflowHelper<Register>(this, dst, src1, src2); |
| 1764 } | 1764 } |
| 1765 | 1765 |
| 1766 | 1766 |
| (...skipping 11 matching lines...) |
| 1778 Label::Distance near_jump) { | 1778 Label::Distance near_jump) { |
| 1779 ASSERT(!dst.is(src2)); | 1779 ASSERT(!dst.is(src2)); |
| 1780 ASSERT(!dst.is(kScratchRegister)); | 1780 ASSERT(!dst.is(kScratchRegister)); |
| 1781 ASSERT(!src1.is(kScratchRegister)); | 1781 ASSERT(!src1.is(kScratchRegister)); |
| 1782 ASSERT(!src2.is(kScratchRegister)); | 1782 ASSERT(!src2.is(kScratchRegister)); |
| 1783 | 1783 |
| 1784 if (dst.is(src1)) { | 1784 if (dst.is(src1)) { |
| 1785 Label failure, zero_correct_result; | 1785 Label failure, zero_correct_result; |
| 1786 movp(kScratchRegister, src1); // Create backup for later testing. | 1786 movp(kScratchRegister, src1); // Create backup for later testing. |
| 1787 SmiToInteger64(dst, src1); | 1787 SmiToInteger64(dst, src1); |
| 1788 imul(dst, src2); | 1788 imulp(dst, src2); |
| 1789 j(overflow, &failure, Label::kNear); | 1789 j(overflow, &failure, Label::kNear); |
| 1790 | 1790 |
| 1791 // Check for negative zero result. If product is zero, and one | 1791 // Check for negative zero result. If product is zero, and one |
| 1792 // argument is negative, go to slow case. | 1792 // argument is negative, go to slow case. |
| 1793 Label correct_result; | 1793 Label correct_result; |
| 1794 testq(dst, dst); | 1794 testq(dst, dst); |
| 1795 j(not_zero, &correct_result, Label::kNear); | 1795 j(not_zero, &correct_result, Label::kNear); |
| 1796 | 1796 |
| 1797 movp(dst, kScratchRegister); | 1797 movp(dst, kScratchRegister); |
| 1798 xor_(dst, src2); | 1798 xor_(dst, src2); |
| 1799 // Result was positive zero. | 1799 // Result was positive zero. |
| 1800 j(positive, &zero_correct_result, Label::kNear); | 1800 j(positive, &zero_correct_result, Label::kNear); |
| 1801 | 1801 |
| 1802 bind(&failure); // Reused failure exit, restores src1. | 1802 bind(&failure); // Reused failure exit, restores src1. |
| 1803 movp(src1, kScratchRegister); | 1803 movp(src1, kScratchRegister); |
| 1804 jmp(on_not_smi_result, near_jump); | 1804 jmp(on_not_smi_result, near_jump); |
| 1805 | 1805 |
| 1806 bind(&zero_correct_result); | 1806 bind(&zero_correct_result); |
| 1807 Set(dst, 0); | 1807 Set(dst, 0); |
| 1808 | 1808 |
| 1809 bind(&correct_result); | 1809 bind(&correct_result); |
| 1810 } else { | 1810 } else { |
| 1811 SmiToInteger64(dst, src1); | 1811 SmiToInteger64(dst, src1); |
| 1812 imul(dst, src2); | 1812 imulp(dst, src2); |
| 1813 j(overflow, on_not_smi_result, near_jump); | 1813 j(overflow, on_not_smi_result, near_jump); |
| 1814 // Check for negative zero result. If product is zero, and one | 1814 // Check for negative zero result. If product is zero, and one |
| 1815 // argument is negative, go to slow case. | 1815 // argument is negative, go to slow case. |
| 1816 Label correct_result; | 1816 Label correct_result; |
| 1817 testq(dst, dst); | 1817 testq(dst, dst); |
| 1818 j(not_zero, &correct_result, Label::kNear); | 1818 j(not_zero, &correct_result, Label::kNear); |
| 1819 // One of src1 and src2 is zero; check whether the other is | 1819 // One of src1 and src2 is zero; check whether the other is |
| 1820 // negative. | 1820 // negative. |
| 1821 movp(kScratchRegister, src1); | 1821 movp(kScratchRegister, src1); |
| 1822 xor_(kScratchRegister, src2); | 1822 xor_(kScratchRegister, src2); |
| (...skipping 346 matching lines...) |
| 2169 ASSERT_EQ(0, Smi::FromInt(0)); | 2169 ASSERT_EQ(0, Smi::FromInt(0)); |
| 2170 movl(kScratchRegister, Immediate(kSmiTagMask)); | 2170 movl(kScratchRegister, Immediate(kSmiTagMask)); |
| 2171 and_(kScratchRegister, src1); | 2171 and_(kScratchRegister, src1); |
| 2172 testl(kScratchRegister, src2); | 2172 testl(kScratchRegister, src2); |
| 2173 // If non-zero then neither operand is a smi. | 2173 // If non-zero then neither operand is a smi. |
| 2174 j(not_zero, on_not_smis, near_jump); | 2174 j(not_zero, on_not_smis, near_jump); |
| 2175 | 2175 |
| 2176 // Exactly one operand is a smi. | 2176 // Exactly one operand is a smi. |
| 2177 ASSERT_EQ(1, static_cast<int>(kSmiTagMask)); | 2177 ASSERT_EQ(1, static_cast<int>(kSmiTagMask)); |
| 2178 // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one. | 2178 // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one. |
| 2179 subq(kScratchRegister, Immediate(1)); | 2179 subp(kScratchRegister, Immediate(1)); |
| 2180 // If src1 is a smi, the scratch register is all 1s, else it is all 0s. | 2180 // If src1 is a smi, the scratch register is all 1s, else it is all 0s. |
| 2181 movp(dst, src1); | 2181 movp(dst, src1); |
| 2182 xor_(dst, src2); | 2182 xor_(dst, src2); |
| 2183 and_(dst, kScratchRegister); | 2183 and_(dst, kScratchRegister); |
| 2184 // If src1 is a smi, dst holds src1 ^ src2, else it is zero. | 2184 // If src1 is a smi, dst holds src1 ^ src2, else it is zero. |
| 2185 xor_(dst, src1); | 2185 xor_(dst, src1); |
| 2186 // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi. | 2186 // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi. |
| 2187 } | 2187 } |
| 2188 | 2188 |
| 2189 | 2189 |
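
This is SelectNonSmi's branch-free core: with smi tag 0 (per the asserts above), a smi has a clear low bit, so (src1 & 1) - 1 is all ones exactly when src1 is the smi, and two xors route the non-smi into dst without a branch. A self-contained model; the sample operands are made up:

    #include <cassert>
    #include <cstdint>

    uint64_t SelectNonSmi(uint64_t src1, uint64_t src2) {
      uint64_t scratch = (src1 & 1) - 1;       // all 1s iff src1 is a smi
      uint64_t dst = (src1 ^ src2) & scratch;  // src1 ^ src2, or 0
      return dst ^ src1;                       // src2 if src1 was the smi
    }

    int main() {
      const uint64_t smi = static_cast<uint64_t>(42) << 32;  // low bit 0
      const uint64_t heap_obj = 0x1001;                      // low bit 1
      assert(SelectNonSmi(smi, heap_obj) == heap_obj);
      assert(SelectNonSmi(heap_obj, smi) == heap_obj);
    }
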
| (...skipping 92 matching lines...) |
| 2282 Register scratch = scratch2; | 2282 Register scratch = scratch2; |
| 2283 | 2283 |
| 2284 // Load the number string cache. | 2284 // Load the number string cache. |
| 2285 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); | 2285 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); |
| 2286 | 2286 |
| 2287 // Make the hash mask from the length of the number string cache. It | 2287 // Make the hash mask from the length of the number string cache. It |
| 2288 // contains two elements (number and string) for each cache entry. | 2288 // contains two elements (number and string) for each cache entry. |
| 2289 SmiToInteger32( | 2289 SmiToInteger32( |
| 2290 mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); | 2290 mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); |
| 2291 shrl(mask, Immediate(1)); | 2291 shrl(mask, Immediate(1)); |
| 2292 subq(mask, Immediate(1)); // Make mask. | 2292 subp(mask, Immediate(1)); // Make mask. |
| 2293 | 2293 |
| 2294 // Calculate the entry in the number string cache. The hash value in the | 2294 // Calculate the entry in the number string cache. The hash value in the |
| 2295 // number string cache for smis is just the smi value, and the hash for | 2295 // number string cache for smis is just the smi value, and the hash for |
| 2296 // doubles is the xor of the upper and lower words. See | 2296 // doubles is the xor of the upper and lower words. See |
| 2297 // Heap::GetNumberStringCache. | 2297 // Heap::GetNumberStringCache. |
| 2298 Label is_smi; | 2298 Label is_smi; |
| 2299 Label load_result_from_cache; | 2299 Label load_result_from_cache; |
| 2300 JumpIfSmi(object, &is_smi); | 2300 JumpIfSmi(object, &is_smi); |
| 2301 CheckMap(object, | 2301 CheckMap(object, |
| 2302 isolate()->factory()->heap_number_map(), | 2302 isolate()->factory()->heap_number_map(), |
| (...skipping 257 matching lines...) |
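
The mask computed above encodes the cache layout: the backing FixedArray stores two slots (number, then string) per entry, so the probe mask is length / 2 - 1 and an entry's key slot lives at (hash & mask) * 2. A sketch with an assumed capacity:

    #include <cassert>

    int main() {
      const int length = 256;              // assumed FixedArray length
      const int mask = (length >> 1) - 1;  // shrl(mask, 1); subp(mask, 1)
      const int smi_hash = 12345;          // smi hash is the value itself
      int entry = smi_hash & mask;
      int number_index = entry * 2;        // key slot
      int string_index = number_index + 1; // cached string slot
      assert(string_index < length);
    }
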
| 2560 load_rax(cell.location(), RelocInfo::CELL); | 2560 load_rax(cell.location(), RelocInfo::CELL); |
| 2561 } else { | 2561 } else { |
| 2562 Move(dst, cell, RelocInfo::CELL); | 2562 Move(dst, cell, RelocInfo::CELL); |
| 2563 movp(dst, Operand(dst, 0)); | 2563 movp(dst, Operand(dst, 0)); |
| 2564 } | 2564 } |
| 2565 } | 2565 } |
| 2566 | 2566 |
| 2567 | 2567 |
| 2568 void MacroAssembler::Drop(int stack_elements) { | 2568 void MacroAssembler::Drop(int stack_elements) { |
| 2569 if (stack_elements > 0) { | 2569 if (stack_elements > 0) { |
| 2570 addq(rsp, Immediate(stack_elements * kPointerSize)); | 2570 addp(rsp, Immediate(stack_elements * kPointerSize)); |
| 2571 } | 2571 } |
| 2572 } | 2572 } |
| 2573 | 2573 |
| 2574 | 2574 |
| 2575 void MacroAssembler::Push(Register src) { | 2575 void MacroAssembler::Push(Register src) { |
| 2576 if (kPointerSize == kInt64Size) { | 2576 if (kPointerSize == kInt64Size) { |
| 2577 pushq(src); | 2577 pushq(src); |
| 2578 } else { | 2578 } else { |
| 2579 ASSERT(kPointerSize == kInt32Size); | 2579 ASSERT(kPointerSize == kInt32Size); |
| 2580 // x32 uses 64-bit push for rbp in the prologue. | 2580 // x32 uses 64-bit push for rbp in the prologue. |
| (...skipping 56 matching lines...) |
| 2637 popq(dst); | 2637 popq(dst); |
| 2638 } else { | 2638 } else { |
| 2639 ASSERT(kPointerSize == kInt32Size); | 2639 ASSERT(kPointerSize == kInt32Size); |
| 2640 Register scratch = dst.AddressUsesRegister(kScratchRegister) | 2640 Register scratch = dst.AddressUsesRegister(kScratchRegister) |
| 2641 ? kSmiConstantRegister : kScratchRegister; | 2641 ? kSmiConstantRegister : kScratchRegister; |
| 2642 movp(scratch, Operand(rsp, 0)); | 2642 movp(scratch, Operand(rsp, 0)); |
| 2643 movp(dst, scratch); | 2643 movp(dst, scratch); |
| 2644 leal(rsp, Operand(rsp, 4)); | 2644 leal(rsp, Operand(rsp, 4)); |
| 2645 if (scratch.is(kSmiConstantRegister)) { | 2645 if (scratch.is(kSmiConstantRegister)) { |
| 2646 // Restore kSmiConstantRegister. | 2646 // Restore kSmiConstantRegister. |
| 2647 movp(kSmiConstantRegister, Smi::FromInt(kSmiConstantRegisterValue), | 2647 movp(kSmiConstantRegister, |
| 2648 reinterpret_cast<void*>(Smi::FromInt(kSmiConstantRegisterValue)), |
| 2648 Assembler::RelocInfoNone()); | 2649 Assembler::RelocInfoNone()); |
| 2649 } | 2650 } |
| 2650 } | 2651 } |
| 2651 } | 2652 } |
| 2652 | 2653 |
| 2653 | 2654 |
| 2654 void MacroAssembler::TestBit(const Operand& src, int bits) { | 2655 void MacroAssembler::TestBit(const Operand& src, int bits) { |
| 2655 int byte_offset = bits / kBitsPerByte; | 2656 int byte_offset = bits / kBitsPerByte; |
| 2656 int bit_in_byte = bits & (kBitsPerByte - 1); | 2657 int bit_in_byte = bits & (kBitsPerByte - 1); |
| 2657 testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte)); | 2658 testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte)); |
| (...skipping 123 matching lines...) |
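
TestBit above splits a flat bit index into a byte offset and a bit within that byte so it can use a single one-byte testb. The same split in portable C++:

    #include <cassert>
    #include <cstdint>

    bool TestBit(const uint8_t* base, int bits) {
      int byte_offset = bits / 8;                     // kBitsPerByte == 8
      int bit_in_byte = bits & 7;
      return base[byte_offset] & (1 << bit_in_byte);  // testb(..., Immediate)
    }

    int main() {
      const uint8_t flags[4] = {0, 0, 0x20, 0};
      assert(TestBit(flags, 21));    // byte 2, bit 5
      assert(!TestBit(flags, 20));   // byte 2, bit 4
    }
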
| 2781 Pop(rdi); | 2782 Pop(rdi); |
| 2782 Pop(rsi); | 2783 Pop(rsi); |
| 2783 Pop(rbx); | 2784 Pop(rbx); |
| 2784 Pop(rdx); | 2785 Pop(rdx); |
| 2785 Pop(rcx); | 2786 Pop(rcx); |
| 2786 Pop(rax); | 2787 Pop(rax); |
| 2787 } | 2788 } |
| 2788 | 2789 |
| 2789 | 2790 |
| 2790 void MacroAssembler::Dropad() { | 2791 void MacroAssembler::Dropad() { |
| 2791 addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize)); | 2792 addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize)); |
| 2792 } | 2793 } |
| 2793 | 2794 |
| 2794 | 2795 |
| 2795 // The order in which general registers are pushed by Pushad: | 2796 // The order in which general registers are pushed by Pushad: |
| 2796 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15. | 2797 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15. |
| 2797 const int | 2798 const int |
| 2798 MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = { | 2799 MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = { |
| 2799 0, | 2800 0, |
| 2800 1, | 2801 1, |
| 2801 2, | 2802 2, |
| (...skipping 70 matching lines...) |
| 2872 Push(ExternalOperand(handler_address)); | 2873 Push(ExternalOperand(handler_address)); |
| 2873 // Set this new handler as the current one. | 2874 // Set this new handler as the current one. |
| 2874 movp(ExternalOperand(handler_address), rsp); | 2875 movp(ExternalOperand(handler_address), rsp); |
| 2875 } | 2876 } |
| 2876 | 2877 |
| 2877 | 2878 |
| 2878 void MacroAssembler::PopTryHandler() { | 2879 void MacroAssembler::PopTryHandler() { |
| 2879 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | 2880 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); |
| 2880 ExternalReference handler_address(Isolate::kHandlerAddress, isolate()); | 2881 ExternalReference handler_address(Isolate::kHandlerAddress, isolate()); |
| 2881 Pop(ExternalOperand(handler_address)); | 2882 Pop(ExternalOperand(handler_address)); |
| 2882 addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize)); | 2883 addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize)); |
| 2883 } | 2884 } |
| 2884 | 2885 |
| 2885 | 2886 |
| 2886 void MacroAssembler::JumpToHandlerEntry() { | 2887 void MacroAssembler::JumpToHandlerEntry() { |
| 2887 // Compute the handler entry address and jump to it. The handler table is | 2888 // Compute the handler entry address and jump to it. The handler table is |
| 2888 // a fixed array of (smi-tagged) code offsets. | 2889 // a fixed array of (smi-tagged) code offsets. |
| 2889 // rax = exception, rdi = code object, rdx = state. | 2890 // rax = exception, rdi = code object, rdx = state. |
| 2890 movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset)); | 2891 movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset)); |
| 2891 shr(rdx, Immediate(StackHandler::kKindWidth)); | 2892 shr(rdx, Immediate(StackHandler::kKindWidth)); |
| 2892 movp(rdx, | 2893 movp(rdx, |
| (...skipping 93 matching lines...) |
| 2986 void MacroAssembler::Ret() { | 2987 void MacroAssembler::Ret() { |
| 2987 ret(0); | 2988 ret(0); |
| 2988 } | 2989 } |
| 2989 | 2990 |
| 2990 | 2991 |
| 2991 void MacroAssembler::Ret(int bytes_dropped, Register scratch) { | 2992 void MacroAssembler::Ret(int bytes_dropped, Register scratch) { |
| 2992 if (is_uint16(bytes_dropped)) { | 2993 if (is_uint16(bytes_dropped)) { |
| 2993 ret(bytes_dropped); | 2994 ret(bytes_dropped); |
| 2994 } else { | 2995 } else { |
| 2995 PopReturnAddressTo(scratch); | 2996 PopReturnAddressTo(scratch); |
| 2996 addq(rsp, Immediate(bytes_dropped)); | 2997 addp(rsp, Immediate(bytes_dropped)); |
| 2997 PushReturnAddressFrom(scratch); | 2998 PushReturnAddressFrom(scratch); |
| 2998 ret(0); | 2999 ret(0); |
| 2999 } | 3000 } |
| 3000 } | 3001 } |
| 3001 | 3002 |
| 3002 | 3003 |
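
Ret(bytes_dropped, scratch) above special-cases large drops because the x64 ret imm16 encoding only carries a 16-bit byte count; beyond that, the return address is popped into a scratch register, rsp is adjusted, and the address is pushed back. A vector-as-stack model of the large path (top of stack at back()):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
      std::vector<uint64_t> stack = {7, 8, 9, 0xCAFE};  // 0xCAFE: return addr
      const int bytes_dropped = 3 * 8;   // assume too large for ret imm16
      uint64_t scratch = stack.back();   // PopReturnAddressTo(scratch)
      stack.pop_back();
      stack.resize(stack.size() - bytes_dropped / 8);  // addp(rsp, ...)
      stack.push_back(scratch);          // PushReturnAddressFrom(scratch)
      assert(stack.size() == 1 && stack.back() == 0xCAFE);  // ret(0) works
    }
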
| 3003 void MacroAssembler::FCmp() { | 3004 void MacroAssembler::FCmp() { |
| 3004 fucomip(); | 3005 fucomip(); |
| 3005 fstp(0); | 3006 fstp(0); |
| 3006 } | 3007 } |
| (...skipping 185 matching lines...) |
| 3192 Register input_reg) { | 3193 Register input_reg) { |
| 3193 Label done; | 3194 Label done; |
| 3194 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 3195 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 3195 cvttsd2siq(result_reg, xmm0); | 3196 cvttsd2siq(result_reg, xmm0); |
| 3196 Set(kScratchRegister, V8_UINT64_C(0x8000000000000000)); | 3197 Set(kScratchRegister, V8_UINT64_C(0x8000000000000000)); |
| 3197 cmpq(result_reg, kScratchRegister); | 3198 cmpq(result_reg, kScratchRegister); |
| 3198 j(not_equal, &done, Label::kNear); | 3199 j(not_equal, &done, Label::kNear); |
| 3199 | 3200 |
| 3200 // Slow case. | 3201 // Slow case. |
| 3201 if (input_reg.is(result_reg)) { | 3202 if (input_reg.is(result_reg)) { |
| 3202 subq(rsp, Immediate(kDoubleSize)); | 3203 subp(rsp, Immediate(kDoubleSize)); |
| 3203 movsd(MemOperand(rsp, 0), xmm0); | 3204 movsd(MemOperand(rsp, 0), xmm0); |
| 3204 SlowTruncateToI(result_reg, rsp, 0); | 3205 SlowTruncateToI(result_reg, rsp, 0); |
| 3205 addq(rsp, Immediate(kDoubleSize)); | 3206 addp(rsp, Immediate(kDoubleSize)); |
| 3206 } else { | 3207 } else { |
| 3207 SlowTruncateToI(result_reg, input_reg); | 3208 SlowTruncateToI(result_reg, input_reg); |
| 3208 } | 3209 } |
| 3209 | 3210 |
| 3210 bind(&done); | 3211 bind(&done); |
| 3211 } | 3212 } |
| 3212 | 3213 |
| 3213 | 3214 |
| 3214 void MacroAssembler::TruncateDoubleToI(Register result_reg, | 3215 void MacroAssembler::TruncateDoubleToI(Register result_reg, |
| 3215 XMMRegister input_reg) { | 3216 XMMRegister input_reg) { |
| 3216 Label done; | 3217 Label done; |
| 3217 cvttsd2siq(result_reg, input_reg); | 3218 cvttsd2siq(result_reg, input_reg); |
| 3218 movq(kScratchRegister, V8_INT64_C(0x8000000000000000)); | 3219 movq(kScratchRegister, V8_INT64_C(0x8000000000000000)); |
| 3219 cmpq(result_reg, kScratchRegister); | 3220 cmpq(result_reg, kScratchRegister); |
| 3220 j(not_equal, &done, Label::kNear); | 3221 j(not_equal, &done, Label::kNear); |
| 3221 | 3222 |
| 3222 subq(rsp, Immediate(kDoubleSize)); | 3223 subp(rsp, Immediate(kDoubleSize)); |
| 3223 movsd(MemOperand(rsp, 0), input_reg); | 3224 movsd(MemOperand(rsp, 0), input_reg); |
| 3224 SlowTruncateToI(result_reg, rsp, 0); | 3225 SlowTruncateToI(result_reg, rsp, 0); |
| 3225 addq(rsp, Immediate(kDoubleSize)); | 3226 addp(rsp, Immediate(kDoubleSize)); |
| 3226 | 3227 |
| 3227 bind(&done); | 3228 bind(&done); |
| 3228 } | 3229 } |
| 3229 | 3230 |
| 3230 | 3231 |
| 3231 void MacroAssembler::DoubleToI(Register result_reg, | 3232 void MacroAssembler::DoubleToI(Register result_reg, |
| 3232 XMMRegister input_reg, | 3233 XMMRegister input_reg, |
| 3233 XMMRegister scratch, | 3234 XMMRegister scratch, |
| 3234 MinusZeroMode minus_zero_mode, | 3235 MinusZeroMode minus_zero_mode, |
| 3235 Label* conversion_failed, | 3236 Label* conversion_failed, |
| (...skipping 473 matching lines...) |
| 3709 j(equal, &invoke, Label::kNear); | 3710 j(equal, &invoke, Label::kNear); |
| 3710 ASSERT(actual.reg().is(rax)); | 3711 ASSERT(actual.reg().is(rax)); |
| 3711 ASSERT(expected.reg().is(rbx)); | 3712 ASSERT(expected.reg().is(rbx)); |
| 3712 } | 3713 } |
| 3713 } | 3714 } |
| 3714 | 3715 |
| 3715 if (!definitely_matches) { | 3716 if (!definitely_matches) { |
| 3716 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline(); | 3717 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline(); |
| 3717 if (!code_constant.is_null()) { | 3718 if (!code_constant.is_null()) { |
| 3718 Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT); | 3719 Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT); |
| 3719 addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag)); | 3720 addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag)); |
| 3720 } else if (!code_register.is(rdx)) { | 3721 } else if (!code_register.is(rdx)) { |
| 3721 movp(rdx, code_register); | 3722 movp(rdx, code_register); |
| 3722 } | 3723 } |
| 3723 | 3724 |
| 3724 if (flag == CALL_FUNCTION) { | 3725 if (flag == CALL_FUNCTION) { |
| 3725 call_wrapper.BeforeCall(CallSize(adaptor)); | 3726 call_wrapper.BeforeCall(CallSize(adaptor)); |
| 3726 Call(adaptor, RelocInfo::CODE_TARGET); | 3727 Call(adaptor, RelocInfo::CODE_TARGET); |
| 3727 call_wrapper.AfterCall(); | 3728 call_wrapper.AfterCall(); |
| 3728 if (!*definitely_mismatches) { | 3729 if (!*definitely_mismatches) { |
| 3729 jmp(done, near_jump); | 3730 jmp(done, near_jump); |
| (...skipping 87 matching lines...) |
| 3817 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space, | 3818 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space, |
| 3818 bool save_doubles) { | 3819 bool save_doubles) { |
| 3819 #ifdef _WIN64 | 3820 #ifdef _WIN64 |
| 3820 const int kShadowSpace = 4; | 3821 const int kShadowSpace = 4; |
| 3821 arg_stack_space += kShadowSpace; | 3822 arg_stack_space += kShadowSpace; |
| 3822 #endif | 3823 #endif |
| 3823 // Optionally save all XMM registers. | 3824 // Optionally save all XMM registers. |
| 3824 if (save_doubles) { | 3825 if (save_doubles) { |
| 3825 int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize + | 3826 int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize + |
| 3826 arg_stack_space * kRegisterSize; | 3827 arg_stack_space * kRegisterSize; |
| 3827 subq(rsp, Immediate(space)); | 3828 subp(rsp, Immediate(space)); |
| 3828 int offset = -2 * kPointerSize; | 3829 int offset = -2 * kPointerSize; |
| 3829 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) { | 3830 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) { |
| 3830 XMMRegister reg = XMMRegister::FromAllocationIndex(i); | 3831 XMMRegister reg = XMMRegister::FromAllocationIndex(i); |
| 3831 movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg); | 3832 movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg); |
| 3832 } | 3833 } |
| 3833 } else if (arg_stack_space > 0) { | 3834 } else if (arg_stack_space > 0) { |
| 3834 subq(rsp, Immediate(arg_stack_space * kRegisterSize)); | 3835 subp(rsp, Immediate(arg_stack_space * kRegisterSize)); |
| 3835 } | 3836 } |
| 3836 | 3837 |
| 3837 // Get the required frame alignment for the OS. | 3838 // Get the required frame alignment for the OS. |
| 3838 const int kFrameAlignment = OS::ActivationFrameAlignment(); | 3839 const int kFrameAlignment = OS::ActivationFrameAlignment(); |
| 3839 if (kFrameAlignment > 0) { | 3840 if (kFrameAlignment > 0) { |
| 3840 ASSERT(IsPowerOf2(kFrameAlignment)); | 3841 ASSERT(IsPowerOf2(kFrameAlignment)); |
| 3841 ASSERT(is_int8(kFrameAlignment)); | 3842 ASSERT(is_int8(kFrameAlignment)); |
| 3842 and_(rsp, Immediate(-kFrameAlignment)); | 3843 and_(rsp, Immediate(-kFrameAlignment)); |
| 3843 } | 3844 } |
| 3844 | 3845 |
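
The alignment step above works because OS::ActivationFrameAlignment() is a power of two: for such an A, rsp & -A rounds the stack pointer down to the next A-byte boundary, and rounding down is safe on a downward-growing stack. In miniature:

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t kFrameAlignment = 16;  // assumed, typical for x64 ABIs
      intptr_t rsp = 0x7fff1234;
      rsp &= -kFrameAlignment;              // and_(rsp, Immediate(-16))
      assert(rsp == 0x7fff1230 && rsp % kFrameAlignment == 0);
    }
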
| (...skipping 341 matching lines...) |
| 4186 | 4187 |
| 4187 // Calculate new top and bail out if new space is exhausted. | 4188 // Calculate new top and bail out if new space is exhausted. |
| 4188 ExternalReference allocation_limit = | 4189 ExternalReference allocation_limit = |
| 4189 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | 4190 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
| 4190 | 4191 |
| 4191 Register top_reg = result_end.is_valid() ? result_end : result; | 4192 Register top_reg = result_end.is_valid() ? result_end : result; |
| 4192 | 4193 |
| 4193 if (!top_reg.is(result)) { | 4194 if (!top_reg.is(result)) { |
| 4194 movp(top_reg, result); | 4195 movp(top_reg, result); |
| 4195 } | 4196 } |
| 4196 addq(top_reg, Immediate(object_size)); | 4197 addp(top_reg, Immediate(object_size)); |
| 4197 j(carry, gc_required); | 4198 j(carry, gc_required); |
| 4198 Operand limit_operand = ExternalOperand(allocation_limit); | 4199 Operand limit_operand = ExternalOperand(allocation_limit); |
| 4199 cmpq(top_reg, limit_operand); | 4200 cmpq(top_reg, limit_operand); |
| 4200 j(above, gc_required); | 4201 j(above, gc_required); |
| 4201 | 4202 |
| 4202 // Update allocation top. | 4203 // Update allocation top. |
| 4203 UpdateAllocationTopHelper(top_reg, scratch, flags); | 4204 UpdateAllocationTopHelper(top_reg, scratch, flags); |
| 4204 | 4205 |
| 4205 bool tag_result = (flags & TAG_OBJECT) != 0; | 4206 bool tag_result = (flags & TAG_OBJECT) != 0; |
| 4206 if (top_reg.is(result)) { | 4207 if (top_reg.is(result)) { |
| 4207 if (tag_result) { | 4208 if (tag_result) { |
| 4208 subq(result, Immediate(object_size - kHeapObjectTag)); | 4209 subp(result, Immediate(object_size - kHeapObjectTag)); |
| 4209 } else { | 4210 } else { |
| 4210 subq(result, Immediate(object_size)); | 4211 subp(result, Immediate(object_size)); |
| 4211 } | 4212 } |
| 4212 } else if (tag_result) { | 4213 } else if (tag_result) { |
| 4213 // Tag the result if requested. | 4214 // Tag the result if requested. |
| 4214 ASSERT(kHeapObjectTag == 1); | 4215 ASSERT(kHeapObjectTag == 1); |
| 4215 incq(result); | 4216 incq(result); |
| 4216 } | 4217 } |
| 4217 } | 4218 } |
| 4218 | 4219 |
| 4219 | 4220 |
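
Allocate's fast path above is classic bump-pointer allocation: advance top by the object size, bail to the GC on carry or when the limit is exceeded, then tag the old top with kHeapObjectTag. A standalone model under those rules (the addresses are made up):

    #include <cassert>
    #include <cstdint>

    bool Allocate(uintptr_t* top, uintptr_t limit, unsigned size,
                  uintptr_t* result) {
      uintptr_t new_top = *top + size;
      if (new_top < *top) return false;    // j(carry, gc_required)
      if (new_top > limit) return false;   // cmpq + j(above, gc_required)
      *result = *top | 1;  // TAG_OBJECT: subp(result, size - kHeapObjectTag)
      *top = new_top;      // UpdateAllocationTopHelper
      return true;
    }

    int main() {
      uintptr_t top = 0x1000, limit = 0x2000, obj = 0;
      assert(Allocate(&top, limit, 32, &obj));
      assert(obj == 0x1001 && top == 0x1020);
      assert(!Allocate(&top, limit, 0x8000, &obj));  // would pass the limit
    }
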
| 4220 void MacroAssembler::Allocate(int header_size, | 4221 void MacroAssembler::Allocate(int header_size, |
| (...skipping 41 matching lines...) |
| 4262 testq(result, Immediate(kDoubleAlignmentMask)); | 4263 testq(result, Immediate(kDoubleAlignmentMask)); |
| 4263 Check(zero, kAllocationIsNotDoubleAligned); | 4264 Check(zero, kAllocationIsNotDoubleAligned); |
| 4264 } | 4265 } |
| 4265 | 4266 |
| 4266 // Calculate new top and bail out if new space is exhausted. | 4267 // Calculate new top and bail out if new space is exhausted. |
| 4267 ExternalReference allocation_limit = | 4268 ExternalReference allocation_limit = |
| 4268 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | 4269 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
| 4269 if (!object_size.is(result_end)) { | 4270 if (!object_size.is(result_end)) { |
| 4270 movp(result_end, object_size); | 4271 movp(result_end, object_size); |
| 4271 } | 4272 } |
| 4272 addq(result_end, result); | 4273 addp(result_end, result); |
| 4273 j(carry, gc_required); | 4274 j(carry, gc_required); |
| 4274 Operand limit_operand = ExternalOperand(allocation_limit); | 4275 Operand limit_operand = ExternalOperand(allocation_limit); |
| 4275 cmpq(result_end, limit_operand); | 4276 cmpq(result_end, limit_operand); |
| 4276 j(above, gc_required); | 4277 j(above, gc_required); |
| 4277 | 4278 |
| 4278 // Update allocation top. | 4279 // Update allocation top. |
| 4279 UpdateAllocationTopHelper(result_end, scratch, flags); | 4280 UpdateAllocationTopHelper(result_end, scratch, flags); |
| 4280 | 4281 |
| 4281 // Tag the result if requested. | 4282 // Tag the result if requested. |
| 4282 if ((flags & TAG_OBJECT) != 0) { | 4283 if ((flags & TAG_OBJECT) != 0) { |
| 4283 addq(result, Immediate(kHeapObjectTag)); | 4284 addp(result, Immediate(kHeapObjectTag)); |
| 4284 } | 4285 } |
| 4285 } | 4286 } |
| 4286 | 4287 |
| 4287 | 4288 |
| 4288 void MacroAssembler::UndoAllocationInNewSpace(Register object) { | 4289 void MacroAssembler::UndoAllocationInNewSpace(Register object) { |
| 4289 ExternalReference new_space_allocation_top = | 4290 ExternalReference new_space_allocation_top = |
| 4290 ExternalReference::new_space_allocation_top_address(isolate()); | 4291 ExternalReference::new_space_allocation_top_address(isolate()); |
| 4291 | 4292 |
| 4292 // Make sure the object has no tag before resetting top. | 4293 // Make sure the object has no tag before resetting top. |
| 4293 and_(object, Immediate(~kHeapObjectTagMask)); | 4294 and_(object, Immediate(~kHeapObjectTagMask)); |
| (...skipping 27 matching lines...) |
| 4321 // Calculate the number of bytes needed for the characters in the string while | 4322 // Calculate the number of bytes needed for the characters in the string while |
| 4322 // observing object alignment. | 4323 // observing object alignment. |
| 4323 const int kHeaderAlignment = SeqTwoByteString::kHeaderSize & | 4324 const int kHeaderAlignment = SeqTwoByteString::kHeaderSize & |
| 4324 kObjectAlignmentMask; | 4325 kObjectAlignmentMask; |
| 4325 ASSERT(kShortSize == 2); | 4326 ASSERT(kShortSize == 2); |
| 4326 // scratch1 = length * 2 + kObjectAlignmentMask. | 4327 // scratch1 = length * 2 + kObjectAlignmentMask. |
| 4327 lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask + | 4328 lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask + |
| 4328 kHeaderAlignment)); | 4329 kHeaderAlignment)); |
| 4329 and_(scratch1, Immediate(~kObjectAlignmentMask)); | 4330 and_(scratch1, Immediate(~kObjectAlignmentMask)); |
| 4330 if (kHeaderAlignment > 0) { | 4331 if (kHeaderAlignment > 0) { |
| 4331 subq(scratch1, Immediate(kHeaderAlignment)); | 4332 subp(scratch1, Immediate(kHeaderAlignment)); |
| 4332 } | 4333 } |
| 4333 | 4334 |
| 4334 // Allocate two byte string in new space. | 4335 // Allocate two byte string in new space. |
| 4335 Allocate(SeqTwoByteString::kHeaderSize, | 4336 Allocate(SeqTwoByteString::kHeaderSize, |
| 4336 times_1, | 4337 times_1, |
| 4337 scratch1, | 4338 scratch1, |
| 4338 result, | 4339 result, |
| 4339 scratch2, | 4340 scratch2, |
| 4340 scratch3, | 4341 scratch3, |
| 4341 gc_required, | 4342 gc_required, |
| (...skipping 14 matching lines...) |
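
Both string allocators here round the payload up to the object alignment while folding in the header's own misalignment (kHeaderAlignment), so the masking can run before Allocate() adds the header size back. A loop-checked sketch with an assumed, deliberately misaligned header size:

    #include <cassert>

    int main() {
      const int kObjectAlignmentMask = 7;   // 8-byte object alignment
      const int kHeaderSize = 20;           // assumed; real headers may align
      const int kHeaderAlignment = kHeaderSize & kObjectAlignmentMask;

      for (int length = 0; length < 64; length++) {
        // lea(scratch1, ...), and_(scratch1, ~mask), subp(kHeaderAlignment):
        int scratch = (length * 2 + kObjectAlignmentMask + kHeaderAlignment)
                      & ~kObjectAlignmentMask;
        scratch -= kHeaderAlignment;
        // Allocate(kHeaderSize, times_1, scratch, ...) then requests:
        int total = kHeaderSize + scratch;
        int expected = (kHeaderSize + 2 * length + kObjectAlignmentMask)
                       & ~kObjectAlignmentMask;
        assert(total == expected);          // aligned total, every length
      }
    }
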
| 4356 Register scratch1, | 4357 Register scratch1, |
| 4357 Register scratch2, | 4358 Register scratch2, |
| 4358 Register scratch3, | 4359 Register scratch3, |
| 4359 Label* gc_required) { | 4360 Label* gc_required) { |
| 4360 // Calculate the number of bytes needed for the characters in the string while | 4361 // Calculate the number of bytes needed for the characters in the string while |
| 4361 // observing object alignment. | 4362 // observing object alignment. |
| 4362 const int kHeaderAlignment = SeqOneByteString::kHeaderSize & | 4363 const int kHeaderAlignment = SeqOneByteString::kHeaderSize & |
| 4363 kObjectAlignmentMask; | 4364 kObjectAlignmentMask; |
| 4364 movl(scratch1, length); | 4365 movl(scratch1, length); |
| 4365 ASSERT(kCharSize == 1); | 4366 ASSERT(kCharSize == 1); |
| 4366 addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment)); | 4367 addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment)); |
| 4367 and_(scratch1, Immediate(~kObjectAlignmentMask)); | 4368 and_(scratch1, Immediate(~kObjectAlignmentMask)); |
| 4368 if (kHeaderAlignment > 0) { | 4369 if (kHeaderAlignment > 0) { |
| 4369 subq(scratch1, Immediate(kHeaderAlignment)); | 4370 subp(scratch1, Immediate(kHeaderAlignment)); |
| 4370 } | 4371 } |
| 4371 | 4372 |
| 4372 // Allocate ASCII string in new space. | 4373 // Allocate ASCII string in new space. |
| 4373 Allocate(SeqOneByteString::kHeaderSize, | 4374 Allocate(SeqOneByteString::kHeaderSize, |
| 4374 times_1, | 4375 times_1, |
| 4375 scratch1, | 4376 scratch1, |
| 4376 result, | 4377 result, |
| 4377 scratch2, | 4378 scratch2, |
| 4378 scratch3, | 4379 scratch3, |
| 4379 gc_required, | 4380 gc_required, |
| (...skipping 130 matching lines...) |
| 4510 // Because source is 8-byte aligned in our uses of this function, | 4511 // Because source is 8-byte aligned in our uses of this function, |
| 4511 // we keep source aligned for the rep movs operation by copying the odd bytes | 4512 // we keep source aligned for the rep movs operation by copying the odd bytes |
| 4512 // at the end of the ranges. | 4513 // at the end of the ranges. |
| 4513 movp(scratch, length); | 4514 movp(scratch, length); |
| 4514 shrl(length, Immediate(kPointerSizeLog2)); | 4515 shrl(length, Immediate(kPointerSizeLog2)); |
| 4515 repmovsq(); | 4516 repmovsq(); |
| 4517 // Move the remaining bytes with a single word-sized copy. | 4518 // Move the remaining bytes with a single word-sized copy. |
| 4517 andl(scratch, Immediate(kPointerSize - 1)); | 4518 andl(scratch, Immediate(kPointerSize - 1)); |
| 4518 movp(length, Operand(source, scratch, times_1, -kPointerSize)); | 4519 movp(length, Operand(source, scratch, times_1, -kPointerSize)); |
| 4519 movp(Operand(destination, scratch, times_1, -kPointerSize), length); | 4520 movp(Operand(destination, scratch, times_1, -kPointerSize), length); |
| 4520 addq(destination, scratch); | 4521 addp(destination, scratch); |
| 4521 | 4522 |
| 4522 if (min_length <= kLongStringLimit) { | 4523 if (min_length <= kLongStringLimit) { |
| 4523 jmp(&done, Label::kNear); | 4524 jmp(&done, Label::kNear); |
| 4524 bind(&len24); | 4525 bind(&len24); |
| 4525 movp(scratch, Operand(source, 2 * kPointerSize)); | 4526 movp(scratch, Operand(source, 2 * kPointerSize)); |
| 4526 movp(Operand(destination, 2 * kPointerSize), scratch); | 4527 movp(Operand(destination, 2 * kPointerSize), scratch); |
| 4527 bind(&len16); | 4528 bind(&len16); |
| 4528 movp(scratch, Operand(source, kPointerSize)); | 4529 movp(scratch, Operand(source, kPointerSize)); |
| 4529 movp(Operand(destination, kPointerSize), scratch); | 4530 movp(Operand(destination, kPointerSize), scratch); |
| 4530 bind(&len8); | 4531 bind(&len8); |
| 4531 movp(scratch, Operand(source, 0)); | 4532 movp(scratch, Operand(source, 0)); |
| 4532 movp(Operand(destination, 0), scratch); | 4533 movp(Operand(destination, 0), scratch); |
| 4534 // Move the remaining bytes with a single word-sized copy. | 4535 // Move the remaining bytes with a single word-sized copy. |
| 4534 movp(scratch, Operand(source, length, times_1, -kPointerSize)); | 4535 movp(scratch, Operand(source, length, times_1, -kPointerSize)); |
| 4535 movp(Operand(destination, length, times_1, -kPointerSize), scratch); | 4536 movp(Operand(destination, length, times_1, -kPointerSize), scratch); |
| 4536 addq(destination, length); | 4537 addp(destination, length); |
| 4537 jmp(&done, Label::kNear); | 4538 jmp(&done, Label::kNear); |
| 4538 | 4539 |
| 4539 bind(&short_string); | 4540 bind(&short_string); |
| 4540 if (min_length == 0) { | 4541 if (min_length == 0) { |
| 4541 testl(length, length); | 4542 testl(length, length); |
| 4542 j(zero, &done, Label::kNear); | 4543 j(zero, &done, Label::kNear); |
| 4543 } | 4544 } |
| 4544 | 4545 |
| 4545 bind(&short_loop); | 4546 bind(&short_loop); |
| 4546 movb(scratch, Operand(source, 0)); | 4547 movb(scratch, Operand(source, 0)); |
| 4547 movb(Operand(destination, 0), scratch); | 4548 movb(Operand(destination, 0), scratch); |
| 4548 incq(source); | 4549 incq(source); |
| 4549 incq(destination); | 4550 incq(destination); |
| 4550 decl(length); | 4551 decl(length); |
| 4551 j(not_zero, &short_loop); | 4552 j(not_zero, &short_loop); |
| 4552 } | 4553 } |
| 4553 | 4554 |
| 4554 bind(&done); | 4555 bind(&done); |
| 4555 } | 4556 } |
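The long-string path above avoids a trailing byte loop: after rep movsq has copied whole words, one unaligned word store ending exactly at destination + length re-writes up to seven already-copied bytes and finishes the tail in a single move. A C-level sketch of the trick (CopyBytesSketch and its signature are illustrative, not V8 API; it assumes len >= 8 and non-overlapping buffers, which this path guarantees):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    void CopyBytesSketch(uint8_t* dst, const uint8_t* src, size_t len) {
      assert(len >= 8);                    // this path only handles len >= 8
      size_t words = len >> 3;             // qword count, as in shrl/repmovsq
      std::memcpy(dst, src, words * 8);    // stand-in for rep movsq
      // The last 0..7 bytes: one 8-byte copy ending exactly at dst + len,
      // overlapping up to 8 bytes that were already written.
      std::memcpy(dst + len - 8, src + len - 8, 8);
    }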
| 4556 | 4557 |
| 4557 | 4558 |
| 4558 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, | 4559 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, |
| 4559 Register end_offset, | 4560 Register end_offset, |
| 4560 Register filler) { | 4561 Register filler) { |
| 4561 Label loop, entry; | 4562 Label loop, entry; |
| 4562 jmp(&entry); | 4563 jmp(&entry); |
| 4563 bind(&loop); | 4564 bind(&loop); |
| 4564 movp(Operand(start_offset, 0), filler); | 4565 movp(Operand(start_offset, 0), filler); |
| 4565 addq(start_offset, Immediate(kPointerSize)); | 4566 addp(start_offset, Immediate(kPointerSize)); |
| 4566 bind(&entry); | 4567 bind(&entry); |
| 4567 cmpq(start_offset, end_offset); | 4568 cmpq(start_offset, end_offset); |
| 4568 j(less, &loop); | 4569 j(less, &loop); |
| 4569 } | 4570 } |
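In C terms the movp/addp/cmpq loop above is a plain word fill over a half-open range (a sketch, assuming both registers hold kPointerSize-aligned addresses, as they do at the call sites):

    #include <cstdint>

    // Equivalent of the loop above: store filler into every word-sized
    // slot in the half-open range [start, end).
    void InitializeFields(intptr_t* start, intptr_t* end, intptr_t filler) {
      for (intptr_t* p = start; p < end; ++p) *p = filler;
    }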
| 4570 | 4571 |
| 4571 | 4572 |
| 4572 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { | 4573 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { |
| 4573 if (context_chain_length > 0) { | 4574 if (context_chain_length > 0) { |
| 4574 // Move up the chain of contexts to the context containing the slot. | 4575 // Move up the chain of contexts to the context containing the slot. |
| 4575 movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX))); | 4576 movp(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX))); |
| (...skipping 133 matching lines...) |
| 4709 void MacroAssembler::PrepareCallCFunction(int num_arguments) { | 4710 void MacroAssembler::PrepareCallCFunction(int num_arguments) { |
| 4710 int frame_alignment = OS::ActivationFrameAlignment(); | 4711 int frame_alignment = OS::ActivationFrameAlignment(); |
| 4711 ASSERT(frame_alignment != 0); | 4712 ASSERT(frame_alignment != 0); |
| 4712 ASSERT(num_arguments >= 0); | 4713 ASSERT(num_arguments >= 0); |
| 4713 | 4714 |
| 4714 // Align the stack end and allocate space for the arguments and the old rsp. | 4715 // Align the stack end and allocate space for the arguments and the old rsp. |
| 4715 movp(kScratchRegister, rsp); | 4716 movp(kScratchRegister, rsp); |
| 4716 ASSERT(IsPowerOf2(frame_alignment)); | 4717 ASSERT(IsPowerOf2(frame_alignment)); |
| 4717 int argument_slots_on_stack = | 4718 int argument_slots_on_stack = |
| 4718 ArgumentStackSlotsForCFunctionCall(num_arguments); | 4719 ArgumentStackSlotsForCFunctionCall(num_arguments); |
| 4719 subq(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize)); | 4720 subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize)); |
| 4720 and_(rsp, Immediate(-frame_alignment)); | 4721 and_(rsp, Immediate(-frame_alignment)); |
| 4721 movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister); | 4722 movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister); |
| 4722 } | 4723 } |
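The function reserves the argument area plus one extra slot, aligns rsp downward, and parks the caller's rsp in that slot so the epilogue (in the elided CallCFunction body) can restore it; for a power-of-two alignment, and-ing with -frame_alignment is the same as masking with ~(frame_alignment - 1). A sketch of the pointer arithmetic (AlignedRsp is a hypothetical name; kRegisterSize is 8 on x64):

    #include <cstdint>

    uintptr_t AlignedRsp(uintptr_t rsp, int argument_slots_on_stack,
                         uintptr_t frame_alignment) {
      uintptr_t p = rsp - (argument_slots_on_stack + 1) * 8;  // +1 slot for old rsp
      p &= ~(frame_alignment - 1);     // same effect as and_(rsp, -frame_alignment)
      // The caller's rsp is then stored at p + argument_slots_on_stack * 8,
      // immediately above the argument area.
      return p;
    }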
| 4723 | 4724 |
| 4724 | 4725 |
| 4725 void MacroAssembler::CallCFunction(ExternalReference function, | 4726 void MacroAssembler::CallCFunction(ExternalReference function, |
| 4726 int num_arguments) { | 4727 int num_arguments) { |
| 4727 LoadAddress(rax, function); | 4728 LoadAddress(rax, function); |
| 4728 CallCFunction(rax, num_arguments); | 4729 CallCFunction(rax, num_arguments); |
| 4729 } | 4730 } |
| (...skipping 137 matching lines...) |
| 4867 // Sign extended 32 bit immediate. | 4868 // Sign extended 32 bit immediate. |
| 4868 and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask)); | 4869 and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask)); |
| 4869 movp(rcx, addr_reg); | 4870 movp(rcx, addr_reg); |
| 4870 int shift = | 4871 int shift = |
| 4871 Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2; | 4872 Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2; |
| 4872 shrl(rcx, Immediate(shift)); | 4873 shrl(rcx, Immediate(shift)); |
| 4873 and_(rcx, | 4874 and_(rcx, |
| 4874 Immediate((Page::kPageAlignmentMask >> shift) & | 4875 Immediate((Page::kPageAlignmentMask >> shift) & |
| 4875 ~(Bitmap::kBytesPerCell - 1))); | 4876 ~(Bitmap::kBytesPerCell - 1))); |
| 4876 | 4877 |
| 4877 addq(bitmap_reg, rcx); | 4878 addp(bitmap_reg, rcx); |
| 4878 movp(rcx, addr_reg); | 4879 movp(rcx, addr_reg); |
| 4879 shrl(rcx, Immediate(kPointerSizeLog2)); | 4880 shrl(rcx, Immediate(kPointerSizeLog2)); |
| 4880 and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1)); | 4881 and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1)); |
| 4881 movl(mask_reg, Immediate(1)); | 4882 movl(mask_reg, Immediate(1)); |
| 4882 shl_cl(mask_reg); | 4883 shl_cl(mask_reg); |
| 4883 } | 4884 } |
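GetMarkBits turns an address into a (bitmap cell, bit mask) pair: with one mark bit per pointer-size word and 32-bit cells, shift = 5 + 3 - 2 = 6 converts address bits into a cell-aligned bitmap byte offset. A sketch with the constants spelled out (the 1 MB page size is an assumption about this vintage of V8; MemoryChunk::kHeaderSize is added at the testq/or_ call sites rather than here):

    #include <cstdint>

    constexpr uintptr_t kPageAlignmentMask = (uintptr_t(1) << 20) - 1;  // assumed 1 MB pages
    constexpr int kPointerSizeLog2 = 3;   // 8-byte words
    constexpr int kBitsPerCellLog2 = 5;   // 32-bit bitmap cells
    constexpr int kBytesPerCellLog2 = 2;

    void GetMarkBitsSketch(uintptr_t addr, uintptr_t* cell, uint32_t* mask) {
      uintptr_t page = addr & ~kPageAlignmentMask;  // page start, as in and_ above
      int shift = kBitsPerCellLog2 + kPointerSizeLog2 - kBytesPerCellLog2;  // 6
      // Bitmap byte offset of this word's cell: word_index / 8 bits-per-byte,
      // rounded down to a 4-byte cell boundary (the shrl/and_ pair above).
      uintptr_t offset = (addr >> shift) &
          ((kPageAlignmentMask >> shift) & ~uintptr_t((1 << kBytesPerCellLog2) - 1));
      *cell = page + offset;
      // Bit inside the cell: word index modulo 32 (the shrl/and_/shl_cl above).
      *mask = uint32_t(1) << ((addr >> kPointerSizeLog2) & ((1 << kBitsPerCellLog2) - 1));
    }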
| 4884 | 4885 |
| 4885 | 4886 |
| 4886 void MacroAssembler::EnsureNotWhite( | 4887 void MacroAssembler::EnsureNotWhite( |
| 4887 Register value, | 4888 Register value, |
| (...skipping 15 matching lines...) |
| 4903 // Since both black and grey have a 1 in the first position and white does | 4904 // Since both black and grey have a 1 in the first position and white does |
| 4904 // not have a 1 there, we only need to check one bit. | 4905 // not have a 1 there, we only need to check one bit. |
| 4905 testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); | 4906 testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); |
| 4906 j(not_zero, &done, Label::kNear); | 4907 j(not_zero, &done, Label::kNear); |
| 4907 | 4908 |
| 4908 if (emit_debug_code()) { | 4909 if (emit_debug_code()) { |
| 4909 // Check for impossible bit pattern. | 4910 // Check for impossible bit pattern. |
| 4910 Label ok; | 4911 Label ok; |
| 4911 Push(mask_scratch); | 4912 Push(mask_scratch); |
| 4912 // shl. May overflow, making the check conservative. | 4913 // shl. May overflow, making the check conservative. |
| 4913 addq(mask_scratch, mask_scratch); | 4914 addp(mask_scratch, mask_scratch); |
| 4914 testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); | 4915 testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); |
| 4915 j(zero, &ok, Label::kNear); | 4916 j(zero, &ok, Label::kNear); |
| 4916 int3(); | 4917 int3(); |
| 4917 bind(&ok); | 4918 bind(&ok); |
| 4918 Pop(mask_scratch); | 4919 Pop(mask_scratch); |
| 4919 } | 4920 } |
| 4920 | 4921 |
| 4921 // Value is white. We check whether it is data that doesn't need scanning. | 4922 // Value is white. We check whether it is data that doesn't need scanning. |
| 4922 // Currently only checks for HeapNumber and non-cons strings. | 4923 // Currently only checks for HeapNumber and non-cons strings. |
| 4923 Register map = rcx; // Holds map while checking type. | 4924 Register map = rcx; // Holds map while checking type. |
| (...skipping 29 matching lines...) |
| 4953 testb(instance_type, Immediate(kExternalStringTag)); | 4954 testb(instance_type, Immediate(kExternalStringTag)); |
| 4954 j(zero, &not_external, Label::kNear); | 4955 j(zero, &not_external, Label::kNear); |
| 4955 movp(length, Immediate(ExternalString::kSize)); | 4956 movp(length, Immediate(ExternalString::kSize)); |
| 4956 jmp(&is_data_object, Label::kNear); | 4957 jmp(&is_data_object, Label::kNear); |
| 4957 | 4958 |
| 4958 bind(&not_external); | 4959 bind(&not_external); |
| 4959 // Sequential string, either ASCII or UC16. | 4960 // Sequential string, either ASCII or UC16. |
| 4960 ASSERT(kOneByteStringTag == 0x04); | 4961 ASSERT(kOneByteStringTag == 0x04); |
| 4961 and_(length, Immediate(kStringEncodingMask)); | 4962 and_(length, Immediate(kStringEncodingMask)); |
| 4962 xor_(length, Immediate(kStringEncodingMask)); | 4963 xor_(length, Immediate(kStringEncodingMask)); |
| 4963 addq(length, Immediate(0x04)); | 4964 addp(length, Immediate(0x04)); |
| 4964 // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2. | 4965 // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2. |
| 4965 imul(length, FieldOperand(value, String::kLengthOffset)); | 4966 imulp(length, FieldOperand(value, String::kLengthOffset)); |
| 4966 shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize)); | 4967 shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize)); |
| 4967 addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask)); | 4968 addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask)); |
| 4968 and_(length, Immediate(~kObjectAlignmentMask)); | 4969 and_(length, Immediate(~kObjectAlignmentMask)); |
| 4969 | 4970 |
| 4970 bind(&is_data_object); | 4971 bind(&is_data_object); |
| 4971 // Value is a data object, and it is white. Mark it black. Since we know | 4972 // Value is a data object, and it is white. Mark it black. Since we know |
| 4972 // that the object is white, we can make it black by flipping one bit. | 4973 // that the object is white, we can make it black by flipping one bit. |
| 4973 or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); | 4974 or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); |
| 4974 | 4975 |
| 4975 and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask)); | 4976 and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask)); |
| 4976 addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length); | 4977 addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length); |
| 4977 | 4978 |
| (...skipping 104 matching lines...) |
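The branch-free size computation at the end of EnsureNotWhite is worth unpacking: and_/xor_/addp map the encoding bit to 4 (one-byte) or 8 (two-byte), i.e. char size << 2; multiplying by the smi-tagged length and shifting right by 2 + kSmiTagSize + kSmiShiftSize = 34 yields length * char_size; addp/and_ then round up past the header to object alignment. A worked sketch (assumes the x64 smi layout, value << 32, and kStringEncodingMask == 0x04; kHeaderSize is hypothetical):

    #include <cassert>
    #include <cstdint>

    constexpr uint64_t kStringEncodingMask = 0x04;  // assumed: set for one-byte
    constexpr uint64_t kHeaderSize = 24;            // hypothetical SeqString header
    constexpr uint64_t kAlignMask = 7;

    uint64_t SeqStringSize(uint64_t instance_type, uint32_t length) {
      uint64_t smi = uint64_t(length) << 32;  // x64 smi: value in the high half
      uint64_t t = (instance_type & kStringEncodingMask) ^ kStringEncodingMask;
      t += 0x04;          // 4 if one-byte, 8 if two-byte: char size << 2
      t *= smi;           // (char_size << 2) * (length << 32) = size << 34
      t >>= 2 + 1 + 31;   // 2 + kSmiTagSize + kSmiShiftSize
      return (t + kHeaderSize + kAlignMask) & ~kAlignMask;
    }

    int main() {
      assert(SeqStringSize(0x04, 5) == ((24 + 5 + 7) & ~7u));   // one-byte
      assert(SeqStringSize(0x00, 5) == ((24 + 10 + 7) & ~7u));  // two-byte
    }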
| 5082 if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift())); | 5083 if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift())); |
| 5083 movl(rax, dividend); | 5084 movl(rax, dividend); |
| 5084 shrl(rax, Immediate(31)); | 5085 shrl(rax, Immediate(31)); |
| 5085 addl(rdx, rax); | 5086 addl(rdx, rax); |
| 5086 } | 5087 } |
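These final lines are the tail of V8's division-by-constant lowering: multiply by a precomputed magic multiplier (in the elided lines), arithmetic-shift, then add the dividend's sign bit so the quotient truncates toward zero. A concrete instance for divisor 7 (magic constant per Hacker's Delight, ch. 10; the q += n correction for a negative multiplier corresponds to elided code, not to the lines shown above):

    #include <cassert>
    #include <cstdint>

    int32_t DivBy7(int32_t n) {
      const int32_t magic = static_cast<int32_t>(0x92492493u);  // multiplier for d == 7
      int32_t q = int32_t((int64_t(n) * magic) >> 32);  // high half, as imull leaves in rdx
      q += n;                        // correction because the magic value is negative
      q >>= 2;                       // sarl(rdx, ms.shift())
      q += int32_t(uint32_t(n) >> 31);  // movl/shrl/addl above: add the sign bit
      return q;
    }

    int main() {
      for (int32_t n = -1000; n <= 1000; ++n) assert(DivBy7(n) == n / 7);
    }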
| 5087 | 5088 |
| 5088 | 5089 |
| 5089 } } // namespace v8::internal | 5090 } } // namespace v8::internal |
| 5090 | 5091 |
| 5091 #endif // V8_TARGET_ARCH_X64 | 5092 #endif // V8_TARGET_ARCH_X64 |