| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // | 2 // |
| 3 // Redistribution and use in source and binary forms, with or without | 3 // Redistribution and use in source and binary forms, with or without |
| 4 // modification, are permitted provided that the following conditions are | 4 // modification, are permitted provided that the following conditions are |
| 5 // met: | 5 // met: |
| 6 // | 6 // |
| 7 // * Redistributions of source code must retain the above copyright | 7 // * Redistributions of source code must retain the above copyright |
| 8 // notice, this list of conditions and the following disclaimer. | 8 // notice, this list of conditions and the following disclaimer. |
| 9 // * Redistributions in binary form must reproduce the above | 9 // * Redistributions in binary form must reproduce the above |
| 10 // copyright notice, this list of conditions and the following | 10 // copyright notice, this list of conditions and the following |
| (...skipping 102 matching lines...) |
| 113 } | 113 } |
| 114 | 114 |
| 115 | 115 |
| 116 // This function defines the list of registers which are associated with a | 116 // This function defines the list of registers which are associated with a |
| 117 // safepoint slot. Safepoint register slots are saved contiguously on the stack. | 117 // safepoint slot. Safepoint register slots are saved contiguously on the stack. |
| 118 // MacroAssembler::SafepointRegisterStackIndex handles mapping from register | 118 // MacroAssembler::SafepointRegisterStackIndex handles mapping from register |
| 119 // code to index in the safepoint register slots. Any change here can affect | 119 // code to index in the safepoint register slots. Any change here can affect |
| 120 // this mapping. | 120 // this mapping. |
| 121 CPURegList CPURegList::GetSafepointSavedRegisters() { | 121 CPURegList CPURegList::GetSafepointSavedRegisters() { |
| 122 CPURegList list = CPURegList::GetCalleeSaved(); | 122 CPURegList list = CPURegList::GetCalleeSaved(); |
| 123 list.Combine(CPURegList(CPURegister::kRegister, kXRegSize, kJSCallerSaved)); | 123 list.Combine( |
| 124 CPURegList(CPURegister::kRegister, kXRegSizeInBits, kJSCallerSaved)); |
| 124 | 125 |
| 125 // Note that unfortunately we can't use symbolic names for registers and have | 126 // Note that unfortunately we can't use symbolic names for registers and have |
| 126 // to directly use register codes. This is because this function is used to | 127 // to directly use register codes. This is because this function is used to |
| 127 // initialize some static variables and we can't rely on register variables | 128 // initialize some static variables and we can't rely on register variables |
| 128 // to be initialized due to static initialization order issues in C++. | 129 // to be initialized due to static initialization order issues in C++. |
| 129 | 130 |
| 130 // Drop ip0 and ip1 (i.e. x16 and x17), as they should not be expected to be | 131 // Drop ip0 and ip1 (i.e. x16 and x17), as they should not be expected to be |
| 131 // preserved outside of the macro assembler. | 132 // preserved outside of the macro assembler. |
| 132 list.Remove(16); | 133 list.Remove(16); |
| 133 list.Remove(17); | 134 list.Remove(17); |
| (...skipping 19 matching lines...) |
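A side note on the list operations above: a CPURegList is essentially a bit mask of register codes, so Combine() is a bitwise OR and Remove(code) clears a single bit, which is why the raw codes 16 and 17 (ip0/ip1) can be dropped directly. A minimal sketch of that idea, with illustrative names rather than V8's actual class:

    #include <cstdint>

    class RegListSketch {
     public:
      explicit RegListSketch(uint64_t bits = 0) : bits_(bits) {}
      void Combine(const RegListSketch& other) { bits_ |= other.bits_; }
      void Remove(int code) { bits_ &= ~(uint64_t{1} << code); }  // e.g. 16, 17.
      bool Includes(int code) const { return (bits_ >> code) & 1; }
     private:
      uint64_t bits_;  // Bit i is set iff the register with code i is listed.
    };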
| 153 | 154 |
| 154 | 155 |
| 155 bool RelocInfo::IsCodedSpecially() { | 156 bool RelocInfo::IsCodedSpecially() { |
| 156 // The deserializer needs to know whether a pointer is specially coded. Being | 157 // The deserializer needs to know whether a pointer is specially coded. Being |
| 157 // specially coded on A64 means that it is a movz/movk sequence. We don't | 158 // specially coded on A64 means that it is a movz/movk sequence. We don't |
| 158 // generate those for relocatable pointers. | 159 // generate those for relocatable pointers. |
| 159 return false; | 160 return false; |
| 160 } | 161 } |
| 161 | 162 |
| 162 | 163 |
| 164 bool RelocInfo::IsInConstantPool() { |
| 165 Instruction* instr = reinterpret_cast<Instruction*>(pc_); |
| 166 return instr->IsLdrLiteralX(); |
| 167 } |
| 168 |
| 169 |
| 163 void RelocInfo::PatchCode(byte* instructions, int instruction_count) { | 170 void RelocInfo::PatchCode(byte* instructions, int instruction_count) { |
| 164 // Patch the code at the current address with the supplied instructions. | 171 // Patch the code at the current address with the supplied instructions. |
| 165 Instr* pc = reinterpret_cast<Instr*>(pc_); | 172 Instr* pc = reinterpret_cast<Instr*>(pc_); |
| 166 Instr* instr = reinterpret_cast<Instr*>(instructions); | 173 Instr* instr = reinterpret_cast<Instr*>(instructions); |
| 167 for (int i = 0; i < instruction_count; i++) { | 174 for (int i = 0; i < instruction_count; i++) { |
| 168 *(pc + i) = *(instr + i); | 175 *(pc + i) = *(instr + i); |
| 169 } | 176 } |
| 170 | 177 |
| 171 // Indicate that code has changed. | 178 // Indicate that code has changed. |
| 172 CPU::FlushICache(pc_, instruction_count * kInstructionSize); | 179 CPU::FlushICache(pc_, instruction_count * kInstructionSize); |
| (...skipping 106 matching lines...) |
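PatchCode() above copies whole 32-bit instruction words and then flushes the instruction cache, which matters on A64 because the data and instruction caches are not coherent. A self-contained sketch of the same pattern, using the GCC/Clang builtin in place of V8's CPU::FlushICache:

    #include <cstdint>
    #include <cstring>

    void PatchAndFlush(uint32_t* pc, const uint32_t* code, int count) {
      // Overwrite the old instructions with the replacement sequence.
      std::memcpy(pc, code, count * sizeof(uint32_t));
      // Invalidate the icache for the patched range before executing it.
      __builtin___clear_cache(reinterpret_cast<char*>(pc),
                              reinterpret_cast<char*>(pc + count));
    }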
| 279 | 286 |
| 280 | 287 |
| 281 // Assembler | 288 // Assembler |
| 282 | 289 |
| 283 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) | 290 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) |
| 284 : AssemblerBase(isolate, buffer, buffer_size), | 291 : AssemblerBase(isolate, buffer, buffer_size), |
| 285 recorded_ast_id_(TypeFeedbackId::None()), | 292 recorded_ast_id_(TypeFeedbackId::None()), |
| 286 unresolved_branches_(), | 293 unresolved_branches_(), |
| 287 positions_recorder_(this) { | 294 positions_recorder_(this) { |
| 288 const_pool_blocked_nesting_ = 0; | 295 const_pool_blocked_nesting_ = 0; |
| 296 veneer_pool_blocked_nesting_ = 0; |
| 289 Reset(); | 297 Reset(); |
| 290 } | 298 } |
| 291 | 299 |
| 292 | 300 |
| 293 Assembler::~Assembler() { | 301 Assembler::~Assembler() { |
| 294 ASSERT(num_pending_reloc_info_ == 0); | 302 ASSERT(num_pending_reloc_info_ == 0); |
| 295 ASSERT(const_pool_blocked_nesting_ == 0); | 303 ASSERT(const_pool_blocked_nesting_ == 0); |
| 304 ASSERT(veneer_pool_blocked_nesting_ == 0); |
| 296 } | 305 } |
| 297 | 306 |
| 298 | 307 |
| 299 void Assembler::Reset() { | 308 void Assembler::Reset() { |
| 300 #ifdef DEBUG | 309 #ifdef DEBUG |
| 301 ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_)); | 310 ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_)); |
| 302 ASSERT(const_pool_blocked_nesting_ == 0); | 311 ASSERT(const_pool_blocked_nesting_ == 0); |
| 312 ASSERT(veneer_pool_blocked_nesting_ == 0); |
| 313 ASSERT(unresolved_branches_.empty()); |
| 303 memset(buffer_, 0, pc_ - buffer_); | 314 memset(buffer_, 0, pc_ - buffer_); |
| 304 #endif | 315 #endif |
| 305 pc_ = buffer_; | 316 pc_ = buffer_; |
| 306 reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_), | 317 reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_), |
| 307 reinterpret_cast<byte*>(pc_)); | 318 reinterpret_cast<byte*>(pc_)); |
| 308 num_pending_reloc_info_ = 0; | 319 num_pending_reloc_info_ = 0; |
| 309 next_buffer_check_ = 0; | 320 next_constant_pool_check_ = 0; |
| 321 next_veneer_pool_check_ = kMaxInt; |
| 310 no_const_pool_before_ = 0; | 322 no_const_pool_before_ = 0; |
| 311 first_const_pool_use_ = -1; | 323 first_const_pool_use_ = -1; |
| 312 ClearRecordedAstId(); | 324 ClearRecordedAstId(); |
| 313 } | 325 } |
| 314 | 326 |
| 315 | 327 |
| 316 void Assembler::GetCode(CodeDesc* desc) { | 328 void Assembler::GetCode(CodeDesc* desc) { |
| 317 // Emit constant pool if necessary. | 329 // Emit constant pool if necessary. |
| 318 CheckConstPool(true, false); | 330 CheckConstPool(true, false); |
| 319 ASSERT(num_pending_reloc_info_ == 0); | 331 ASSERT(num_pending_reloc_info_ == 0); |
| (...skipping 207 matching lines...) |
| 527 } | 539 } |
| 528 // The instruction at pc is now the last link in the label's chain. | 540 // The instruction at pc is now the last link in the label's chain. |
| 529 label->link_to(pc_offset()); | 541 label->link_to(pc_offset()); |
| 530 } | 542 } |
| 531 | 543 |
| 532 return offset; | 544 return offset; |
| 533 } | 545 } |
| 534 | 546 |
| 535 | 547 |
| 536 void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) { | 548 void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) { |
| 549 if (unresolved_branches_.empty()) { |
| 550 ASSERT(next_veneer_pool_check_ == kMaxInt); |
| 551 return; |
| 552 } |
| 553 |
| 537 // Branches to this label will be resolved when the label is bound below. | 554 // Branches to this label will be resolved when the label is bound below. |
| 538 std::multimap<int, FarBranchInfo>::iterator it_tmp, it; | 555 std::multimap<int, FarBranchInfo>::iterator it_tmp, it; |
| 539 it = unresolved_branches_.begin(); | 556 it = unresolved_branches_.begin(); |
| 540 while (it != unresolved_branches_.end()) { | 557 while (it != unresolved_branches_.end()) { |
| 541 it_tmp = it++; | 558 it_tmp = it++; |
| 542 if (it_tmp->second.label_ == label) { | 559 if (it_tmp->second.label_ == label) { |
| 543 CHECK(it_tmp->first >= pc_offset()); | 560 CHECK(it_tmp->first >= pc_offset()); |
| 544 unresolved_branches_.erase(it_tmp); | 561 unresolved_branches_.erase(it_tmp); |
| 545 } | 562 } |
| 546 } | 563 } |
| 564 if (unresolved_branches_.empty()) { |
| 565 next_veneer_pool_check_ = kMaxInt; |
| 566 } else { |
| 567 next_veneer_pool_check_ = |
| 568 unresolved_branches_first_limit() - kVeneerDistanceCheckMargin; |
| 569 } |
| 547 } | 570 } |
| 548 | 571 |
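The deletion loop above is the classic erase-while-iterating pattern for std::multimap: advance the iterator before erasing the element it pointed to, since erase() invalidates only the erased iterator. The same pattern reappears in EmitVeneers() below; generically it looks like this sketch:

    #include <map>

    template <typename K, typename V, typename Pred>
    void EraseIf(std::multimap<K, V>* map, Pred pred) {
      typename std::multimap<K, V>::iterator it = map->begin();
      while (it != map->end()) {
        typename std::multimap<K, V>::iterator cur = it++;  // Advance first.
        if (pred(*cur)) map->erase(cur);  // Erasing cur leaves 'it' valid.
      }
    }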
| 549 | 572 |
| 550 void Assembler::StartBlockConstPool() { | 573 void Assembler::StartBlockConstPool() { |
| 551 if (const_pool_blocked_nesting_++ == 0) { | 574 if (const_pool_blocked_nesting_++ == 0) { |
| 552 // Prevent constant pool checks happening by setting the next check to | 575 // Prevent constant pool checks happening by setting the next check to |
| 553 // the biggest possible offset. | 576 // the biggest possible offset. |
| 554 next_buffer_check_ = kMaxInt; | 577 next_constant_pool_check_ = kMaxInt; |
| 555 } | 578 } |
| 556 } | 579 } |
| 557 | 580 |
| 558 | 581 |
| 559 void Assembler::EndBlockConstPool() { | 582 void Assembler::EndBlockConstPool() { |
| 560 if (--const_pool_blocked_nesting_ == 0) { | 583 if (--const_pool_blocked_nesting_ == 0) { |
| 561 // Check the constant pool hasn't been blocked for too long. | 584 // Check the constant pool hasn't been blocked for too long. |
| 562 ASSERT((num_pending_reloc_info_ == 0) || | 585 ASSERT((num_pending_reloc_info_ == 0) || |
| 563 (pc_offset() < (first_const_pool_use_ + kMaxDistToPool))); | 586 (pc_offset() < (first_const_pool_use_ + kMaxDistToConstPool))); |
| 564 // Two cases: | 587 // Two cases: |
| 565 // * no_const_pool_before_ >= next_buffer_check_ and the emission is | 588 // * no_const_pool_before_ >= next_constant_pool_check_ and the emission is |
| 566 // still blocked | 589 // still blocked |
| 567 // * no_const_pool_before_ < next_buffer_check_ and the next emit will | 590 // * no_const_pool_before_ < next_constant_pool_check_ and the next emit |
| 568 // trigger a check. | 591 // will trigger a check. |
| 569 next_buffer_check_ = no_const_pool_before_; | 592 next_constant_pool_check_ = no_const_pool_before_; |
| 570 } | 593 } |
| 571 } | 594 } |
| 572 | 595 |
| 573 | 596 |
| 574 bool Assembler::is_const_pool_blocked() const { | 597 bool Assembler::is_const_pool_blocked() const { |
| 575 return (const_pool_blocked_nesting_ > 0) || | 598 return (const_pool_blocked_nesting_ > 0) || |
| 576 (pc_offset() < no_const_pool_before_); | 599 (pc_offset() < no_const_pool_before_); |
| 577 } | 600 } |
| 578 | 601 |
| 579 | 602 |
| (...skipping 21 matching lines...) |
| 601 } | 624 } |
| 602 | 625 |
| 603 | 626 |
| 604 void Assembler::ConstantPoolMarker(uint32_t size) { | 627 void Assembler::ConstantPoolMarker(uint32_t size) { |
| 605 ASSERT(is_const_pool_blocked()); | 628 ASSERT(is_const_pool_blocked()); |
| 606 // + 1 is for the crash guard. | 629 // + 1 is for the crash guard. |
| 607 Emit(LDR_x_lit | ImmLLiteral(2 * size + 1) | Rt(xzr)); | 630 Emit(LDR_x_lit | ImmLLiteral(2 * size + 1) | Rt(xzr)); |
| 608 } | 631 } |
| 609 | 632 |
| 610 | 633 |
| 634 void Assembler::EmitPoolGuard() { |
| 635 // We must generate only one instruction as this is used in scopes that |
| 636 // control the size of the code generated. |
| 637 Emit(BLR | Rn(xzr)); |
| 638 } |
| 639 |
| 640 |
| 611 void Assembler::ConstantPoolGuard() { | 641 void Assembler::ConstantPoolGuard() { |
| 612 #ifdef DEBUG | 642 #ifdef DEBUG |
| 613 // Currently this is only used after a constant pool marker. | 643 // Currently this is only used after a constant pool marker. |
| 614 ASSERT(is_const_pool_blocked()); | 644 ASSERT(is_const_pool_blocked()); |
| 615 Instruction* instr = reinterpret_cast<Instruction*>(pc_); | 645 Instruction* instr = reinterpret_cast<Instruction*>(pc_); |
| 616 ASSERT(instr->preceding()->IsLdrLiteralX() && | 646 ASSERT(instr->preceding()->IsLdrLiteralX() && |
| 617 instr->preceding()->Rt() == xzr.code()); | 647 instr->preceding()->Rt() == xzr.code()); |
| 618 #endif | 648 #endif |
| 619 | 649 EmitPoolGuard(); |
| 620 // We must generate only one instruction. | |
| 621 Emit(BLR | Rn(xzr)); | |
| 622 } | 650 } |
| 623 | 651 |
| 624 | 652 |
| 653 void Assembler::StartBlockVeneerPool() { |
| 654 ++veneer_pool_blocked_nesting_; |
| 655 } |
| 656 |
| 657 |
| 658 void Assembler::EndBlockVeneerPool() { |
| 659 if (--veneer_pool_blocked_nesting_ == 0) { |
| 660 // Check the veneer pool hasn't been blocked for too long. |
| 661 ASSERT(unresolved_branches_.empty() || |
| 662 (pc_offset() < unresolved_branches_first_limit())); |
| 663 } |
| 664 } |
| 665 |
| 666 |
| 625 void Assembler::br(const Register& xn) { | 667 void Assembler::br(const Register& xn) { |
| 626 positions_recorder()->WriteRecordedPositions(); | 668 positions_recorder()->WriteRecordedPositions(); |
| 627 ASSERT(xn.Is64Bits()); | 669 ASSERT(xn.Is64Bits()); |
| 628 Emit(BR | Rn(xn)); | 670 Emit(BR | Rn(xn)); |
| 629 } | 671 } |
| 630 | 672 |
| 631 | 673 |
| 632 void Assembler::blr(const Register& xn) { | 674 void Assembler::blr(const Register& xn) { |
| 633 positions_recorder()->WriteRecordedPositions(); | 675 positions_recorder()->WriteRecordedPositions(); |
| 634 ASSERT(xn.Is64Bits()); | 676 ASSERT(xn.Is64Bits()); |
| (...skipping 70 matching lines...) |
| 705 Label* label) { | 747 Label* label) { |
| 706 positions_recorder()->WriteRecordedPositions(); | 748 positions_recorder()->WriteRecordedPositions(); |
| 707 cbnz(rt, LinkAndGetInstructionOffsetTo(label)); | 749 cbnz(rt, LinkAndGetInstructionOffsetTo(label)); |
| 708 } | 750 } |
| 709 | 751 |
| 710 | 752 |
| 711 void Assembler::tbz(const Register& rt, | 753 void Assembler::tbz(const Register& rt, |
| 712 unsigned bit_pos, | 754 unsigned bit_pos, |
| 713 int imm14) { | 755 int imm14) { |
| 714 positions_recorder()->WriteRecordedPositions(); | 756 positions_recorder()->WriteRecordedPositions(); |
| 715 ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize))); | 757 ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits))); |
| 716 Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt)); | 758 Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt)); |
| 717 } | 759 } |
| 718 | 760 |
| 719 | 761 |
| 720 void Assembler::tbz(const Register& rt, | 762 void Assembler::tbz(const Register& rt, |
| 721 unsigned bit_pos, | 763 unsigned bit_pos, |
| 722 Label* label) { | 764 Label* label) { |
| 723 positions_recorder()->WriteRecordedPositions(); | 765 positions_recorder()->WriteRecordedPositions(); |
| 724 tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label)); | 766 tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label)); |
| 725 } | 767 } |
| 726 | 768 |
| 727 | 769 |
| 728 void Assembler::tbnz(const Register& rt, | 770 void Assembler::tbnz(const Register& rt, |
| 729 unsigned bit_pos, | 771 unsigned bit_pos, |
| 730 int imm14) { | 772 int imm14) { |
| 731 positions_recorder()->WriteRecordedPositions(); | 773 positions_recorder()->WriteRecordedPositions(); |
| 732 ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize))); | 774 ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits))); |
| 733 Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt)); | 775 Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt)); |
| 734 } | 776 } |
| 735 | 777 |
| 736 | 778 |
| 737 void Assembler::tbnz(const Register& rt, | 779 void Assembler::tbnz(const Register& rt, |
| 738 unsigned bit_pos, | 780 unsigned bit_pos, |
| 739 Label* label) { | 781 Label* label) { |
| 740 positions_recorder()->WriteRecordedPositions(); | 782 positions_recorder()->WriteRecordedPositions(); |
| 741 tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label)); | 783 tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label)); |
| 742 } | 784 } |
| (...skipping 627 matching lines...) |
| 1370 | 1412 |
| 1371 | 1413 |
| 1372 void Assembler::ldrsw(const Register& rt, const MemOperand& src) { | 1414 void Assembler::ldrsw(const Register& rt, const MemOperand& src) { |
| 1373 ASSERT(rt.Is64Bits()); | 1415 ASSERT(rt.Is64Bits()); |
| 1374 LoadStore(rt, src, LDRSW_x); | 1416 LoadStore(rt, src, LDRSW_x); |
| 1375 } | 1417 } |
| 1376 | 1418 |
| 1377 | 1419 |
| 1378 void Assembler::ldr(const Register& rt, uint64_t imm) { | 1420 void Assembler::ldr(const Register& rt, uint64_t imm) { |
| 1379 // TODO(all): Constant pool may be garbage collected. Hence we cannot store | 1421 // TODO(all): Constant pool may be garbage collected. Hence we cannot store |
| 1380 // TODO(all): arbitrary values in them. Manually move it for now. | 1422 // arbitrary values in them. Manually move it for now. Fix |
| 1381 // TODO(all): Fix MacroAssembler::Fmov when this is implemented. | 1423 // MacroAssembler::Fmov when this is implemented. |
| 1382 UNIMPLEMENTED(); | 1424 UNIMPLEMENTED(); |
| 1383 } | 1425 } |
| 1384 | 1426 |
| 1385 | 1427 |
| 1386 void Assembler::ldr(const FPRegister& ft, double imm) { | 1428 void Assembler::ldr(const FPRegister& ft, double imm) { |
| 1387 // TODO(all): Constant pool may be garbage collected. Hence we cannot store | 1429 // TODO(all): Constant pool may be garbage collected. Hence we cannot store |
| 1388 // TODO(all): arbitrary values in them. Manually move it for now. | 1430 // arbitrary values in them. Manually move it for now. Fix |
| 1389 // TODO(all): Fix MacroAssembler::Fmov when this is implemented. | 1431 // MacroAssembler::Fmov when this is implemented. |
| 1390 UNIMPLEMENTED(); | 1432 UNIMPLEMENTED(); |
| 1391 } | 1433 } |
| 1392 | 1434 |
| 1435 |
| 1436 void Assembler::ldr(const FPRegister& ft, float imm) { |
| 1437 // TODO(all): Constant pool may be garbage collected. Hence we cannot store |
| 1438 // arbitrary values in them. Manually move it for now. Fix |
| 1439 // MacroAssembler::Fmov when this is implemented. |
| 1440 UNIMPLEMENTED(); |
| 1441 } |
| 1442 |
| 1393 | 1443 |
| 1394 void Assembler::mov(const Register& rd, const Register& rm) { | 1444 void Assembler::mov(const Register& rd, const Register& rm) { |
| 1395 // Moves involving the stack pointer are encoded as add immediate with | 1445 // Moves involving the stack pointer are encoded as add immediate with |
| 1396 // second operand of zero. Otherwise, orr with first operand zr is | 1446 // second operand of zero. Otherwise, orr with first operand zr is |
| 1397 // used. | 1447 // used. |
| 1398 if (rd.IsSP() || rm.IsSP()) { | 1448 if (rd.IsSP() || rm.IsSP()) { |
| 1399 add(rd, rm, 0); | 1449 add(rd, rm, 0); |
| 1400 } else { | 1450 } else { |
| 1401 orr(rd, AppropriateZeroRegFor(rd), rm); | 1451 orr(rd, AppropriateZeroRegFor(rd), rm); |
| 1402 } | 1452 } |
| (...skipping 31 matching lines...) |
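Concretely, the special-casing in mov() above exists because register number 31 is ambiguous on A64: in add (immediate) it names sp, while in orr (shifted register) it names the zero register. So mov x0, sp must assemble as add x0, sp, #0 (an orr would read xzr instead), whereas mov x0, x1 assembles as orr x0, xzr, x1.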
| 1434 Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type)); | 1484 Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type)); |
| 1435 } | 1485 } |
| 1436 | 1486 |
| 1437 | 1487 |
| 1438 void Assembler::isb() { | 1488 void Assembler::isb() { |
| 1439 Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll)); | 1489 Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll)); |
| 1440 } | 1490 } |
| 1441 | 1491 |
| 1442 | 1492 |
| 1443 void Assembler::fmov(FPRegister fd, double imm) { | 1493 void Assembler::fmov(FPRegister fd, double imm) { |
| 1444 if (fd.Is64Bits() && IsImmFP64(imm)) { | 1494 ASSERT(fd.Is64Bits()); |
| 1445 Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm)); | 1495 ASSERT(IsImmFP64(imm)); |
| 1446 } else if (fd.Is32Bits() && IsImmFP32(imm)) { | 1496 Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm)); |
| 1447 Emit(FMOV_s_imm | Rd(fd) | ImmFP32(static_cast<float>(imm))); | |
| 1448 } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) { | |
| 1449 Register zr = AppropriateZeroRegFor(fd); | |
| 1450 fmov(fd, zr); | |
| 1451 } else { | |
| 1452 ldr(fd, imm); | |
| 1453 } | |
| 1454 } | 1497 } |
| 1455 | 1498 |
| 1456 | 1499 |
| 1500 void Assembler::fmov(FPRegister fd, float imm) { |
| 1501 ASSERT(fd.Is32Bits()); |
| 1502 ASSERT(IsImmFP32(imm)); |
| 1503 Emit(FMOV_s_imm | Rd(fd) | ImmFP32(imm)); |
| 1504 } |
| 1505 |
| 1506 |
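The ASSERT(IsImmFP64(imm)) above restricts fmov to the 8-bit floating-point immediate form which, assuming the standard A64 encoding, covers exactly +/-(1 + m/16) * 2^e with a 4-bit m and an exponent in [-3, 4]. In raw double bits that means the low 48 bits are zero, bits 61..54 are all equal, and bit 62 is their complement. A sketch of such a predicate:

    #include <cstdint>
    #include <cstring>

    bool IsImmFP64Sketch(double imm) {
      uint64_t bits;
      std::memcpy(&bits, &imm, sizeof(bits));  // Well-defined type pun.
      // The low 48 bits (47..0) must be clear.
      if ((bits & 0x0000ffffffffffffULL) != 0) return false;
      // Bits 61..54 must be all set or all clear.
      uint64_t b_pattern = (bits >> 54) & 0xff;
      if ((b_pattern != 0) && (b_pattern != 0xff)) return false;
      // Bit 62 must differ from bits 61..54.
      return ((bits >> 62) & 1) != ((bits >> 61) & 1);
    }

Under this check 1.0, -0.5 and 31.0 are encodable, while 0.0 is not, which is why the old code above fell back to fmov(fd, zr) for +0.0.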
| 1457 void Assembler::fmov(Register rd, FPRegister fn) { | 1507 void Assembler::fmov(Register rd, FPRegister fn) { |
| 1458 ASSERT(rd.SizeInBits() == fn.SizeInBits()); | 1508 ASSERT(rd.SizeInBits() == fn.SizeInBits()); |
| 1459 FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd; | 1509 FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd; |
| 1460 Emit(op | Rd(rd) | Rn(fn)); | 1510 Emit(op | Rd(rd) | Rn(fn)); |
| 1461 } | 1511 } |
| 1462 | 1512 |
| 1463 | 1513 |
| 1464 void Assembler::fmov(FPRegister fd, Register rn) { | 1514 void Assembler::fmov(FPRegister fd, Register rn) { |
| 1465 ASSERT(fd.SizeInBits() == rn.SizeInBits()); | 1515 ASSERT(fd.SizeInBits() == rn.SizeInBits()); |
| 1466 FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx; | 1516 FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx; |
| (...skipping 396 matching lines...) |
| 1863 | 1913 |
| 1864 void Assembler::debug(const char* message, uint32_t code, Instr params) { | 1914 void Assembler::debug(const char* message, uint32_t code, Instr params) { |
| 1865 #ifdef USE_SIMULATOR | 1915 #ifdef USE_SIMULATOR |
| 1866 // Don't generate simulator specific code if we are building a snapshot, which | 1916 // Don't generate simulator specific code if we are building a snapshot, which |
| 1867 // might be run on real hardware. | 1917 // might be run on real hardware. |
| 1868 if (!Serializer::enabled()) { | 1918 if (!Serializer::enabled()) { |
| 1869 #ifdef DEBUG | 1919 #ifdef DEBUG |
| 1870 Serializer::TooLateToEnableNow(); | 1920 Serializer::TooLateToEnableNow(); |
| 1871 #endif | 1921 #endif |
| 1872 // The arguments to the debug marker need to be contiguous in memory, so | 1922 // The arguments to the debug marker need to be contiguous in memory, so |
| 1873 // make sure we don't try to emit a literal pool. | 1923 // make sure we don't try to emit pools. |
| 1874 BlockConstPoolScope scope(this); | 1924 BlockPoolsScope scope(this); |
| 1875 | 1925 |
| 1876 Label start; | 1926 Label start; |
| 1877 bind(&start); | 1927 bind(&start); |
| 1878 | 1928 |
| 1879 // Refer to instructions-a64.h for a description of the marker and its | 1929 // Refer to instructions-a64.h for a description of the marker and its |
| 1880 // arguments. | 1930 // arguments. |
| 1881 hlt(kImmExceptionIsDebug); | 1931 hlt(kImmExceptionIsDebug); |
| 1882 ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset); | 1932 ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset); |
| 1883 dc32(code); | 1933 dc32(code); |
| 1884 ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset); | 1934 ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset); |
| (...skipping 157 matching lines...) |
| 2042 if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) { | 2092 if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) { |
| 2043 switch (extend) { | 2093 switch (extend) { |
| 2044 case UXTB: | 2094 case UXTB: |
| 2045 case UXTH: | 2095 case UXTH: |
| 2046 case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break; | 2096 case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break; |
| 2047 case SXTB: | 2097 case SXTB: |
| 2048 case SXTH: | 2098 case SXTH: |
| 2049 case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break; | 2099 case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break; |
| 2050 case UXTX: | 2100 case UXTX: |
| 2051 case SXTX: { | 2101 case SXTX: { |
| 2052 ASSERT(rn.SizeInBits() == kXRegSize); | 2102 ASSERT(rn.SizeInBits() == kXRegSizeInBits); |
| 2053 // Nothing to extend. Just shift. | 2103 // Nothing to extend. Just shift. |
| 2054 lsl(rd, rn_, left_shift); | 2104 lsl(rd, rn_, left_shift); |
| 2055 break; | 2105 break; |
| 2056 } | 2106 } |
| 2057 default: UNREACHABLE(); | 2107 default: UNREACHABLE(); |
| 2058 } | 2108 } |
| 2059 } else { | 2109 } else { |
| 2060 // No need to extend as the extended bits would be shifted away. | 2110 // No need to extend as the extended bits would be shifted away. |
| 2061 lsl(rd, rn_, left_shift); | 2111 lsl(rd, rn_, left_shift); |
| 2062 } | 2112 } |
| (...skipping 124 matching lines...) |
| 2187 // imm_s and imm_r are updated with immediates encoded in the format required | 2237 // imm_s and imm_r are updated with immediates encoded in the format required |
| 2188 // by the corresponding fields in the logical instruction. | 2238 // by the corresponding fields in the logical instruction. |
| 2189 // If it can not be encoded, the function returns false, and the values pointed | 2239 // If it can not be encoded, the function returns false, and the values pointed |
| 2190 // to by n, imm_s and imm_r are undefined. | 2240 // to by n, imm_s and imm_r are undefined. |
| 2191 bool Assembler::IsImmLogical(uint64_t value, | 2241 bool Assembler::IsImmLogical(uint64_t value, |
| 2192 unsigned width, | 2242 unsigned width, |
| 2193 unsigned* n, | 2243 unsigned* n, |
| 2194 unsigned* imm_s, | 2244 unsigned* imm_s, |
| 2195 unsigned* imm_r) { | 2245 unsigned* imm_r) { |
| 2196 ASSERT((n != NULL) && (imm_s != NULL) && (imm_r != NULL)); | 2246 ASSERT((n != NULL) && (imm_s != NULL) && (imm_r != NULL)); |
| 2197 ASSERT((width == kWRegSize) || (width == kXRegSize)); | 2247 ASSERT((width == kWRegSizeInBits) || (width == kXRegSizeInBits)); |
| 2198 | 2248 |
| 2199 // Logical immediates are encoded using parameters n, imm_s and imm_r using | 2249 // Logical immediates are encoded using parameters n, imm_s and imm_r using |
| 2200 // the following table: | 2250 // the following table: |
| 2201 // | 2251 // |
| 2202 // N imms immr size S R | 2252 // N imms immr size S R |
| 2203 // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr) | 2253 // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr) |
| 2204 // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr) | 2254 // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr) |
| 2205 // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr) | 2255 // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr) |
| 2206 // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr) | 2256 // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr) |
| 2207 // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr) | 2257 // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr) |
| 2208 // 0 11110s xxxxxr 2 UInt(s) UInt(r) | 2258 // 0 11110s xxxxxr 2 UInt(s) UInt(r) |
| 2209 // (s bits must not be all set) | 2259 // (s bits must not be all set) |
| 2210 // | 2260 // |
| 2211 // A pattern is constructed of size bits, where the least significant S+1 | 2261 // A pattern is constructed of size bits, where the least significant S+1 |
| 2212 // bits are set. The pattern is rotated right by R, and repeated across a | 2262 // bits are set. The pattern is rotated right by R, and repeated across a |
| 2213 // 32 or 64-bit value, depending on destination register width. | 2263 // 32 or 64-bit value, depending on destination register width. |
| 2214 // | 2264 // |
| 2215 // To test if an arbitrary immediate can be encoded using this scheme, an | 2265 // To test if an arbitrary immediate can be encoded using this scheme, an |
| 2216 // iterative algorithm is used. | 2266 // iterative algorithm is used. |
| 2217 // | 2267 // |
| 2218 // TODO(mcapewel) This code does not consider using X/W register overlap to | 2268 // TODO(mcapewel) This code does not consider using X/W register overlap to |
| 2219 // support 64-bit immediates where the top 32-bits are zero, and the bottom | 2269 // support 64-bit immediates where the top 32-bits are zero, and the bottom |
| 2220 // 32-bits are an encodable logical immediate. | 2270 // 32-bits are an encodable logical immediate. |
| 2221 | 2271 |
| 2222 // 1. If the value has all set or all clear bits, it can't be encoded. | 2272 // 1. If the value has all set or all clear bits, it can't be encoded. |
| 2223 if ((value == 0) || (value == 0xffffffffffffffffUL) || | 2273 if ((value == 0) || (value == 0xffffffffffffffffUL) || |
| 2224 ((width == kWRegSize) && (value == 0xffffffff))) { | 2274 ((width == kWRegSizeInBits) && (value == 0xffffffff))) { |
| 2225 return false; | 2275 return false; |
| 2226 } | 2276 } |
| 2227 | 2277 |
| 2228 unsigned lead_zero = CountLeadingZeros(value, width); | 2278 unsigned lead_zero = CountLeadingZeros(value, width); |
| 2229 unsigned lead_one = CountLeadingZeros(~value, width); | 2279 unsigned lead_one = CountLeadingZeros(~value, width); |
| 2230 unsigned trail_zero = CountTrailingZeros(value, width); | 2280 unsigned trail_zero = CountTrailingZeros(value, width); |
| 2231 unsigned trail_one = CountTrailingZeros(~value, width); | 2281 unsigned trail_one = CountTrailingZeros(~value, width); |
| 2232 unsigned set_bits = CountSetBits(value, width); | 2282 unsigned set_bits = CountSetBits(value, width); |
| 2233 | 2283 |
| 2234 // The fixed bits in the immediate s field. | 2284 // The fixed bits in the immediate s field. |
| 2235 // If width == 64 (X reg), start at 0xFFFFFF80. | 2285 // If width == 64 (X reg), start at 0xFFFFFF80. |
| 2236 // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit | 2286 // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit |
| 2237 // widths won't be executed. | 2287 // widths won't be executed. |
| 2238 int imm_s_fixed = (width == kXRegSize) ? -128 : -64; | 2288 int imm_s_fixed = (width == kXRegSizeInBits) ? -128 : -64; |
| 2239 int imm_s_mask = 0x3F; | 2289 int imm_s_mask = 0x3F; |
| 2240 | 2290 |
| 2241 for (;;) { | 2291 for (;;) { |
| 2242 // 2. If the value is two bits wide, it can be encoded. | 2292 // 2. If the value is two bits wide, it can be encoded. |
| 2243 if (width == 2) { | 2293 if (width == 2) { |
| 2244 *n = 0; | 2294 *n = 0; |
| 2245 *imm_s = 0x3C; | 2295 *imm_s = 0x3C; |
| 2246 *imm_r = (value & 3) - 1; | 2296 *imm_r = (value & 3) - 1; |
| 2247 return true; | 2297 return true; |
| 2248 } | 2298 } |
| (...skipping 141 matching lines...) |
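A useful cross-check for the iterative test above is the brute-force definition it implements: a value is encodable iff it equals a run of S+1 ones, rotated by R within a 2-, 4-, 8-, 16-, 32- or 64-bit element, replicated across the register. A reference sketch (illustrative, and far slower than the real algorithm):

    #include <cstdint>

    bool IsEncodableLogicalImm(uint64_t value, unsigned width) {  // width: 32 or 64.
      const uint64_t reg_mask =
          (width == 64) ? ~uint64_t{0} : ((uint64_t{1} << width) - 1);
      value &= reg_mask;
      if ((value == 0) || (value == reg_mask)) return false;  // All clear/all set.
      for (unsigned size = 2; size <= width; size *= 2) {
        const uint64_t elem_mask =
            (size == 64) ? ~uint64_t{0} : ((uint64_t{1} << size) - 1);
        for (unsigned ones = 1; ones < size; ones++) {  // s bits must not be all set.
          const uint64_t run = (uint64_t{1} << ones) - 1;
          for (unsigned rot = 0; rot < size; rot++) {
            // Rotate the run right by 'rot' within one element.
            uint64_t elem = (rot == 0)
                ? run
                : (((run >> rot) | (run << (size - rot))) & elem_mask);
            // Replicate the element across the register width.
            uint64_t pattern = elem;
            for (unsigned i = size; i < width; i *= 2) pattern |= pattern << i;
            if (pattern == value) return true;
          }
        }
      }
      return false;
    }

For example, 0x00ff00ff00ff00ff (a run of 8 ones in a 16-bit element) encodes, while 0x1234567812345678 does not.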
| 2390 } | 2440 } |
| 2391 } | 2441 } |
| 2392 } | 2442 } |
| 2393 | 2443 |
| 2394 | 2444 |
| 2395 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { | 2445 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { |
| 2396 // We do not try to reuse pool constants. | 2446 // We do not try to reuse pool constants. |
| 2397 RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL); | 2447 RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL); |
| 2398 if (((rmode >= RelocInfo::JS_RETURN) && | 2448 if (((rmode >= RelocInfo::JS_RETURN) && |
| 2399 (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) || | 2449 (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) || |
| 2400 (rmode == RelocInfo::CONST_POOL)) { | 2450 (rmode == RelocInfo::CONST_POOL) || |
| 2451 (rmode == RelocInfo::VENEER_POOL)) { |
| 2401 // Adjust code for new modes. | 2452 // Adjust code for new modes. |
| 2402 ASSERT(RelocInfo::IsDebugBreakSlot(rmode) | 2453 ASSERT(RelocInfo::IsDebugBreakSlot(rmode) |
| 2403 || RelocInfo::IsJSReturn(rmode) | 2454 || RelocInfo::IsJSReturn(rmode) |
| 2404 || RelocInfo::IsComment(rmode) | 2455 || RelocInfo::IsComment(rmode) |
| 2405 || RelocInfo::IsPosition(rmode) | 2456 || RelocInfo::IsPosition(rmode) |
| 2406 || RelocInfo::IsConstPool(rmode)); | 2457 || RelocInfo::IsConstPool(rmode) |
| 2458 || RelocInfo::IsVeneerPool(rmode)); |
| 2407 // These modes do not need an entry in the constant pool. | 2459 // These modes do not need an entry in the constant pool. |
| 2408 } else { | 2460 } else { |
| 2409 ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo); | 2461 ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo); |
| 2410 if (num_pending_reloc_info_ == 0) { | 2462 if (num_pending_reloc_info_ == 0) { |
| 2411 first_const_pool_use_ = pc_offset(); | 2463 first_const_pool_use_ = pc_offset(); |
| 2412 } | 2464 } |
| 2413 pending_reloc_info_[num_pending_reloc_info_++] = rinfo; | 2465 pending_reloc_info_[num_pending_reloc_info_++] = rinfo; |
| 2414 // Make sure the constant pool is not emitted in place of the next | 2466 // Make sure the constant pool is not emitted in place of the next |
| 2415 // instruction for which we just recorded relocation info. | 2467 // instruction for which we just recorded relocation info. |
| 2416 BlockConstPoolFor(1); | 2468 BlockConstPoolFor(1); |
| (...skipping 21 matching lines...) |
| 2438 reloc_info_writer.Write(&rinfo); | 2490 reloc_info_writer.Write(&rinfo); |
| 2439 } | 2491 } |
| 2440 } | 2492 } |
| 2441 } | 2493 } |
| 2442 | 2494 |
| 2443 | 2495 |
| 2444 void Assembler::BlockConstPoolFor(int instructions) { | 2496 void Assembler::BlockConstPoolFor(int instructions) { |
| 2445 int pc_limit = pc_offset() + instructions * kInstructionSize; | 2497 int pc_limit = pc_offset() + instructions * kInstructionSize; |
| 2446 if (no_const_pool_before_ < pc_limit) { | 2498 if (no_const_pool_before_ < pc_limit) { |
| 2447 // If there are some pending entries, the constant pool cannot be blocked | 2499 // If there are some pending entries, the constant pool cannot be blocked |
| 2448 // further than first_const_pool_use_ + kMaxDistToPool | 2500 // further than first_const_pool_use_ + kMaxDistToConstPool |
| 2449 ASSERT((num_pending_reloc_info_ == 0) || | 2501 ASSERT((num_pending_reloc_info_ == 0) || |
| 2450 (pc_limit < (first_const_pool_use_ + kMaxDistToPool))); | 2502 (pc_limit < (first_const_pool_use_ + kMaxDistToConstPool))); |
| 2451 no_const_pool_before_ = pc_limit; | 2503 no_const_pool_before_ = pc_limit; |
| 2452 } | 2504 } |
| 2453 | 2505 |
| 2454 if (next_buffer_check_ < no_const_pool_before_) { | 2506 if (next_constant_pool_check_ < no_const_pool_before_) { |
| 2455 next_buffer_check_ = no_const_pool_before_; | 2507 next_constant_pool_check_ = no_const_pool_before_; |
| 2456 } | 2508 } |
| 2457 } | 2509 } |
| 2458 | 2510 |
| 2459 | 2511 |
| 2460 void Assembler::CheckConstPool(bool force_emit, bool require_jump) { | 2512 void Assembler::CheckConstPool(bool force_emit, bool require_jump) { |
| 2461 // Some short sequences of instructions mustn't be broken up by constant pool | 2513 // Some short sequences of instructions mustn't be broken up by constant pool |
| 2462 // emission; such sequences are protected by calls to BlockConstPoolFor and | 2514 // emission; such sequences are protected by calls to BlockConstPoolFor and |
| 2463 // BlockConstPoolScope. | 2515 // BlockConstPoolScope. |
| 2464 if (is_const_pool_blocked()) { | 2516 if (is_const_pool_blocked()) { |
| 2465 // Something is wrong if emission is forced and blocked at the same time. | 2517 // Something is wrong if emission is forced and blocked at the same time. |
| 2466 ASSERT(!force_emit); | 2518 ASSERT(!force_emit); |
| 2467 return; | 2519 return; |
| 2468 } | 2520 } |
| 2469 | 2521 |
| 2470 // There is nothing to do if there are no pending constant pool entries. | 2522 // There is nothing to do if there are no pending constant pool entries. |
| 2471 if (num_pending_reloc_info_ == 0) { | 2523 if (num_pending_reloc_info_ == 0) { |
| 2472 // Calculate the offset of the next check. | 2524 // Calculate the offset of the next check. |
| 2473 next_buffer_check_ = pc_offset() + kCheckPoolInterval; | 2525 next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval; |
| 2474 return; | 2526 return; |
| 2475 } | 2527 } |
| 2476 | 2528 |
| 2477 // We emit a constant pool when: | 2529 // We emit a constant pool when: |
| 2478 // * requested to do so by parameter force_emit (e.g. after each function). | 2530 // * requested to do so by parameter force_emit (e.g. after each function). |
| 2479 // * the distance to the first instruction accessing the constant pool is | 2531 // * the distance to the first instruction accessing the constant pool is |
| 2480 // kAvgDistToPool or more. | 2532 // kAvgDistToConstPool or more. |
| 2481 // * no jump is required and the distance to the first instruction accessing | 2533 // * no jump is required and the distance to the first instruction accessing |
| 2482 // the constant pool is at least kMaxDistToPool / 2. | 2534 // the constant pool is at least kMaxDistToConstPool / 2. |
| 2483 ASSERT(first_const_pool_use_ >= 0); | 2535 ASSERT(first_const_pool_use_ >= 0); |
| 2484 int dist = pc_offset() - first_const_pool_use_; | 2536 int dist = pc_offset() - first_const_pool_use_; |
| 2485 if (!force_emit && dist < kAvgDistToPool && | 2537 if (!force_emit && dist < kAvgDistToConstPool && |
| 2486 (require_jump || (dist < (kMaxDistToPool / 2)))) { | 2538 (require_jump || (dist < (kMaxDistToConstPool / 2)))) { |
| 2487 return; | 2539 return; |
| 2488 } | 2540 } |
| 2489 | 2541 |
| 2542 int jump_instr = require_jump ? kInstructionSize : 0; |
| 2543 int size_pool_marker = kInstructionSize; |
| 2544 int size_pool_guard = kInstructionSize; |
| 2545 int pool_size = jump_instr + size_pool_marker + size_pool_guard + |
| 2546 num_pending_reloc_info_ * kPointerSize; |
| 2547 int needed_space = pool_size + kGap; |
| 2548 |
| 2549 // Emit veneers for branches that would go out of range during emission of the |
| 2550 // constant pool. |
| 2551 CheckVeneerPool(require_jump, kVeneerDistanceMargin + pool_size); |
| 2552 |
| 2490 Label size_check; | 2553 Label size_check; |
| 2491 bind(&size_check); | 2554 bind(&size_check); |
| 2492 | 2555 |
| 2493 // Check that the code buffer is large enough before emitting the constant | 2556 // Check that the code buffer is large enough before emitting the constant |
| 2494 // pool (include the jump over the pool, the constant pool marker, the | 2557 // pool (include the jump over the pool, the constant pool marker, the |
| 2495 // constant pool guard, and the gap to the relocation information). | 2558 // constant pool guard, and the gap to the relocation information). |
| 2496 int jump_instr = require_jump ? kInstructionSize : 0; | |
| 2497 int size_pool_marker = kInstructionSize; | |
| 2498 int size_pool_guard = kInstructionSize; | |
| 2499 int pool_size = jump_instr + size_pool_marker + size_pool_guard + | |
| 2500 num_pending_reloc_info_ * kPointerSize; | |
| 2501 int needed_space = pool_size + kGap; | |
| 2502 while (buffer_space() <= needed_space) { | 2559 while (buffer_space() <= needed_space) { |
| 2503 GrowBuffer(); | 2560 GrowBuffer(); |
| 2504 } | 2561 } |
| 2505 | 2562 |
| 2506 { | 2563 { |
| 2507 // Block recursive calls to CheckConstPool. | 2564 // Block recursive calls to CheckConstPool and protect from veneer pools. |
| 2508 BlockConstPoolScope block_const_pool(this); | 2565 BlockPoolsScope block_pools(this); |
| 2509 RecordComment("[ Constant Pool"); | 2566 RecordComment("[ Constant Pool"); |
| 2510 RecordConstPool(pool_size); | 2567 RecordConstPool(pool_size); |
| 2511 | 2568 |
| 2512 // Emit jump over constant pool if necessary. | 2569 // Emit jump over constant pool if necessary. |
| 2513 Label after_pool; | 2570 Label after_pool; |
| 2514 if (require_jump) { | 2571 if (require_jump) { |
| 2515 b(&after_pool); | 2572 b(&after_pool); |
| 2516 } | 2573 } |
| 2517 | 2574 |
| 2518 // Emit a constant pool header. The header has two goals: | 2575 // Emit a constant pool header. The header has two goals: |
| 2519 // 1) Encode the size of the constant pool, for use by the disassembler. | 2576 // 1) Encode the size of the constant pool, for use by the disassembler. |
| 2520 // 2) Terminate the program, to try to prevent execution from accidentally | 2577 // 2) Terminate the program, to try to prevent execution from accidentally |
| 2521 // flowing into the constant pool. | 2578 // flowing into the constant pool. |
| 2522 // The header is therefore made of two a64 instructions: | 2579 // The header is therefore made of two a64 instructions: |
| 2523 // ldr xzr, #<size of the constant pool in 32-bit words> | 2580 // ldr xzr, #<size of the constant pool in 32-bit words> |
| 2524 // blr xzr | 2581 // blr xzr |
| 2525 // If executed the code will likely segfault and lr will point to the | 2582 // If executed the code will likely segfault and lr will point to the |
| 2526 // beginning of the constant pool. | 2583 // beginning of the constant pool. |
| 2527 // TODO(all): currently each relocated constant is 64 bits, consider adding | 2584 // TODO(all): currently each relocated constant is 64 bits, consider adding |
| 2528 // support for 32-bit entries. | 2585 // support for 32-bit entries. |
| 2529 ConstantPoolMarker(2 * num_pending_reloc_info_); | 2586 ConstantPoolMarker(2 * num_pending_reloc_info_); |
| 2530 ConstantPoolGuard(); | 2587 ConstantPoolGuard(); |
| 2531 | 2588 |
| 2532 // Emit constant pool entries. | 2589 // Emit constant pool entries. |
| 2533 for (int i = 0; i < num_pending_reloc_info_; i++) { | 2590 for (int i = 0; i < num_pending_reloc_info_; i++) { |
| 2534 RelocInfo& rinfo = pending_reloc_info_[i]; | 2591 RelocInfo& rinfo = pending_reloc_info_[i]; |
| 2535 ASSERT(rinfo.rmode() != RelocInfo::COMMENT && | 2592 ASSERT(rinfo.rmode() != RelocInfo::COMMENT && |
| 2536 rinfo.rmode() != RelocInfo::POSITION && | 2593 rinfo.rmode() != RelocInfo::POSITION && |
| 2537 rinfo.rmode() != RelocInfo::STATEMENT_POSITION && | 2594 rinfo.rmode() != RelocInfo::STATEMENT_POSITION && |
| 2538 rinfo.rmode() != RelocInfo::CONST_POOL); | 2595 rinfo.rmode() != RelocInfo::CONST_POOL && |
| 2596 rinfo.rmode() != RelocInfo::VENEER_POOL); |
| 2539 | 2597 |
| 2540 Instruction* instr = reinterpret_cast<Instruction*>(rinfo.pc()); | 2598 Instruction* instr = reinterpret_cast<Instruction*>(rinfo.pc()); |
| 2541 // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. | 2599 // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. |
| 2542 ASSERT(instr->IsLdrLiteral() && | 2600 ASSERT(instr->IsLdrLiteral() && |
| 2543 instr->ImmLLiteral() == 0); | 2601 instr->ImmLLiteral() == 0); |
| 2544 | 2602 |
| 2545 instr->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_)); | 2603 instr->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_)); |
| 2546 dc64(rinfo.data()); | 2604 dc64(rinfo.data()); |
| 2547 } | 2605 } |
| 2548 | 2606 |
| 2549 num_pending_reloc_info_ = 0; | 2607 num_pending_reloc_info_ = 0; |
| 2550 first_const_pool_use_ = -1; | 2608 first_const_pool_use_ = -1; |
| 2551 | 2609 |
| 2552 RecordComment("]"); | 2610 RecordComment("]"); |
| 2553 | 2611 |
| 2554 if (after_pool.is_linked()) { | 2612 if (after_pool.is_linked()) { |
| 2555 bind(&after_pool); | 2613 bind(&after_pool); |
| 2556 } | 2614 } |
| 2557 } | 2615 } |
| 2558 | 2616 |
| 2559 // Since a constant pool was just emitted, move the check offset forward by | 2617 // Since a constant pool was just emitted, move the check offset forward by |
| 2560 // the standard interval. | 2618 // the standard interval. |
| 2561 next_buffer_check_ = pc_offset() + kCheckPoolInterval; | 2619 next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval; |
| 2562 | 2620 |
| 2563 ASSERT(SizeOfCodeGeneratedSince(&size_check) == | 2621 ASSERT(SizeOfCodeGeneratedSince(&size_check) == |
| 2564 static_cast<unsigned>(pool_size)); | 2622 static_cast<unsigned>(pool_size)); |
| 2565 } | 2623 } |
| 2566 | 2624 |
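For reference, the pool emitted by the block above has roughly this shape for two pending 64-bit entries (pseudo-assembly in the same style as the header comment; the marker operand is elided):

      b     after_pool         ; only emitted when require_jump
      ldr   xzr, #<pool size>  ; marker: encodes the pool length for the disassembler
      blr   xzr                ; guard: faults if execution falls into the pool
      .quad <constant 0>       ; entries; each is the retargeted literal of an
      .quad <constant 1>       ;   earlier 'ldr rd, [pc, #0]' in the code stream
    after_pool: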
| 2567 | 2625 |
| 2626 bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) { |
| 2627 // Account for the branch around the veneers and the guard. |
| 2628 int protection_offset = 2 * kInstructionSize; |
| 2629 return pc_offset() > max_reachable_pc - margin - protection_offset - |
| 2630 static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize); |
| 2631 } |
| 2632 |
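Reading the inequality above with illustrative numbers: a tbz at pc offset 0x1000 can reach roughly +/-32KB, so its max_reachable_pc is about 0x9000. With margin = 0x400, the two protection instructions (8 bytes), and, say, four unresolved branches each reserving kMaxVeneerCodeSize bytes of worst-case veneer code, a veneer must be emitted for that branch once pc_offset() exceeds 0x9000 - 0x400 - 8 - 4 * kMaxVeneerCodeSize.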
| 2633 |
| 2634 void Assembler::RecordVeneerPool(int location_offset, int size) { |
| 2635 #ifdef ENABLE_DEBUGGER_SUPPORT |
| 2636 RelocInfo rinfo(buffer_ + location_offset, |
| 2637 RelocInfo::VENEER_POOL, static_cast<intptr_t>(size), |
| 2638 NULL); |
| 2639 reloc_info_writer.Write(&rinfo); |
| 2640 #endif |
| 2641 } |
| 2642 |
| 2643 |
| 2644 void Assembler::EmitVeneers(bool need_protection, int margin) { |
| 2645 BlockPoolsScope scope(this); |
| 2646 RecordComment("[ Veneers"); |
| 2647 |
| 2648 // The exact size of the veneer pool must be recorded (see the comment at the |
| 2649 // declaration site of RecordConstPool()), but computing the number of |
| 2650 // veneers that will be generated is not obvious. So instead we remember the |
| 2651 // current position and will record the size after the pool has been |
| 2652 // generated. |
| 2653 Label size_check; |
| 2654 bind(&size_check); |
| 2655 int veneer_pool_relocinfo_loc = pc_offset(); |
| 2656 |
| 2657 Label end; |
| 2658 if (need_protection) { |
| 2659 b(&end); |
| 2660 } |
| 2661 |
| 2662 EmitVeneersGuard(); |
| 2663 |
| 2664 Label veneer_size_check; |
| 2665 |
| 2666 std::multimap<int, FarBranchInfo>::iterator it, it_to_delete; |
| 2667 |
| 2668 it = unresolved_branches_.begin(); |
| 2669 while (it != unresolved_branches_.end()) { |
| 2670 if (ShouldEmitVeneer(it->first, margin)) { |
| 2671 Instruction* branch = InstructionAt(it->second.pc_offset_); |
| 2672 Label* label = it->second.label_; |
| 2673 |
| 2674 #ifdef DEBUG |
| 2675 bind(&veneer_size_check); |
| 2676 #endif |
| 2677 // Patch the branch to point to the current position, and emit a branch |
| 2678 // to the label. |
| 2679 Instruction* veneer = reinterpret_cast<Instruction*>(pc_); |
| 2680 RemoveBranchFromLabelLinkChain(branch, label, veneer); |
| 2681 branch->SetImmPCOffsetTarget(veneer); |
| 2682 b(label); |
| 2683 #ifdef DEBUG |
| 2684 ASSERT(SizeOfCodeGeneratedSince(&veneer_size_check) <= |
| 2685 static_cast<uint64_t>(kMaxVeneerCodeSize)); |
| 2686 veneer_size_check.Unuse(); |
| 2687 #endif |
| 2688 |
| 2689 it_to_delete = it++; |
| 2690 unresolved_branches_.erase(it_to_delete); |
| 2691 } else { |
| 2692 ++it; |
| 2693 } |
| 2694 } |
| 2695 |
| 2696 // Record the veneer pool size. |
| 2697 int pool_size = SizeOfCodeGeneratedSince(&size_check); |
| 2698 RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size); |
| 2699 |
| 2700 if (unresolved_branches_.empty()) { |
| 2701 next_veneer_pool_check_ = kMaxInt; |
| 2702 } else { |
| 2703 next_veneer_pool_check_ = |
| 2704 unresolved_branches_first_limit() - kVeneerDistanceCheckMargin; |
| 2705 } |
| 2706 |
| 2707 bind(&end); |
| 2708 |
| 2709 RecordComment("]"); |
| 2710 } |
| 2711 |
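Conceptually, each veneer emitted above is just an unconditional branch placed while the original short-range branch can still reach it; the short branch is then repointed at the veneer (pseudo-assembly, labels illustrative):

      tbz   x0, #3, veneer   ; +/-32KB range; patched to target the veneer
      ...
      b     end              ; the protection branch, when need_protection is set
    veneer:
      b     far_label        ; an unconditional b reaches +/-128MB
    end: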
| 2712 |
| 2713 void Assembler::CheckVeneerPool(bool require_jump, |
| 2714 int margin) { |
| 2715 // There is nothing to do if there are no pending veneer pool entries. |
| 2716 if (unresolved_branches_.empty()) { |
| 2717 ASSERT(next_veneer_pool_check_ == kMaxInt); |
| 2718 return; |
| 2719 } |
| 2720 |
| 2721 ASSERT(pc_offset() < unresolved_branches_first_limit()); |
| 2722 |
| 2723 // Some short sequences of instructions mustn't be broken up by veneer pool |
| 2724 // emission; such sequences are protected by calls to BlockVeneerPoolFor and |
| 2725 // BlockVeneerPoolScope. |
| 2726 if (is_veneer_pool_blocked()) { |
| 2727 return; |
| 2728 } |
| 2729 |
| 2730 if (!require_jump) { |
| 2731 // Prefer emitting veneers protected by an existing instruction. |
| 2732 margin *= kVeneerNoProtectionFactor; |
| 2733 } |
| 2734 if (ShouldEmitVeneers(margin)) { |
| 2735 EmitVeneers(require_jump, margin); |
| 2736 } else { |
| 2737 next_veneer_pool_check_ = |
| 2738 unresolved_branches_first_limit() - kVeneerDistanceCheckMargin; |
| 2739 } |
| 2740 } |
| 2741 |
| 2742 |
| 2568 void Assembler::RecordComment(const char* msg) { | 2743 void Assembler::RecordComment(const char* msg) { |
| 2569 if (FLAG_code_comments) { | 2744 if (FLAG_code_comments) { |
| 2570 CheckBuffer(); | 2745 CheckBuffer(); |
| 2571 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg)); | 2746 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg)); |
| 2572 } | 2747 } |
| 2573 } | 2748 } |
| 2574 | 2749 |
| 2575 | 2750 |
| 2576 int Assembler::buffer_space() const { | 2751 int Assembler::buffer_space() const { |
| 2577 return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_); | 2752 return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_); |
| (...skipping 19 matching lines...) |
| 2597 // code. | 2772 // code. |
| 2598 #ifdef ENABLE_DEBUGGER_SUPPORT | 2773 #ifdef ENABLE_DEBUGGER_SUPPORT |
| 2599 RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size)); | 2774 RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size)); |
| 2600 #endif | 2775 #endif |
| 2601 } | 2776 } |
| 2602 | 2777 |
| 2603 | 2778 |
| 2604 } } // namespace v8::internal | 2779 } } // namespace v8::internal |
| 2605 | 2780 |
| 2606 #endif // V8_TARGET_ARCH_A64 | 2781 #endif // V8_TARGET_ARCH_A64 |