OLD | NEW |
1 //===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ----------===// | 1 //===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ----------===// |
2 // | 2 // |
3 // The LLVM Compiler Infrastructure | 3 // The LLVM Compiler Infrastructure |
4 // | 4 // |
5 // This file is distributed under the University of Illinois Open Source | 5 // This file is distributed under the University of Illinois Open Source |
6 // License. See LICENSE.TXT for details. | 6 // License. See LICENSE.TXT for details. |
7 // | 7 // |
8 //===----------------------------------------------------------------------===// | 8 //===----------------------------------------------------------------------===// |
9 | 9 |
10 #define DEBUG_TYPE "assembler" | 10 #define DEBUG_TYPE "assembler" |
(...skipping 475 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
486 | 486 |
487 case MCFragment::FT_Dwarf: | 487 case MCFragment::FT_Dwarf: |
488 return cast<MCDwarfLineAddrFragment>(F).getContents().size(); | 488 return cast<MCDwarfLineAddrFragment>(F).getContents().size(); |
489 case MCFragment::FT_DwarfFrame: | 489 case MCFragment::FT_DwarfFrame: |
490 return cast<MCDwarfCallFrameFragment>(F).getContents().size(); | 490 return cast<MCDwarfCallFrameFragment>(F).getContents().size(); |
491 } | 491 } |
492 | 492 |
493 llvm_unreachable("invalid fragment kind"); | 493 llvm_unreachable("invalid fragment kind"); |
494 } | 494 } |
495 | 495 |
496 void MCAsmLayout::LayoutFragment(MCFragment *F) { | |
497 MCFragment *Prev = F->getPrevNode(); | |
498 | |
499 // We should never try to recompute something which is up-to-date. | |
500 assert(!isFragmentUpToDate(F) && "Attempt to recompute up-to-date fragment!"); | |
501 // We should never try to compute the fragment layout if it's predecessor | |
502 // isn't up-to-date. | |
503 assert((!Prev || isFragmentUpToDate(Prev)) && | |
504 "Attempt to compute fragment before it's predecessor!"); | |
505 | |
506 ++stats::FragmentLayouts; | |
507 | |
508 // Compute fragment offset and size. | |
509 uint64_t Offset = 0; | |
510 if (Prev) | |
511 Offset += Prev->Offset + getAssembler().computeFragmentSize(*this, *Prev); | |
512 // @LOCALMOD-BEGIN | |
513 F->BundlePadding = getAssembler().ComputeBundlePadding(*this, F, Offset); | |
514 Offset += F->BundlePadding; | |
515 // @LOCALMOD-END | |
516 F->Offset = Offset; | |
517 LastValidFragment[F->getParent()] = F; | |
518 } | |
519 | |
520 // @LOCALMOD-BEGIN | 496 // @LOCALMOD-BEGIN |
521 // Returns number of bytes of padding needed to align to bundle start. | 497 // Returns number of bytes of padding needed to align to bundle start. |
522 static uint64_t AddressToBundlePadding(uint64_t Address, uint64_t BundleMask) { | 498 static uint64_t AddressToBundlePadding(uint64_t Address, uint64_t BundleMask) { |
523 return (~Address + 1) & BundleMask; | 499 return (~Address + 1) & BundleMask; |
524 } | 500 } |
525 | 501 |
526 uint64_t MCAssembler::getBundleSize() const { | 502 static uint64_t ComputeBundleMask(uint64_t BundleSize) { |
527 return getBackend().getBundleSize(); | |
528 } | |
529 | |
530 uint64_t MCAssembler::getBundleMask() const { | |
531 uint64_t BundleSize = getBundleSize(); | |
532 uint64_t BundleMask = BundleSize - 1; | 503 uint64_t BundleMask = BundleSize - 1; |
533 assert(BundleSize != 0); | 504 assert(BundleSize != 0); |
534 assert((BundleSize & BundleMask) == 0 && | 505 assert((BundleSize & BundleMask) == 0 && |
535 "Bundle size must be a power of 2!"); | 506 "Bundle size must be a power of 2!"); |
536 return BundleMask; | 507 return BundleMask; |
537 } | 508 } |
538 | 509 |
539 static unsigned ComputeGroupSize(MCFragment *F) { | 510 static unsigned ComputeGroupSize(MCFragment *F) { |
540 if (!F->isBundleGroupStart()) { | 511 if (!F->isBundleGroupStart()) { |
541 return 0; | 512 return 0; |
(...skipping 21 matching lines...) Expand all Loading... |
563 GroupSize += cast<MCTinyFragment>(Cur)->getContents().size(); | 534 GroupSize += cast<MCTinyFragment>(Cur)->getContents().size(); |
564 break; | 535 break; |
565 } | 536 } |
566 if (Cur->isBundleGroupEnd()) | 537 if (Cur->isBundleGroupEnd()) |
567 break; | 538 break; |
568 Cur = Cur->getNextNode(); | 539 Cur = Cur->getNextNode(); |
569 } | 540 } |
570 return GroupSize; | 541 return GroupSize; |
571 } | 542 } |
572 | 543 |
573 uint8_t MCAssembler::ComputeBundlePadding(const MCAsmLayout &Layout, | 544 static uint8_t ComputeBundlePadding(const MCAssembler &Asm, |
574 MCFragment *F, | 545 const MCAsmLayout &Layout, |
575 uint64_t FragmentOffset) const { | 546 MCFragment *F, |
| 547 uint64_t FragmentOffset) { |
576 if (!F->getParent()->isBundlingEnabled()) | 548 if (!F->getParent()->isBundlingEnabled()) |
577 return 0; | 549 return 0; |
578 | 550 |
579 uint64_t BundleSize = getBundleSize(); | 551 uint64_t BundleSize = Asm.getBackend().getBundleSize(); |
580 uint64_t BundleMask = getBundleMask(); | 552 uint64_t BundleMask = ComputeBundleMask(BundleSize); |
581 unsigned GroupSize = ComputeGroupSize(F); | 553 unsigned GroupSize = ComputeGroupSize(F); |
582 | 554 |
583 if (GroupSize > BundleSize) { | 555 if (GroupSize > BundleSize) { |
584 // EmitFill creates large groups consisting of repeated single bytes. | 556 // EmitFill creates large groups consisting of repeated single bytes. |
585 // These should be safe at any alignment, and in any case we cannot | 557 // These should be safe at any alignment, and in any case we cannot |
586 // fix them up here. | 558 // fix them up here. |
587 return 0; | 559 return 0; |
588 } | 560 } |
589 | 561 |
590 uint64_t Padding = 0; | 562 uint64_t Padding = 0; |
591 uint64_t OffsetInBundle = FragmentOffset & BundleMask; | 563 uint64_t OffsetInBundle = FragmentOffset & BundleMask; |
592 | 564 |
593 if (OffsetInBundle + GroupSize > BundleSize || | 565 if (OffsetInBundle + GroupSize > BundleSize || |
594 F->getBundleAlign() == MCFragment::BundleAlignStart) { | 566 F->getBundleAlign() == MCFragment::BundleAlignStart) { |
595 // If this group would cross the bundle boundary, or this group must be | 567 // If this group would cross the bundle boundary, or this group must be |
596 // aligned to the start of a bundle, then pad up to the start of the next bundle | 568 // aligned to the start of a bundle, then pad up to the start of the next bundle |
597 Padding += AddressToBundlePadding(OffsetInBundle, BundleMask); | 569 Padding += AddressToBundlePadding(OffsetInBundle, BundleMask); |
598 OffsetInBundle = 0; | 570 OffsetInBundle = 0; |
599 } | 571 } |
600 if (F->getBundleAlign() == MCFragment::BundleAlignEnd) { | 572 if (F->getBundleAlign() == MCFragment::BundleAlignEnd) { |
601 // Push to the end of the bundle | 573 // Push to the end of the bundle |
602 Padding += AddressToBundlePadding(OffsetInBundle + GroupSize, BundleMask); | 574 Padding += AddressToBundlePadding(OffsetInBundle + GroupSize, BundleMask); |
603 } | 575 } |
604 return Padding; | 576 return Padding; |
605 } | 577 } |
606 // @LOCALMOD-END | |
607 | 578 |
608 | |
609 | |
610 | |
611 // @LOCALMOD-BEGIN | |
612 // Write out BundlePadding bytes in NOPs, being careful not to cross a bundle | 579 // Write out BundlePadding bytes in NOPs, being careful not to cross a bundle |
613 // boundary. | 580 // boundary. |
614 static void WriteBundlePadding(const MCAssembler &Asm, | 581 static void WriteBundlePadding(const MCAssembler &Asm, |
615 const MCAsmLayout &Layout, | 582 const MCAsmLayout &Layout, |
616 uint64_t Offset, uint64_t TotalPadding, | 583 uint64_t Offset, uint64_t TotalPadding, |
617 MCObjectWriter *OW) { | 584 MCObjectWriter *OW) { |
618 uint64_t BundleSize = Asm.getBundleSize(); | 585 uint64_t BundleSize = Asm.getBackend().getBundleSize(); |
619 uint64_t BundleMask = Asm.getBundleMask(); | 586 uint64_t BundleMask = ComputeBundleMask(BundleSize); |
620 uint64_t PaddingLeft = TotalPadding; | 587 uint64_t PaddingLeft = TotalPadding; |
621 uint64_t StartPos = Offset; | 588 uint64_t StartPos = Offset; |
622 | 589 |
623 bool FirstWrite = true; | 590 bool FirstWrite = true; |
624 while (PaddingLeft > 0) { | 591 while (PaddingLeft > 0) { |
625 uint64_t NopsToWrite = | 592 uint64_t NopsToWrite = |
626 FirstWrite ? AddressToBundlePadding(StartPos, BundleMask) : | 593 FirstWrite ? AddressToBundlePadding(StartPos, BundleMask) : |
627 BundleSize; | 594 BundleSize; |
628 if (NopsToWrite > PaddingLeft) | 595 if (NopsToWrite > PaddingLeft) |
629 NopsToWrite = PaddingLeft; | 596 NopsToWrite = PaddingLeft; |
630 if (!Asm.getBackend().writeNopData(NopsToWrite, OW)) | 597 if (!Asm.getBackend().writeNopData(NopsToWrite, OW)) |
631 report_fatal_error("unable to write nop sequence of " + | 598 report_fatal_error("unable to write nop sequence of " + |
632 Twine(NopsToWrite) + " bytes"); | 599 Twine(NopsToWrite) + " bytes"); |
633 PaddingLeft -= NopsToWrite; | 600 PaddingLeft -= NopsToWrite; |
634 FirstWrite = false; | 601 FirstWrite = false; |
635 } | 602 } |
636 } | 603 } |
637 // @LOCALMOD-END | 604 // @LOCALMOD-END |
638 | 605 |
| 606 void MCAsmLayout::LayoutFragment(MCFragment *F) { |
| 607 MCFragment *Prev = F->getPrevNode(); |
| 608 |
| 609 // We should never try to recompute something which is up-to-date. |
| 610 assert(!isFragmentUpToDate(F) && "Attempt to recompute up-to-date fragment!"); |
| 611 // We should never try to compute the fragment layout if its predecessor |
| 612 // isn't up-to-date. |
| 613 assert((!Prev || isFragmentUpToDate(Prev)) && |
| 614 "Attempt to compute fragment before its predecessor!"); |
| 615 |
| 616 ++stats::FragmentLayouts; |
| 617 |
| 618 // Compute fragment offset and size. |
| 619 uint64_t Offset = 0; |
| 620 if (Prev) |
| 621 Offset += Prev->Offset + getAssembler().computeFragmentSize(*this, *Prev); |
| 622 // @LOCALMOD-BEGIN |
| 623 F->BundlePadding = ComputeBundlePadding(getAssembler(), *this, F, Offset); |
| 624 Offset += F->BundlePadding; |
| 625 // @LOCALMOD-END |
| 626 F->Offset = Offset; |
| 627 LastValidFragment[F->getParent()] = F; |
| 628 } |
| 629 |
| 630 |
639 /// WriteFragmentData - Write the \p F data to the output file. | 631 /// WriteFragmentData - Write the \p F data to the output file. |
640 static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout, | 632 static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout, |
641 const MCFragment &F) { | 633 const MCFragment &F) { |
642 MCObjectWriter *OW = &Asm.getWriter(); | 634 MCObjectWriter *OW = &Asm.getWriter(); |
643 // @LOCALMOD-BEGIN | 635 // @LOCALMOD-BEGIN |
644 if (F.getParent()->isBundlingEnabled()) { | 636 if (F.getParent()->isBundlingEnabled()) { |
645 uint64_t BundlePadding = Layout.getFragmentPadding(&F); | 637 uint64_t BundlePadding = Layout.getFragmentPadding(&F); |
646 uint64_t PaddingOffset = Layout.getFragmentOffset(&F) - BundlePadding; | 638 uint64_t PaddingOffset = Layout.getFragmentOffset(&F) - BundlePadding; |
647 WriteBundlePadding(Asm, Layout, PaddingOffset, BundlePadding, OW); | 639 WriteBundlePadding(Asm, Layout, PaddingOffset, BundlePadding, OW); |
648 } | 640 } |
(...skipping 21 matching lines...) Expand all Loading... |
670 Twine(AF.getValueSize()) + | 662 Twine(AF.getValueSize()) + |
671 "' is not a divisor of padding size '" + | 663 "' is not a divisor of padding size '" + |
672 Twine(FragmentSize) + "'"); | 664 Twine(FragmentSize) + "'"); |
673 | 665 |
674 // See if we are aligning with nops, and if so do that first to try to fill | 666 // See if we are aligning with nops, and if so do that first to try to fill |
675 // the Count bytes. Then if that did not fill any bytes or there are any | 667 // the Count bytes. Then if that did not fill any bytes or there are any |
676 // bytes left to fill use the Value and ValueSize to fill the rest. | 668 // bytes left to fill use the Value and ValueSize to fill the rest. |
677 // If we are aligning with nops, ask that target to emit the right data. | 669 // If we are aligning with nops, ask that target to emit the right data. |
678 if (AF.hasEmitNops()) { | 670 if (AF.hasEmitNops()) { |
679 // @LOCALMOD-BEGIN | 671 // @LOCALMOD-BEGIN |
680 if (Asm.getBundleSize()) { | 672 if (Asm.getBackend().getBundleSize()) { |
681 WriteBundlePadding(Asm, Layout, | 673 WriteBundlePadding(Asm, Layout, |
682 Layout.getFragmentOffset(&F), | 674 Layout.getFragmentOffset(&F), |
683 FragmentSize, | 675 FragmentSize, |
684 OW); | 676 OW); |
685 break; | 677 break; |
686 } | 678 } |
687 // @LOCALMOD-END | 679 // @LOCALMOD-END |
688 | 680 |
689 if (!Asm.getBackend().writeNopData(Count, OW)) | 681 if (!Asm.getBackend().writeNopData(Count, OW)) |
690 report_fatal_error("unable to write nop sequence of " + | 682 report_fatal_error("unable to write nop sequence of " + |
(...skipping 594 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1285 | 1277 |
1286 // anchors for MC*Fragment vtables | 1278 // anchors for MC*Fragment vtables |
1287 void MCDataFragment::anchor() { } | 1279 void MCDataFragment::anchor() { } |
1288 void MCInstFragment::anchor() { } | 1280 void MCInstFragment::anchor() { } |
1289 void MCAlignFragment::anchor() { } | 1281 void MCAlignFragment::anchor() { } |
1290 void MCFillFragment::anchor() { } | 1282 void MCFillFragment::anchor() { } |
1291 void MCOrgFragment::anchor() { } | 1283 void MCOrgFragment::anchor() { } |
1292 void MCLEBFragment::anchor() { } | 1284 void MCLEBFragment::anchor() { } |
1293 void MCDwarfLineAddrFragment::anchor() { } | 1285 void MCDwarfLineAddrFragment::anchor() { } |
1294 void MCDwarfCallFrameFragment::anchor() { } | 1286 void MCDwarfCallFrameFragment::anchor() { } |
OLD | NEW |