Chromium Code Reviews

Side by Side Diff: src/IceTargetLoweringX8632.cpp

Issue 619893002: Subzero: Auto-awesome iterators. (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: Add TODOs for rbegin/rend Created 6 years, 2 months ago
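The substance of this patch is mechanical: explicit begin()/end() iterator loops are rewritten as C++11 range-based for loops. A minimal standalone sketch of the before/after pattern, using a plain std::vector<int> stand-in rather than Subzero's VarList or InstList types:

```cpp
#include <iostream>
#include <vector>

int main() {
  std::vector<int> Source = {8, 4, 1};

  // Before: explicit iterator loop, as in the left (old) column of the diff.
  for (std::vector<int>::const_iterator I = Source.begin(), E = Source.end();
       I != E; ++I) {
    int Val = *I;
    std::cout << Val << "\n";
  }

  // After: C++11 range-based for, as in the right (new) column of the diff.
  for (int Val : Source)
    std::cout << Val << "\n";
  return 0;
}
```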
OLD | NEW
1 //===- subzero/src/IceTargetLoweringX8632.cpp - x86-32 lowering -----------===// 1 //===- subzero/src/IceTargetLoweringX8632.cpp - x86-32 lowering -----------===//
2 // 2 //
3 // The Subzero Code Generator 3 // The Subzero Code Generator
4 // 4 //
5 // This file is distributed under the University of Illinois Open Source 5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details. 6 // License. See LICENSE.TXT for details.
7 // 7 //
8 //===----------------------------------------------------------------------===// 8 //===----------------------------------------------------------------------===//
9 // 9 //
10 // This file implements the TargetLoweringX8632 class, which 10 // This file implements the TargetLoweringX8632 class, which
(...skipping 526 matching lines...)
537 } 537 }
538 } 538 }
539 539
540 void TargetX8632::sortByAlignment(VarList &Dest, const VarList &Source) const { 540 void TargetX8632::sortByAlignment(VarList &Dest, const VarList &Source) const {
541 // Sort the variables into buckets according to the log of their width 541 // Sort the variables into buckets according to the log of their width
542 // in bytes. 542 // in bytes.
543 const SizeT NumBuckets = 543 const SizeT NumBuckets =
544 X86_LOG2_OF_MAX_STACK_SLOT_SIZE - X86_LOG2_OF_MIN_STACK_SLOT_SIZE + 1; 544 X86_LOG2_OF_MAX_STACK_SLOT_SIZE - X86_LOG2_OF_MIN_STACK_SLOT_SIZE + 1;
545 VarList Buckets[NumBuckets]; 545 VarList Buckets[NumBuckets];
546 546
547 for (VarList::const_iterator I = Source.begin(), E = Source.end(); I != E; 547 for (Variable *Var : Source) {
548 ++I) {
549 Variable *Var = *I;
550 uint32_t NaturalAlignment = typeWidthInBytesOnStack(Var->getType()); 548 uint32_t NaturalAlignment = typeWidthInBytesOnStack(Var->getType());
551 SizeT LogNaturalAlignment = llvm::findFirstSet(NaturalAlignment); 549 SizeT LogNaturalAlignment = llvm::findFirstSet(NaturalAlignment);
552 assert(LogNaturalAlignment >= X86_LOG2_OF_MIN_STACK_SLOT_SIZE); 550 assert(LogNaturalAlignment >= X86_LOG2_OF_MIN_STACK_SLOT_SIZE);
553 assert(LogNaturalAlignment <= X86_LOG2_OF_MAX_STACK_SLOT_SIZE); 551 assert(LogNaturalAlignment <= X86_LOG2_OF_MAX_STACK_SLOT_SIZE);
554 SizeT BucketIndex = LogNaturalAlignment - X86_LOG2_OF_MIN_STACK_SLOT_SIZE; 552 SizeT BucketIndex = LogNaturalAlignment - X86_LOG2_OF_MIN_STACK_SLOT_SIZE;
555 Buckets[BucketIndex].push_back(Var); 553 Buckets[BucketIndex].push_back(Var);
556 } 554 }
557 555
558 for (SizeT I = 0, E = NumBuckets; I < E; ++I) { 556 for (SizeT I = 0, E = NumBuckets; I < E; ++I) {
559 VarList &List = Buckets[NumBuckets - I - 1]; 557 VarList &List = Buckets[NumBuckets - I - 1];
(...skipping 120 matching lines...)
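For reference, a self-contained sketch of the bucketing that sortByAlignment() performs above, with plain integer stack widths standing in for Variable pointers and a hand-rolled findFirstSet() standing in for llvm::findFirstSet(); the slot-size constants are stand-ins too, not the real X86_LOG2_OF_* values:

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Stand-ins for X86_LOG2_OF_MIN/MAX_STACK_SLOT_SIZE (4-byte and 16-byte slots).
const std::size_t LogMin = 2, LogMax = 4;

// Index of the lowest set bit; for a power-of-two width this equals
// log2(width), which is what llvm::findFirstSet() computes above.
std::size_t findFirstSet(uint32_t X) {
  std::size_t N = 0;
  while (!(X & 1)) {
    X >>= 1;
    ++N;
  }
  return N;
}

int main() {
  const std::size_t NumBuckets = LogMax - LogMin + 1;
  std::vector<uint32_t> Buckets[NumBuckets];
  std::vector<uint32_t> Source = {4, 16, 8, 4, 16}; // widths on the stack
  for (uint32_t Width : Source)
    Buckets[findFirstSet(Width) - LogMin].push_back(Width);
  // Walk the buckets from most- to least-aligned, mirroring the descending
  // bucket order in the second loop of sortByAlignment().
  for (std::size_t I = 0; I < NumBuckets; ++I)
    for (uint32_t Width : Buckets[NumBuckets - I - 1])
      std::cout << Width << " ";
  std::cout << "\n"; // prints: 16 16 8 4 4
  return 0;
}
```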
680 const VarList &Variables = Func->getVariables(); 678 const VarList &Variables = Func->getVariables();
681 const VarList &Args = Func->getArgs(); 679 const VarList &Args = Func->getArgs();
682 VarList SpilledVariables, SortedSpilledVariables, VariablesLinkedToSpillSlots; 680 VarList SpilledVariables, SortedSpilledVariables, VariablesLinkedToSpillSlots;
683 681
684 // If there is a separate locals area, this specifies the alignment 682 // If there is a separate locals area, this specifies the alignment
685 // for it. 683 // for it.
686 uint32_t LocalsSlotsAlignmentBytes = 0; 684 uint32_t LocalsSlotsAlignmentBytes = 0;
687 // The entire spill locations area gets aligned to largest natural 685 // The entire spill locations area gets aligned to largest natural
688 // alignment of the variables that have a spill slot. 686 // alignment of the variables that have a spill slot.
689 uint32_t SpillAreaAlignmentBytes = 0; 687 uint32_t SpillAreaAlignmentBytes = 0;
690 for (VarList::const_iterator I = Variables.begin(), E = Variables.end(); 688 for (Variable *Var : Variables) {
691 I != E; ++I) {
692 Variable *Var = *I;
693 if (Var->hasReg()) { 689 if (Var->hasReg()) {
694 RegsUsed[Var->getRegNum()] = true; 690 RegsUsed[Var->getRegNum()] = true;
695 continue; 691 continue;
696 } 692 }
697 // An argument either does not need a stack slot (if passed in a 693 // An argument either does not need a stack slot (if passed in a
698 // register) or already has one (if passed on the stack). 694 // register) or already has one (if passed on the stack).
699 if (Var->getIsArg()) 695 if (Var->getIsArg())
700 continue; 696 continue;
701 // An unreferenced variable doesn't need a stack slot. 697 // An unreferenced variable doesn't need a stack slot.
702 if (ComputedLiveRanges && Var->getLiveRange().isEmpty()) 698 if (ComputedLiveRanges && Var->getLiveRange().isEmpty())
703 continue; 699 continue;
704 // A spill slot linked to a variable with a stack slot should reuse 700 // A spill slot linked to a variable with a stack slot should reuse
705 // that stack slot. 701 // that stack slot.
706 if (SpillVariable *SpillVar = llvm::dyn_cast<SpillVariable>(Var)) { 702 if (SpillVariable *SpillVar = llvm::dyn_cast<SpillVariable>(Var)) {
707 assert(Var->getWeight() == RegWeight::Zero); 703 assert(Var->getWeight() == RegWeight::Zero);
708 if (!SpillVar->getLinkedTo()->hasReg()) { 704 if (!SpillVar->getLinkedTo()->hasReg()) {
709 VariablesLinkedToSpillSlots.push_back(Var); 705 VariablesLinkedToSpillSlots.push_back(Var);
710 continue; 706 continue;
711 } 707 }
712 } 708 }
713 SpilledVariables.push_back(Var); 709 SpilledVariables.push_back(Var);
714 } 710 }
715 711
716 SortedSpilledVariables.reserve(SpilledVariables.size()); 712 SortedSpilledVariables.reserve(SpilledVariables.size());
717 sortByAlignment(SortedSpilledVariables, SpilledVariables); 713 sortByAlignment(SortedSpilledVariables, SpilledVariables);
718 for (VarList::const_iterator I = SortedSpilledVariables.begin(), 714 for (Variable *Var : SortedSpilledVariables) {
719 E = SortedSpilledVariables.end();
720 I != E; ++I) {
721 Variable *Var = *I;
722 size_t Increment = typeWidthInBytesOnStack(Var->getType()); 715 size_t Increment = typeWidthInBytesOnStack(Var->getType());
723 if (!SpillAreaAlignmentBytes) 716 if (!SpillAreaAlignmentBytes)
724 SpillAreaAlignmentBytes = Increment; 717 SpillAreaAlignmentBytes = Increment;
725 if (SimpleCoalescing && VMetadata->isTracked(Var)) { 718 if (SimpleCoalescing && VMetadata->isTracked(Var)) {
726 if (VMetadata->isMultiBlock(Var)) { 719 if (VMetadata->isMultiBlock(Var)) {
727 GlobalsSize += Increment; 720 GlobalsSize += Increment;
728 } else { 721 } else {
729 SizeT NodeIndex = VMetadata->getLocalUseNode(Var)->getIndex(); 722 SizeT NodeIndex = VMetadata->getLocalUseNode(Var)->getIndex();
730 LocalsSize[NodeIndex] += Increment; 723 LocalsSize[NodeIndex] += Increment;
731 if (LocalsSize[NodeIndex] > SpillAreaSizeBytes) 724 if (LocalsSize[NodeIndex] > SpillAreaSizeBytes)
(...skipping 87 matching lines...)
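The spill-size loop above (cut off at the skipped lines) appears to grow the spill area to the sum of the multi-block (global) variables plus the largest per-node total of single-block (local) variables: a global needs its own slot, while locals from different nodes are never live at the same time and can overlay one another. A purely illustrative sketch of that accounting, with made-up node indices and widths:

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

int main() {
  // Hypothetical (node index, width-on-stack) pairs for single-block spilled
  // variables, plus widths for multi-block spilled variables.
  std::vector<std::pair<std::size_t, std::size_t>> Locals = {
      {0, 4}, {0, 8}, {1, 16}, {2, 4}};
  std::vector<std::size_t> Globals = {4, 4};

  std::size_t GlobalsSize = 0;
  std::vector<std::size_t> LocalsSize(3, 0);
  std::size_t LocalsAreaSize = 0;
  for (std::size_t Width : Globals)
    GlobalsSize += Width; // every multi-block variable keeps its own slot
  for (const auto &P : Locals) {
    LocalsSize[P.first] += P.second;
    // Slots for different nodes can overlay each other, so the locals area
    // only needs to be as large as the biggest per-node total.
    LocalsAreaSize = std::max(LocalsAreaSize, LocalsSize[P.first]);
  }
  std::cout << GlobalsSize + LocalsAreaSize << "\n"; // 8 + 16 = 24 bytes
  return 0;
}
```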
819 ++NumXmmArgs; 812 ++NumXmmArgs;
820 continue; 813 continue;
821 } 814 }
822 finishArgumentLowering(Arg, FramePtr, BasicFrameOffset, InArgsSizeBytes); 815 finishArgumentLowering(Arg, FramePtr, BasicFrameOffset, InArgsSizeBytes);
823 } 816 }
824 817
825 // Fill in stack offsets for locals. 818 // Fill in stack offsets for locals.
826 size_t GlobalsSpaceUsed = SpillAreaPaddingBytes; 819 size_t GlobalsSpaceUsed = SpillAreaPaddingBytes;
827 LocalsSize.assign(LocalsSize.size(), 0); 820 LocalsSize.assign(LocalsSize.size(), 0);
828 size_t NextStackOffset = GlobalsSpaceUsed; 821 size_t NextStackOffset = GlobalsSpaceUsed;
829 for (VarList::const_iterator I = SortedSpilledVariables.begin(), 822 for (Variable *Var : SortedSpilledVariables) {
830 E = SortedSpilledVariables.end();
831 I != E; ++I) {
832 Variable *Var = *I;
833 size_t Increment = typeWidthInBytesOnStack(Var->getType()); 823 size_t Increment = typeWidthInBytesOnStack(Var->getType());
834 if (SimpleCoalescing && VMetadata->isTracked(Var)) { 824 if (SimpleCoalescing && VMetadata->isTracked(Var)) {
835 if (VMetadata->isMultiBlock(Var)) { 825 if (VMetadata->isMultiBlock(Var)) {
836 GlobalsSpaceUsed += Increment; 826 GlobalsSpaceUsed += Increment;
837 NextStackOffset = GlobalsSpaceUsed; 827 NextStackOffset = GlobalsSpaceUsed;
838 } else { 828 } else {
839 SizeT NodeIndex = VMetadata->getLocalUseNode(Var)->getIndex(); 829 SizeT NodeIndex = VMetadata->getLocalUseNode(Var)->getIndex();
840 LocalsSize[NodeIndex] += Increment; 830 LocalsSize[NodeIndex] += Increment;
841 NextStackOffset = SpillAreaPaddingBytes + 831 NextStackOffset = SpillAreaPaddingBytes +
842 GlobalsAndSubsequentPaddingSize + 832 GlobalsAndSubsequentPaddingSize +
843 LocalsSize[NodeIndex]; 833 LocalsSize[NodeIndex];
844 } 834 }
845 } else { 835 } else {
846 NextStackOffset += Increment; 836 NextStackOffset += Increment;
847 } 837 }
848 if (IsEbpBasedFrame) 838 if (IsEbpBasedFrame)
849 Var->setStackOffset(-NextStackOffset); 839 Var->setStackOffset(-NextStackOffset);
850 else 840 else
851 Var->setStackOffset(SpillAreaSizeBytes - NextStackOffset); 841 Var->setStackOffset(SpillAreaSizeBytes - NextStackOffset);
852 } 842 }
853 this->FrameSizeLocals = NextStackOffset - SpillAreaPaddingBytes; 843 this->FrameSizeLocals = NextStackOffset - SpillAreaPaddingBytes;
854 this->HasComputedFrame = true; 844 this->HasComputedFrame = true;
855 845
856 // Assign stack offsets to variables that have been linked to spilled 846 // Assign stack offsets to variables that have been linked to spilled
857 // variables. 847 // variables.
858 for (VarList::const_iterator I = VariablesLinkedToSpillSlots.begin(), 848 for (Variable *Var : VariablesLinkedToSpillSlots) {
859 E = VariablesLinkedToSpillSlots.end();
860 I != E; ++I) {
861 Variable *Var = *I;
862 Variable *Linked = (llvm::cast<SpillVariable>(Var))->getLinkedTo(); 849 Variable *Linked = (llvm::cast<SpillVariable>(Var))->getLinkedTo();
863 Var->setStackOffset(Linked->getStackOffset()); 850 Var->setStackOffset(Linked->getStackOffset());
864 } 851 }
865 852
866 if (Func->getContext()->isVerbose(IceV_Frame)) { 853 if (Func->getContext()->isVerbose(IceV_Frame)) {
867 Ostream &Str = Func->getContext()->getStrDump(); 854 Ostream &Str = Func->getContext()->getStrDump();
868 855
869 Str << "Stack layout:\n"; 856 Str << "Stack layout:\n";
870 uint32_t EspAdjustmentPaddingSize = 857 uint32_t EspAdjustmentPaddingSize =
871 SpillAreaSizeBytes - LocalsSpillAreaSize - 858 SpillAreaSizeBytes - LocalsSpillAreaSize -
(...skipping 14 matching lines...)
886 << " spill area alignment = " << SpillAreaAlignmentBytes << " bytes\n" 873 << " spill area alignment = " << SpillAreaAlignmentBytes << " bytes\n"
887 << " locals spill area alignment = " << LocalsSlotsAlignmentBytes 874 << " locals spill area alignment = " << LocalsSlotsAlignmentBytes
888 << " bytes\n" 875 << " bytes\n"
889 << " is ebp based = " << IsEbpBasedFrame << "\n"; 876 << " is ebp based = " << IsEbpBasedFrame << "\n";
890 } 877 }
891 } 878 }
892 879
893 void TargetX8632::addEpilog(CfgNode *Node) { 880 void TargetX8632::addEpilog(CfgNode *Node) {
894 InstList &Insts = Node->getInsts(); 881 InstList &Insts = Node->getInsts();
895 InstList::reverse_iterator RI, E; 882 InstList::reverse_iterator RI, E;
883 // TODO(stichnot): Use llvm::make_range with LLVM 3.5.
896 for (RI = Insts.rbegin(), E = Insts.rend(); RI != E; ++RI) { 884 for (RI = Insts.rbegin(), E = Insts.rend(); RI != E; ++RI) {
897 if (llvm::isa<InstX8632Ret>(*RI)) 885 if (llvm::isa<InstX8632Ret>(*RI))
898 break; 886 break;
899 } 887 }
900 if (RI == E) 888 if (RI == E)
901 return; 889 return;
902 890
903 // Convert the reverse_iterator position into its corresponding 891 // Convert the reverse_iterator position into its corresponding
904 // (forward) iterator position. 892 // (forward) iterator position.
905 InstList::iterator InsertPoint = RI.base(); 893 InstList::iterator InsertPoint = RI.base();
(...skipping 55 matching lines...)
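addEpilog() above walks the instruction list backwards until it finds the ret, then needs the matching forward iterator for its insertion point; the new TODO notes that llvm::make_range could express the reverse walk once LLVM 3.5 is in place. A standalone illustration of the reverse_iterator-to-forward-iterator conversion via base(), with std::list standing in for InstList:

```cpp
#include <cassert>
#include <list>

int main() {
  std::list<int> Insts = {10, 20, 30, 40};
  // Walk backwards until a sentinel is found (stand-in for the
  // llvm::isa<InstX8632Ret>(*RI) test in addEpilog()).
  std::list<int>::reverse_iterator RI = Insts.rbegin(), E = Insts.rend();
  for (; RI != E; ++RI)
    if (*RI == 30)
      break;
  assert(RI != E);
  // For any reverse_iterator RI, RI.base() is the forward iterator one
  // position past the element RI refers to, so inserting at RI.base()
  // places new elements immediately after that element.
  std::list<int>::iterator InsertPoint = RI.base();
  std::list<int>::iterator It = Insts.insert(InsertPoint, 35);
  assert(*It == 35); // list is now 10, 20, 30, 35, 40
  return 0;
}
```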
961 949
962 template <typename T> void TargetX8632::emitConstantPool() const { 950 template <typename T> void TargetX8632::emitConstantPool() const {
963 Ostream &Str = Ctx->getStrEmit(); 951 Ostream &Str = Ctx->getStrEmit();
964 Type Ty = T::Ty; 952 Type Ty = T::Ty;
965 SizeT Align = typeAlignInBytes(Ty); 953 SizeT Align = typeAlignInBytes(Ty);
966 ConstantList Pool = Ctx->getConstantPool(Ty); 954 ConstantList Pool = Ctx->getConstantPool(Ty);
967 955
968 Str << "\t.section\t.rodata.cst" << Align << ",\"aM\",@progbits," << Align 956 Str << "\t.section\t.rodata.cst" << Align << ",\"aM\",@progbits," << Align
969 << "\n"; 957 << "\n";
970 Str << "\t.align\t" << Align << "\n"; 958 Str << "\t.align\t" << Align << "\n";
971 for (ConstantList::const_iterator I = Pool.begin(), E = Pool.end(); I != E; 959 for (Constant *C : Pool) {
972 ++I) { 960 typename T::IceType *Const = llvm::cast<typename T::IceType>(C);
973 typename T::IceType *Const = llvm::cast<typename T::IceType>(*I);
974 typename T::PrimitiveFpType Value = Const->getValue(); 961 typename T::PrimitiveFpType Value = Const->getValue();
975 // Use memcpy() to copy bits from Value into RawValue in a way 962 // Use memcpy() to copy bits from Value into RawValue in a way
976 // that avoids breaking strict-aliasing rules. 963 // that avoids breaking strict-aliasing rules.
977 typename T::PrimitiveIntType RawValue; 964 typename T::PrimitiveIntType RawValue;
978 memcpy(&RawValue, &Value, sizeof(Value)); 965 memcpy(&RawValue, &Value, sizeof(Value));
979 char buf[30]; 966 char buf[30];
980 int CharsPrinted = 967 int CharsPrinted =
981 snprintf(buf, llvm::array_lengthof(buf), T::PrintfString, RawValue); 968 snprintf(buf, llvm::array_lengthof(buf), T::PrintfString, RawValue);
982 assert(CharsPrinted >= 0 && 969 assert(CharsPrinted >= 0 &&
983 (size_t)CharsPrinted < llvm::array_lengthof(buf)); 970 (size_t)CharsPrinted < llvm::array_lengthof(buf));
(...skipping 3331 matching lines...)
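The constant-pool loop above copies each value's bits through memcpy() before printing them, precisely so the raw bit pattern can be read without a strict-aliasing violation from pointer type-punning. A self-contained illustration; the 1.5f value and the 0x%x-style format are arbitrary examples, not T::PrintfString:

```cpp
#include <cassert>
#include <cinttypes>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  float Value = 1.5f;
  uint32_t RawValue;
  static_assert(sizeof(RawValue) == sizeof(Value), "width mismatch");
  // memcpy() reads the float's object representation byte by byte, so no
  // pointer of the wrong type is ever dereferenced.
  std::memcpy(&RawValue, &Value, sizeof(Value));
  char buf[30];
  int CharsPrinted = std::snprintf(buf, sizeof(buf), "0x%" PRIx32, RawValue);
  assert(CharsPrinted > 0 &&
         static_cast<std::size_t>(CharsPrinted) < sizeof(buf));
  std::puts(buf); // prints 0x3fc00000, the IEEE-754 bits of 1.5f
  return 0;
}
```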
4315 RegExclude |= RegSet_FramePointer; 4302 RegExclude |= RegSet_FramePointer;
4316 llvm::SmallBitVector WhiteList = getRegisterSet(RegInclude, RegExclude); 4303 llvm::SmallBitVector WhiteList = getRegisterSet(RegInclude, RegExclude);
4317 // Make one pass to black-list pre-colored registers. TODO: If 4304 // Make one pass to black-list pre-colored registers. TODO: If
4318 // there was some prior register allocation pass that made register 4305 // there was some prior register allocation pass that made register
4319 // assignments, those registers need to be black-listed here as 4306 // assignments, those registers need to be black-listed here as
4320 // well. 4307 // well.
4321 llvm::DenseMap<const Variable *, const Inst *> LastUses; 4308 llvm::DenseMap<const Variable *, const Inst *> LastUses;
4322 // The first pass also keeps track of which instruction is the last 4309 // The first pass also keeps track of which instruction is the last
4323 // use for each infinite-weight variable. After the last use, the 4310 // use for each infinite-weight variable. After the last use, the
4324 // variable is released to the free list. 4311 // variable is released to the free list.
4325 for (InstList::iterator I = Context.getCur(), E = Context.getEnd(); I != E; 4312 for (Inst *Inst : Context) {
4326 ++I) {
4327 const Inst *Inst = *I;
4328 if (Inst->isDeleted()) 4313 if (Inst->isDeleted())
4329 continue; 4314 continue;
4330 // Don't consider a FakeKill instruction, because (currently) it 4315 // Don't consider a FakeKill instruction, because (currently) it
4331 // is only used to kill all scratch registers at a call site, and 4316 // is only used to kill all scratch registers at a call site, and
4332 // we don't want to black-list all scratch registers during the 4317 // we don't want to black-list all scratch registers during the
4333 // call lowering. This could become a problem since it relies on 4318 // call lowering. This could become a problem since it relies on
4334 // the lowering sequence not keeping any infinite-weight variables 4319 // the lowering sequence not keeping any infinite-weight variables
4335 // live across a call. TODO(stichnot): Consider replacing this 4320 // live across a call. TODO(stichnot): Consider replacing this
4336 // whole postLower() implementation with a robust local register 4321 // whole postLower() implementation with a robust local register
4337 // allocator, for example compute live ranges only for pre-colored 4322 // allocator, for example compute live ranges only for pre-colored
(...skipping 11 matching lines...)
4349 LastUses[Var] = Inst; 4334 LastUses[Var] = Inst;
4350 if (!Var->hasReg()) 4335 if (!Var->hasReg())
4351 continue; 4336 continue;
4352 WhiteList[Var->getRegNum()] = false; 4337 WhiteList[Var->getRegNum()] = false;
4353 } 4338 }
4354 } 4339 }
4355 } 4340 }
4356 // The second pass colors infinite-weight variables. 4341 // The second pass colors infinite-weight variables.
4357 llvm::SmallBitVector AvailableRegisters = WhiteList; 4342 llvm::SmallBitVector AvailableRegisters = WhiteList;
4358 llvm::SmallBitVector FreedRegisters(WhiteList.size()); 4343 llvm::SmallBitVector FreedRegisters(WhiteList.size());
4359 for (InstList::iterator I = Context.getCur(), E = Context.getEnd(); I != E; 4344 for (Inst *Inst : Context) {
4360 ++I) {
4361 FreedRegisters.reset(); 4345 FreedRegisters.reset();
4362 const Inst *Inst = *I;
4363 if (Inst->isDeleted()) 4346 if (Inst->isDeleted())
4364 continue; 4347 continue;
4365 // Skip FakeKill instructions like above. 4348 // Skip FakeKill instructions like above.
4366 if (llvm::isa<InstFakeKill>(Inst)) 4349 if (llvm::isa<InstFakeKill>(Inst))
4367 continue; 4350 continue;
4368 // Iterate over all variables referenced in the instruction, 4351 // Iterate over all variables referenced in the instruction,
4369 // including the Dest variable (if any). If the variable is 4352 // including the Dest variable (if any). If the variable is
4370 // marked as infinite-weight, find it a register. If this 4353 // marked as infinite-weight, find it a register. If this
4371 // instruction is the last use of the variable in the lowered 4354 // instruction is the last use of the variable in the lowered
4372 // sequence, release the register to the free list after this 4355 // sequence, release the register to the free list after this
(...skipping 152 matching lines...)
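To make the two passes described in postLower()'s comments above concrete, here is a toy version of the scheme: pass one black-lists registers already owned by pre-colored variables and records each remaining variable's last use, and pass two hands out white-listed registers and returns each one to the free list after that last use. Every type and value below is illustrative; none of it is Subzero's actual postLower() code:

```cpp
#include <cstddef>
#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct Inst {
  std::string Var;   // the single variable this toy instruction references
  int PreColoredReg; // >= 0 if the variable already has a register
};

int main() {
  const std::size_t NumRegs = 4;
  std::vector<Inst> Insts = {
      {"a", 1}, {"b", -1}, {"c", -1}, {"b", -1}, {"d", -1}};

  // Pass 1: black-list pre-colored registers and record last uses.
  std::vector<bool> WhiteList(NumRegs, true);
  std::map<std::string, std::size_t> LastUse;
  for (std::size_t I = 0; I < Insts.size(); ++I) {
    if (Insts[I].PreColoredReg >= 0)
      WhiteList[Insts[I].PreColoredReg] = false;
    else
      LastUse[Insts[I].Var] = I;
  }

  // Pass 2: give each uncolored variable a free white-listed register and
  // release it once its last use has been processed.
  std::vector<bool> Available = WhiteList;
  std::map<std::string, std::size_t> Assigned;
  for (std::size_t I = 0; I < Insts.size(); ++I) {
    const Inst &In = Insts[I];
    if (In.PreColoredReg >= 0)
      continue;
    if (!Assigned.count(In.Var)) {
      for (std::size_t R = 0; R < NumRegs; ++R) {
        if (Available[R]) {
          Assigned[In.Var] = R;
          Available[R] = false;
          break;
        }
      }
    }
    std::printf("%s -> r%zu\n", In.Var.c_str(), Assigned[In.Var]);
    if (LastUse[In.Var] == I)
      Available[Assigned[In.Var]] = true; // back on the free list
  }
  return 0;
}
```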
4525 Str << "\t.align\t" << Align << "\n"; 4508 Str << "\t.align\t" << Align << "\n";
4526 Str << MangledName << ":\n"; 4509 Str << MangledName << ":\n";
4527 for (SizeT i = 0; i < Size; ++i) { 4510 for (SizeT i = 0; i < Size; ++i) {
4528 Str << "\t.byte\t" << (((unsigned)Data[i]) & 0xff) << "\n"; 4511 Str << "\t.byte\t" << (((unsigned)Data[i]) & 0xff) << "\n";
4529 } 4512 }
4530 Str << "\t.size\t" << MangledName << ", " << Size << "\n"; 4513 Str << "\t.size\t" << MangledName << ", " << Size << "\n";
4531 } 4514 }
4532 } 4515 }
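The byte-emission loop just above casts each datum to unsigned and masks it with 0xff; a tiny illustration of why that keeps sign-extended bytes in the 0..255 range that a .byte directive expects (the data values are arbitrary):

```cpp
#include <iostream>

int main() {
  const char Data[] = {0, 127, (char)0x80, (char)0xff};
  for (char C : Data)
    // Casting to unsigned before masking maps values that were sign-extended
    // on a signed-char platform (0x80, 0xff) back into 0..255.
    std::cout << "\t.byte\t" << (((unsigned)C) & 0xff) << "\n";
  return 0;
}
```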
4533 4516
4534 } // end of namespace Ice 4517 } // end of namespace Ice