Chromium Code Reviews

Side by Side Diff: src/IceTargetLoweringARM32.h

Issue 1457683004: Subzero. ARM32. Removing memory legalization warts. (Closed)
Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: Addresses comments (created 5 years, 1 month ago)
1 //===- subzero/src/IceTargetLoweringARM32.h - ARM32 lowering ----*- C++ -*-===// 1 //===- subzero/src/IceTargetLoweringARM32.h - ARM32 lowering ----*- C++ -*-===//
2 // 2 //
3 // The Subzero Code Generator 3 // The Subzero Code Generator
4 // 4 //
5 // This file is distributed under the University of Illinois Open Source 5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details. 6 // License. See LICENSE.TXT for details.
7 // 7 //
8 //===----------------------------------------------------------------------===// 8 //===----------------------------------------------------------------------===//
9 /// 9 ///
10 /// \file 10 /// \file
(...skipping 117 matching lines...)
128 void emit(const ConstantFloat *C) const final; 128 void emit(const ConstantFloat *C) const final;
129 void emit(const ConstantDouble *C) const final; 129 void emit(const ConstantDouble *C) const final;
130 130
131 void lowerArguments() override; 131 void lowerArguments() override;
132 void addProlog(CfgNode *Node) override; 132 void addProlog(CfgNode *Node) override;
133 void addEpilog(CfgNode *Node) override; 133 void addEpilog(CfgNode *Node) override;
134 134
135 Operand *loOperand(Operand *Operand); 135 Operand *loOperand(Operand *Operand);
136 Operand *hiOperand(Operand *Operand); 136 Operand *hiOperand(Operand *Operand);
137 void finishArgumentLowering(Variable *Arg, Variable *FramePtr, 137 void finishArgumentLowering(Variable *Arg, Variable *FramePtr,
138 size_t BasicFrameOffset, size_t &InArgsSizeBytes); 138 size_t BasicFrameOffset, size_t *InArgsSizeBytes);
139 139
140 bool hasCPUFeature(TargetARM32Features::ARM32InstructionSet I) const { 140 bool hasCPUFeature(TargetARM32Features::ARM32InstructionSet I) const {
141 return CPUFeatures.hasFeature(I); 141 return CPUFeatures.hasFeature(I);
142 } 142 }
143 143
144 enum OperandLegalization { 144 enum OperandLegalization {
145 Legal_None = 0, 145 Legal_None = 0,
146 Legal_Reg = 1 << 0, /// physical register, not stack location 146 Legal_Reg = 1 << 0, /// physical register, not stack location
147 Legal_Flex = 1 << 1, /// A flexible operand2, which can hold rotated small 147 Legal_Flex = 1 << 1, /// A flexible operand2, which can hold rotated small
148 /// immediates, shifted registers, or modified fp imm. 148 /// immediates, shifted registers, or modified fp imm.
(...skipping 228 matching lines...)
377 /// all types (integer, floating point, and vectors), as well as moves between 377 /// all types (integer, floating point, and vectors), as well as moves between
378 /// Core and VFP registers. This is not a panacea: you must obey the (weird, 378 /// Core and VFP registers. This is not a panacea: you must obey the (weird,
379 /// confusing, non-uniform) rules for data moves in ARM. 379 /// confusing, non-uniform) rules for data moves in ARM.
380 void _mov(Variable *Dest, Operand *Src0, 380 void _mov(Variable *Dest, Operand *Src0,
381 CondARM32::Cond Pred = CondARM32::AL) { 381 CondARM32::Cond Pred = CondARM32::AL) {
382 // _mov used to be unique in the sense that it would create a temporary 382 // _mov used to be unique in the sense that it would create a temporary
383 // automagically if Dest was nullptr. It won't do that anymore, so we keep 383 // automagically if Dest was nullptr. It won't do that anymore, so we keep
384 // an assert around just in case there is some untested code path where Dest 384 // an assert around just in case there is some untested code path where Dest
385 // is nullptr. 385 // is nullptr.
386 assert(Dest != nullptr); 386 assert(Dest != nullptr);
387 assert(!llvm::isa<OperandARM32Mem>(Src0));
387 auto *Instr = InstARM32Mov::create(Func, Dest, Src0, Pred); 388 auto *Instr = InstARM32Mov::create(Func, Dest, Src0, Pred);
388 389
389 Context.insert(Instr); 390 Context.insert(Instr);
390 if (Instr->isMultiDest()) { 391 if (Instr->isMultiDest()) {
391 // If Instr is multi-dest, then Dest must be a Variable64On32. We add a 392 // If Instr is multi-dest, then Dest must be a Variable64On32. We add a
392 // fake-def for Instr.DestHi here. 393 // fake-def for Instr.DestHi here.
393 assert(llvm::isa<Variable64On32>(Dest)); 394 assert(llvm::isa<Variable64On32>(Dest));
394 Context.insert(InstFakeDef::create(Func, Instr->getDestHi())); 395 Context.insert(InstFakeDef::create(Func, Instr->getDestHi()));
395 } 396 }
396 } 397 }
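A note on the new assert: _mov no longer accepts an OperandARM32Mem source, so callers must legalize memory operands first. A minimal usage sketch, assuming the class's usual legalize() and makeReg() helpers (neither is shown in this diff):

    // Sketch only; not code from this patch.
    Operand *Src = legalize(RawSrc, Legal_Reg | Legal_Flex); // never a mem operand
    Variable *T = makeReg(Dest->getType());                  // assumed helper
    _mov(T, Src);      // fine: Src is a register or a flexible operand2
    // _mov(T, MemOp); // would now trip the assert; emit a load instead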
(...skipping 390 matching lines...)
787 Context.insert(InstARM32Vdiv::create(Func, Dest, Src0, Src1)); 788 Context.insert(InstARM32Vdiv::create(Func, Dest, Src0, Src1));
788 } 789 }
789 void _vcmp(Variable *Src0, Variable *Src1, 790 void _vcmp(Variable *Src0, Variable *Src1,
790 CondARM32::Cond Pred = CondARM32::AL) { 791 CondARM32::Cond Pred = CondARM32::AL) {
791 Context.insert(InstARM32Vcmp::create(Func, Src0, Src1, Pred)); 792 Context.insert(InstARM32Vcmp::create(Func, Src0, Src1, Pred));
792 } 793 }
793 void _vcmp(Variable *Src0, OperandARM32FlexFpZero *FpZero, 794 void _vcmp(Variable *Src0, OperandARM32FlexFpZero *FpZero,
794 CondARM32::Cond Pred = CondARM32::AL) { 795 CondARM32::Cond Pred = CondARM32::AL) {
795 Context.insert(InstARM32Vcmp::create(Func, Src0, FpZero, Pred)); 796 Context.insert(InstARM32Vcmp::create(Func, Src0, FpZero, Pred));
796 } 797 }
798 void _veor(Variable *Dest, Variable *Src0, Variable *Src1) {
799 Context.insert(InstARM32Veor::create(Func, Dest, Src0, Src1));
800 }
797 void _vmrs(CondARM32::Cond Pred = CondARM32::AL) { 801 void _vmrs(CondARM32::Cond Pred = CondARM32::AL) {
798 Context.insert(InstARM32Vmrs::create(Func, Pred)); 802 Context.insert(InstARM32Vmrs::create(Func, Pred));
799 } 803 }
800 void _vmul(Variable *Dest, Variable *Src0, Variable *Src1) { 804 void _vmul(Variable *Dest, Variable *Src0, Variable *Src1) {
801 Context.insert(InstARM32Vmul::create(Func, Dest, Src0, Src1)); 805 Context.insert(InstARM32Vmul::create(Func, Dest, Src0, Src1));
802 } 806 }
803 void _veor(Variable *Dest, Variable *Src0, Variable *Src1) {
804 Context.insert(InstARM32Veor::create(Func, Dest, Src0, Src1));
805 }
806 void _vsqrt(Variable *Dest, Variable *Src, 807 void _vsqrt(Variable *Dest, Variable *Src,
807 CondARM32::Cond Pred = CondARM32::AL) { 808 CondARM32::Cond Pred = CondARM32::AL) {
808 Context.insert(InstARM32Vsqrt::create(Func, Dest, Src, Pred)); 809 Context.insert(InstARM32Vsqrt::create(Func, Dest, Src, Pred));
809 } 810 }
810 void _vsub(Variable *Dest, Variable *Src0, Variable *Src1) { 811 void _vsub(Variable *Dest, Variable *Src0, Variable *Src1) {
811 Context.insert(InstARM32Vsub::create(Func, Dest, Src0, Src1)); 812 Context.insert(InstARM32Vsub::create(Func, Dest, Src0, Src1));
812 } 813 }
813 814
814 /// Run a pass through stack variables and ensure that the offsets are legal. 815 /// Run a pass through stack variables and ensure that the offsets are legal.
815 /// If the offset is not legal, use a new base register that accounts for the 816 /// If the offset is not legal, use a new base register that accounts for the
816 /// offset, such that the addressing mode offset bits are now legal. 817 /// offset, such that the addressing mode offset bits are now legal.
817 void legalizeStackSlots(); 818 void legalizeStackSlots();
818 /// Returns true if the given Offset can be represented in a ldr/str. 819 /// Returns true if the given Offset can be represented in a ldr/str.
819 bool isLegalMemOffset(Type Ty, int32_t Offset) const; 820 bool isLegalMemOffset(Type Ty, int32_t Offset) const;
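The legal ldr/str immediate range is type-dependent on ARM32. An illustrative sketch of the predicate, with ranges taken from the architecture rather than from this patch (the authoritative check lives in IceTargetLoweringARM32.cpp):

    // Illustrative only: ldr/str and ldrb/strb take a 12-bit immediate,
    // ldrh/strh (and the signed variants) an 8-bit one, and vldr/vstr an
    // 8-bit immediate scaled by 4.
    bool isLegalMemOffsetSketch(Type Ty, int32_t Offset) {
      switch (Ty) {
      case IceType_f32:
      case IceType_f64:
        return (Offset % 4) == 0 && Offset >= -1020 && Offset <= 1020;
      case IceType_i16:
        return Offset >= -255 && Offset <= 255;
      default: // i8/i32-sized accesses
        return Offset >= -4095 && Offset <= 4095;
      }
    }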
820 // Creates a new Base register centered around 821 // Creates a new Base register centered around
821 // [OrigBaseReg, +/- Offset+StackAdjust]. 822 // [OrigBaseReg, +/- Offset+StackAdjust].
822 Variable *newBaseRegister(int32_t Offset, int32_t StackAdjust, 823 Variable *newBaseRegister(int32_t Offset, int32_t StackAdjust,
823 Variable *OrigBaseReg); 824 Variable *OrigBaseReg);
824 /// Creates a new, legal StackVariable w.r.t. ARM's Immediate requirements. 825 /// Creates a new, legal OperandARM32Mem for accessing OrigBase + Offset +
825 /// This method is not very smart: it will always create and return a new 826 /// StackAdjust. The returned mem operand is a legal operand for accessing
826 /// StackVariable, even if Offset + StackAdjust is encodable. 827 /// memory that is of type Ty.
827 StackVariable *legalizeStackSlot(Type Ty, int32_t Offset, int32_t StackAdjust, 828 ///
828 Variable *OrigBaseReg, Variable **NewBaseReg, 829 /// If [OrigBaseReg, #Offset+StackAdjust] is encodable, then the method
829 int32_t *NewBaseOffset); 830 /// returns a Mem operand expressing it. Otherwise,
830 /// Legalizes Mov if its Source (or Destination) contains an invalid 831 ///
831 /// immediate. 832 /// if [*NewBaseReg, #Offset+StackAdjust-*NewBaseOffset] is encodable, the
832 void legalizeMovStackAddrImm(InstARM32Mov *Mov, int32_t StackAdjust, 833 /// method will return that. Otherwise,
833 Variable *OrigBaseReg, Variable **NewBaseReg, 834 ///
834 int32_t *NewBaseOffset); 835 /// a new base register ip=OrigBaseReg+Offset+StackAdjust is created, and the
836 /// method returns [ip, #0].
837 OperandARM32Mem *createMemOperand(Type Ty, int32_t Offset,
838 int32_t StackAdjust, Variable *OrigBaseReg,
839 Variable **NewBaseReg,
840 int32_t *NewBaseOffset);
841 /// Legalizes Mov if its Source (or Destination) is a spilled Variable. Moves
842 /// to memory become store instructions, and moves from memory, loads.
843 void legalizeMov(InstARM32Mov *Mov, int32_t StackAdjust,
844 Variable *OrigBaseReg, Variable **NewBaseReg,
845 int32_t *NewBaseOffset);
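The createMemOperand comment above describes a three-way fallback. A minimal sketch of that cascade, assuming a hypothetical memOperandFrom() helper in place of the real OperandARM32Mem construction (the actual logic is in the .cpp):

    // Sketch only: mirrors the documented fallback order, not the actual code.
    OperandARM32Mem *createMemOperandSketch(Type Ty, int32_t Offset,
                                            int32_t StackAdjust,
                                            Variable *OrigBaseReg,
                                            Variable **NewBaseReg,
                                            int32_t *NewBaseOffset) {
      const int32_t Full = Offset + StackAdjust;
      // Case 1: the original base register reaches the slot directly.
      if (isLegalMemOffset(Ty, Full))
        return memOperandFrom(Ty, OrigBaseReg, Full);          // hypothetical
      // Case 2: a previously created base register reaches it.
      if (*NewBaseReg != nullptr && isLegalMemOffset(Ty, Full - *NewBaseOffset))
        return memOperandFrom(Ty, *NewBaseReg, Full - *NewBaseOffset);
      // Case 3: materialize ip = OrigBaseReg + Offset + StackAdjust, use [ip, #0].
      *NewBaseReg = newBaseRegister(Offset, StackAdjust, OrigBaseReg);
      *NewBaseOffset = Full;
      return memOperandFrom(Ty, *NewBaseReg, 0);
    }

legalizeMov then rewrites spilled-variable moves using the returned mem operand: a move into memory becomes a store, and a move out of memory becomes a load.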
835 846
836 TargetARM32Features CPUFeatures; 847 TargetARM32Features CPUFeatures;
837 bool UsesFramePointer = false; 848 bool UsesFramePointer = false;
838 bool NeedsStackAlignment = false; 849 bool NeedsStackAlignment = false;
839 bool MaybeLeafFunc = true; 850 bool MaybeLeafFunc = true;
840 size_t SpillAreaSizeBytes = 0; 851 size_t SpillAreaSizeBytes = 0;
841 // TODO(jpp): std::array instead of array. 852 // TODO(jpp): std::array instead of array.
842 static llvm::SmallBitVector TypeToRegisterSet[RCARM32_NUM]; 853 static llvm::SmallBitVector TypeToRegisterSet[RCARM32_NUM];
843 static llvm::SmallBitVector RegisterAliases[RegARM32::Reg_NUM]; 854 static llvm::SmallBitVector RegisterAliases[RegARM32::Reg_NUM];
844 static llvm::SmallBitVector ScratchRegs; 855 static llvm::SmallBitVector ScratchRegs;
(...skipping 151 matching lines...)
996 1007
997 private: 1008 private:
998 ~TargetHeaderARM32() = default; 1009 ~TargetHeaderARM32() = default;
999 1010
1000 TargetARM32Features CPUFeatures; 1011 TargetARM32Features CPUFeatures;
1001 }; 1012 };
1002 1013
1003 } // end of namespace Ice 1014 } // end of namespace Ice
1004 1015
1005 #endif // SUBZERO_SRC_ICETARGETLOWERINGARM32_H 1016 #endif // SUBZERO_SRC_ICETARGETLOWERINGARM32_H
