Chromium Code Reviews
Unified Diff: src/IceTargetLoweringX86Base.h

Issue 1472623002: Unify alloca, outgoing arg, and prolog construction (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: Fixed missing out args in subtraction. Created 5 years ago
 //===- subzero/src/IceTargetLoweringX86Base.h - x86 lowering ----*- C++ -*-===//
 //
 // The Subzero Code Generator
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 ///
 /// \file
(...skipping 91 matching lines...)
     return Traits::X86_STACK_ALIGNMENT_BYTES;
   }
   void reserveFixedAllocaArea(size_t Size, size_t Align) override {
     FixedAllocaSizeBytes = Size;
     assert(llvm::isPowerOf2_32(Align));
     FixedAllocaAlignBytes = Align;
     PrologEmitsFixedAllocas = true;
   }
   /// Returns the (negative) offset from ebp/rbp where the fixed Allocas start.
   int32_t getFrameFixedAllocaOffset() const override {
-    return FixedAllocaSizeBytes - SpillAreaSizeBytes;
+    return FixedAllocaSizeBytes - (SpillAreaSizeBytes - maxOutArgsSizeBytes());
+  }
+  virtual uint32_t maxOutArgsSizeBytes() const override {
+    return MaxOutArgsSizeBytes;
+  }
+  virtual void updateMaxOutArgsSizeBytes(uint32_t Size) {
+    MaxOutArgsSizeBytes = std::max(MaxOutArgsSizeBytes, Size);
   }

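A minimal arithmetic sketch of the offset change, assuming SpillAreaSizeBytes now also includes the outgoing-argument area that the prolog reserves below the fixed allocas (the actual frame layout lives in the Impl file, not shown in this diff):

    new offset = FixedAllocaSizeBytes - (SpillAreaSizeBytes - maxOutArgsSizeBytes())
               = (FixedAllocaSizeBytes - SpillAreaSizeBytes) + maxOutArgsSizeBytes()
               = old offset + maxOutArgsSizeBytes()

So the fixed allocas simply move up by the space now carved out for outgoing arguments at the bottom of the frame.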
   bool shouldSplitToVariable64On32(Type Ty) const override {
     return Traits::Is64Bit ? false : Ty == IceType_i64;
   }

   SizeT getMinJumpTableSize() const override { return 4; }

   void emitVariable(const Variable *Var) const override;

(...skipping 52 matching lines...)
   void lowerInsertElement(const InstInsertElement *Inst) override;
   void lowerLoad(const InstLoad *Inst) override;
   void lowerPhi(const InstPhi *Inst) override;
   void lowerSelect(const InstSelect *Inst) override;
   void lowerStore(const InstStore *Inst) override;
   void lowerSwitch(const InstSwitch *Inst) override;
   void lowerUnreachable(const InstUnreachable *Inst) override;
   void lowerOther(const Inst *Instr) override;
   void lowerRMW(const typename Traits::Insts::FakeRMW *RMW);
   void prelowerPhis() override;
+  uint32_t getCallStackArgumentsSizeBytes(const std::vector<Type> &ArgTypes,
+                                          Type ReturnType);
   uint32_t getCallStackArgumentsSizeBytes(const InstCall *Instr) override;
-  void genTargetHelperCallFor(Inst *Instr) override { (void)Instr; }
+  void genTargetHelperCallFor(Inst *Instr) override;
   void doAddressOptLoad() override;
   void doAddressOptStore() override;
   void doMockBoundsCheck(Operand *Opnd) override;
   void randomlyInsertNop(float Probability,
                          RandomNumberGenerator &RNG) override;

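A minimal usage sketch for the new type-list overload, assuming it is called from inside one of this class's lowering methods when a helper call is generated; the specific argument and return types below are illustrative only, not part of this CL:

    // Hypothetical: size the outgoing-argument area for a helper call before
    // any InstCall exists, then record the running maximum so the prolog can
    // reserve the space once for the whole function.
    std::vector<Type> ArgTypes = {IceType_i32, IceType_i32};
    uint32_t OutArgsSize = getCallStackArgumentsSizeBytes(ArgTypes, IceType_i64);
    updateMaxOutArgsSizeBytes(OutArgsSize);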
   /// Naive lowering of cmpxchg.
   void lowerAtomicCmpxchg(Variable *DestPrev, Operand *Ptr, Operand *Expected,
                           Operand *Desired);
   /// Attempt a more optimized lowering of cmpxchg. Returns true if optimized.
(...skipping 126 matching lines...)
   }
   void _adc_rmw(typename Traits::X86OperandMem *DestSrc0, Operand *Src1) {
     Context.insert(Traits::Insts::AdcRMW::create(Func, DestSrc0, Src1));
   }
   void _add(Variable *Dest, Operand *Src0) {
     Context.insert(Traits::Insts::Add::create(Func, Dest, Src0));
   }
   void _add_rmw(typename Traits::X86OperandMem *DestSrc0, Operand *Src1) {
     Context.insert(Traits::Insts::AddRMW::create(Func, DestSrc0, Src1));
   }
-  void _adjust_stack(int32_t Amount) {
-    Context.insert(Traits::Insts::AdjustStack::create(
-        Func, Amount, getPhysicalRegister(Traits::RegisterSet::Reg_esp)));
-  }
   void _addps(Variable *Dest, Operand *Src0) {
     Context.insert(Traits::Insts::Addps::create(Func, Dest, Src0));
   }
   void _addss(Variable *Dest, Operand *Src0) {
     Context.insert(Traits::Insts::Addss::create(Func, Dest, Src0));
   }
   void _and(Variable *Dest, Operand *Src0) {
     Context.insert(Traits::Insts::And::create(Func, Dest, Src0));
   }
   void _and_rmw(typename Traits::X86OperandMem *DestSrc0, Operand *Src1) {
(...skipping 351 matching lines...)
   void findRMW();

   typename Traits::InstructionSet InstructionSet =
       Traits::InstructionSet::Begin;
   bool IsEbpBasedFrame = false;
   bool NeedsStackAlignment = false;
   size_t SpillAreaSizeBytes = 0;
   size_t FixedAllocaSizeBytes = 0;
   size_t FixedAllocaAlignBytes = 0;
   bool PrologEmitsFixedAllocas = false;
+  uint32_t MaxOutArgsSizeBytes = 0;
   static std::array<llvm::SmallBitVector, RCX86_NUM> TypeToRegisterSet;
   static std::array<llvm::SmallBitVector, Traits::RegisterSet::Reg_NUM>
       RegisterAliases;
   static llvm::SmallBitVector ScratchRegs;
   llvm::SmallBitVector RegsUsed;
   std::array<VarList, IceType_NUM> PhysicalRegisters;

   /// Randomize a given immediate operand
   Operand *randomizeOrPoolImmediate(Constant *Immediate,
                                     int32_t RegNum = Variable::NoRegister);
(...skipping 55 matching lines...)
   lowerIcmp64(const InstIcmp *Icmp, const InstBr *Br);

   BoolFolding FoldingInfo;
 };
 } // end of namespace X86Internal
 } // end of namespace Ice

 #include "IceTargetLoweringX86BaseImpl.h"

 #endif // SUBZERO_SRC_ICETARGETLOWERINGX86BASE_H