OLD | NEW |
1 //===- subzero/src/IceTargetLoweringX86Base.h - x86 lowering ----*- C++ -*-===// | 1 //===- subzero/src/IceTargetLoweringX86Base.h - x86 lowering ----*- C++ -*-===// |
2 // | 2 // |
3 // The Subzero Code Generator | 3 // The Subzero Code Generator |
4 // | 4 // |
5 // This file is distributed under the University of Illinois Open Source | 5 // This file is distributed under the University of Illinois Open Source |
6 // License. See LICENSE.TXT for details. | 6 // License. See LICENSE.TXT for details. |
7 // | 7 // |
8 //===----------------------------------------------------------------------===// | 8 //===----------------------------------------------------------------------===// |
9 /// | 9 /// |
10 /// \file | 10 /// \file |
11 /// \brief Declares the TargetLoweringX86 template class, which implements the | 11 /// \brief Declares the TargetLoweringX86 template class, which implements the |
12 /// TargetLowering base interface for the x86 architecture. | 12 /// TargetLowering base interface for the x86 architecture. |
13 /// | 13 /// |
14 //===----------------------------------------------------------------------===// | 14 //===----------------------------------------------------------------------===// |
15 | 15 |
16 #ifndef SUBZERO_SRC_ICETARGETLOWERINGX86BASE_H | 16 #ifndef SUBZERO_SRC_ICETARGETLOWERINGX86BASE_H |
17 #define SUBZERO_SRC_ICETARGETLOWERINGX86BASE_H | 17 #define SUBZERO_SRC_ICETARGETLOWERINGX86BASE_H |
18 | 18 |
19 #include "IceDefs.h" | 19 #include "IceDefs.h" |
20 #include "IceInst.h" | 20 #include "IceInst.h" |
21 #include "IceSwitchLowering.h" | 21 #include "IceSwitchLowering.h" |
22 #include "IceTargetLowering.h" | 22 #include "IceTargetLowering.h" |
23 #include "IceTargetLoweringX86RegClass.h" | 23 #include "IceTargetLoweringX86RegClass.h" |
24 #include "IceUtils.h" | 24 #include "IceUtils.h" |
25 | 25 |
26 #include <array> | 26 #include <array> |
27 #include <type_traits> | 27 #include <type_traits> |
28 #include <utility> | 28 #include <utility> |
29 | 29 |
| 30 #ifndef X86NAMESPACE |
| 31 #error "You must define the X86 Target namespace." |
| 32 #endif |
| 33 |
30 namespace Ice { | 34 namespace Ice { |
31 namespace X86Internal { | 35 namespace X86NAMESPACE { |
32 | 36 |
33 template <class MachineTraits> class BoolFolding; | 37 using namespace ::Ice::X86; |
34 | 38 |
35 template <class Machine> struct MachineTraits {}; | 39 template <typename Traits> class BoolFolding; |
36 | 40 |
37 /// TargetX86Base is a template for all X86 Targets, and it relies on the | 41 /// TargetX86Base is a template for all X86 Targets, and it relies on the |
38 /// CRTP for generating code, delegating target-specific lowerings (e.g., | 42 /// CRTP for generating code, delegating target-specific lowerings (e.g., |
39 /// call, ret, and intrinsics) to the actual backends. Backends are expected to | 43 /// call, ret, and intrinsics) to the actual backends. Backends are expected to |
40 /// implement the following methods (which should be accessible from | 44 /// implement the following methods (which should be accessible from |
41 /// TargetX86Base): | 45 /// TargetX86Base): |
42 /// | 46 /// |
43 /// Operand *createNaClReadTPSrcOperand() | 47 /// Operand *createNaClReadTPSrcOperand() |
44 /// | 48 /// |
45 /// Note: Ideally, we should be able to | 49 /// Note: Ideally, we should be able to |
46 /// | 50 /// |
47 /// static_assert(std::is_base_of<TargetX86Base<Machine>, Machine>::value); | 51 /// static_assert(std::is_base_of<TargetX86Base<TraitsType>, |
| 52 /// Machine>::value); |
48 /// | 53 /// |
49 /// but that does not work: the compiler does not know that Machine inherits | 54 /// but that does not work: the compiler does not know that Machine inherits |
50 /// from TargetX86Base at this point in translation. | 55 /// from TargetX86Base at this point in translation. |
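The CRTP arrangement described above can be sketched in isolation (the names below are illustrative, not Subzero's). The sketch also shows why the static_assert cannot live at class scope: the derived class is still incomplete while the base template is being instantiated, but it is complete by the time a member function body is instantiated.

```cpp
#include <type_traits>

// Minimal CRTP sketch: the base statically dispatches into the derived class.
template <typename ConcreteTarget> class TargetBase {
public:
  void lower() {
    // Deferred check: when this body is instantiated (i.e., when lower() is
    // first called), ConcreteTarget is a complete type, so is_base_of works.
    static_assert(std::is_base_of<TargetBase, ConcreteTarget>::value,
                  "ConcreteTarget must derive from TargetBase");
    // No virtual call: resolved at compile time and typically inlined.
    static_cast<ConcreteTarget *>(this)->lowerImpl();
  }
};

struct TargetX8632Sketch : TargetBase<TargetX8632Sketch> {
  void lowerImpl() { /* target-specific lowering would go here */ }
};
```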
51 template <class Machine> class TargetX86Base : public TargetLowering { | 56 template <typename TraitsType> class TargetX86Base : public TargetLowering { |
52 TargetX86Base() = delete; | 57 TargetX86Base() = delete; |
53 TargetX86Base(const TargetX86Base &) = delete; | 58 TargetX86Base(const TargetX86Base &) = delete; |
54 TargetX86Base &operator=(const TargetX86Base &) = delete; | 59 TargetX86Base &operator=(const TargetX86Base &) = delete; |
55 | 60 |
56 public: | 61 public: |
57 using Traits = MachineTraits<Machine>; | 62 using Traits = TraitsType; |
58 using BoolFolding = ::Ice::X86Internal::BoolFolding<Traits>; | 63 using BoolFolding = BoolFolding<Traits>; |
| 64 using ConcreteTarget = typename Traits::ConcreteTarget; |
| 65 using InstructionSetEnum = typename Traits::InstructionSet; |
| 66 |
| 67 using BrCond = typename Traits::Cond::BrCond; |
| 68 using CmppsCond = typename Traits::Cond::CmppsCond; |
| 69 |
| 70 using X86Address = typename Traits::Address; |
| 71 using X86Operand = typename Traits::X86Operand; |
| 72 using X86OperandMem = typename Traits::X86OperandMem; |
| 73 using SegmentRegisters = typename Traits::X86OperandMem::SegmentRegisters; |
| 74 using SpillVariable = typename Traits::SpillVariable; |
| 75 |
| 76 using InstX86Br = typename Traits::Insts::Br; |
| 77 using InstX86FakeRMW = typename Traits::Insts::FakeRMW; |
| 78 using InstX86Label = typename Traits::Insts::Label; |
59 | 79 |
60 ~TargetX86Base() override = default; | 80 ~TargetX86Base() override = default; |
61 | 81 |
62 static void staticInit(); | 82 static void staticInit(); |
63 static TargetX86Base *create(Cfg *Func) { return new TargetX86Base(Func); } | 83 static TargetX86Base *create(Cfg *Func) { return new TargetX86Base(Func); } |
64 | 84 |
65 void translateOm1() override; | 85 void translateOm1() override; |
66 void translateO2() override; | 86 void translateO2() override; |
67 void doLoadOpt(); | 87 void doLoadOpt(); |
68 bool doBranchOpt(Inst *I, const CfgNode *NextNode) override; | 88 bool doBranchOpt(Inst *I, const CfgNode *NextNode) override; |
(...skipping 80 matching lines...)
149 hiOperand(Operand *Operand); | 169 hiOperand(Operand *Operand); |
150 template <typename T = Traits> | 170 template <typename T = Traits> |
151 typename std::enable_if<T::Is64Bit, Operand>::type *hiOperand(Operand *) { | 171 typename std::enable_if<T::Is64Bit, Operand>::type *hiOperand(Operand *) { |
152 llvm::report_fatal_error( | 172 llvm::report_fatal_error( |
153 "Hey, yo! This is x86-64. Watcha doin'? (hiOperand)"); | 173 "Hey, yo! This is x86-64. Watcha doin'? (hiOperand)"); |
154 } | 174 } |
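The hiOperand pair above uses the default-argument SFINAE idiom: `T = Traits` makes the enable_if condition depend on a deducible template parameter, so exactly one overload survives per target. A stand-alone sketch of the same idiom, with illustrative names:

```cpp
#include <cstdint>
#include <stdexcept>
#include <type_traits>

struct Traits32 { static constexpr bool Is64Bit = false; };
struct Traits64 { static constexpr bool Is64Bit = true; };

template <typename Traits> struct TargetSketch {
  // Selected for 32-bit targets, where an i64 value really has a high half.
  template <typename T = Traits>
  typename std::enable_if<!T::Is64Bit, uint32_t>::type hiHalf(uint64_t V) {
    return static_cast<uint32_t>(V >> 32);
  }
  // Selected for 64-bit targets, where asking for the high half is a bug
  // (the real code calls llvm::report_fatal_error instead of throwing).
  template <typename T = Traits>
  typename std::enable_if<T::Is64Bit, uint32_t>::type hiHalf(uint64_t) {
    throw std::logic_error("no high half on a 64-bit target (hiHalf)");
  }
};
```

`TargetSketch<Traits32>().hiHalf(V)` compiles to the shift; the same call spelled with Traits64 selects the error overload, letting shared lowering code use one name on both targets.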
155 | 175 |
156 void finishArgumentLowering(Variable *Arg, Variable *FramePtr, | 176 void finishArgumentLowering(Variable *Arg, Variable *FramePtr, |
157 size_t BasicFrameOffset, size_t StackAdjBytes, | 177 size_t BasicFrameOffset, size_t StackAdjBytes, |
158 size_t &InArgsSizeBytes); | 178 size_t &InArgsSizeBytes); |
159 typename Traits::Address stackVarToAsmOperand(const Variable *Var) const; | 179 X86Address stackVarToAsmOperand(const Variable *Var) const; |
160 | 180 |
161 typename Traits::InstructionSet getInstructionSet() const { | 181 InstructionSetEnum getInstructionSet() const { return InstructionSet; } |
162 return InstructionSet; | |
163 } | |
164 Operand *legalizeUndef(Operand *From, int32_t RegNum = Variable::NoRegister); | 182 Operand *legalizeUndef(Operand *From, int32_t RegNum = Variable::NoRegister); |
165 | 183 |
166 protected: | 184 protected: |
167 explicit TargetX86Base(Cfg *Func); | 185 explicit TargetX86Base(Cfg *Func); |
168 | 186 |
169 void postLower() override; | 187 void postLower() override; |
170 | 188 |
171 void lowerAlloca(const InstAlloca *Inst) override; | 189 void lowerAlloca(const InstAlloca *Inst) override; |
172 void lowerArithmetic(const InstArithmetic *Inst) override; | 190 void lowerArithmetic(const InstArithmetic *Inst) override; |
173 void lowerAssign(const InstAssign *Inst) override; | 191 void lowerAssign(const InstAssign *Inst) override; |
174 void lowerBr(const InstBr *Inst) override; | 192 void lowerBr(const InstBr *Inst) override; |
175 void lowerCast(const InstCast *Inst) override; | 193 void lowerCast(const InstCast *Inst) override; |
176 void lowerExtractElement(const InstExtractElement *Inst) override; | 194 void lowerExtractElement(const InstExtractElement *Inst) override; |
177 void lowerFcmp(const InstFcmp *Inst) override; | 195 void lowerFcmp(const InstFcmp *Inst) override; |
178 void lowerIcmp(const InstIcmp *Inst) override; | 196 void lowerIcmp(const InstIcmp *Inst) override; |
179 | 197 |
180 void lowerIntrinsicCall(const InstIntrinsicCall *Inst) override; | 198 void lowerIntrinsicCall(const InstIntrinsicCall *Inst) override; |
181 void lowerInsertElement(const InstInsertElement *Inst) override; | 199 void lowerInsertElement(const InstInsertElement *Inst) override; |
182 void lowerLoad(const InstLoad *Inst) override; | 200 void lowerLoad(const InstLoad *Inst) override; |
183 void lowerPhi(const InstPhi *Inst) override; | 201 void lowerPhi(const InstPhi *Inst) override; |
184 void lowerSelect(const InstSelect *Inst) override; | 202 void lowerSelect(const InstSelect *Inst) override; |
185 void lowerStore(const InstStore *Inst) override; | 203 void lowerStore(const InstStore *Inst) override; |
186 void lowerSwitch(const InstSwitch *Inst) override; | 204 void lowerSwitch(const InstSwitch *Inst) override; |
187 void lowerUnreachable(const InstUnreachable *Inst) override; | 205 void lowerUnreachable(const InstUnreachable *Inst) override; |
188 void lowerOther(const Inst *Instr) override; | 206 void lowerOther(const Inst *Instr) override; |
189 void lowerRMW(const typename Traits::Insts::FakeRMW *RMW); | 207 void lowerRMW(const InstX86FakeRMW *RMW); |
190 void prelowerPhis() override; | 208 void prelowerPhis() override; |
191 uint32_t getCallStackArgumentsSizeBytes(const std::vector<Type> &ArgTypes, | 209 uint32_t getCallStackArgumentsSizeBytes(const std::vector<Type> &ArgTypes, |
192 Type ReturnType); | 210 Type ReturnType); |
193 uint32_t getCallStackArgumentsSizeBytes(const InstCall *Instr) override; | 211 uint32_t getCallStackArgumentsSizeBytes(const InstCall *Instr) override; |
194 void genTargetHelperCallFor(Inst *Instr) override; | 212 void genTargetHelperCallFor(Inst *Instr) override; |
195 void doAddressOptLoad() override; | 213 void doAddressOptLoad() override; |
196 void doAddressOptStore() override; | 214 void doAddressOptStore() override; |
197 void doMockBoundsCheck(Operand *Opnd) override; | 215 void doMockBoundsCheck(Operand *Opnd) override; |
198 void randomlyInsertNop(float Probability, | 216 void randomlyInsertNop(float Probability, |
199 RandomNumberGenerator &RNG) override; | 217 RandomNumberGenerator &RNG) override; |
(...skipping 70 matching lines...)
270 }; | 288 }; |
271 using LegalMask = uint32_t; | 289 using LegalMask = uint32_t; |
272 Operand *legalize(Operand *From, LegalMask Allowed = Legal_All, | 290 Operand *legalize(Operand *From, LegalMask Allowed = Legal_All, |
273 int32_t RegNum = Variable::NoRegister); | 291 int32_t RegNum = Variable::NoRegister); |
274 Variable *legalizeToReg(Operand *From, int32_t RegNum = Variable::NoRegister); | 292 Variable *legalizeToReg(Operand *From, int32_t RegNum = Variable::NoRegister); |
275 /// Legalize the first source operand for use in the cmp instruction. | 293 /// Legalize the first source operand for use in the cmp instruction. |
276 Operand *legalizeSrc0ForCmp(Operand *Src0, Operand *Src1); | 294 Operand *legalizeSrc0ForCmp(Operand *Src0, Operand *Src1); |
277 /// Turn a pointer operand into a memory operand that can be used by a real | 295 /// Turn a pointer operand into a memory operand that can be used by a real |
278 /// load/store operation. Legalizes the operand as well. This is a nop if the | 296 /// load/store operation. Legalizes the operand as well. This is a nop if the |
279 /// operand is already a legal memory operand. | 297 /// operand is already a legal memory operand. |
280 typename Traits::X86OperandMem *formMemoryOperand(Operand *Ptr, Type Ty, | 298 X86OperandMem *formMemoryOperand(Operand *Ptr, Type Ty, |
281 bool DoLegalize = true); | 299 bool DoLegalize = true); |
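A hedged sketch of how these helpers compose in a load lowering (illustrative only: Subzero's actual lowerLoad routes through lowerAssign, and the `_mov` emitter used here is declared in an elided portion of this class):

```cpp
void lowerLoadSketch(const InstLoad *Load) {
  Variable *Dest = Load->getDest();
  Type Ty = Dest->getType();
  // Fold the pointer into a legalized x86 addressing-mode operand.
  X86OperandMem *Mem = formMemoryOperand(Load->getSourceAddress(), Ty);
  Variable *T = makeReg(Ty); // scratch register for the loaded value
  _mov(T, Mem);              // the actual mov reg, [mem]
  _mov(Dest, T);             // hand the value to the destination variable
}
```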
282 | 300 |
283 Variable *makeReg(Type Ty, int32_t RegNum = Variable::NoRegister); | 301 Variable *makeReg(Type Ty, int32_t RegNum = Variable::NoRegister); |
284 static Type stackSlotType(); | 302 static Type stackSlotType(); |
285 | 303 |
286 static constexpr uint32_t NoSizeLimit = 0; | 304 static constexpr uint32_t NoSizeLimit = 0; |
287 static const Type TypeForSize[]; | 305 static const Type TypeForSize[]; |
288 /// Returns the largest type which is smaller than or equal to Size bytes. The | 306 /// Returns the largest type which is smaller than or equal to Size bytes. The |
289 /// type is suitable for copying memory, i.e., a load and store will be a single | 307 /// type is suitable for copying memory, i.e., a load and store will be a single |
290 /// instruction (for example x86 will get f64 not i64). | 308 /// instruction (for example x86 will get f64 not i64). |
291 static Type largestTypeInSize(uint32_t Size, uint32_t MaxSize = NoSizeLimit); | 309 static Type largestTypeInSize(uint32_t Size, uint32_t MaxSize = NoSizeLimit); |
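The selection rule that comment describes amounts to the cascade below (an illustrative equivalent, not Subzero's implementation, which indexes TypeForSize by the position of the highest set bit in Size):

```cpp
static Type largestTypeInSizeSketch(uint32_t Size, uint32_t MaxSize) {
  uint32_t Limit = Size;
  if (MaxSize != NoSizeLimit && MaxSize < Limit)
    Limit = MaxSize;
  if (Limit >= 16)
    return IceType_v16i8; // a full XMM register
  if (Limit >= 8)
    return IceType_f64;   // one movsd covers 8 bytes; i64 would take two moves
  if (Limit >= 4)
    return IceType_i32;
  if (Limit >= 2)
    return IceType_i16;
  return IceType_i8;
}
```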
(...skipping 17 matching lines...)
309 Variable *makeVectorOfOnes(Type Ty, int32_t RegNum = Variable::NoRegister); | 327 Variable *makeVectorOfOnes(Type Ty, int32_t RegNum = Variable::NoRegister); |
310 Variable *makeVectorOfMinusOnes(Type Ty, | 328 Variable *makeVectorOfMinusOnes(Type Ty, |
311 int32_t RegNum = Variable::NoRegister); | 329 int32_t RegNum = Variable::NoRegister); |
312 Variable *makeVectorOfHighOrderBits(Type Ty, | 330 Variable *makeVectorOfHighOrderBits(Type Ty, |
313 int32_t RegNum = Variable::NoRegister); | 331 int32_t RegNum = Variable::NoRegister); |
314 Variable *makeVectorOfFabsMask(Type Ty, | 332 Variable *makeVectorOfFabsMask(Type Ty, |
315 int32_t RegNum = Variable::NoRegister); | 333 int32_t RegNum = Variable::NoRegister); |
316 /// @} | 334 /// @} |
317 | 335 |
318 /// Return a memory operand corresponding to a stack allocated Variable. | 336 /// Return a memory operand corresponding to a stack allocated Variable. |
319 typename Traits::X86OperandMem * | 337 X86OperandMem *getMemoryOperandForStackSlot(Type Ty, Variable *Slot, |
320 getMemoryOperandForStackSlot(Type Ty, Variable *Slot, uint32_t Offset = 0); | 338 uint32_t Offset = 0); |
321 | 339 |
322 void | 340 void |
323 makeRandomRegisterPermutation(llvm::SmallVectorImpl<int32_t> &Permutation, | 341 makeRandomRegisterPermutation(llvm::SmallVectorImpl<int32_t> &Permutation, |
324 const llvm::SmallBitVector &ExcludeRegisters, | 342 const llvm::SmallBitVector &ExcludeRegisters, |
325 uint64_t Salt) const override; | 343 uint64_t Salt) const override; |
326 | 344 |
327 /// The following are helpers that insert lowered x86 instructions with | 345 /// The following are helpers that insert lowered x86 instructions with |
328 /// minimal syntactic overhead, so that the lowering code can look as close to | 346 /// minimal syntactic overhead, so that the lowering code can look as close to |
329 /// assembly as practical. | 347 /// assembly as practical. |
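For instance, lowering "Dest = Src0 + Src1" for i32 with these emitters reads nearly one-to-one against the assembly it produces (a hedged illustration, not Subzero's actual lowerArithmetic; `_mov` is declared in an elided portion of this class):

```cpp
void lowerAddSketch(Variable *Dest, Operand *Src0, Operand *Src1) {
  Variable *T = makeReg(IceType_i32); // the result goes through a scratch reg
  _mov(T, legalize(Src0));            // mov t, src0
  _add(T, legalize(Src1));            // add t, src1
  _mov(Dest, T);                      // mov dest, t
}
```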
330 void _adc(Variable *Dest, Operand *Src0) { | 348 void _adc(Variable *Dest, Operand *Src0) { |
331 Context.insert<typename Traits::Insts::Adc>(Dest, Src0); | 349 Context.insert<typename Traits::Insts::Adc>(Dest, Src0); |
332 } | 350 } |
333 void _adc_rmw(typename Traits::X86OperandMem *DestSrc0, Operand *Src1) { | 351 void _adc_rmw(X86OperandMem *DestSrc0, Operand *Src1) { |
334 Context.insert<typename Traits::Insts::AdcRMW>(DestSrc0, Src1); | 352 Context.insert<typename Traits::Insts::AdcRMW>(DestSrc0, Src1); |
335 } | 353 } |
336 void _add(Variable *Dest, Operand *Src0) { | 354 void _add(Variable *Dest, Operand *Src0) { |
337 Context.insert<typename Traits::Insts::Add>(Dest, Src0); | 355 Context.insert<typename Traits::Insts::Add>(Dest, Src0); |
338 } | 356 } |
339 void _add_rmw(typename Traits::X86OperandMem *DestSrc0, Operand *Src1) { | 357 void _add_rmw(X86OperandMem *DestSrc0, Operand *Src1) { |
340 Context.insert<typename Traits::Insts::AddRMW>(DestSrc0, Src1); | 358 Context.insert<typename Traits::Insts::AddRMW>(DestSrc0, Src1); |
341 } | 359 } |
342 void _addps(Variable *Dest, Operand *Src0) { | 360 void _addps(Variable *Dest, Operand *Src0) { |
343 Context.insert<typename Traits::Insts::Addps>(Dest, Src0); | 361 Context.insert<typename Traits::Insts::Addps>(Dest, Src0); |
344 } | 362 } |
345 void _addss(Variable *Dest, Operand *Src0) { | 363 void _addss(Variable *Dest, Operand *Src0) { |
346 Context.insert<typename Traits::Insts::Addss>(Dest, Src0); | 364 Context.insert<typename Traits::Insts::Addss>(Dest, Src0); |
347 } | 365 } |
348 void _and(Variable *Dest, Operand *Src0) { | 366 void _and(Variable *Dest, Operand *Src0) { |
349 Context.insert<typename Traits::Insts::And>(Dest, Src0); | 367 Context.insert<typename Traits::Insts::And>(Dest, Src0); |
350 } | 368 } |
351 void _andnps(Variable *Dest, Operand *Src0) { | 369 void _andnps(Variable *Dest, Operand *Src0) { |
352 Context.insert<typename Traits::Insts::Andnps>(Dest, Src0); | 370 Context.insert<typename Traits::Insts::Andnps>(Dest, Src0); |
353 } | 371 } |
354 void _andps(Variable *Dest, Operand *Src0) { | 372 void _andps(Variable *Dest, Operand *Src0) { |
355 Context.insert<typename Traits::Insts::Andps>(Dest, Src0); | 373 Context.insert<typename Traits::Insts::Andps>(Dest, Src0); |
356 } | 374 } |
357 void _and_rmw(typename Traits::X86OperandMem *DestSrc0, Operand *Src1) { | 375 void _and_rmw(X86OperandMem *DestSrc0, Operand *Src1) { |
358 Context.insert<typename Traits::Insts::AndRMW>(DestSrc0, Src1); | 376 Context.insert<typename Traits::Insts::AndRMW>(DestSrc0, Src1); |
359 } | 377 } |
360 void _blendvps(Variable *Dest, Operand *Src0, Operand *Src1) { | 378 void _blendvps(Variable *Dest, Operand *Src0, Operand *Src1) { |
361 Context.insert<typename Traits::Insts::Blendvps>(Dest, Src0, Src1); | 379 Context.insert<typename Traits::Insts::Blendvps>(Dest, Src0, Src1); |
362 } | 380 } |
363 void _br(typename Traits::Cond::BrCond Condition, CfgNode *TargetTrue, | 381 void _br(BrCond Condition, CfgNode *TargetTrue, CfgNode *TargetFalse) { |
364 CfgNode *TargetFalse) { | 382 Context.insert<InstX86Br>(TargetTrue, TargetFalse, Condition, |
365 Context.insert<typename Traits::Insts::Br>( | 383 InstX86Br::Far); |
366 TargetTrue, TargetFalse, Condition, Traits::Insts::Br::Far); | |
367 } | 384 } |
368 void _br(CfgNode *Target) { | 385 void _br(CfgNode *Target) { |
369 Context.insert<typename Traits::Insts::Br>(Target, Traits::Insts::Br::Far); | 386 Context.insert<InstX86Br>(Target, InstX86Br::Far); |
370 } | 387 } |
371 void _br(typename Traits::Cond::BrCond Condition, CfgNode *Target) { | 388 void _br(BrCond Condition, CfgNode *Target) { |
372 Context.insert<typename Traits::Insts::Br>(Target, Condition, | 389 Context.insert<InstX86Br>(Target, Condition, InstX86Br::Far); |
373 Traits::Insts::Br::Far); | |
374 } | 390 } |
375 void _br(typename Traits::Cond::BrCond Condition, | 391 void _br(BrCond Condition, InstX86Label *Label, |
376 typename Traits::Insts::Label *Label, | 392 typename InstX86Br::Mode Kind = InstX86Br::Near) { |
377 typename Traits::Insts::Br::Mode Kind = Traits::Insts::Br::Near) { | 393 Context.insert<InstX86Br>(Label, Condition, Kind); |
378 Context.insert<typename Traits::Insts::Br>(Label, Condition, Kind); | |
379 } | 394 } |
380 void _bsf(Variable *Dest, Operand *Src0) { | 395 void _bsf(Variable *Dest, Operand *Src0) { |
381 Context.insert<typename Traits::Insts::Bsf>(Dest, Src0); | 396 Context.insert<typename Traits::Insts::Bsf>(Dest, Src0); |
382 } | 397 } |
383 void _bsr(Variable *Dest, Operand *Src0) { | 398 void _bsr(Variable *Dest, Operand *Src0) { |
384 Context.insert<typename Traits::Insts::Bsr>(Dest, Src0); | 399 Context.insert<typename Traits::Insts::Bsr>(Dest, Src0); |
385 } | 400 } |
386 void _bswap(Variable *SrcDest) { | 401 void _bswap(Variable *SrcDest) { |
387 Context.insert<typename Traits::Insts::Bswap>(SrcDest); | 402 Context.insert<typename Traits::Insts::Bswap>(SrcDest); |
388 } | 403 } |
389 void _cbwdq(Variable *Dest, Operand *Src0) { | 404 void _cbwdq(Variable *Dest, Operand *Src0) { |
390 Context.insert<typename Traits::Insts::Cbwdq>(Dest, Src0); | 405 Context.insert<typename Traits::Insts::Cbwdq>(Dest, Src0); |
391 } | 406 } |
392 void _cmov(Variable *Dest, Operand *Src0, | 407 void _cmov(Variable *Dest, Operand *Src0, BrCond Condition) { |
393 typename Traits::Cond::BrCond Condition) { | |
394 Context.insert<typename Traits::Insts::Cmov>(Dest, Src0, Condition); | 408 Context.insert<typename Traits::Insts::Cmov>(Dest, Src0, Condition); |
395 } | 409 } |
396 void _cmp(Operand *Src0, Operand *Src1) { | 410 void _cmp(Operand *Src0, Operand *Src1) { |
397 Context.insert<typename Traits::Insts::Icmp>(Src0, Src1); | 411 Context.insert<typename Traits::Insts::Icmp>(Src0, Src1); |
398 } | 412 } |
399 void _cmpps(Variable *Dest, Operand *Src0, | 413 void _cmpps(Variable *Dest, Operand *Src0, CmppsCond Condition) { |
400 typename Traits::Cond::CmppsCond Condition) { | |
401 Context.insert<typename Traits::Insts::Cmpps>(Dest, Src0, Condition); | 414 Context.insert<typename Traits::Insts::Cmpps>(Dest, Src0, Condition); |
402 } | 415 } |
403 void _cmpxchg(Operand *DestOrAddr, Variable *Eax, Variable *Desired, | 416 void _cmpxchg(Operand *DestOrAddr, Variable *Eax, Variable *Desired, |
404 bool Locked) { | 417 bool Locked) { |
405 Context.insert<typename Traits::Insts::Cmpxchg>(DestOrAddr, Eax, Desired, | 418 Context.insert<typename Traits::Insts::Cmpxchg>(DestOrAddr, Eax, Desired, |
406 Locked); | 419 Locked); |
407 // Mark eax as possibly modified by cmpxchg. | 420 // Mark eax as possibly modified by cmpxchg. |
408 Context.insert<InstFakeDef>(Eax, llvm::dyn_cast<Variable>(DestOrAddr)); | 421 Context.insert<InstFakeDef>(Eax, llvm::dyn_cast<Variable>(DestOrAddr)); |
409 _set_dest_redefined(); | 422 _set_dest_redefined(); |
410 Context.insert<InstFakeUse>(Eax); | 423 Context.insert<InstFakeUse>(Eax); |
411 } | 424 } |
412 void _cmpxchg8b(typename Traits::X86OperandMem *Addr, Variable *Edx, | 425 void _cmpxchg8b(X86OperandMem *Addr, Variable *Edx, Variable *Eax, |
413 Variable *Eax, Variable *Ecx, Variable *Ebx, bool Locked) { | 426 Variable *Ecx, Variable *Ebx, bool Locked) { |
414 Context.insert<typename Traits::Insts::Cmpxchg8b>(Addr, Edx, Eax, Ecx, Ebx, | 427 Context.insert<typename Traits::Insts::Cmpxchg8b>(Addr, Edx, Eax, Ecx, Ebx, |
415 Locked); | 428 Locked); |
416 // Mark edx and eax as possibly modified by cmpxchg8b. | 429 // Mark edx and eax as possibly modified by cmpxchg8b. |
417 Context.insert<InstFakeDef>(Edx); | 430 Context.insert<InstFakeDef>(Edx); |
418 _set_dest_redefined(); | 431 _set_dest_redefined(); |
419 Context.insert<InstFakeUse>(Edx); | 432 Context.insert<InstFakeUse>(Edx); |
420 Context.insert<InstFakeDef>(Eax); | 433 Context.insert<InstFakeDef>(Eax); |
421 _set_dest_redefined(); | 434 _set_dest_redefined(); |
422 Context.insert<InstFakeUse>(Eax); | 435 Context.insert<InstFakeUse>(Eax); |
423 } | 436 } |
(...skipping 102 matching lines...)
526 } | 539 } |
527 void _nop(SizeT Variant) { | 540 void _nop(SizeT Variant) { |
528 Context.insert<typename Traits::Insts::Nop>(Variant); | 541 Context.insert<typename Traits::Insts::Nop>(Variant); |
529 } | 542 } |
530 void _or(Variable *Dest, Operand *Src0) { | 543 void _or(Variable *Dest, Operand *Src0) { |
531 Context.insert<typename Traits::Insts::Or>(Dest, Src0); | 544 Context.insert<typename Traits::Insts::Or>(Dest, Src0); |
532 } | 545 } |
533 void _orps(Variable *Dest, Operand *Src0) { | 546 void _orps(Variable *Dest, Operand *Src0) { |
534 Context.insert<typename Traits::Insts::Orps>(Dest, Src0); | 547 Context.insert<typename Traits::Insts::Orps>(Dest, Src0); |
535 } | 548 } |
536 void _or_rmw(typename Traits::X86OperandMem *DestSrc0, Operand *Src1) { | 549 void _or_rmw(X86OperandMem *DestSrc0, Operand *Src1) { |
537 Context.insert<typename Traits::Insts::OrRMW>(DestSrc0, Src1); | 550 Context.insert<typename Traits::Insts::OrRMW>(DestSrc0, Src1); |
538 } | 551 } |
539 void _padd(Variable *Dest, Operand *Src0) { | 552 void _padd(Variable *Dest, Operand *Src0) { |
540 Context.insert<typename Traits::Insts::Padd>(Dest, Src0); | 553 Context.insert<typename Traits::Insts::Padd>(Dest, Src0); |
541 } | 554 } |
542 void _pand(Variable *Dest, Operand *Src0) { | 555 void _pand(Variable *Dest, Operand *Src0) { |
543 Context.insert<typename Traits::Insts::Pand>(Dest, Src0); | 556 Context.insert<typename Traits::Insts::Pand>(Dest, Src0); |
544 } | 557 } |
545 void _pandn(Variable *Dest, Operand *Src0) { | 558 void _pandn(Variable *Dest, Operand *Src0) { |
546 Context.insert<typename Traits::Insts::Pandn>(Dest, Src0); | 559 Context.insert<typename Traits::Insts::Pandn>(Dest, Src0); |
(...skipping 51 matching lines...)
598 } | 611 } |
599 void _rol(Variable *Dest, Operand *Src0) { | 612 void _rol(Variable *Dest, Operand *Src0) { |
600 Context.insert<typename Traits::Insts::Rol>(Dest, Src0); | 613 Context.insert<typename Traits::Insts::Rol>(Dest, Src0); |
601 } | 614 } |
602 void _sar(Variable *Dest, Operand *Src0) { | 615 void _sar(Variable *Dest, Operand *Src0) { |
603 Context.insert<typename Traits::Insts::Sar>(Dest, Src0); | 616 Context.insert<typename Traits::Insts::Sar>(Dest, Src0); |
604 } | 617 } |
605 void _sbb(Variable *Dest, Operand *Src0) { | 618 void _sbb(Variable *Dest, Operand *Src0) { |
606 Context.insert<typename Traits::Insts::Sbb>(Dest, Src0); | 619 Context.insert<typename Traits::Insts::Sbb>(Dest, Src0); |
607 } | 620 } |
608 void _sbb_rmw(typename Traits::X86OperandMem *DestSrc0, Operand *Src1) { | 621 void _sbb_rmw(X86OperandMem *DestSrc0, Operand *Src1) { |
609 Context.insert<typename Traits::Insts::SbbRMW>(DestSrc0, Src1); | 622 Context.insert<typename Traits::Insts::SbbRMW>(DestSrc0, Src1); |
610 } | 623 } |
611 void _setcc(Variable *Dest, typename Traits::Cond::BrCond Condition) { | 624 void _setcc(Variable *Dest, BrCond Condition) { |
612 Context.insert<typename Traits::Insts::Setcc>(Dest, Condition); | 625 Context.insert<typename Traits::Insts::Setcc>(Dest, Condition); |
613 } | 626 } |
614 void _shl(Variable *Dest, Operand *Src0) { | 627 void _shl(Variable *Dest, Operand *Src0) { |
615 Context.insert<typename Traits::Insts::Shl>(Dest, Src0); | 628 Context.insert<typename Traits::Insts::Shl>(Dest, Src0); |
616 } | 629 } |
617 void _shld(Variable *Dest, Variable *Src0, Operand *Src1) { | 630 void _shld(Variable *Dest, Variable *Src0, Operand *Src1) { |
618 Context.insert<typename Traits::Insts::Shld>(Dest, Src0, Src1); | 631 Context.insert<typename Traits::Insts::Shld>(Dest, Src0, Src1); |
619 } | 632 } |
620 void _shr(Variable *Dest, Operand *Src0) { | 633 void _shr(Variable *Dest, Operand *Src0) { |
621 Context.insert<typename Traits::Insts::Shr>(Dest, Src0); | 634 Context.insert<typename Traits::Insts::Shr>(Dest, Src0); |
622 } | 635 } |
623 void _shrd(Variable *Dest, Variable *Src0, Operand *Src1) { | 636 void _shrd(Variable *Dest, Variable *Src0, Operand *Src1) { |
624 Context.insert<typename Traits::Insts::Shrd>(Dest, Src0, Src1); | 637 Context.insert<typename Traits::Insts::Shrd>(Dest, Src0, Src1); |
625 } | 638 } |
626 void _shufps(Variable *Dest, Operand *Src0, Operand *Src1) { | 639 void _shufps(Variable *Dest, Operand *Src0, Operand *Src1) { |
627 Context.insert<typename Traits::Insts::Shufps>(Dest, Src0, Src1); | 640 Context.insert<typename Traits::Insts::Shufps>(Dest, Src0, Src1); |
628 } | 641 } |
629 void _sqrtss(Variable *Dest, Operand *Src0) { | 642 void _sqrtss(Variable *Dest, Operand *Src0) { |
630 Context.insert<typename Traits::Insts::Sqrtss>(Dest, Src0); | 643 Context.insert<typename Traits::Insts::Sqrtss>(Dest, Src0); |
631 } | 644 } |
632 void _store(Operand *Value, typename Traits::X86Operand *Mem) { | 645 void _store(Operand *Value, X86Operand *Mem) { |
633 Context.insert<typename Traits::Insts::Store>(Value, Mem); | 646 Context.insert<typename Traits::Insts::Store>(Value, Mem); |
634 } | 647 } |
635 void _storep(Variable *Value, typename Traits::X86OperandMem *Mem) { | 648 void _storep(Variable *Value, X86OperandMem *Mem) { |
636 Context.insert<typename Traits::Insts::StoreP>(Value, Mem); | 649 Context.insert<typename Traits::Insts::StoreP>(Value, Mem); |
637 } | 650 } |
638 void _storeq(Variable *Value, typename Traits::X86OperandMem *Mem) { | 651 void _storeq(Variable *Value, X86OperandMem *Mem) { |
639 Context.insert<typename Traits::Insts::StoreQ>(Value, Mem); | 652 Context.insert<typename Traits::Insts::StoreQ>(Value, Mem); |
640 } | 653 } |
641 void _sub(Variable *Dest, Operand *Src0) { | 654 void _sub(Variable *Dest, Operand *Src0) { |
642 Context.insert<typename Traits::Insts::Sub>(Dest, Src0); | 655 Context.insert<typename Traits::Insts::Sub>(Dest, Src0); |
643 } | 656 } |
644 void _sub_rmw(typename Traits::X86OperandMem *DestSrc0, Operand *Src1) { | 657 void _sub_rmw(X86OperandMem *DestSrc0, Operand *Src1) { |
645 Context.insert<typename Traits::Insts::SubRMW>(DestSrc0, Src1); | 658 Context.insert<typename Traits::Insts::SubRMW>(DestSrc0, Src1); |
646 } | 659 } |
647 void _subps(Variable *Dest, Operand *Src0) { | 660 void _subps(Variable *Dest, Operand *Src0) { |
648 Context.insert<typename Traits::Insts::Subps>(Dest, Src0); | 661 Context.insert<typename Traits::Insts::Subps>(Dest, Src0); |
649 } | 662 } |
650 void _subss(Variable *Dest, Operand *Src0) { | 663 void _subss(Variable *Dest, Operand *Src0) { |
651 Context.insert<typename Traits::Insts::Subss>(Dest, Src0); | 664 Context.insert<typename Traits::Insts::Subss>(Dest, Src0); |
652 } | 665 } |
653 void _test(Operand *Src0, Operand *Src1) { | 666 void _test(Operand *Src0, Operand *Src1) { |
654 Context.insert<typename Traits::Insts::Test>(Src0, Src1); | 667 Context.insert<typename Traits::Insts::Test>(Src0, Src1); |
(...skipping 17 matching lines...)
672 Context.insert<InstFakeDef>(Src, llvm::dyn_cast<Variable>(Dest)); | 685 Context.insert<InstFakeDef>(Src, llvm::dyn_cast<Variable>(Dest)); |
673 _set_dest_redefined(); | 686 _set_dest_redefined(); |
674 Context.insert<InstFakeUse>(Src); | 687 Context.insert<InstFakeUse>(Src); |
675 } | 688 } |
676 void _xor(Variable *Dest, Operand *Src0) { | 689 void _xor(Variable *Dest, Operand *Src0) { |
677 Context.insert<typename Traits::Insts::Xor>(Dest, Src0); | 690 Context.insert<typename Traits::Insts::Xor>(Dest, Src0); |
678 } | 691 } |
679 void _xorps(Variable *Dest, Operand *Src0) { | 692 void _xorps(Variable *Dest, Operand *Src0) { |
680 Context.insert<typename Traits::Insts::Xorps>(Dest, Src0); | 693 Context.insert<typename Traits::Insts::Xorps>(Dest, Src0); |
681 } | 694 } |
682 void _xor_rmw(typename Traits::X86OperandMem *DestSrc0, Operand *Src1) { | 695 void _xor_rmw(X86OperandMem *DestSrc0, Operand *Src1) { |
683 Context.insert<typename Traits::Insts::XorRMW>(DestSrc0, Src1); | 696 Context.insert<typename Traits::Insts::XorRMW>(DestSrc0, Src1); |
684 } | 697 } |
685 | 698 |
686 void _iaca_start() { | 699 void _iaca_start() { |
687 if (!BuildDefs::minimal()) | 700 if (!BuildDefs::minimal()) |
688 Context.insert<typename Traits::Insts::IacaStart>(); | 701 Context.insert<typename Traits::Insts::IacaStart>(); |
689 } | 702 } |
690 void _iaca_end() { | 703 void _iaca_end() { |
691 if (!BuildDefs::minimal()) | 704 if (!BuildDefs::minimal()) |
692 Context.insert<typename Traits::Insts::IacaEnd>(); | 705 Context.insert<typename Traits::Insts::IacaEnd>(); |
(...skipping 17 matching lines...)
710 Lowering = nullptr; | 723 Lowering = nullptr; |
711 } | 724 } |
712 | 725 |
713 private: | 726 private: |
714 TargetX86Base *Lowering; | 727 TargetX86Base *Lowering; |
715 }; | 728 }; |
716 | 729 |
717 bool optimizeScalarMul(Variable *Dest, Operand *Src0, int32_t Src1); | 730 bool optimizeScalarMul(Variable *Dest, Operand *Src0, int32_t Src1); |
718 void findRMW(); | 731 void findRMW(); |
719 | 732 |
720 typename Traits::InstructionSet InstructionSet = | 733 InstructionSetEnum InstructionSet = Traits::InstructionSet::Begin; |
721 Traits::InstructionSet::Begin; | |
722 bool IsEbpBasedFrame = false; | 734 bool IsEbpBasedFrame = false; |
723 bool NeedsStackAlignment = false; | 735 bool NeedsStackAlignment = false; |
724 size_t SpillAreaSizeBytes = 0; | 736 size_t SpillAreaSizeBytes = 0; |
725 size_t FixedAllocaSizeBytes = 0; | 737 size_t FixedAllocaSizeBytes = 0; |
726 size_t FixedAllocaAlignBytes = 0; | 738 size_t FixedAllocaAlignBytes = 0; |
727 bool PrologEmitsFixedAllocas = false; | 739 bool PrologEmitsFixedAllocas = false; |
728 uint32_t MaxOutArgsSizeBytes = 0; | 740 uint32_t MaxOutArgsSizeBytes = 0; |
729 static std::array<llvm::SmallBitVector, RCX86_NUM> TypeToRegisterSet; | 741 static std::array<llvm::SmallBitVector, RCX86_NUM> TypeToRegisterSet; |
730 static std::array<llvm::SmallBitVector, Traits::RegisterSet::Reg_NUM> | 742 static std::array<llvm::SmallBitVector, Traits::RegisterSet::Reg_NUM> |
731 RegisterAliases; | 743 RegisterAliases; |
732 static llvm::SmallBitVector ScratchRegs; | 744 static llvm::SmallBitVector ScratchRegs; |
733 llvm::SmallBitVector RegsUsed; | 745 llvm::SmallBitVector RegsUsed; |
734 std::array<VarList, IceType_NUM> PhysicalRegisters; | 746 std::array<VarList, IceType_NUM> PhysicalRegisters; |
735 | 747 |
736 /// Randomize a given immediate operand. | 748 /// Randomize a given immediate operand. |
737 Operand *randomizeOrPoolImmediate(Constant *Immediate, | 749 Operand *randomizeOrPoolImmediate(Constant *Immediate, |
738 int32_t RegNum = Variable::NoRegister); | 750 int32_t RegNum = Variable::NoRegister); |
739 typename Traits::X86OperandMem * | 751 X86OperandMem * |
740 randomizeOrPoolImmediate(typename Traits::X86OperandMem *MemOperand, | 752 randomizeOrPoolImmediate(X86OperandMem *MemOperand, |
741 int32_t RegNum = Variable::NoRegister); | 753 int32_t RegNum = Variable::NoRegister); |
742 bool RandomizationPoolingPaused = false; | 754 bool RandomizationPoolingPaused = false; |
743 | 755 |
744 private: | 756 private: |
745 /// dispatchToConcrete is the template voodoo that allows TargetX86Base to | 757 /// dispatchToConcrete is the template voodoo that allows TargetX86Base to |
746 /// invoke methods in Machine (which inherits from TargetX86Base) without | 758 /// invoke methods in ConcreteTarget (which inherits from TargetX86Base) without |
747 /// having to rely on virtual method calls. There are two overloads, one for | 759 /// having to rely on virtual method calls. There are two overloads, one for |
748 /// non-void types, and one for void types. We need this because, for non-void | 760 /// non-void types, and one for void types. We need this because, for non-void |
749 /// types, we need to return the method result, whereas for void, we don't. | 761 /// types, we need to return the method result, whereas for void, we don't. |
750 /// While it is true that the code compiles without the void "version", there | 762 /// While it is true that the code compiles without the void "version", there |
751 /// used to be a time when compilers would reject such code. | 763 /// used to be a time when compilers would reject such code. |
752 /// | 764 /// |
753 /// This machinery is far from perfect. Note that, in particular, the | 765 /// This machinery is far from perfect. Note that, in particular, the |
754 /// arguments provided to dispatchToConcrete() need to match the arguments for | 766 /// arguments provided to dispatchToConcrete() need to match the arguments for |
755 /// Method **exactly** (i.e., no argument promotion is performed.) | 767 /// Method **exactly** (i.e., no argument promotion is performed.) |
756 template <typename Ret, typename... Args> | 768 template <typename Ret, typename... Args> |
757 typename std::enable_if<!std::is_void<Ret>::value, Ret>::type | 769 typename std::enable_if<!std::is_void<Ret>::value, Ret>::type |
758 dispatchToConcrete(Ret (Machine::*Method)(Args...), Args &&... args) { | 770 dispatchToConcrete(Ret (ConcreteTarget::*Method)(Args...), Args &&... args) { |
759 return (static_cast<Machine *>(this)->*Method)(std::forward<Args>(args)...); | 771 return (static_cast<ConcreteTarget *>(this)->*Method)( |
| 772 std::forward<Args>(args)...); |
760 } | 773 } |
761 | 774 |
762 template <typename... Args> | 775 template <typename... Args> |
763 void dispatchToConcrete(void (Machine::*Method)(Args...), Args &&... args) { | 776 void dispatchToConcrete(void (ConcreteTarget::*Method)(Args...), |
764 (static_cast<Machine *>(this)->*Method)(std::forward<Args>(args)...); | 777 Args &&... args) { |
| 778 (static_cast<ConcreteTarget *>(this)->*Method)(std::forward<Args>(args)...); |
765 } | 779 } |
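A hedged example of calling the dispatcher from shared code (lowerIndirectJump mirrors what the concrete Subzero targets provide, but treat the call site as illustrative). The rvalue matters: Args is deduced from both the member pointer and the trailing pack, so an lvalue argument would deduce conflicting types and fail to compile, which is the "match exactly" caveat above.

```cpp
void jumpThroughRegisterSketch(Variable *JumpTarget) {
  // Statically forwards into the derived target; no vtable is consulted.
  // std::move keeps the deduced pack type identical to the parameter type.
  dispatchToConcrete(&ConcreteTarget::lowerIndirectJump, std::move(JumpTarget));
}
```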
766 | 780 |
767 void lowerShift64(InstArithmetic::OpKind Op, Operand *Src0Lo, Operand *Src0Hi, | 781 void lowerShift64(InstArithmetic::OpKind Op, Operand *Src0Lo, Operand *Src0Hi, |
768 Operand *Src1Lo, Variable *DestLo, Variable *DestHi); | 782 Operand *Src1Lo, Variable *DestLo, Variable *DestHi); |
769 | 783 |
770 /// Emit the code for a combined operation and consumer instruction, or set | 784 /// Emit the code for a combined operation and consumer instruction, or set |
771 /// the destination variable of the operation if Consumer == nullptr. | 785 /// the destination variable of the operation if Consumer == nullptr. |
772 void lowerIcmpAndConsumer(const InstIcmp *Icmp, const Inst *Consumer); | 786 void lowerIcmpAndConsumer(const InstIcmp *Icmp, const Inst *Consumer); |
773 void lowerFcmpAndConsumer(const InstFcmp *Fcmp, const Inst *Consumer); | 787 void lowerFcmpAndConsumer(const InstFcmp *Fcmp, const Inst *Consumer); |
774 void lowerArithAndConsumer(const InstArithmetic *Arith, const Inst *Consumer); | 788 void lowerArithAndConsumer(const InstArithmetic *Arith, const Inst *Consumer); |
775 | 789 |
776 /// Emit a setcc instruction if Consumer == nullptr; otherwise emit a | 790 /// Emit a setcc instruction if Consumer == nullptr; otherwise emit a |
777 /// specialized version of Consumer. | 791 /// specialized version of Consumer. |
778 void setccOrConsumer(typename Traits::Cond::BrCond Condition, Variable *Dest, | 792 void setccOrConsumer(BrCond Condition, Variable *Dest, const Inst *Consumer); |
779 const Inst *Consumer); | |
780 | 793 |
781 /// Emit a mov [1|0] instruction if Consumer == nullptr; otherwise emit a | 794 /// Emit a mov [1|0] instruction if Consumer == nullptr; otherwise emit a |
782 /// specialized version of Consumer. | 795 /// specialized version of Consumer. |
783 void movOrConsumer(bool IcmpResult, Variable *Dest, const Inst *Consumer); | 796 void movOrConsumer(bool IcmpResult, Variable *Dest, const Inst *Consumer); |
784 | 797 |
785 /// Emit the code for instructions with a vector type. | 798 /// Emit the code for instructions with a vector type. |
786 void lowerIcmpVector(const InstIcmp *Icmp); | 799 void lowerIcmpVector(const InstIcmp *Icmp); |
787 void lowerFcmpVector(const InstFcmp *Icmp); | 800 void lowerFcmpVector(const InstFcmp *Icmp); |
788 void lowerSelectVector(const InstSelect *Inst); | 801 void lowerSelectVector(const InstSelect *Inst); |
789 | 802 |
790 /// Helpers for select lowering. | 803 /// Helpers for select lowering. |
791 void lowerSelectMove(Variable *Dest, typename Traits::Cond::BrCond Cond, | 804 void lowerSelectMove(Variable *Dest, BrCond Cond, Operand *SrcT, |
792 Operand *SrcT, Operand *SrcF); | 805 Operand *SrcF); |
793 void lowerSelectIntMove(Variable *Dest, typename Traits::Cond::BrCond Cond, | 806 void lowerSelectIntMove(Variable *Dest, BrCond Cond, Operand *SrcT, |
794 Operand *SrcT, Operand *SrcF); | 807 Operand *SrcF); |
795 /// Generic helper to move an arbitrary type from Src to Dest. | 808 /// Generic helper to move an arbitrary type from Src to Dest. |
796 void lowerMove(Variable *Dest, Operand *Src, bool IsRedefinition); | 809 void lowerMove(Variable *Dest, Operand *Src, bool IsRedefinition); |
797 | 810 |
798 /// Optimizations for idiom recognition. | 811 /// Optimizations for idiom recognition. |
799 bool lowerOptimizeFcmpSelect(const InstFcmp *Fcmp, const InstSelect *Select); | 812 bool lowerOptimizeFcmpSelect(const InstFcmp *Fcmp, const InstSelect *Select); |
800 | 813 |
801 /// Complains loudly if invoked because the cpu can handle 64-bit types | 814 /// Complains loudly if invoked because the cpu can handle 64-bit types |
802 /// natively. | 815 /// natively. |
803 template <typename T = Traits> | 816 template <typename T = Traits> |
804 typename std::enable_if<T::Is64Bit, void>::type lowerIcmp64(const InstIcmp *, | 817 typename std::enable_if<T::Is64Bit, void>::type lowerIcmp64(const InstIcmp *, |
805 const Inst *) { | 818 const Inst *) { |
806 llvm::report_fatal_error( | 819 llvm::report_fatal_error( |
807 "Hey, yo! This is x86-64. Watcha doin'? (lowerIcmp64)"); | 820 "Hey, yo! This is x86-64. Watcha doin'? (lowerIcmp64)"); |
808 } | 821 } |
809 /// x86lowerIcmp64 handles 64-bit icmp lowering. | 822 /// x86lowerIcmp64 handles 64-bit icmp lowering. |
810 template <typename T = Traits> | 823 template <typename T = Traits> |
811 typename std::enable_if<!T::Is64Bit, void>::type | 824 typename std::enable_if<!T::Is64Bit, void>::type |
812 lowerIcmp64(const InstIcmp *Icmp, const Inst *Consumer); | 825 lowerIcmp64(const InstIcmp *Icmp, const Inst *Consumer); |
813 | 826 |
814 BoolFolding FoldingInfo; | 827 BoolFolding FoldingInfo; |
815 }; | 828 }; |
816 } // end of namespace X86Internal | 829 } // end of namespace X86NAMESPACE |
817 } // end of namespace Ice | 830 } // end of namespace Ice |
818 | 831 |
819 #include "IceTargetLoweringX86BaseImpl.h" | 832 #include "IceTargetLoweringX86BaseImpl.h" |
820 | 833 |
821 #endif // SUBZERO_SRC_ICETARGETLOWERINGX86BASE_H | 834 #endif // SUBZERO_SRC_ICETARGETLOWERINGX86BASE_H |