OLD | NEW |
1 //===-- X86RegisterInfo.cpp - X86 Register Information --------------------===// | 1 //===-- X86RegisterInfo.cpp - X86 Register Information --------------------===// |
2 // | 2 // |
3 // The LLVM Compiler Infrastructure | 3 // The LLVM Compiler Infrastructure |
4 // | 4 // |
5 // This file is distributed under the University of Illinois Open Source | 5 // This file is distributed under the University of Illinois Open Source |
6 // License. See LICENSE.TXT for details. | 6 // License. See LICENSE.TXT for details. |
7 // | 7 // |
8 //===----------------------------------------------------------------------===// | 8 //===----------------------------------------------------------------------===// |
9 // | 9 // |
10 // This file contains the X86 implementation of the TargetRegisterInfo class. | 10 // This file contains the X86 implementation of the TargetRegisterInfo class. |
11 // This file is responsible for the frame pointer elimination optimization | 11 // This file is responsible for the frame pointer elimination optimization |
12 // on X86. | 12 // on X86. |
13 // | 13 // |
14 //===----------------------------------------------------------------------===// | 14 //===----------------------------------------------------------------------===// |
15 | 15 |
16 #include "X86RegisterInfo.h" | 16 #include "X86RegisterInfo.h" |
17 #include "X86InstrBuilder.h" | 17 #include "X86InstrBuilder.h" |
18 #include "X86MachineFunctionInfo.h" | 18 #include "X86MachineFunctionInfo.h" |
| 19 #include "X86NaClDecls.h" // @LOCALMOD |
19 #include "X86Subtarget.h" | 20 #include "X86Subtarget.h" |
20 #include "X86TargetMachine.h" | 21 #include "X86TargetMachine.h" |
21 #include "llvm/ADT/BitVector.h" | 22 #include "llvm/ADT/BitVector.h" |
22 #include "llvm/ADT/STLExtras.h" | 23 #include "llvm/ADT/STLExtras.h" |
23 #include "llvm/CodeGen/MachineFrameInfo.h" | 24 #include "llvm/CodeGen/MachineFrameInfo.h" |
24 #include "llvm/CodeGen/MachineFunction.h" | 25 #include "llvm/CodeGen/MachineFunction.h" |
25 #include "llvm/CodeGen/MachineFunctionPass.h" | 26 #include "llvm/CodeGen/MachineFunctionPass.h" |
26 #include "llvm/CodeGen/MachineInstrBuilder.h" | 27 #include "llvm/CodeGen/MachineInstrBuilder.h" |
27 #include "llvm/CodeGen/MachineModuleInfo.h" | 28 #include "llvm/CodeGen/MachineModuleInfo.h" |
28 #include "llvm/CodeGen/MachineRegisterInfo.h" | 29 #include "llvm/CodeGen/MachineRegisterInfo.h" |
(...skipping 127 matching lines...)
156 } while (Super); | 157 } while (Super); |
157 return RC; | 158 return RC; |
158 } | 159 } |
159 | 160 |
160 const TargetRegisterClass * | 161 const TargetRegisterClass * |
161 X86RegisterInfo::getPointerRegClass(const MachineFunction &MF, | 162 X86RegisterInfo::getPointerRegClass(const MachineFunction &MF, |
162 unsigned Kind) const { | 163 unsigned Kind) const { |
163 switch (Kind) { | 164 switch (Kind) { |
164 default: llvm_unreachable("Unexpected Kind in getPointerRegClass!"); | 165 default: llvm_unreachable("Unexpected Kind in getPointerRegClass!"); |
165 case 0: // Normal GPRs. | 166 case 0: // Normal GPRs. |
166 if (Subtarget.isTarget64BitLP64()) | 167 // @LOCALMOD -- NaCl is ILP32, but 32-bit pointers become 64-bit |
| 168 // after sandboxing (clobbers a full 64-bit reg). |
| 169 if (Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64()) |
167 return &X86::GR64RegClass; | 170 return &X86::GR64RegClass; |
168 return &X86::GR32RegClass; | 171 return &X86::GR32RegClass; |
169 case 1: // Normal GPRs except the stack pointer (for encoding reasons). | 172 case 1: // Normal GPRs except the stack pointer (for encoding reasons). |
170 if (Subtarget.isTarget64BitLP64()) | 173 // @LOCALMOD -- NaCl is ILP32, but 32-bit pointers become 64-bit |
| 174 // after sandboxing (clobbers a full 64-bit reg). |
| 175 if (Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64()) |
171 return &X86::GR64_NOSPRegClass; | 176 return &X86::GR64_NOSPRegClass; |
172 return &X86::GR32_NOSPRegClass; | 177 return &X86::GR32_NOSPRegClass; |
173 case 2: // Available for tailcall (not callee-saved GPRs). | 178 case 2: // Available for tailcall (not callee-saved GPRs). |
174 if (Subtarget.isTargetWin64()) | 179 if (Subtarget.isTargetWin64()) |
175 return &X86::GR64_TCW64RegClass; | 180 return &X86::GR64_TCW64RegClass; |
176 else if (Subtarget.is64Bit()) | 181 else if (Subtarget.is64Bit()) |
177 return &X86::GR64_TCRegClass; | 182 return &X86::GR64_TCRegClass; |
178 | 183 |
179 const Function *F = MF.getFunction(); | 184 const Function *F = MF.getFunction(); |
180 bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false); | 185 bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false); |
(...skipping 69 matching lines...)
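For context on the @LOCALMOD comment above: under the NaCl x86-64 sandbox the program sees ILP32 pointers, but every memory access is rebased against a 64-bit sandbox base register, so the register holding the pointer ends up carrying a full 64-bit value and must come from a 64-bit register class. A minimal sketch of that address computation (illustrative only; the function and parameter names are hypothetical, not part of this patch):

#include <cstdint>

// Model of a sandboxed access: the 32-bit pointer is zero-extended and added
// to the 64-bit sandbox base (conventionally kept in %r15), clobbering the
// full 64-bit register -- which is why getPointerRegClass() hands out GR64
// classes for NaCl64.
uint64_t SandboxedAddress(uint64_t SandboxBase, uint32_t Ptr32) {
  return SandboxBase + static_cast<uint64_t>(Ptr32);
}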
250 } | 255 } |
251 case CallingConv::Cold: | 256 case CallingConv::Cold: |
252 if (Is64Bit) | 257 if (Is64Bit) |
253 return CSR_64_MostRegs_SaveList; | 258 return CSR_64_MostRegs_SaveList; |
254 break; | 259 break; |
255 default: | 260 default: |
256 break; | 261 break; |
257 } | 262 } |
258 | 263 |
259 bool CallsEHReturn = MF->getMMI().callsEHReturn(); | 264 bool CallsEHReturn = MF->getMMI().callsEHReturn(); |
| 265 bool IsNaCl = Subtarget.isTargetNaCl(); // @LOCALMOD |
260 if (Is64Bit) { | 266 if (Is64Bit) { |
261 if (IsWin64) | 267 if (IsWin64) |
262 return CSR_Win64_SaveList; | 268 return CSR_Win64_SaveList; |
| 269 // @LOCALMOD-BEGIN |
| 270 if (IsNaCl) { |
| 271 if (CallsEHReturn) |
| 272 return CSR_NaCl64EHRet_SaveList; |
| 273 return CSR_NaCl64_SaveList; |
| 274 } |
| 275 // @LOCALMOD-END |
263 if (CallsEHReturn) | 276 if (CallsEHReturn) |
264 return CSR_64EHRet_SaveList; | 277 return CSR_64EHRet_SaveList; |
265 return CSR_64_SaveList; | 278 return CSR_64_SaveList; |
266 } | 279 } |
267 if (CallsEHReturn) | 280 if (CallsEHReturn) |
268 return CSR_32EHRet_SaveList; | 281 return CSR_32EHRet_SaveList; |
269 return CSR_32_SaveList; | 282 return CSR_32_SaveList; |
270 } | 283 } |
271 | 284 |
272 const uint32_t* | 285 const uint32_t* |
(...skipping 117 matching lines...)
390 Reserved.set(*AI); | 403 Reserved.set(*AI); |
391 } | 404 } |
392 } | 405 } |
393 if (!Is64Bit || !Subtarget.hasAVX512()) { | 406 if (!Is64Bit || !Subtarget.hasAVX512()) { |
394 for (unsigned n = 16; n != 32; ++n) { | 407 for (unsigned n = 16; n != 32; ++n) { |
395 for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI) | 408 for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI) |
396 Reserved.set(*AI); | 409 Reserved.set(*AI); |
397 } | 410 } |
398 } | 411 } |
399 | 412 |
| 413 // @LOCALMOD-START |
| 414 const X86Subtarget& Subtarget = MF.getTarget().getSubtarget<X86Subtarget>(); |
| 415 const bool RestrictR15 = FlagRestrictR15; |
| 416 assert(FlagUseZeroBasedSandbox || RestrictR15); |
| 417 if (Subtarget.isTargetNaCl64()) { |
| 418 if (RestrictR15) { |
| 419 Reserved.set(X86::R15); |
| 420 Reserved.set(X86::R15D); |
| 421 Reserved.set(X86::R15W); |
| 422 Reserved.set(X86::R15B); |
| 423 } |
| 424 Reserved.set(X86::RBP); |
| 425 Reserved.set(X86::EBP); |
| 426 Reserved.set(X86::BP); |
| 427 Reserved.set(X86::BPL); |
| 428 const bool RestrictR11 = FlagHideSandboxBase && !FlagUseZeroBasedSandbox; |
| 429 if (RestrictR11) { |
| 430 // Restrict r11 so that it can be used for indirect jump |
| 431 // sequences that don't leak the sandbox base address onto the |
| 432 // stack. |
| 433 Reserved.set(X86::R11); |
| 434 Reserved.set(X86::R11D); |
| 435 Reserved.set(X86::R11W); |
| 436 Reserved.set(X86::R11B); |
| 437 } |
| 438 } |
| 439 // @LOCALMOD-END |
| 440 |
400 return Reserved; | 441 return Reserved; |
401 } | 442 } |
402 | 443 |
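As a quick illustration of the machinery the reserved-register @LOCALMOD plugs into, a hypothetical caller (not part of this file) could verify that the NaCl-reserved registers are excluded from allocation like this:

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Target/TargetRegisterInfo.h"

// Returns true if Reg is in the function's reserved set; with the change
// above this holds for X86::R15 and X86::RBP (and optionally X86::R11) on
// x86-64 NaCl targets.
static bool isReservedHere(const llvm::MachineFunction &MF, unsigned Reg) {
  const llvm::TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return TRI->getReservedRegs(MF).test(Reg);
}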
403 //===----------------------------------------------------------------------===// | 444 //===----------------------------------------------------------------------===// |
404 // Stack Frame Processing methods | 445 // Stack Frame Processing methods |
405 //===----------------------------------------------------------------------===// | 446 //===----------------------------------------------------------------------===// |
406 | 447 |
407 bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const { | 448 bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const { |
408 const MachineFrameInfo *MFI = MF.getFrameInfo(); | 449 const MachineFrameInfo *MFI = MF.getFrameInfo(); |
409 | 450 |
(...skipping 118 matching lines...)
528 (uint64_t)MI.getOperand(FIOperandNum+3).getOffset(); | 569 (uint64_t)MI.getOperand(FIOperandNum+3).getOffset(); |
529 MI.getOperand(FIOperandNum + 3).setOffset(Offset); | 570 MI.getOperand(FIOperandNum + 3).setOffset(Offset); |
530 } | 571 } |
531 } | 572 } |
532 | 573 |
533 unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const { | 574 unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const { |
534 const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); | 575 const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); |
535 return TFI->hasFP(MF) ? FramePtr : StackPtr; | 576 return TFI->hasFP(MF) ? FramePtr : StackPtr; |
536 } | 577 } |
537 | 578 |
| 579 unsigned X86RegisterInfo::getPtrSizedFrameRegister( |
| 580 const MachineFunction &MF) const { |
| 581 unsigned FrameReg = getFrameRegister(MF); |
| 582 if (Subtarget.isTarget64BitILP32()) |
| 583 FrameReg = getX86SubSuperRegister(FrameReg, MVT::i32, false); |
| 584 return FrameReg; |
| 585 } |
| 586 |
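The new getPtrSizedFrameRegister() narrows the frame register to its 32-bit view on ILP32-on-64 configurations (x32, and NaCl64 where the subtarget reports ILP32). A rough usage sketch with a hypothetical helper name:

// On an LP64 target both calls yield RBP/RSP; on an ILP32-on-64 target the
// second returns the 32-bit alias (EBP/ESP) for pointer-sized frame uses.
static unsigned pointerSizedFrameReg(const llvm::X86RegisterInfo &TRI,
                                     const llvm::MachineFunction &MF) {
  unsigned Full = TRI.getFrameRegister(MF);          // e.g. X86::RBP
  unsigned Ptr  = TRI.getPtrSizedFrameRegister(MF);  // e.g. X86::EBP on x32
  (void)Full;
  return Ptr;
}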
538 namespace llvm { | 587 namespace llvm { |
539 unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT, | 588 unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT, |
540 bool High) { | 589 bool High) { |
541 switch (VT) { | 590 switch (VT) { |
542 default: llvm_unreachable("Unexpected VT"); | 591 default: llvm_unreachable("Unexpected VT"); |
543 case MVT::i8: | 592 case MVT::i8: |
544 if (High) { | 593 if (High) { |
545 switch (Reg) { | 594 switch (Reg) { |
546 default: return getX86SubSuperRegister(Reg, MVT::i64); | 595 default: return getX86SubSuperRegister(Reg, MVT::i64); |
547 case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI: | 596 case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI: |
(...skipping 114 matching lines...)
662 case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11: | 711 case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11: |
663 return X86::R11D; | 712 return X86::R11D; |
664 case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12: | 713 case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12: |
665 return X86::R12D; | 714 return X86::R12D; |
666 case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13: | 715 case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13: |
667 return X86::R13D; | 716 return X86::R13D; |
668 case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14: | 717 case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14: |
669 return X86::R14D; | 718 return X86::R14D; |
670 case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15: | 719 case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15: |
671 return X86::R15D; | 720 return X86::R15D; |
| 721 // @LOCALMOD. TODO: possibly revert this after LEA .td fixes |
| 722 case X86::EIP: case X86::RIP: |
| 723 return X86::EIP; |
672 } | 724 } |
673 case MVT::i64: | 725 case MVT::i64: |
674 switch (Reg) { | 726 switch (Reg) { |
675 default: llvm_unreachable("Unexpected register"); | 727 default: llvm_unreachable("Unexpected register"); |
676 case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX: | 728 case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX: |
677 return X86::RAX; | 729 return X86::RAX; |
678 case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX: | 730 case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX: |
679 return X86::RDX; | 731 return X86::RDX; |
680 case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX: | 732 case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX: |
681 return X86::RCX; | 733 return X86::RCX; |
(...skipping 16 matching lines...)
698 case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11: | 750 case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11: |
699 return X86::R11; | 751 return X86::R11; |
700 case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12: | 752 case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12: |
701 return X86::R12; | 753 return X86::R12; |
702 case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13: | 754 case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13: |
703 return X86::R13; | 755 return X86::R13; |
704 case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14: | 756 case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14: |
705 return X86::R14; | 757 return X86::R14; |
706 case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15: | 758 case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15: |
707 return X86::R15; | 759 return X86::R15; |
| 760 // @LOCALMOD. TODO: possibly revert this after LEA .td fixes |
| 761 case X86::EIP: case X86::RIP: |
| 762 return X86::RIP; |
708 } | 763 } |
709 } | 764 } |
710 } | 765 } |
711 | 766 |
712 unsigned get512BitSuperRegister(unsigned Reg) { | 767 unsigned get512BitSuperRegister(unsigned Reg) { |
713 if (Reg >= X86::XMM0 && Reg <= X86::XMM31) | 768 if (Reg >= X86::XMM0 && Reg <= X86::XMM31) |
714 return X86::ZMM0 + (Reg - X86::XMM0); | 769 return X86::ZMM0 + (Reg - X86::XMM0); |
715 if (Reg >= X86::YMM0 && Reg <= X86::YMM31) | 770 if (Reg >= X86::YMM0 && Reg <= X86::YMM31) |
716 return X86::ZMM0 + (Reg - X86::YMM0); | 771 return X86::ZMM0 + (Reg - X86::YMM0); |
717 if (Reg >= X86::ZMM0 && Reg <= X86::ZMM31) | 772 if (Reg >= X86::ZMM0 && Reg <= X86::ZMM31) |
718 return Reg; | 773 return Reg; |
719 llvm_unreachable("Unexpected SIMD register"); | 774 llvm_unreachable("Unexpected SIMD register"); |
720 } | 775 } |
721 | 776 |
722 } | 777 } |
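Illustrative only (a hypothetical sanity check, not part of the patch): how the mapping helpers above behave, including the new EIP/RIP cases added by the @LOCALMOD:

#include <cassert>

static void checkRegisterMappings() {
  using namespace llvm;
  // XMM5, YMM5 and ZMM5 all map to the same 512-bit super register.
  assert(get512BitSuperRegister(X86::XMM5) == X86::ZMM5);
  assert(get512BitSuperRegister(X86::YMM5) == X86::ZMM5);
  // With the added cases, the instruction pointer round-trips through the
  // i32 and i64 views as well.
  assert(getX86SubSuperRegister(X86::RIP, MVT::i32, false) == X86::EIP);
  assert(getX86SubSuperRegister(X86::EIP, MVT::i64, false) == X86::RIP);
}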