OLD | NEW |
---|---|
1 //=== X86NaClRewritePass.cpp - Rewrite instructions for NaCl SFI --*- C++ -*-=// | 1 //=== X86NaClRewritePass.cpp - Rewrite instructions for NaCl SFI --*- C++ -*-=// |
2 // | 2 // |
3 // The LLVM Compiler Infrastructure | 3 // The LLVM Compiler Infrastructure |
4 // | 4 // |
5 // This file is distributed under the University of Illinois Open Source | 5 // This file is distributed under the University of Illinois Open Source |
6 // License. See LICENSE.TXT for details. | 6 // License. See LICENSE.TXT for details. |
7 // | 7 // |
8 //===----------------------------------------------------------------------===// | 8 //===----------------------------------------------------------------------===// |
9 // | 9 // |
10 // This file contains a pass that ensures stores and loads and stack/frame | 10 // This file contains a pass that ensures stores and loads and stack/frame |
(...skipping 242 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
253 MI.getOperand(1).setReg(X86::RBP); | 253 MI.getOperand(1).setReg(X86::RBP); |
254 MI.setDesc(TII->get(X86::MOV64rr)); | 254 MI.setDesc(TII->get(X86::MOV64rr)); |
255 Opc = X86::MOV64rr; | 255 Opc = X86::MOV64rr; |
256 } | 256 } |
257 | 257 |
258 // "MOV RBP, RSP" is already safe | 258 // "MOV RBP, RSP" is already safe |
259 if (Opc == X86::MOV64rr && MI.getOperand(1).getReg() == X86::RBP) { | 259 if (Opc == X86::MOV64rr && MI.getOperand(1).getReg() == X86::RBP) { |
260 return true; | 260 return true; |
261 } | 261 } |
262 | 262 |
263 // Promote 32-bit lea to 64-bit lea (does this ever happen?) | |
264 assert(Opc != X86::LEA32r && "Invalid opcode in 64-bit mode!"); | 263 assert(Opc != X86::LEA32r && "Invalid opcode in 64-bit mode!"); |
265 if (Opc == X86::LEA64_32r) { | 264 if (Opc == X86::LEA64_32r){ |
265 unsigned BaseReg = MI.getOperand(1).getReg(); | |
266 if (BaseReg != X86::EBP) { | |
jvoung (off chromium)
2015/05/12 23:53:05
Add a comment about why EBP is a special case?
Ha
Derek Schuff
2015/05/13 00:44:50
Done.
| |
267 // Create a MachineInstr bundle (i.e. a bundle-locked group) and fix up | |
268 // the stack pointer by adding R15. TODO(dschuff): generalize this for | |
269 // other uses if needed, and try to replace some pseudos if | |
270 // possible. Eventually replace with auto-sandboxing. | |
271 auto NextMBBI = MBBI; | |
272 ++NextMBBI; | |
273 BuildMI(MBB, NextMBBI, MBBI->getDebugLoc(), | |
274 TII->get(X86::ADD64rr), X86::RSP).addReg(X86::RSP).addReg(X86::R15); | |
jvoung (off chromium)
2015/05/12 23:53:04
80 col
Derek Schuff
2015/05/13 00:44:50
Done.
| |
275 MIBundleBuilder(MBB, MBBI, NextMBBI); | |
276 finalizeBundle(MBB, MBBI.getInstrIterator()); | |
277 return true; | |
278 } | |
279 | |
280 // Promote 32-bit lea to 64-bit lea (does this ever happen?) | |
266 unsigned DestReg = MI.getOperand(0).getReg(); | 281 unsigned DestReg = MI.getOperand(0).getReg(); |
267 unsigned BaseReg = MI.getOperand(1).getReg(); | |
268 unsigned Scale = MI.getOperand(2).getImm(); | 282 unsigned Scale = MI.getOperand(2).getImm(); |
269 unsigned IndexReg = MI.getOperand(3).getReg(); | 283 unsigned IndexReg = MI.getOperand(3).getReg(); |
270 assert(DestReg == X86::ESP); | 284 assert(DestReg == X86::ESP); |
271 assert(Scale == 1); | 285 assert(Scale == 1); |
272 assert(BaseReg == X86::EBP); | 286 assert(BaseReg == X86::EBP); |
273 assert(IndexReg == 0); | 287 assert(IndexReg == 0); |
274 MI.getOperand(0).setReg(X86::RSP); | 288 MI.getOperand(0).setReg(X86::RSP); |
275 MI.getOperand(1).setReg(X86::RBP); | 289 MI.getOperand(1).setReg(X86::RBP); |
276 MI.setDesc(TII->get(X86::LEA64r)); | 290 MI.setDesc(TII->get(X86::LEA64r)); |
277 Opc = X86::LEA64r; | 291 Opc = X86::LEA64r; |
(...skipping 465 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
743 } | 757 } |
744 return Modified; | 758 return Modified; |
745 } | 759 } |
746 | 760 |
747 /// createX86NaClRewritePassPass - returns an instance of the pass. | 761 /// createX86NaClRewritePassPass - returns an instance of the pass. |
748 namespace llvm { | 762 namespace llvm { |
749 FunctionPass* createX86NaClRewritePass() { | 763 FunctionPass* createX86NaClRewritePass() { |
750 return new X86NaClRewritePass(); | 764 return new X86NaClRewritePass(); |
751 } | 765 } |
752 } | 766 } |
OLD | NEW |