Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(278)

Side by Side Diff: lib/Target/X86/X86NaClRewritePass.cpp

Issue 1137803004: Add support for using MI bundles as bundle-locked groups on x86 (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-llvm.git@master
Patch Set: review Created 5 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « lib/Target/X86/X86MCInstLower.cpp ('k') | test/NaCl/X86/dynamic-stack-alloc2.ll » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 //=== X86NaClRewritePass.cpp - Rewrite instructions for NaCl SFI --*- C++ -*-=// 1 //=== X86NaClRewritePass.cpp - Rewrite instructions for NaCl SFI --*- C++ -*-=//
2 // 2 //
3 // The LLVM Compiler Infrastructure 3 // The LLVM Compiler Infrastructure
4 // 4 //
5 // This file is distributed under the University of Illinois Open Source 5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details. 6 // License. See LICENSE.TXT for details.
7 // 7 //
8 //===----------------------------------------------------------------------===// 8 //===----------------------------------------------------------------------===//
9 // 9 //
10 // This file contains a pass that ensures stores and loads and stack/frame 10 // This file contains a pass that ensures stores and loads and stack/frame
(...skipping 242 matching lines...) Expand 10 before | Expand all | Expand 10 after
253 MI.getOperand(1).setReg(X86::RBP); 253 MI.getOperand(1).setReg(X86::RBP);
254 MI.setDesc(TII->get(X86::MOV64rr)); 254 MI.setDesc(TII->get(X86::MOV64rr));
255 Opc = X86::MOV64rr; 255 Opc = X86::MOV64rr;
256 } 256 }
257 257
258 // "MOV RBP, RSP" is already safe 258 // "MOV RBP, RSP" is already safe
259 if (Opc == X86::MOV64rr && MI.getOperand(1).getReg() == X86::RBP) { 259 if (Opc == X86::MOV64rr && MI.getOperand(1).getReg() == X86::RBP) {
260 return true; 260 return true;
261 } 261 }
262 262
263 // Promote 32-bit lea to 64-bit lea (does this ever happen?)
264 assert(Opc != X86::LEA32r && "Invalid opcode in 64-bit mode!"); 263 assert(Opc != X86::LEA32r && "Invalid opcode in 64-bit mode!");
265 if (Opc == X86::LEA64_32r) { 264 if (Opc == X86::LEA64_32r){
266 unsigned DestReg = MI.getOperand(0).getReg();
267 unsigned BaseReg = MI.getOperand(1).getReg(); 265 unsigned BaseReg = MI.getOperand(1).getReg();
268 unsigned Scale = MI.getOperand(2).getImm(); 266 if (BaseReg == X86::EBP) {
269 unsigned IndexReg = MI.getOperand(3).getReg(); 267 // leal N(%ebp), %esp can be promoted to leaq N(%rbp), %rsp, which
270 assert(DestReg == X86::ESP); 268 // converts to SPADJi32 below.
271 assert(Scale == 1); 269 unsigned DestReg = MI.getOperand(0).getReg();
272 assert(BaseReg == X86::EBP); 270 unsigned Scale = MI.getOperand(2).getImm();
273 assert(IndexReg == 0); 271 unsigned IndexReg = MI.getOperand(3).getReg();
274 MI.getOperand(0).setReg(X86::RSP); 272 assert(DestReg == X86::ESP);
275 MI.getOperand(1).setReg(X86::RBP); 273 assert(Scale == 1);
276 MI.setDesc(TII->get(X86::LEA64r)); 274 assert(BaseReg == X86::EBP);
277 Opc = X86::LEA64r; 275 assert(IndexReg == 0);
276 MI.getOperand(0).setReg(X86::RSP);
277 MI.getOperand(1).setReg(X86::RBP);
278 MI.setDesc(TII->get(X86::LEA64r));
279 Opc = X86::LEA64r;
280 } else {
281 // Create a MachineInstr bundle (i.e. a bundle-locked group) and fix up
282 // the stack pointer by adding R15. TODO(dschuff): generalize this for
283 // other uses if needed, and try to replace some pseudos if
284 // possible. Eventually replace with auto-sandboxing.
285 auto NextMBBI = MBBI;
286 ++NextMBBI;
287 BuildMI(MBB, NextMBBI, MBBI->getDebugLoc(),
288 TII->get(X86::ADD64rr), X86::RSP)
289 .addReg(X86::RSP).addReg(X86::R15);
290 MIBundleBuilder(MBB, MBBI, NextMBBI);
291 finalizeBundle(MBB, MBBI.getInstrIterator());
292 return true;
293 }
278 } 294 }
279 295
280 if (Opc == X86::LEA64r && MatchesSPAdj(MI)) { 296 if (Opc == X86::LEA64r && MatchesSPAdj(MI)) {
281 const MachineOperand &Offset = MI.getOperand(4); 297 const MachineOperand &Offset = MI.getOperand(4);
282 BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_SPADJi32)) 298 BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_SPADJi32))
283 .addImm(Offset.getImm()) 299 .addImm(Offset.getImm())
284 .addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15); 300 .addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15);
285 MI.eraseFromParent(); 301 MI.eraseFromParent();
286 return true; 302 return true;
287 } 303 }
(...skipping 455 matching lines...) Expand 10 before | Expand all | Expand 10 after
743 } 759 }
744 return Modified; 760 return Modified;
745 } 761 }
746 762
747 /// createX86NaClRewritePassPass - returns an instance of the pass. 763 /// createX86NaClRewritePassPass - returns an instance of the pass.
748 namespace llvm { 764 namespace llvm {
749 FunctionPass* createX86NaClRewritePass() { 765 FunctionPass* createX86NaClRewritePass() {
750 return new X86NaClRewritePass(); 766 return new X86NaClRewritePass();
751 } 767 }
752 } 768 }
OLDNEW
« no previous file with comments | « lib/Target/X86/X86MCInstLower.cpp ('k') | test/NaCl/X86/dynamic-stack-alloc2.ll » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698