//===-- X86NaClRewritePass.cpp - Rewrite instructions for NaCl SFI -*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that ensures stores and loads and stack/frame
// pointer addresses are within the NaCl sandbox (for x86-64).
// It also ensures that indirect control flow follows NaCl requirements.
//
// The other major portion of rewriting for NaCl is done in X86InstrNaCl.cpp,
// which is responsible for expanding the NaCl-specific operations introduced
// here and also the intrinsic functions to support setjmp, etc.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-sandboxing"

#include "X86.h"
#include "X86InstrInfo.h"
#include "X86NaClDecls.h"
#include "X86Subtarget.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

cl::opt<bool> FlagRestrictR15("sfi-restrict-r15",
                              cl::desc("Restrict use of %r15. This flag can"
                                       " be turned off for the zero-based"
                                       " sandbox model."),
                              cl::init(true));
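
// FlagUseZeroBasedSandbox and FlagHideSandboxBase, referenced throughout this
// pass without a local declaration, are NaCl-wide flags shared across passes
// (presumably via X86NaClDecls.h, included above).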

namespace {
class X86NaClRewritePass : public MachineFunctionPass {
public:
  static char ID;
  X86NaClRewritePass() : MachineFunctionPass(ID) {}

  virtual bool runOnMachineFunction(MachineFunction &Fn);

  virtual const char *getPassName() const {
    return "NaCl Rewrites";
  }

private:

  const TargetMachine *TM;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const X86Subtarget *Subtarget;
  bool Is64Bit;

  bool runOnMachineBasicBlock(MachineBasicBlock &MBB);

  void TraceLog(const char *func,
                const MachineBasicBlock &MBB,
                const MachineBasicBlock::iterator MBBI) const;

  bool ApplyRewrites(MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator MBBI);

  bool ApplyStackSFI(MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator MBBI);

  bool ApplyMemorySFI(MachineBasicBlock &MBB,
                      MachineBasicBlock::iterator MBBI);

  bool ApplyFrameSFI(MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator MBBI);

  bool ApplyControlSFI(MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator MBBI);

  bool AlignJumpTableTargets(MachineFunction &MF);
};

char X86NaClRewritePass::ID = 0;

} // end anonymous namespace

static void DumpInstructionVerbose(const MachineInstr &MI) {
  DEBUG({
    dbgs() << MI;
    dbgs() << MI.getNumOperands() << " operands:" << "\n";
    for (unsigned i = 0; i < MI.getNumOperands(); ++i) {
      const MachineOperand &op = MI.getOperand(i);
      dbgs() << " " << i << "(" << (unsigned)op.getType() << "):" << op
             << "\n";
    }
    dbgs() << "\n";
  });
}

static bool IsPushPop(MachineInstr &MI) {
  const unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    return false;
  case X86::PUSH64r:
  case X86::POP64r:
    return true;
  }
}
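
// Note: pushes and pops are exempted from the stack and memory rewrites
// below because %rsp always points into the sandbox and the implicit +/-8
// adjustment stays within the guard region surrounding it (a property of
// the NaCl sandbox layout, not established in this file).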

static bool IsStore(MachineInstr &MI) {
  return MI.mayStore();
}

static bool IsLoad(MachineInstr &MI) {
  return MI.mayLoad();
}

static bool IsFrameChange(MachineInstr &MI, const TargetRegisterInfo *TRI) {
  return MI.modifiesRegister(X86::EBP, TRI);
}

static bool IsStackChange(MachineInstr &MI, const TargetRegisterInfo *TRI) {
  return MI.modifiesRegister(X86::ESP, TRI);
}

static bool HasControlFlow(const MachineInstr &MI) {
  return MI.getDesc().isBranch() ||
         MI.getDesc().isCall() ||
         MI.getDesc().isReturn() ||
         MI.getDesc().isTerminator() ||
         MI.getDesc().isBarrier();
}

static bool IsDirectBranch(const MachineInstr &MI) {
  return MI.getDesc().isBranch() &&
         !MI.getDesc().isIndirectBranch();
}

static bool IsRegAbsolute(unsigned Reg) {
  const bool RestrictR15 = FlagRestrictR15;
  assert(FlagUseZeroBasedSandbox || RestrictR15);
  return (Reg == X86::RSP || Reg == X86::RBP ||
          (Reg == X86::R15 && RestrictR15));
}
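
// %rsp, %rbp and (unless -sfi-restrict-r15 is turned off) %r15 are
// "absolute": the rewrites in this pass keep them pointing into the sandbox
// at all times, so addresses based on them need no further masking.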

static bool FindMemoryOperand(const MachineInstr &MI,
                              SmallVectorImpl<unsigned> *indices) {
  int NumFound = 0;
  for (unsigned i = 0; i < MI.getNumOperands(); ) {
    if (isMem(&MI, i)) {
      NumFound++;
      indices->push_back(i);
      i += X86::AddrNumOperands;
    } else {
      i++;
    }
  }

  // Intrinsics and other functions can have mayLoad and mayStore set merely
  // to reflect their side effects; such instructions carry no explicit
  // memory references, so nothing is found for them here.
  if (NumFound == 0)
    return false;

  return true;
}
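
// For reference: an x86 memory reference occupies X86::AddrNumOperands (5)
// consecutive MachineOperands -- base register, scale, index register,
// displacement, and segment register. E.g. for "movl 8(%rdi), %eax"
// (MOV32rm) the tuple starts at operand index 1, right after the
// destination register.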

static unsigned PromoteRegTo64(unsigned RegIn) {
  if (RegIn == 0)
    return 0;
  unsigned RegOut = getX86SubSuperRegister(RegIn, MVT::i64, false);
  assert(RegOut != 0);
  return RegOut;
}

static unsigned DemoteRegTo32(unsigned RegIn) {
  if (RegIn == 0)
    return 0;
  unsigned RegOut = getX86SubSuperRegister(RegIn, MVT::i32, false);
  assert(RegOut != 0);
  return RegOut;
}
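
// For example, PromoteRegTo64 maps %eax to %rax and DemoteRegTo32 maps
// %rax back to %eax; a register with no sub-/super-register of the
// requested width would fail the asserts above.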

//
// True if this MI restores RSP from RBP with a slight adjustment offset.
//
static bool MatchesSPAdj(const MachineInstr &MI) {
  assert(MI.getOpcode() == X86::LEA64r && "Call to MatchesSPAdj with non-LEA");
  const MachineOperand &DestReg = MI.getOperand(0);
  const MachineOperand &BaseReg = MI.getOperand(1);
  const MachineOperand &Scale = MI.getOperand(2);
  const MachineOperand &IndexReg = MI.getOperand(3);
  const MachineOperand &Offset = MI.getOperand(4);
  return (DestReg.isReg() && DestReg.getReg() == X86::RSP &&
          BaseReg.isReg() && BaseReg.getReg() == X86::RBP &&
          Scale.getImm() == 1 &&
          IndexReg.isReg() && IndexReg.getReg() == 0 &&
          Offset.isImm());
}
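
// That is, MatchesSPAdj accepts epilogue patterns like "leaq -16(%rbp), %rsp",
// which ApplyStackSFI below collapses into a single naclspadj pseudo.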

void
X86NaClRewritePass::TraceLog(const char *func,
                             const MachineBasicBlock &MBB,
                             const MachineBasicBlock::iterator MBBI) const {
  DEBUG(dbgs() << "@" << func
               << "(" << MBB.getName() << ", " << (*MBBI) << ")\n");
}

bool X86NaClRewritePass::ApplyStackSFI(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI) {
  TraceLog("ApplyStackSFI", MBB, MBBI);
  assert(Is64Bit);
  MachineInstr &MI = *MBBI;

  if (!IsStackChange(MI, TRI))
    return false;

  if (IsPushPop(MI))
    return false;

  if (MI.getDesc().isCall())
    return false;

  unsigned Opc = MI.getOpcode();
  DebugLoc DL = MI.getDebugLoc();
  unsigned DestReg = MI.getOperand(0).getReg();
  assert(DestReg == X86::ESP || DestReg == X86::RSP);

  unsigned NewOpc = 0;
  switch (Opc) {
  case X86::ADD64ri8 : NewOpc = X86::NACL_ASPi8;    break;
  case X86::ADD64ri32: NewOpc = X86::NACL_ASPi32;   break;
  case X86::SUB64ri8 : NewOpc = X86::NACL_SSPi8;    break;
  case X86::SUB64ri32: NewOpc = X86::NACL_SSPi32;   break;
  case X86::AND64ri8 : NewOpc = X86::NACL_ANDSPi8;  break;
  case X86::AND64ri32: NewOpc = X86::NACL_ANDSPi32; break;
  }
  if (NewOpc) {
    BuildMI(MBB, MBBI, DL, TII->get(NewOpc))
      .addImm(MI.getOperand(2).getImm())
      .addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15);
    MI.eraseFromParent();
    return true;
  }
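
  // For example, "addq $16, %rsp" becomes "naclasp $16, %r15". The pseudo
  // is expanded later (see X86InstrNaCl.cpp) into a bundle-locked sequence
  // along the lines of:
  //   add $16, %esp    ; 32-bit op clears the upper half of %rsp
  //   add %r15, %rsp   ; re-add the sandbox base
  // (Sketch only; the authoritative expansion lives with the pseudos.)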

  // Promote "MOV ESP, EBP" to a 64-bit move.
  if (Opc == X86::MOV32rr && MI.getOperand(1).getReg() == X86::EBP) {
    MI.getOperand(0).setReg(X86::RSP);
    MI.getOperand(1).setReg(X86::RBP);
    MI.setDesc(TII->get(X86::MOV64rr));
    Opc = X86::MOV64rr;
  }

  // "MOV RBP, RSP" is already safe.
  if (Opc == X86::MOV64rr && MI.getOperand(1).getReg() == X86::RBP) {
    return true;
  }

  // Promote 32-bit lea to 64-bit lea (does this ever happen?).
  assert(Opc != X86::LEA32r && "Invalid opcode in 64-bit mode!");
  if (Opc == X86::LEA64_32r) {
    unsigned DestReg = MI.getOperand(0).getReg();
    unsigned BaseReg = MI.getOperand(1).getReg();
    unsigned Scale = MI.getOperand(2).getImm();
    unsigned IndexReg = MI.getOperand(3).getReg();
    assert(DestReg == X86::ESP);
    assert(Scale == 1);
    assert(BaseReg == X86::EBP);
    assert(IndexReg == 0);
    MI.getOperand(0).setReg(X86::RSP);
    MI.getOperand(1).setReg(X86::RBP);
    MI.setDesc(TII->get(X86::LEA64r));
    Opc = X86::LEA64r;
  }

  if (Opc == X86::LEA64r && MatchesSPAdj(MI)) {
    const MachineOperand &Offset = MI.getOperand(4);
    BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_SPADJi32))
      .addImm(Offset.getImm())
      .addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15);
    MI.eraseFromParent();
    return true;
  }

  if (Opc == X86::MOV32rr || Opc == X86::MOV64rr) {
    BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_RESTSPr))
      .addReg(DemoteRegTo32(MI.getOperand(1).getReg()))
      .addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15);
    MI.eraseFromParent();
    return true;
  }

  if (Opc == X86::MOV32rm) {
    BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_RESTSPm))
      .addOperand(MI.getOperand(1))  // Base
      .addOperand(MI.getOperand(2))  // Scale
      .addOperand(MI.getOperand(3))  // Index
      .addOperand(MI.getOperand(4))  // Offset
      .addOperand(MI.getOperand(5))  // Segment
      .addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15);
    MI.eraseFromParent();
    return true;
  }

  DEBUG(DumpInstructionVerbose(MI));
  llvm_unreachable("Unhandled Stack SFI");
}

bool X86NaClRewritePass::ApplyFrameSFI(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI) {
  TraceLog("ApplyFrameSFI", MBB, MBBI);
  assert(Is64Bit);
  MachineInstr &MI = *MBBI;

  if (!IsFrameChange(MI, TRI))
    return false;

  unsigned Opc = MI.getOpcode();
  DebugLoc DL = MI.getDebugLoc();

  // Handle moves to RBP.
  if (Opc == X86::MOV64rr) {
    assert(MI.getOperand(0).getReg() == X86::RBP);
    unsigned SrcReg = MI.getOperand(1).getReg();

    // MOV RBP, RSP is already safe.
    if (SrcReg == X86::RSP)
      return false;

    // Rewrite: mov %rbp, %rX
    // To:      naclrestbp %eX, %rZP
    BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_RESTBPr))
      .addReg(DemoteRegTo32(SrcReg))
      .addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15); // rZP
    MI.eraseFromParent();
    return true;
  }

  // Handle memory moves to RBP.
  if (Opc == X86::MOV64rm) {
    assert(MI.getOperand(0).getReg() == X86::RBP);

    // The zero-based sandbox model uses address clipping.
    if (FlagUseZeroBasedSandbox)
      return false;

    // Rewrite: mov %rbp, (...)
    // To:      naclrestbp (...), %rZP
    BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_RESTBPm))
      .addOperand(MI.getOperand(1))  // Base
      .addOperand(MI.getOperand(2))  // Scale
      .addOperand(MI.getOperand(3))  // Index
      .addOperand(MI.getOperand(4))  // Offset
      .addOperand(MI.getOperand(5))  // Segment
      .addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15); // rZP
    MI.eraseFromParent();
    return true;
  }

  // Popping into RBP. Rewrite to:
  //   naclrestbp (%rsp), %rZP
  //   naclasp $8, %rZP
  //
  // TODO(pdox): Consider rewriting to this instead:
  //   .bundle_lock
  //   pop %rbp
  //   mov %ebp, %ebp
  //   add %rZP, %rbp
  //   .bundle_unlock
  if (Opc == X86::POP64r) {
    assert(MI.getOperand(0).getReg() == X86::RBP);

    BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_RESTBPm))
      .addReg(X86::RSP)  // Base
      .addImm(1)         // Scale
      .addReg(0)         // Index
      .addImm(0)         // Offset
      .addReg(0)         // Segment
      .addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15); // rZP

    BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_ASPi8))
      .addImm(8)
      .addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15);

    MI.eraseFromParent();
    return true;
  }

  DEBUG(DumpInstructionVerbose(MI));
  llvm_unreachable("Unhandled Frame SFI");
}

bool X86NaClRewritePass::ApplyControlSFI(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator MBBI) {
  const bool HideSandboxBase = (FlagHideSandboxBase &&
                                Is64Bit && !FlagUseZeroBasedSandbox);
  TraceLog("ApplyControlSFI", MBB, MBBI);
  MachineInstr &MI = *MBBI;

  if (!HasControlFlow(MI))
    return false;

  // Direct branches are OK.
  if (IsDirectBranch(MI))
    return false;

  DebugLoc DL = MI.getDebugLoc();
  unsigned Opc = MI.getOpcode();

  // Rewrite indirect jump/call instructions.
  unsigned NewOpc = 0;
  switch (Opc) {
  // 32-bit
  case X86::JMP32r         : NewOpc = X86::NACL_JMP32r;  break;
  case X86::TAILJMPr       : NewOpc = X86::NACL_JMP32r;  break;
  case X86::CALL32r        : NewOpc = X86::NACL_CALL32r; break;
  // 64-bit
  case X86::NACL_CG_JMP64r : NewOpc = X86::NACL_JMP64r;  break;
  case X86::CALL64r        : NewOpc = X86::NACL_CALL64r; break;
  case X86::TAILJMPr64     : NewOpc = X86::NACL_JMP64r;  break;
  }
  if (NewOpc) {
    unsigned TargetReg = MI.getOperand(0).getReg();
    if (Is64Bit) {
      // CALL64r, etc. take a 64-bit register as a target. However, NaCl gas
      // expects the naclcall/nacljmp pseudos to take 32-bit registers as
      // targets, so NACL_CALL64r and NACL_JMP64r stick with that as well.
      // Demote any 64-bit register to 32-bit to match the expectations.
      TargetReg = DemoteRegTo32(TargetReg);
    }
    MachineInstrBuilder NewMI =
      BuildMI(MBB, MBBI, DL, TII->get(NewOpc))
        .addReg(TargetReg);
    if (Is64Bit) {
      NewMI.addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15);
    }
    MI.eraseFromParent();
    return true;
  }
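
  // For example, "jmpq *%rax" becomes "nacljmp %eax, %r15", which is later
  // expanded into a bundle-locked sequence roughly like:
  //   and $-32, %eax   ; force the target to a 32-byte bundle boundary
  //   add %r15, %rax   ; rebase into the sandbox
  //   jmpq *%rax
  // (Sketch only; see the pseudo expansions for the exact sequence.)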

  // EH_RETURN has a single argument which is not actually used directly.
  // The argument gives the location where to reposition the stack pointer
  // before returning. EmitPrologue takes care of that repositioning. So
  // EH_RETURN just ultimately emits a plain "ret".
  // RETI returns and pops some number of bytes from the stack.
  if (Opc == X86::RETL || Opc == X86::RETQ ||
      Opc == X86::EH_RETURN || Opc == X86::EH_RETURN64 ||
      Opc == X86::RETIL || Opc == X86::RETIQ) {
    // To maintain compatibility with nacl-as, for now we don't emit naclret.
    // MI.setDesc(TII->get(Is64Bit ? X86::NACL_RET64 : X86::NACL_RET32));
    //
    // For NaCl64 returns, follow the convention of using r11 to hold
    // the target of an indirect jump to avoid potentially leaking the
    // sandbox base address.
    unsigned RegTarget;
    if (Is64Bit) {
      RegTarget = (HideSandboxBase ? X86::R11 : X86::RCX);
      BuildMI(MBB, MBBI, DL, TII->get(X86::POP64r), RegTarget);
      if (Opc == X86::RETIL || Opc == X86::RETIQ) {
        BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_ASPi32))
          .addOperand(MI.getOperand(0))
          .addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15);
      }

      BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_JMP64r))
        .addReg(getX86SubSuperRegister(RegTarget, MVT::i32, false))
        .addReg(FlagUseZeroBasedSandbox ? 0 : X86::R15);
    } else {
      RegTarget = X86::ECX;
      BuildMI(MBB, MBBI, DL, TII->get(X86::POP32r), RegTarget);
      if (Opc == X86::RETIL || Opc == X86::RETIQ) {
        BuildMI(MBB, MBBI, DL, TII->get(X86::ADD32ri), X86::ESP)
          .addReg(X86::ESP)
          .addOperand(MI.getOperand(0));
      }
      BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_JMP32r))
        .addReg(RegTarget);
    }
    MI.eraseFromParent();
    return true;
  }

  // Traps are OK (but are considered to have control flow,
  // being a terminator like RET).
  if (Opc == X86::TRAP)
    return false;

  DEBUG(DumpInstructionVerbose(MI));
  llvm_unreachable("Unhandled Control SFI");
}

//
// Sandboxes loads and stores (64-bit only).
//
bool X86NaClRewritePass::ApplyMemorySFI(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI) {
  TraceLog("ApplyMemorySFI", MBB, MBBI);
  assert(Is64Bit);
  MachineInstr &MI = *MBBI;

  if (!IsLoad(MI) && !IsStore(MI))
    return false;

  if (IsPushPop(MI))
    return false;

  SmallVector<unsigned, 2> MemOps;
  if (!FindMemoryOperand(MI, &MemOps))
    return false;
  bool Modified = false;
  for (unsigned MemOp : MemOps) {
    MachineOperand &BaseReg = MI.getOperand(MemOp + 0);
    MachineOperand &Scale = MI.getOperand(MemOp + 1);
    MachineOperand &IndexReg = MI.getOperand(MemOp + 2);
    //MachineOperand &Disp = MI.getOperand(MemOp + 3);
    MachineOperand &SegmentReg = MI.getOperand(MemOp + 4);

    // RIP-relative addressing is safe.
    if (BaseReg.getReg() == X86::RIP)
      continue;

    // Make sure the base and index are 64-bit registers.
    IndexReg.setReg(PromoteRegTo64(IndexReg.getReg()));
    BaseReg.setReg(PromoteRegTo64(BaseReg.getReg()));
    assert(IndexReg.getSubReg() == 0);
    assert(BaseReg.getSubReg() == 0);

    bool AbsoluteBase = IsRegAbsolute(BaseReg.getReg());
    bool AbsoluteIndex = IsRegAbsolute(IndexReg.getReg());
    unsigned AddrReg = 0;

    if (AbsoluteBase && AbsoluteIndex) {
      llvm_unreachable("Unexpected absolute register pair");
    } else if (AbsoluteBase) {
      AddrReg = IndexReg.getReg();
    } else if (AbsoluteIndex) {
      assert(!BaseReg.getReg() && "Unexpected base register");
      assert(Scale.getImm() == 1);
      AddrReg = 0;
    } else {
      if (!BaseReg.getReg()) {
        // No base register; make the access relative to the sandbox base.
        BaseReg.setReg(FlagUseZeroBasedSandbox ? 0 : X86::R15);
        AddrReg = IndexReg.getReg();
      } else if (!FlagUseZeroBasedSandbox) {
        // Switch the base and index registers when the index register is
        // undefined, i.e. convert "mov d(%r,0,0)" to "mov d(%r15,%r,1)".
        assert(!IndexReg.getReg() && "Unexpected index and base register");
        IndexReg.setReg(BaseReg.getReg());
        Scale.setImm(1);
        BaseReg.setReg(X86::R15);
        AddrReg = IndexReg.getReg();
      } else {
        llvm_unreachable("Unexpected index and base register");
      }
    }

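    // Tagging the segment operand with PSEUDO_NACL_SEG causes the access to
    // be printed with a "%nacl:" prefix, e.g. "movl %nacl:(%r15,%rax), %ebx";
    // a NaCl-aware assembler then emits the sandboxed form (index truncated
    // to 32 bits, then the %r15-based access). (Descriptive sketch; the
    // actual lowering happens downstream of this pass.)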
    if (AddrReg) {
      assert(!SegmentReg.getReg() && "Unexpected segment register");
      SegmentReg.setReg(X86::PSEUDO_NACL_SEG);
      Modified = true;
    }
  }

  return Modified;
}

bool X86NaClRewritePass::ApplyRewrites(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI) {
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();
  unsigned Opc = MI.getOpcode();

  // These direct jumps need their opcode rewritten
  // and variable operands removed.
  unsigned NewOpc = 0;
  switch (Opc) {
  // 32-bit direct calls are handled unmodified by the assemblers.
  case X86::CALLpcrel32          : return true;
  case X86::TAILJMPd             : NewOpc = X86::JMP_4; break;
  case X86::NACL_CG_TAILJMPd64   : NewOpc = X86::JMP_4; break;
  case X86::NACL_CG_CALL64pcrel32: NewOpc = X86::NACL_CALL64d; break;
  }
  if (NewOpc) {
    BuildMI(MBB, MBBI, DL, TII->get(NewOpc))
      .addOperand(MI.getOperand(0));
    MI.eraseFromParent();
    return true;
  }

  // General Dynamic NaCl TLS model
  // http://code.google.com/p/nativeclient/issues/detail?id=1685
  if (Opc == X86::NACL_CG_GD_TLS_addr64) {
    // Rewrite to:
    //   leaq $sym@TLSGD(%rip), %rdi
    //   call __tls_get_addr@PLT
    BuildMI(MBB, MBBI, DL, TII->get(X86::LEA64r), X86::RDI)
      .addReg(X86::RIP)  // Base
      .addImm(1)         // Scale
      .addReg(0)         // Index
      .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
                        MI.getOperand(3).getTargetFlags())
      .addReg(0);        // Segment
    BuildMI(MBB, MBBI, DL, TII->get(X86::NACL_CALL64d))
      .addExternalSymbol("__tls_get_addr", X86II::MO_PLT);
    MI.eraseFromParent();
    return true;
  }

  // Local Exec NaCl TLS model
  if (Opc == X86::NACL_CG_LE_TLS_addr64 ||
      Opc == X86::NACL_CG_LE_TLS_addr32) {
    unsigned CallOpc, LeaOpc, Reg;
    // Rewrite to:
    //   call __nacl_read_tp@PLT
    //   lea $sym@flag(,%reg), %reg
    if (Opc == X86::NACL_CG_LE_TLS_addr64) {
      CallOpc = X86::NACL_CALL64d;
      LeaOpc = X86::LEA64r;
      Reg = X86::RAX;
    } else {
      CallOpc = X86::CALLpcrel32;
      LeaOpc = X86::LEA32r;
      Reg = X86::EAX;
    }
    BuildMI(MBB, MBBI, DL, TII->get(CallOpc))
      .addExternalSymbol("__nacl_read_tp", X86II::MO_PLT);
    BuildMI(MBB, MBBI, DL, TII->get(LeaOpc), Reg)
      .addReg(0)    // Base
      .addImm(1)    // Scale
      .addReg(Reg)  // Index
      .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
                        MI.getOperand(3).getTargetFlags())
      .addReg(0);   // Segment
    MI.eraseFromParent();
    return true;
  }

  // Initial Exec NaCl TLS model
  if (Opc == X86::NACL_CG_IE_TLS_addr64 ||
      Opc == X86::NACL_CG_IE_TLS_addr32) {
    unsigned CallOpc, AddOpc, Base, Reg;
    // Rewrite to:
    //   call __nacl_read_tp@PLT
    //   addq sym@flag(%base), %reg
    if (Opc == X86::NACL_CG_IE_TLS_addr64) {
      CallOpc = X86::NACL_CALL64d;
      AddOpc = X86::ADD64rm;
      Base = X86::RIP;
      Reg = X86::RAX;
    } else {
      CallOpc = X86::CALLpcrel32;
      AddOpc = X86::ADD32rm;
      Base = MI.getOperand(3).getTargetFlags() == X86II::MO_INDNTPOFF ?
          0 : X86::EBX;  // EBX for GOTNTPOFF.
      Reg = X86::EAX;
    }
    BuildMI(MBB, MBBI, DL, TII->get(CallOpc))
      .addExternalSymbol("__nacl_read_tp", X86II::MO_PLT);
    BuildMI(MBB, MBBI, DL, TII->get(AddOpc), Reg)
      .addReg(Reg)
      .addReg(Base)
      .addImm(1)   // Scale
      .addReg(0)   // Index
      .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
                        MI.getOperand(3).getTargetFlags())
      .addReg(0);  // Segment
    MI.eraseFromParent();
    return true;
  }

  return false;
}

bool X86NaClRewritePass::AlignJumpTableTargets(MachineFunction &MF) {
  bool Modified = true;

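  // NaCl requires every indirect control-flow target to start on a 32-byte
  // bundle boundary, so align the function entry and all jump-table targets
  // to 2^5 bytes.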
  MF.setAlignment(5);  // log2: 32 = 2^5

  MachineJumpTableInfo *JTI = MF.getJumpTableInfo();
  if (JTI != NULL) {
    const std::vector<MachineJumpTableEntry> &JT = JTI->getJumpTables();
    for (unsigned i = 0; i < JT.size(); ++i) {
      const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs;
      for (unsigned j = 0; j < MBBs.size(); ++j) {
        MBBs[j]->setAlignment(5);
        Modified |= true;
      }
    }
  }
  return Modified;
}

bool X86NaClRewritePass::runOnMachineFunction(MachineFunction &MF) {
  bool Modified = false;

  TM = &MF.getTarget();
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();
  Subtarget = &TM->getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();

  assert(Subtarget->isTargetNaCl() && "Unexpected target in NaClRewritePass!");

  DEBUG(dbgs() << "*************** NaCl Rewrite Pass ***************\n");
  for (MachineFunction::iterator MFI = MF.begin(), E = MF.end();
       MFI != E;
       ++MFI) {
    Modified |= runOnMachineBasicBlock(*MFI);
  }
  Modified |= AlignJumpTableTargets(MF);
  DEBUG(dbgs() << "*************** NaCl Rewrite DONE ***************\n");
  return Modified;
}

bool X86NaClRewritePass::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
  bool Modified = false;
  if (MBB.hasAddressTaken()) {
    // FIXME: use a symbolic constant or get this value from some
    // configuration.
    MBB.setAlignment(5);
    Modified = true;
  }
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), NextMBBI = MBBI;
       MBBI != MBB.end(); MBBI = NextMBBI) {
    ++NextMBBI;
    // When one of these methods makes a change,
    // it returns true, skipping the others.
    if (ApplyRewrites(MBB, MBBI) ||
        (Is64Bit && ApplyStackSFI(MBB, MBBI)) ||
        (Is64Bit && ApplyMemorySFI(MBB, MBBI)) ||
        (Is64Bit && ApplyFrameSFI(MBB, MBBI)) ||
        ApplyControlSFI(MBB, MBBI)) {
      Modified = true;
    }
  }
  return Modified;
}

/// createX86NaClRewritePass - returns an instance of the pass.
namespace llvm {
  FunctionPass *createX86NaClRewritePass() {
    return new X86NaClRewritePass();
  }
}