| OLD | NEW |
| 1 //===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===// | 1 //===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===// |
| 2 // | 2 // |
| 3 // The LLVM Compiler Infrastructure | 3 // The LLVM Compiler Infrastructure |
| 4 // | 4 // |
| 5 // This file is distributed under the University of Illinois Open Source | 5 // This file is distributed under the University of Illinois Open Source |
| 6 // License. See LICENSE.TXT for details. | 6 // License. See LICENSE.TXT for details. |
| 7 // | 7 // |
| 8 //===----------------------------------------------------------------------===// | 8 //===----------------------------------------------------------------------===// |
| 9 // | 9 // |
| 10 // This file defines the interfaces that X86 uses to lower LLVM code into a | 10 // This file defines the interfaces that X86 uses to lower LLVM code into a |
| (...skipping 248 matching lines...) |
| 259 // 32-bit code use the register pressure specific scheduling. | 259 // 32-bit code use the register pressure specific scheduling. |
| 260 // For Atom, always use ILP scheduling. | 260 // For Atom, always use ILP scheduling. |
| 261 if (Subtarget->isAtom()) | 261 if (Subtarget->isAtom()) |
| 262 setSchedulingPreference(Sched::ILP); | 262 setSchedulingPreference(Sched::ILP); |
| 263 else if (Subtarget->is64Bit()) | 263 else if (Subtarget->is64Bit()) |
| 264 setSchedulingPreference(Sched::ILP); | 264 setSchedulingPreference(Sched::ILP); |
| 265 else | 265 else |
| 266 setSchedulingPreference(Sched::RegPressure); | 266 setSchedulingPreference(Sched::RegPressure); |
| 267 const X86RegisterInfo *RegInfo = | 267 const X86RegisterInfo *RegInfo = |
| 268 static_cast<const X86RegisterInfo*>(TM.getRegisterInfo()); | 268 static_cast<const X86RegisterInfo*>(TM.getRegisterInfo()); |
| | 269 (void)RegInfo; // @LOCALMOD |
| 269 setStackPointerRegisterToSaveRestore(X86StackPtr); // @LOCALMOD | 270 setStackPointerRegisterToSaveRestore(X86StackPtr); // @LOCALMOD |
| 270 | 271 |
| 271 // Bypass expensive divides on Atom when compiling with O2 | 272 // Bypass expensive divides on Atom when compiling with O2 |
| 272 if (Subtarget->hasSlowDivide() && TM.getOptLevel() >= CodeGenOpt::Default) { | 273 if (Subtarget->hasSlowDivide() && TM.getOptLevel() >= CodeGenOpt::Default) { |
| 273 addBypassSlowDiv(32, 8); | 274 addBypassSlowDiv(32, 8); |
| 274 if (Subtarget->is64Bit()) | 275 if (Subtarget->is64Bit()) |
| 275 addBypassSlowDiv(64, 16); | 276 addBypassSlowDiv(64, 16); |
| 276 } | 277 } |
| 277 | 278 |
| 278 if (Subtarget->isTargetKnownWindowsMSVC()) { | 279 if (Subtarget->isTargetKnownWindowsMSVC()) { |
| (...skipping 13037 matching lines...) |
| 13316 unsigned Reg = (Has64BitPointers ? X86::RAX : X86::EAX); // @LOCALMOD | 13317 unsigned Reg = (Has64BitPointers ? X86::RAX : X86::EAX); // @LOCALMOD |
| 13317 | 13318 |
| 13318 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag); | 13319 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag); |
| 13319 Flag = Chain.getValue(1); | 13320 Flag = Chain.getValue(1); |
| 13320 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); | 13321 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); |
| 13321 | 13322 |
| 13322 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag); | 13323 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag); |
| 13323 | 13324 |
| 13324 const X86RegisterInfo *RegInfo = | 13325 const X86RegisterInfo *RegInfo = |
| 13325 static_cast<const X86RegisterInfo*>(DAG.getTarget().getRegisterInfo()); | 13326 static_cast<const X86RegisterInfo*>(DAG.getTarget().getRegisterInfo()); |
| | 13327 (void)RegInfo; // @LOCALMOD |
| 13326 unsigned SPReg = X86StackPtr; // @LOCALMOD | 13328 unsigned SPReg = X86StackPtr; // @LOCALMOD |
| 13327 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy); | 13329 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy); |
| 13328 Chain = SP.getValue(1); | 13330 Chain = SP.getValue(1); |
| 13329 | 13331 |
| 13330 if (Align) { | 13332 if (Align) { |
| 13331 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0), | 13333 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0), |
| 13332 DAG.getConstant(-(uint64_t)Align, VT)); | 13334 DAG.getConstant(-(uint64_t)Align, VT)); |
| 13333 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP); | 13335 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP); |
| 13334 } | 13336 } |
| 13335 | 13337 |
| (...skipping 9761 matching lines...) |
| 23097 if (isLegalAddressingMode(AM, Ty)) | 23099 if (isLegalAddressingMode(AM, Ty)) |
| 23098 // Scale represents reg2 * scale, thus account for 1 | 23100 // Scale represents reg2 * scale, thus account for 1 |
| 23099 // as soon as we use a second register. | 23101 // as soon as we use a second register. |
| 23100 return AM.Scale != 0; | 23102 return AM.Scale != 0; |
| 23101 return -1; | 23103 return -1; |
| 23102 } | 23104 } |
| 23103 | 23105 |
| 23104 bool X86TargetLowering::isTargetFTOL() const { | 23106 bool X86TargetLowering::isTargetFTOL() const { |
| 23105 return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit(); | 23107 return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit(); |
| 23106 } | 23108 } |
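The only substantive change in both hunks above is the added `(void)RegInfo; // @LOCALMOD` line. A minimal sketch of the idiom, using stand-in names (`RegisterInfoStub`, `LocalStackPtr`) rather than the real LLVM types: a cast to `void` counts as a use, so the compiler's unused-variable warning stays quiet once the @LOCALMOD path reads the stack pointer from `X86StackPtr` instead of going through `RegInfo`.

```cpp
// Illustrative only: RegisterInfoStub and LocalStackPtr are hypothetical
// stand-ins, not LLVM APIs.
struct RegisterInfoStub {
  unsigned getStackRegister() const { return 4; } // e.g. ESP
};

static const unsigned LocalStackPtr = 4; // stand-in for X86StackPtr

unsigned lowerStackAlloc() {
  static const RegisterInfoStub Stub;
  const RegisterInfoStub *RegInfo = &Stub; // upstream declaration, kept as-is
  (void)RegInfo;  // the cast is a "use": silences -Wunused-variable
  unsigned SPReg = LocalStackPtr; // patched path never reads RegInfo
  return SPReg;
}
```

Keeping the upstream declaration and only adding the cast, rather than deleting the declaration outright, keeps the local patch small against future upstream rebases.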
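For context on the `addBypassSlowDiv(32, 8)` and `addBypassSlowDiv(64, 16)` calls in the first hunk: the hook asks CodeGenPrepare to guard a wide divide with a runtime check and take a narrower, faster divide when both operands happen to fit, which pays off on Atom's slow divider. A hedged sketch of the shape of the resulting code (hand-written here for the unsigned 32-to-8-bit case, not the pass's actual output):

```cpp
#include <cstdint>

// Roughly what bypassing a slow 32-bit unsigned divide with an
// 8-bit one looks like after the transformation.
uint32_t div_with_bypass(uint32_t a, uint32_t b) {
  if (((a | b) & 0xFFFFFF00u) == 0)           // both operands fit in 8 bits
    return uint32_t(uint8_t(a) / uint8_t(b)); // cheap narrow divide
  return a / b;                               // full-width divide, slow path
}
```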