Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1)

Side by Side Diff: src/IceTargetLoweringX86BaseImpl.h

Issue 1506653002: Subzero: Add Non-SFI support for x86-32. (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: Refactor the link commands Created 4 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 //===- subzero/src/IceTargetLoweringX86BaseImpl.h - x86 lowering -*- C++ -*-==// 1 //===- subzero/src/IceTargetLoweringX86BaseImpl.h - x86 lowering -*- C++ -*-==//
2 // 2 //
3 // The Subzero Code Generator 3 // The Subzero Code Generator
4 // 4 //
5 // This file is distributed under the University of Illinois Open Source 5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details. 6 // License. See LICENSE.TXT for details.
7 // 7 //
8 //===----------------------------------------------------------------------===// 8 //===----------------------------------------------------------------------===//
9 /// 9 ///
10 /// \file 10 /// \file
(...skipping 306 matching lines...) Expand 10 before | Expand all | Expand 10 after
317 "Traits::InstructionSet range different from TargetInstructionSet"); 317 "Traits::InstructionSet range different from TargetInstructionSet");
318 if (Func->getContext()->getFlags().getTargetInstructionSet() != 318 if (Func->getContext()->getFlags().getTargetInstructionSet() !=
319 TargetInstructionSet::BaseInstructionSet) { 319 TargetInstructionSet::BaseInstructionSet) {
320 InstructionSet = static_cast<typename Traits::InstructionSet>( 320 InstructionSet = static_cast<typename Traits::InstructionSet>(
321 (Func->getContext()->getFlags().getTargetInstructionSet() - 321 (Func->getContext()->getFlags().getTargetInstructionSet() -
322 TargetInstructionSet::X86InstructionSet_Begin) + 322 TargetInstructionSet::X86InstructionSet_Begin) +
323 Traits::InstructionSet::Begin); 323 Traits::InstructionSet::Begin);
324 } 324 }
325 } 325 }
326 326
327 template <class Machine> void TargetX86Base<Machine>::staticInit() { 327 template <class Machine>
328 void TargetX86Base<Machine>::staticInit(const ClFlags &Flags) {
328 Traits::initRegisterSet(&TypeToRegisterSet, &RegisterAliases, &ScratchRegs); 329 Traits::initRegisterSet(&TypeToRegisterSet, &RegisterAliases, &ScratchRegs);
330 PcRelFixup = Traits::FK_PcRel;
331 AbsFixup = Flags.getUseNonsfi() ? Traits::FK_Gotoff : Traits::FK_Abs;
329 } 332 }
330 333
331 template <class Machine> void TargetX86Base<Machine>::translateO2() { 334 template <class Machine> void TargetX86Base<Machine>::translateO2() {
332 TimerMarker T(TimerStack::TT_O2, Func); 335 TimerMarker T(TimerStack::TT_O2, Func);
333 336
337 if (!Traits::Is64Bit && Func->getContext()->getFlags().getUseNonsfi()) {
338 GotVar = Func->makeVariable(IceType_i32);
339 }
334 genTargetHelperCalls(); 340 genTargetHelperCalls();
335 Func->dump("After target helper call insertion"); 341 Func->dump("After target helper call insertion");
336 342
337 // Merge Alloca instructions, and lay out the stack. 343 // Merge Alloca instructions, and lay out the stack.
338 static constexpr bool SortAndCombineAllocas = true; 344 static constexpr bool SortAndCombineAllocas = true;
339 Func->processAllocas(SortAndCombineAllocas); 345 Func->processAllocas(SortAndCombineAllocas);
340 Func->dump("After Alloca processing"); 346 Func->dump("After Alloca processing");
341 347
342 if (!Ctx->getFlags().getPhiEdgeSplit()) { 348 if (!Ctx->getFlags().getPhiEdgeSplit()) {
343 // Lower Phi instructions. 349 // Lower Phi instructions.
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after
392 Func->dump("After x86 address mode opt"); 398 Func->dump("After x86 address mode opt");
393 399
394 // Disable constant blinding or pooling for load optimization. 400 // Disable constant blinding or pooling for load optimization.
395 { 401 {
396 BoolFlagSaver B(RandomizationPoolingPaused, true); 402 BoolFlagSaver B(RandomizationPoolingPaused, true);
397 doLoadOpt(); 403 doLoadOpt();
398 } 404 }
399 Func->genCode(); 405 Func->genCode();
400 if (Func->hasError()) 406 if (Func->hasError())
401 return; 407 return;
408 initGotVarIfNeeded();
402 Func->dump("After x86 codegen"); 409 Func->dump("After x86 codegen");
403 410
404 // Register allocation. This requires instruction renumbering and full 411 // Register allocation. This requires instruction renumbering and full
405 // liveness analysis. Loops must be identified before liveness so variable 412 // liveness analysis. Loops must be identified before liveness so variable
406 // use weights are correct. 413 // use weights are correct.
407 Func->renumberInstructions(); 414 Func->renumberInstructions();
408 if (Func->hasError()) 415 if (Func->hasError())
409 return; 416 return;
410 Func->liveness(Liveness_Intervals); 417 Func->liveness(Liveness_Intervals);
411 if (Func->hasError()) 418 if (Func->hasError())
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after
450 Func->doNopInsertion(); 457 Func->doNopInsertion();
451 458
452 // Mark nodes that require sandbox alignment 459 // Mark nodes that require sandbox alignment
453 if (Ctx->getFlags().getUseSandboxing()) 460 if (Ctx->getFlags().getUseSandboxing())
454 Func->markNodesForSandboxing(); 461 Func->markNodesForSandboxing();
455 } 462 }
456 463
457 template <class Machine> void TargetX86Base<Machine>::translateOm1() { 464 template <class Machine> void TargetX86Base<Machine>::translateOm1() {
458 TimerMarker T(TimerStack::TT_Om1, Func); 465 TimerMarker T(TimerStack::TT_Om1, Func);
459 466
467 if (!Traits::Is64Bit && Func->getContext()->getFlags().getUseNonsfi()) {
468 GotVar = Func->makeVariable(IceType_i32);
469 }
460 genTargetHelperCalls(); 470 genTargetHelperCalls();
461 471
462 // Do not merge Alloca instructions, and lay out the stack. 472 // Do not merge Alloca instructions, and lay out the stack.
463 static constexpr bool SortAndCombineAllocas = false; 473 static constexpr bool SortAndCombineAllocas = false;
464 Func->processAllocas(SortAndCombineAllocas); 474 Func->processAllocas(SortAndCombineAllocas);
465 Func->dump("After Alloca processing"); 475 Func->dump("After Alloca processing");
466 476
467 Func->placePhiLoads(); 477 Func->placePhiLoads();
468 if (Func->hasError()) 478 if (Func->hasError())
469 return; 479 return;
470 Func->placePhiStores(); 480 Func->placePhiStores();
471 if (Func->hasError()) 481 if (Func->hasError())
472 return; 482 return;
473 Func->deletePhis(); 483 Func->deletePhis();
474 if (Func->hasError()) 484 if (Func->hasError())
475 return; 485 return;
476 Func->dump("After Phi lowering"); 486 Func->dump("After Phi lowering");
477 487
478 Func->doArgLowering(); 488 Func->doArgLowering();
479 Func->genCode(); 489 Func->genCode();
480 if (Func->hasError()) 490 if (Func->hasError())
481 return; 491 return;
492 initGotVarIfNeeded();
482 Func->dump("After initial x8632 codegen"); 493 Func->dump("After initial x8632 codegen");
483 494
484 regAlloc(RAK_InfOnly); 495 regAlloc(RAK_InfOnly);
485 if (Func->hasError()) 496 if (Func->hasError())
486 return; 497 return;
487 Func->dump("After regalloc of infinite-weight variables"); 498 Func->dump("After regalloc of infinite-weight variables");
488 499
489 Func->genFrame(); 500 Func->genFrame();
490 if (Func->hasError()) 501 if (Func->hasError())
491 return; 502 return;
(...skipping 302 matching lines...) Expand 10 before | Expand all | Expand 10 after
794 template <class Machine> 805 template <class Machine>
795 void TargetX86Base<Machine>::emitVariable(const Variable *Var) const { 806 void TargetX86Base<Machine>::emitVariable(const Variable *Var) const {
796 if (!BuildDefs::dump()) 807 if (!BuildDefs::dump())
797 return; 808 return;
798 Ostream &Str = Ctx->getStrEmit(); 809 Ostream &Str = Ctx->getStrEmit();
799 if (Var->hasReg()) { 810 if (Var->hasReg()) {
800 Str << "%" << getRegName(Var->getRegNum(), Var->getType()); 811 Str << "%" << getRegName(Var->getRegNum(), Var->getType());
801 return; 812 return;
802 } 813 }
803 if (Var->mustHaveReg()) { 814 if (Var->mustHaveReg()) {
804 llvm_unreachable("Infinite-weight Variable has no register assigned"); 815 llvm::report_fatal_error(
816 "Infinite-weight Variable has no register assigned");
805 } 817 }
806 const int32_t Offset = Var->getStackOffset(); 818 const int32_t Offset = Var->getStackOffset();
807 int32_t BaseRegNum = Var->getBaseRegNum(); 819 int32_t BaseRegNum = Var->getBaseRegNum();
808 if (BaseRegNum == Variable::NoRegister) 820 if (BaseRegNum == Variable::NoRegister)
809 BaseRegNum = getFrameOrStackReg(); 821 BaseRegNum = getFrameOrStackReg();
810 // Print in the form "Offset(%reg)", taking care that: 822 // Print in the form "Offset(%reg)", taking care that:
811 // - Offset is never printed when it is 0 823 // - Offset is never printed when it is 0
812 824
813 const bool DecorateAsm = Func->getContext()->getFlags().getDecorateAsm(); 825 const bool DecorateAsm = Func->getContext()->getFlags().getDecorateAsm();
814 // Only print Offset when it is nonzero, regardless of DecorateAsm. 826 // Only print Offset when it is nonzero, regardless of DecorateAsm.
815 if (Offset) { 827 if (Offset) {
816 if (DecorateAsm) { 828 if (DecorateAsm) {
817 Str << Var->getSymbolicStackOffset(Func); 829 Str << Var->getSymbolicStackOffset(Func);
818 } else { 830 } else {
819 Str << Offset; 831 Str << Offset;
820 } 832 }
821 } 833 }
822 const Type FrameSPTy = Traits::WordType; 834 const Type FrameSPTy = Traits::WordType;
823 Str << "(%" << getRegName(BaseRegNum, FrameSPTy) << ")"; 835 Str << "(%" << getRegName(BaseRegNum, FrameSPTy) << ")";
824 } 836 }
825 837
826 template <class Machine> 838 template <class Machine>
827 typename TargetX86Base<Machine>::Traits::Address 839 typename TargetX86Base<Machine>::Traits::Address
828 TargetX86Base<Machine>::stackVarToAsmOperand(const Variable *Var) const { 840 TargetX86Base<Machine>::stackVarToAsmOperand(const Variable *Var) const {
829 if (Var->hasReg()) 841 if (Var->hasReg())
830 llvm_unreachable("Stack Variable has a register assigned"); 842 llvm::report_fatal_error("Stack Variable has a register assigned");
831 if (Var->mustHaveReg()) { 843 if (Var->mustHaveReg()) {
832 llvm_unreachable("Infinite-weight Variable has no register assigned"); 844 llvm::report_fatal_error(
845 "Infinite-weight Variable has no register assigned");
833 } 846 }
834 int32_t Offset = Var->getStackOffset(); 847 int32_t Offset = Var->getStackOffset();
835 int32_t BaseRegNum = Var->getBaseRegNum(); 848 int32_t BaseRegNum = Var->getBaseRegNum();
836 if (Var->getBaseRegNum() == Variable::NoRegister) 849 if (Var->getBaseRegNum() == Variable::NoRegister)
837 BaseRegNum = getFrameOrStackReg(); 850 BaseRegNum = getFrameOrStackReg();
838 return typename Traits::Address(Traits::getEncodedGPR(BaseRegNum), Offset, 851 return typename Traits::Address(Traits::getEncodedGPR(BaseRegNum), Offset,
839 AssemblerFixup::NoFixup); 852 AssemblerFixup::NoFixup);
840 } 853 }
841 854
842 /// Helper function for addProlog(). 855 /// Helper function for addProlog().
(...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after
903 return Var64On32->getLo(); 916 return Var64On32->getLo();
904 if (auto *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) { 917 if (auto *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) {
905 auto *ConstInt = llvm::dyn_cast<ConstantInteger32>( 918 auto *ConstInt = llvm::dyn_cast<ConstantInteger32>(
906 Ctx->getConstantInt32(static_cast<int32_t>(Const->getValue()))); 919 Ctx->getConstantInt32(static_cast<int32_t>(Const->getValue())));
907 // Check if we need to blind/pool the constant. 920 // Check if we need to blind/pool the constant.
908 return legalize(ConstInt); 921 return legalize(ConstInt);
909 } 922 }
910 if (auto *Mem = llvm::dyn_cast<typename Traits::X86OperandMem>(Operand)) { 923 if (auto *Mem = llvm::dyn_cast<typename Traits::X86OperandMem>(Operand)) {
911 auto *MemOperand = Traits::X86OperandMem::create( 924 auto *MemOperand = Traits::X86OperandMem::create(
912 Func, IceType_i32, Mem->getBase(), Mem->getOffset(), Mem->getIndex(), 925 Func, IceType_i32, Mem->getBase(), Mem->getOffset(), Mem->getIndex(),
913 Mem->getShift(), Mem->getSegmentRegister()); 926 Mem->getShift(), Mem->getSegmentRegister(), Mem->getIsPIC());
914 // Test if we should randomize or pool the offset, if so randomize it or 927 // Test if we should randomize or pool the offset, if so randomize it or
915 // pool it then create mem operand with the blinded/pooled constant. 928 // pool it then create mem operand with the blinded/pooled constant.
916 // Otherwise, return the mem operand as ordinary mem operand. 929 // Otherwise, return the mem operand as ordinary mem operand.
917 return legalize(MemOperand); 930 return legalize(MemOperand);
918 } 931 }
919 llvm_unreachable("Unsupported operand type"); 932 llvm_unreachable("Unsupported operand type");
920 return nullptr; 933 return nullptr;
921 } 934 }
922 935
923 template <class Machine> 936 template <class Machine>
(...skipping 19 matching lines...) Expand all
943 } else if (auto *IntOffset = llvm::dyn_cast<ConstantInteger32>(Offset)) { 956 } else if (auto *IntOffset = llvm::dyn_cast<ConstantInteger32>(Offset)) {
944 Offset = Ctx->getConstantInt32(4 + IntOffset->getValue()); 957 Offset = Ctx->getConstantInt32(4 + IntOffset->getValue());
945 } else if (auto *SymOffset = llvm::dyn_cast<ConstantRelocatable>(Offset)) { 958 } else if (auto *SymOffset = llvm::dyn_cast<ConstantRelocatable>(Offset)) {
946 assert(!Utils::WouldOverflowAdd(SymOffset->getOffset(), 4)); 959 assert(!Utils::WouldOverflowAdd(SymOffset->getOffset(), 4));
947 Offset = 960 Offset =
948 Ctx->getConstantSym(4 + SymOffset->getOffset(), SymOffset->getName(), 961 Ctx->getConstantSym(4 + SymOffset->getOffset(), SymOffset->getName(),
949 SymOffset->getSuppressMangling()); 962 SymOffset->getSuppressMangling());
950 } 963 }
951 auto *MemOperand = Traits::X86OperandMem::create( 964 auto *MemOperand = Traits::X86OperandMem::create(
952 Func, IceType_i32, Mem->getBase(), Offset, Mem->getIndex(), 965 Func, IceType_i32, Mem->getBase(), Offset, Mem->getIndex(),
953 Mem->getShift(), Mem->getSegmentRegister()); 966 Mem->getShift(), Mem->getSegmentRegister(), Mem->getIsPIC());
954 // Test if the Offset is an eligible i32 constant for randomization and 967
955 // pooling. Blind/pool it if it is. Otherwise return as ordinary mem 968
956 // operand. 969 // operand.
957 return legalize(MemOperand); 970 return legalize(MemOperand);
958 } 971 }
959 llvm_unreachable("Unsupported operand type"); 972 llvm_unreachable("Unsupported operand type");
960 return nullptr; 973 return nullptr;
961 } 974 }
962 975
963 template <class Machine> 976 template <class Machine>
964 llvm::SmallBitVector 977 llvm::SmallBitVector
965 TargetX86Base<Machine>::getRegisterSet(RegSetMask Include, 978 TargetX86Base<Machine>::getRegisterSet(RegSetMask Include,
966 RegSetMask Exclude) const { 979 RegSetMask Exclude) const {
967 return Traits::getRegisterSet(Include, Exclude); 980 return Traits::getRegisterSet(Include, Exclude);
968 } 981 }
969 982
983 template <class Machine> void TargetX86Base<Machine>::initGotVarIfNeeded() {
984 if (!Func->getContext()->getFlags().getUseNonsfi())
985 return;
986 if (Traits::Is64Bit) {
987 // Probably no implementation is needed, but error to be safe for now.
988 llvm::report_fatal_error(
989 "Need to implement initGotVarIfNeeded() for 64-bit.");
990 }
991 // Insert the GotVar assignment as the very first lowered instruction. Later,
992 // it will be moved into the right place - after the stack frame is set up but
993 // before in-args are copied into registers.
994 Context.init(Func->getEntryNode());
995 Context.setInsertPoint(Context.getCur());
996 Context.insert(InstX86GetIP<Machine>::create(Func, GotVar));
997 }
998
970 template <class Machine> 999 template <class Machine>
971 void TargetX86Base<Machine>::lowerAlloca(const InstAlloca *Inst) { 1000 void TargetX86Base<Machine>::lowerAlloca(const InstAlloca *Inst) {
972 // Conservatively require the stack to be aligned. Some stack adjustment 1001 // Conservatively require the stack to be aligned. Some stack adjustment
973 // operations implemented below assume that the stack is aligned before the 1002 // operations implemented below assume that the stack is aligned before the
974 // alloca. All the alloca code ensures that the stack alignment is preserved 1003 // alloca. All the alloca code ensures that the stack alignment is preserved
975 // after the alloca. The stack alignment restriction can be relaxed in some 1004 // after the alloca. The stack alignment restriction can be relaxed in some
976 // cases. 1005 // cases.
977 NeedsStackAlignment = true; 1006 NeedsStackAlignment = true;
978 1007
979 // For default align=0, set it to the real value 1, to avoid any 1008 // For default align=0, set it to the real value 1, to avoid any
(...skipping 3011 matching lines...) Expand 10 before | Expand all | Expand 10 after
3991 } 4020 }
3992 _test(SecondVar, SecondVar); 4021 _test(SecondVar, SecondVar);
3993 _cmov(T_Dest2, T_Dest, Traits::Cond::Br_e); 4022 _cmov(T_Dest2, T_Dest, Traits::Cond::Br_e);
3994 _mov(DestLo, T_Dest2); 4023 _mov(DestLo, T_Dest2);
3995 _mov(DestHi, Ctx->getConstantZero(IceType_i32)); 4024 _mov(DestHi, Ctx->getConstantZero(IceType_i32));
3996 } 4025 }
3997 4026
3998 template <class Machine> 4027 template <class Machine>
3999 void TargetX86Base<Machine>::typedLoad(Type Ty, Variable *Dest, Variable *Base, 4028 void TargetX86Base<Machine>::typedLoad(Type Ty, Variable *Dest, Variable *Base,
4000 Constant *Offset) { 4029 Constant *Offset) {
4030 // If Offset is a ConstantRelocatable in Non-SFI mode, we will need to
4031 // legalize Mem properly.
4032 if (Offset)
4033 assert(!llvm::isa<ConstantRelocatable>(Offset));
4034
4001 auto *Mem = Traits::X86OperandMem::create(Func, Ty, Base, Offset); 4035 auto *Mem = Traits::X86OperandMem::create(Func, Ty, Base, Offset);
4002 4036
4003 if (isVectorType(Ty)) 4037 if (isVectorType(Ty))
4004 _movp(Dest, Mem); 4038 _movp(Dest, Mem);
4005 else if (Ty == IceType_f64) 4039 else if (Ty == IceType_f64)
4006 _movq(Dest, Mem); 4040 _movq(Dest, Mem);
4007 else 4041 else
4008 _mov(Dest, Mem); 4042 _mov(Dest, Mem);
4009 } 4043 }
4010 4044
4011 template <class Machine> 4045 template <class Machine>
4012 void TargetX86Base<Machine>::typedStore(Type Ty, Variable *Value, 4046 void TargetX86Base<Machine>::typedStore(Type Ty, Variable *Value,
4013 Variable *Base, Constant *Offset) { 4047 Variable *Base, Constant *Offset) {
4048 // If Offset is a ConstantRelocatable in Non-SFI mode, we will need to
4049 // legalize Mem properly.
4050 if (Offset)
4051 assert(!llvm::isa<ConstantRelocatable>(Offset));
4052
4014 auto *Mem = Traits::X86OperandMem::create(Func, Ty, Base, Offset); 4053 auto *Mem = Traits::X86OperandMem::create(Func, Ty, Base, Offset);
4015 4054
4016 if (isVectorType(Ty)) 4055 if (isVectorType(Ty))
4017 _storep(Value, Mem); 4056 _storep(Value, Mem);
4018 else if (Ty == IceType_f64) 4057 else if (Ty == IceType_f64)
4019 _storeq(Value, Mem); 4058 _storeq(Value, Mem);
4020 else 4059 else
4021 _store(Value, Mem); 4060 _store(Value, Mem);
4022 } 4061 }
4023 4062
(...skipping 289 matching lines...) Expand 10 before | Expand all | Expand 10 after
4313 Str << "<null>"; 4352 Str << "<null>";
4314 Str << ", Index="; 4353 Str << ", Index=";
4315 if (Index) 4354 if (Index)
4316 Index->dump(Func); 4355 Index->dump(Func);
4317 else 4356 else
4318 Str << "<null>"; 4357 Str << "<null>";
4319 Str << ", Shift=" << Shift << ", Offset=" << Offset 4358 Str << ", Shift=" << Shift << ", Offset=" << Offset
4320 << ", Relocatable=" << Relocatable << "\n"; 4359 << ", Relocatable=" << Relocatable << "\n";
4321 } 4360 }
4322 4361
4323 inline bool matchAssign(const VariablesMetadata *VMetadata, Variable *&Var, 4362 inline bool matchAssign(const VariablesMetadata *VMetadata, Variable *GotVar,
4324 ConstantRelocatable *&Relocatable, int32_t &Offset, 4363 Variable *&Var, ConstantRelocatable *&Relocatable,
4325 const Inst *&Reason) { 4364 int32_t &Offset, const Inst *&Reason) {
4326 // Var originates from Var=SrcVar ==> set Var:=SrcVar 4365 // Var originates from Var=SrcVar ==> set Var:=SrcVar
4327 if (Var == nullptr) 4366 if (Var == nullptr)
4328 return false; 4367 return false;
4329 if (const Inst *VarAssign = VMetadata->getSingleDefinition(Var)) { 4368 if (const Inst *VarAssign = VMetadata->getSingleDefinition(Var)) {
4330 assert(!VMetadata->isMultiDef(Var)); 4369 assert(!VMetadata->isMultiDef(Var));
4331 if (llvm::isa<InstAssign>(VarAssign)) { 4370 if (llvm::isa<InstAssign>(VarAssign)) {
4332 Operand *SrcOp = VarAssign->getSrc(0); 4371 Operand *SrcOp = VarAssign->getSrc(0);
4333 assert(SrcOp); 4372 assert(SrcOp);
4334 if (auto *SrcVar = llvm::dyn_cast<Variable>(SrcOp)) { 4373 if (auto *SrcVar = llvm::dyn_cast<Variable>(SrcOp)) {
4335 if (!VMetadata->isMultiDef(SrcVar) && 4374 if (!VMetadata->isMultiDef(SrcVar) &&
4336 // TODO: ensure SrcVar stays single-BB 4375 // TODO: ensure SrcVar stays single-BB
4337 true) { 4376 true) {
4338 Var = SrcVar; 4377 Var = SrcVar;
4339 Reason = VarAssign; 4378 Reason = VarAssign;
4340 return true; 4379 return true;
4341 } 4380 }
4342 } else if (auto *Const = llvm::dyn_cast<ConstantInteger32>(SrcOp)) { 4381 } else if (auto *Const = llvm::dyn_cast<ConstantInteger32>(SrcOp)) {
4343 int32_t MoreOffset = Const->getValue(); 4382 int32_t MoreOffset = Const->getValue();
4344 if (Utils::WouldOverflowAdd(Offset, MoreOffset)) 4383 if (Utils::WouldOverflowAdd(Offset, MoreOffset))
4345 return false; 4384 return false;
4346 Var = nullptr; 4385 Var = nullptr;
4347 Offset += MoreOffset; 4386 Offset += MoreOffset;
4348 Reason = VarAssign; 4387 Reason = VarAssign;
4349 return true; 4388 return true;
4350 } else if (auto *AddReloc = llvm::dyn_cast<ConstantRelocatable>(SrcOp)) { 4389 } else if (auto *AddReloc = llvm::dyn_cast<ConstantRelocatable>(SrcOp)) {
4351 if (Relocatable == nullptr) { 4390 if (Relocatable == nullptr) {
4352 Var = nullptr; 4391 Var = GotVar;
4353 Relocatable = AddReloc; 4392 Relocatable = AddReloc;
4354 Reason = VarAssign; 4393 Reason = VarAssign;
4355 return true; 4394 return true;
4356 } 4395 }
4357 } 4396 }
4358 } 4397 }
4359 } 4398 }
4360 return false; 4399 return false;
4361 } 4400 }
4362 4401
(...skipping 98 matching lines...) Expand 10 before | Expand all | Expand 10 after
4461 return true; 4500 return true;
4462 } 4501 }
4463 } 4502 }
4464 } 4503 }
4465 } 4504 }
4466 } 4505 }
4467 } 4506 }
4468 return false; 4507 return false;
4469 } 4508 }
4470 4509
4471 inline bool matchOffsetBase(const VariablesMetadata *VMetadata, Variable *&Base, 4510 inline bool matchOffsetBase(const VariablesMetadata *VMetadata,
4511 Variable *GotVar, Variable *&Base,
4512 Variable *&BaseOther,
4472 ConstantRelocatable *&Relocatable, int32_t &Offset, 4513 ConstantRelocatable *&Relocatable, int32_t &Offset,
4473 const Inst *&Reason) { 4514 const Inst *&Reason) {
4474 // Base is Base=Var+Const || Base is Base=Const+Var ==> 4515 // Base is Base=Var+Const || Base is Base=Const+Var ==>
4475 // set Base=Var, Offset+=Const 4516 // set Base=Var, Offset+=Const
4476 // Base is Base=Var-Const ==> 4517 // Base is Base=Var-Const ==>
4477 // set Base=Var, Offset-=Const 4518 // set Base=Var, Offset-=Const
4478 if (Base == nullptr) { 4519 if (Base == nullptr) {
4479 return false; 4520 return false;
4480 } 4521 }
4481 const Inst *BaseInst = VMetadata->getSingleDefinition(Base); 4522 const Inst *BaseInst = VMetadata->getSingleDefinition(Base);
(...skipping 30 matching lines...) Expand all
4512 if ((Relocatable && (Reloc0 || Reloc1)) || (Reloc0 && Reloc1)) 4553 if ((Relocatable && (Reloc0 || Reloc1)) || (Reloc0 && Reloc1))
4513 return false; 4554 return false;
4514 // Don't know how to subtract a relocatable. 4555 // Don't know how to subtract a relocatable.
4515 if (!IsAdd && Reloc1) 4556 if (!IsAdd && Reloc1)
4516 return false; 4557 return false;
4517 // Incorporate ConstantRelocatables. 4558 // Incorporate ConstantRelocatables.
4518 if (Reloc0) 4559 if (Reloc0)
4519 NewRelocatable = Reloc0; 4560 NewRelocatable = Reloc0;
4520 else if (Reloc1) 4561 else if (Reloc1)
4521 NewRelocatable = Reloc1; 4562 NewRelocatable = Reloc1;
4563 if ((Reloc0 || Reloc1) && BaseOther && GotVar)
John 2016/01/04 21:33:51 add '!= nullptr'? or not. :)
Jim Stichnoth 2016/01/04 23:32:12 hmm, maybe not, since this follows the style of th
4564 return false;
4522 // Compute the updated constant offset. 4565 // Compute the updated constant offset.
4523 if (Const0) { 4566 if (Const0) {
4524 int32_t MoreOffset = IsAdd ? Const0->getValue() : -Const0->getValue(); 4567 int32_t MoreOffset = IsAdd ? Const0->getValue() : -Const0->getValue();
4525 if (Utils::WouldOverflowAdd(NewOffset, MoreOffset)) 4568 if (Utils::WouldOverflowAdd(NewOffset, MoreOffset))
4526 return false; 4569 return false;
4527 NewOffset += MoreOffset; 4570 NewOffset += MoreOffset;
4528 } 4571 }
4529 if (Const1) { 4572 if (Const1) {
4530 int32_t MoreOffset = IsAdd ? Const1->getValue() : -Const1->getValue(); 4573 int32_t MoreOffset = IsAdd ? Const1->getValue() : -Const1->getValue();
4531 if (Utils::WouldOverflowAdd(NewOffset, MoreOffset)) 4574 if (Utils::WouldOverflowAdd(NewOffset, MoreOffset))
4532 return false; 4575 return false;
4533 NewOffset += MoreOffset; 4576 NewOffset += MoreOffset;
4534 } 4577 }
4535 // Update the computed address parameters once we are sure optimization 4578 // Update the computed address parameters once we are sure optimization
4536 // is valid. 4579 // is valid.
4580 if ((Reloc0 || Reloc1) && GotVar) {
4581 assert(BaseOther == nullptr);
4582 BaseOther = GotVar;
4583 }
4537 Base = NewBase; 4584 Base = NewBase;
4538 Offset = NewOffset; 4585 Offset = NewOffset;
4539 Relocatable = NewRelocatable; 4586 Relocatable = NewRelocatable;
4540 Reason = BaseInst; 4587 Reason = BaseInst;
4541 return true; 4588 return true;
4542 } 4589 }
4543 return false; 4590 return false;
4544 } 4591 }
4545 4592
4546 // Builds information for a canonical address expression: 4593
4547 // <Relocatable + Offset>(Base, Index, Shift) 4594 // <Relocatable + Offset>(Base, Index, Shift)
4548 // On entry: 4595 // On entry:
4549 // Relocatable == null, 4596 // Relocatable == null,
4550 // Offset == 0, 4597 // Offset == 0,
4551 // Base is a Variable, 4598 // Base is a Variable,
4552 // Index == nullptr, 4599 // Index == nullptr,
4553 // Shift == 0 4600 // Shift == 0
4554 inline bool computeAddressOpt(Cfg *Func, const Inst *Instr, 4601 inline bool computeAddressOpt(Cfg *Func, const Inst *Instr, Variable *GotVar,
4555 ConstantRelocatable *&Relocatable, 4602 ConstantRelocatable *&Relocatable,
4556 int32_t &Offset, Variable *&Base, 4603 int32_t &Offset, Variable *&Base,
4557 Variable *&Index, uint16_t &Shift) { 4604 Variable *&Index, uint16_t &Shift) {
4558 bool AddressWasOptimized = false; 4605 bool AddressWasOptimized = false;
4559 Func->resetCurrentNode(); 4606 Func->resetCurrentNode();
4560 if (Func->isVerbose(IceV_AddrOpt)) { 4607 if (Func->isVerbose(IceV_AddrOpt)) {
4561 OstreamLocker L(Func->getContext()); 4608 OstreamLocker L(Func->getContext());
4562 Ostream &Str = Func->getContext()->getStrDump(); 4609 Ostream &Str = Func->getContext()->getStrDump();
4563 Str << "\nStarting computeAddressOpt for instruction:\n "; 4610 Str << "\nStarting computeAddressOpt for instruction:\n ";
4564 Instr->dumpDecorated(Func); 4611 Instr->dumpDecorated(Func);
(...skipping 10 matching lines...) Expand all
4575 const bool MockBounds = Func->getContext()->getFlags().getMockBoundsCheck(); 4622 const bool MockBounds = Func->getContext()->getFlags().getMockBoundsCheck();
4576 const VariablesMetadata *VMetadata = Func->getVMetadata(); 4623 const VariablesMetadata *VMetadata = Func->getVMetadata();
4577 const Inst *Reason = nullptr; 4624 const Inst *Reason = nullptr;
4578 do { 4625 do {
4579 if (Reason) { 4626 if (Reason) {
4580 dumpAddressOpt(Func, Relocatable, Offset, Base, Index, Shift, Reason); 4627 dumpAddressOpt(Func, Relocatable, Offset, Base, Index, Shift, Reason);
4581 AddressWasOptimized = true; 4628 AddressWasOptimized = true;
4582 Reason = nullptr; 4629 Reason = nullptr;
4583 } 4630 }
4584 // Update Base and Index to follow through assignments to definitions. 4631 // Update Base and Index to follow through assignments to definitions.
4585 if (matchAssign(VMetadata, Base, Relocatable, Offset, Reason)) { 4632 if (matchAssign(VMetadata, GotVar, Base, Relocatable, Offset, Reason)) {
4586 // Assignments of Base from a Relocatable or ConstantInt32 can result 4633 // Assignments of Base from a Relocatable or ConstantInt32 can result
4587 // in Base becoming nullptr. To avoid code duplication in this loop we 4634 // in Base becoming nullptr. To avoid code duplication in this loop we
4588 // prefer that Base be non-nullptr if possible. 4635 // prefer that Base be non-nullptr if possible.
4589 if ((Base == nullptr) && (Index != nullptr) && Shift == 0) 4636 if ((Base == nullptr) && (Index != nullptr) && Shift == 0)
4590 std::swap(Base, Index); 4637 std::swap(Base, Index);
4591 continue; 4638 continue;
4592 } 4639 }
4593 if (matchAssign(VMetadata, Index, Relocatable, Offset, Reason)) 4640 if (matchAssign(VMetadata, GotVar, Index, Relocatable, Offset, Reason))
4594 continue; 4641 continue;
4595 4642
4596 if (!MockBounds) { 4643 if (!MockBounds) {
4597 // Transition from: 4644 // Transition from:
4598 // <Relocatable + Offset>(Base) to 4645 // <Relocatable + Offset>(Base) to
4599 // <Relocatable + Offset>(Base, Index) 4646 // <Relocatable + Offset>(Base, Index)
4600 if (matchCombinedBaseIndex(VMetadata, Base, Index, Shift, Reason)) 4647 if (matchCombinedBaseIndex(VMetadata, Base, Index, Shift, Reason))
4601 continue; 4648 continue;
4602 // Recognize multiply/shift and update Shift amount. 4649 // Recognize multiply/shift and update Shift amount.
4603 // Index becomes Index=Var<<Const && Const+Shift<=3 ==> 4650 // Index becomes Index=Var<<Const && Const+Shift<=3 ==>
4604 // Index=Var, Shift+=Const 4651 // Index=Var, Shift+=Const
4605 // Index becomes Index=Const*Var && log2(Const)+Shift<=3 ==> 4652 // Index becomes Index=Const*Var && log2(Const)+Shift<=3 ==>
4606 // Index=Var, Shift+=log2(Const) 4653 // Index=Var, Shift+=log2(Const)
4607 if (matchShiftedIndex(VMetadata, Index, Shift, Reason)) 4654 if (matchShiftedIndex(VMetadata, Index, Shift, Reason))
4608 continue; 4655 continue;
4609 // If Shift is zero, the choice of Base and Index was purely arbitrary. 4656 // If Shift is zero, the choice of Base and Index was purely arbitrary.
4610 // Recognize multiply/shift and set Shift amount. 4657 // Recognize multiply/shift and set Shift amount.
4611 // Shift==0 && Base is Base=Var*Const && log2(Const)+Shift<=3 ==> 4658 // Shift==0 && Base is Base=Var*Const && log2(Const)+Shift<=3 ==>
4612 // swap(Index,Base) 4659 // swap(Index,Base)
4613 // Similar for Base=Const*Var and Base=Var<<Const 4660 // Similar for Base=Const*Var and Base=Var<<Const
4614 if (Shift == 0 && matchShiftedIndex(VMetadata, Base, Shift, Reason)) { 4661 if (Shift == 0 && matchShiftedIndex(VMetadata, Base, Shift, Reason)) {
4615 std::swap(Base, Index); 4662 std::swap(Base, Index);
4616 continue; 4663 continue;
4617 } 4664 }
4618 } 4665 }
4619 // Update Offset to reflect additions/subtractions with constants and 4666 // Update Offset to reflect additions/subtractions with constants and
4620 // relocatables. 4667 // relocatables.
4621 // TODO: consider overflow issues with respect to Offset. 4668 // TODO: consider overflow issues with respect to Offset.
4622 if (matchOffsetBase(VMetadata, Base, Relocatable, Offset, Reason)) 4669 if (matchOffsetBase(VMetadata, GotVar, Base, Index, Relocatable, Offset,
4670 Reason))
4623 continue; 4671 continue;
4624 if (Shift == 0 && 4672 if (Shift == 0 && matchOffsetBase(VMetadata, GotVar, Index, Base,
4625 matchOffsetBase(VMetadata, Index, Relocatable, Offset, Reason)) 4673 Relocatable, Offset, Reason))
4626 continue; 4674 continue;
4627 // TODO(sehr, stichnot): Handle updates of Index with Shift != 0. 4675 // TODO(sehr, stichnot): Handle updates of Index with Shift != 0.
4628 // Index is Index=Var+Const ==> 4676 // Index is Index=Var+Const ==>
4629 // set Index=Var, Offset+=(Const<<Shift) 4677 // set Index=Var, Offset+=(Const<<Shift)
4630 // Index is Index=Const+Var ==> 4678 // Index is Index=Const+Var ==>
4631 // set Index=Var, Offset+=(Const<<Shift) 4679 // set Index=Var, Offset+=(Const<<Shift)
4632 // Index is Index=Var-Const ==> 4680 // Index is Index=Var-Const ==>
4633 // set Index=Var, Offset-=(Const<<Shift) 4681 // set Index=Var, Offset-=(Const<<Shift)
4634 break; 4682 break;
4635 } while (Reason); 4683 } while (Reason);
4684 // Undo any addition of GotVar. It will be added back when the mem operand is
4685 // legalized.
4686 if (Base == GotVar)
4687 Base = nullptr;
4688 if (Index == GotVar)
4689 Index = nullptr;
4636 return AddressWasOptimized; 4690 return AddressWasOptimized;
4637 } 4691 }
4638 4692
4639 /// Add a mock bounds check on the memory address before using it as a load or 4693 /// Add a mock bounds check on the memory address before using it as a load or
4640 /// store operand. The basic idea is that given a memory operand [reg], we 4694 /// store operand. The basic idea is that given a memory operand [reg], we
4641 /// would first add bounds-check code something like: 4695 /// would first add bounds-check code something like:
4642 /// 4696 ///
4643 /// cmp reg, <lb> 4697 /// cmp reg, <lb>
4644 /// jl out_of_line_error 4698 /// jl out_of_line_error
4645 /// cmp reg, <ub> 4699 /// cmp reg, <ub>
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after
4691 _br(Traits::Cond::Br_e, Label); 4745 _br(Traits::Cond::Br_e, Label);
4692 _cmp(Opnd, Ctx->getConstantInt32(1)); 4746 _cmp(Opnd, Ctx->getConstantInt32(1));
4693 _br(Traits::Cond::Br_e, Label); 4747 _br(Traits::Cond::Br_e, Label);
4694 Context.insert(Label); 4748 Context.insert(Label);
4695 } 4749 }
4696 4750
4697 template <class Machine> 4751 template <class Machine>
4698 void TargetX86Base<Machine>::lowerLoad(const InstLoad *Load) { 4752 void TargetX86Base<Machine>::lowerLoad(const InstLoad *Load) {
4699 // A Load instruction can be treated the same as an Assign instruction, after 4753 // A Load instruction can be treated the same as an Assign instruction, after
4700 // the source operand is transformed into an Traits::X86OperandMem operand. 4754 // the source operand is transformed into an Traits::X86OperandMem operand.
4701 // Note that the address mode optimization already creates an 4755 // Note that the address mode optimization already creates a
4702 // Traits::X86OperandMem operand, so it doesn't need another level of 4756 // Traits::X86OperandMem operand, so it doesn't need another level of
4703 // transformation. 4757 // transformation.
4704 Variable *DestLoad = Load->getDest(); 4758 Variable *DestLoad = Load->getDest();
4705 Type Ty = DestLoad->getType(); 4759 Type Ty = DestLoad->getType();
4706 Operand *Src0 = formMemoryOperand(Load->getSourceAddress(), Ty); 4760 Operand *Src0 = formMemoryOperand(Load->getSourceAddress(), Ty);
4707 doMockBoundsCheck(Src0); 4761 doMockBoundsCheck(Src0);
4708 auto *Assign = InstAssign::create(Func, DestLoad, Src0); 4762 auto *Assign = InstAssign::create(Func, DestLoad, Src0);
4709 lowerAssign(Assign); 4763 lowerAssign(Assign);
4710 } 4764 }
4711 4765
4712 template <class Machine> void TargetX86Base<Machine>::doAddressOptLoad() { 4766 template <class Machine> void TargetX86Base<Machine>::doAddressOptLoad() {
4713 Inst *Inst = Context.getCur(); 4767 Inst *Inst = Context.getCur();
4714 Variable *Dest = Inst->getDest(); 4768 Variable *Dest = Inst->getDest();
4715 Operand *Addr = Inst->getSrc(0); 4769 Operand *Addr = Inst->getSrc(0);
4716 Variable *Index = nullptr; 4770 Variable *Index = nullptr;
4717 ConstantRelocatable *Relocatable = nullptr; 4771 ConstantRelocatable *Relocatable = nullptr;
4718 uint16_t Shift = 0; 4772 uint16_t Shift = 0;
4719 int32_t Offset = 0; 4773 int32_t Offset = 0;
4720 // Vanilla ICE load instructions should not use the segment registers, and 4774 // Vanilla ICE load instructions should not use the segment registers, and
4721 // computeAddressOpt only works at the level of Variables and Constants, not 4775 // computeAddressOpt only works at the level of Variables and Constants, not
4722 // other Traits::X86OperandMem, so there should be no mention of segment 4776 // other Traits::X86OperandMem, so there should be no mention of segment
4723 // registers there either. 4777 // registers there either.
4724 const typename Traits::X86OperandMem::SegmentRegisters SegmentReg = 4778 constexpr auto SegmentReg =
4725 Traits::X86OperandMem::DefaultSegment; 4779 Traits::X86OperandMem::SegmentRegisters::DefaultSegment;
4726 auto *Base = llvm::dyn_cast<Variable>(Addr); 4780 auto *Base = llvm::dyn_cast<Variable>(Addr);
4727 if (computeAddressOpt(Func, Inst, Relocatable, Offset, Base, Index, Shift)) { 4781 if (computeAddressOpt(Func, Inst, GotVar, Relocatable, Offset, Base, Index,
4782 Shift)) {
4728 Inst->setDeleted(); 4783 Inst->setDeleted();
4729 Constant *OffsetOp = nullptr; 4784 Constant *OffsetOp = nullptr;
4730 if (Relocatable == nullptr) { 4785 if (Relocatable == nullptr) {
4731 OffsetOp = Ctx->getConstantInt32(Offset); 4786 OffsetOp = Ctx->getConstantInt32(Offset);
4732 } else { 4787 } else {
4733 OffsetOp = Ctx->getConstantSym(Relocatable->getOffset() + Offset, 4788 OffsetOp = Ctx->getConstantSym(Relocatable->getOffset() + Offset,
4734 Relocatable->getName(), 4789 Relocatable->getName(),
4735 Relocatable->getSuppressMangling()); 4790 Relocatable->getSuppressMangling());
4736 } 4791 }
4792 // The new mem operand is created without IsPIC being set, because
4793 // computeAddressOpt() doesn't include GotVar in its final result.
4737 Addr = Traits::X86OperandMem::create(Func, Dest->getType(), Base, OffsetOp, 4794 Addr = Traits::X86OperandMem::create(Func, Dest->getType(), Base, OffsetOp,
4738 Index, Shift, SegmentReg); 4795 Index, Shift, SegmentReg);
4739 Context.insert<InstLoad>(Dest, Addr); 4796 Context.insert<InstLoad>(Dest, Addr);
4740 } 4797 }
4741 } 4798 }
4742 4799
4743 template <class Machine> 4800 template <class Machine>
4744 void TargetX86Base<Machine>::randomlyInsertNop(float Probability, 4801 void TargetX86Base<Machine>::randomlyInsertNop(float Probability,
4745 RandomNumberGenerator &RNG) { 4802 RandomNumberGenerator &RNG) {
4746 RandomNumberGeneratorWrapper RNGW(RNG); 4803 RandomNumberGeneratorWrapper RNGW(RNG);
(...skipping 274 matching lines...) Expand 10 before | Expand all | Expand 10 after
5021 Operand *Addr = Inst->getAddr(); 5078 Operand *Addr = Inst->getAddr();
5022 Variable *Index = nullptr; 5079 Variable *Index = nullptr;
5023 ConstantRelocatable *Relocatable = nullptr; 5080 ConstantRelocatable *Relocatable = nullptr;
5024 uint16_t Shift = 0; 5081 uint16_t Shift = 0;
5025 int32_t Offset = 0; 5082 int32_t Offset = 0;
5026 auto *Base = llvm::dyn_cast<Variable>(Addr); 5083 auto *Base = llvm::dyn_cast<Variable>(Addr);
5027 // Vanilla ICE store instructions should not use the segment registers, and 5084 // Vanilla ICE store instructions should not use the segment registers, and
5028 // computeAddressOpt only works at the level of Variables and Constants, not 5085 // computeAddressOpt only works at the level of Variables and Constants, not
5029 // other Traits::X86OperandMem, so there should be no mention of segment 5086 // other Traits::X86OperandMem, so there should be no mention of segment
5030 // registers there either. 5087 // registers there either.
5031 const typename Traits::X86OperandMem::SegmentRegisters SegmentReg = 5088 constexpr auto SegmentReg = Traits::X86OperandMem::DefaultSegment;
5032 Traits::X86OperandMem::DefaultSegment; 5089 if (computeAddressOpt(Func, Inst, GotVar, Relocatable, Offset, Base, Index,
John 2016/01/04 21:33:51 These address mode formation helpers are now bette
Jim Stichnoth 2016/01/04 23:32:12 That seems good, but maybe as a follow-on CL? Giv
5033 if (computeAddressOpt(Func, Inst, Relocatable, Offset, Base, Index, Shift)) { 5090 Shift)) {
5034 Inst->setDeleted(); 5091 Inst->setDeleted();
5035 Constant *OffsetOp = nullptr; 5092 Constant *OffsetOp = nullptr;
5036 if (Relocatable == nullptr) { 5093 if (Relocatable == nullptr) {
5037 OffsetOp = Ctx->getConstantInt32(Offset); 5094 OffsetOp = Ctx->getConstantInt32(Offset);
5038 } else { 5095 } else {
5039 OffsetOp = Ctx->getConstantSym(Relocatable->getOffset() + Offset, 5096 OffsetOp = Ctx->getConstantSym(Relocatable->getOffset() + Offset,
5040 Relocatable->getName(), 5097 Relocatable->getName(),
5041 Relocatable->getSuppressMangling()); 5098 Relocatable->getSuppressMangling());
5042 } 5099 }
5100 // The new mem operand is created without IsPIC being set, because
5101 // computeAddressOpt() doesn't include GotVar in its final result.
5043 Addr = Traits::X86OperandMem::create(Func, Data->getType(), Base, OffsetOp, 5102 Addr = Traits::X86OperandMem::create(Func, Data->getType(), Base, OffsetOp,
5044 Index, Shift, SegmentReg); 5103 Index, Shift, SegmentReg);
5045 auto *NewStore = Context.insert<InstStore>(Data, Addr); 5104 auto *NewStore = Context.insert<InstStore>(Data, Addr);
5046 if (Inst->getDest()) 5105 if (Inst->getDest())
5047 NewStore->setRmwBeacon(Inst->getRmwBeacon()); 5106 NewStore->setRmwBeacon(Inst->getRmwBeacon());
5048 } 5107 }
5049 } 5108 }
5050 5109
5051 template <class Machine> 5110 template <class Machine>
5052 Operand *TargetX86Base<Machine>::lowerCmpRange(Operand *Comparison, 5111 Operand *TargetX86Base<Machine>::lowerCmpRange(Operand *Comparison,
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after
5093 Variable *Index; 5152 Variable *Index;
5094 if (RangeIndex->getType() != getPointerType()) { 5153 if (RangeIndex->getType() != getPointerType()) {
5095 Index = makeReg(getPointerType()); 5154 Index = makeReg(getPointerType());
5096 _movzx(Index, RangeIndex); 5155 _movzx(Index, RangeIndex);
5097 } else { 5156 } else {
5098 Index = legalizeToReg(RangeIndex); 5157 Index = legalizeToReg(RangeIndex);
5099 } 5158 }
5100 5159
5101 constexpr RelocOffsetT RelocOffset = 0; 5160 constexpr RelocOffsetT RelocOffset = 0;
5102 constexpr bool SuppressMangling = true; 5161 constexpr bool SuppressMangling = true;
5162 const bool IsPIC = Ctx->getFlags().getUseNonsfi();
5103 IceString MangledName = Ctx->mangleName(Func->getFunctionName()); 5163 IceString MangledName = Ctx->mangleName(Func->getFunctionName());
5104 Constant *Base = Ctx->getConstantSym( 5164 Variable *Base = IsPIC ? legalizeToReg(GotVar) : nullptr;
5165 Constant *Offset = Ctx->getConstantSym(
5105 RelocOffset, InstJumpTable::makeName(MangledName, JumpTable->getId()), 5166 RelocOffset, InstJumpTable::makeName(MangledName, JumpTable->getId()),
5106 SuppressMangling); 5167 SuppressMangling);
5107 Constant *Offset = nullptr;
5108 uint16_t Shift = typeWidthInBytesLog2(getPointerType()); 5168 uint16_t Shift = typeWidthInBytesLog2(getPointerType());
5109 // TODO(ascull): remove need for legalize by allowing null base in memop 5169 constexpr auto Segment = Traits::X86OperandMem::DefaultSegment;
5110 auto *TargetInMemory = Traits::X86OperandMem::create( 5170 auto *TargetInMemory = Traits::X86OperandMem::create(
5111 Func, getPointerType(), legalizeToReg(Base), Offset, Index, Shift); 5171 Func, getPointerType(), Base, Offset, Index, Shift, Segment, IsPIC);
5112 Variable *Target = nullptr; 5172 Variable *Target = nullptr;
5113 _mov(Target, TargetInMemory); 5173 _mov(Target, TargetInMemory);
5114 lowerIndirectJump(Target); 5174 lowerIndirectJump(Target);
5115 5175
5116 if (DefaultTarget == nullptr) 5176 if (DefaultTarget == nullptr)
5117 Context.insert(SkipJumpTable); 5177 Context.insert(SkipJumpTable);
5118 return; 5178 return;
5119 } 5179 }
5120 case CaseCluster::Range: { 5180 case CaseCluster::Range: {
5121 if (Case.isUnitRange()) { 5181 if (Case.isUnitRange()) {
(...skipping 310 matching lines...) Expand 10 before | Expand all | Expand 10 after
5432 if (const auto *RMW = 5492 if (const auto *RMW =
5433 llvm::dyn_cast<typename Traits::Insts::FakeRMW>(Instr)) { 5493 llvm::dyn_cast<typename Traits::Insts::FakeRMW>(Instr)) {
5434 lowerRMW(RMW); 5494 lowerRMW(RMW);
5435 } else { 5495 } else {
5436 TargetLowering::lowerOther(Instr); 5496 TargetLowering::lowerOther(Instr);
5437 } 5497 }
5438 } 5498 }
5439 5499
5440 /// Turn an i64 Phi instruction into a pair of i32 Phi instructions, to preserve 5500 /// Turn an i64 Phi instruction into a pair of i32 Phi instructions, to preserve
5441 /// integrity of liveness analysis. Undef values are also turned into zeroes, 5501 /// integrity of liveness analysis. Undef values are also turned into zeroes,
5442 /// since loOperand() and hiOperand() don't expect Undef input. 5502 /// since loOperand() and hiOperand() don't expect Undef input. Also, in
5503 /// Non-SFI mode, add a FakeUse(GotVar) for every pooled constant operand.
5443 template <class Machine> void TargetX86Base<Machine>::prelowerPhis() { 5504 template <class Machine> void TargetX86Base<Machine>::prelowerPhis() {
5505 if (Ctx->getFlags().getUseNonsfi()) {
5506 assert(GotVar);
5507 CfgNode *Node = Context.getNode();
5508 uint32_t GotVarUseCount = 0;
5509 for (Inst &I : Node->getPhis()) {
5510 auto *Phi = llvm::dyn_cast<InstPhi>(&I);
5511 if (Phi->isDeleted())
5512 continue;
5513 for (SizeT I = 0; I < Phi->getSrcSize(); ++I) {
5514 Operand *Src = Phi->getSrc(I);
5515 // TODO(stichnot): This over-counts for +0.0, and under-counts for other
5516 // kinds of pooling.
5517 if (llvm::isa<ConstantRelocatable>(Src) ||
5518 llvm::isa<ConstantFloat>(Src) || llvm::isa<ConstantDouble>(Src)) {
5519 ++GotVarUseCount;
5520 }
5521 }
5522 }
5523 if (GotVarUseCount) {
5524 Node->getInsts().push_front(InstFakeUse::create(Func, GotVar));
5525 }
5526 }
5444 if (Traits::Is64Bit) { 5527 if (Traits::Is64Bit) {
5445 // On x86-64 we don't need to prelower phis -- the architecture can handle 5528 // On x86-64 we don't need to prelower phis -- the architecture can handle
5446 // 64-bit integer natively. 5529 // 64-bit integer natively.
5447 return; 5530 return;
5448 } 5531 }
5449 5532
5450 // Pause constant blinding or pooling, blinding or pooling will be done later 5533 // Pause constant blinding or pooling, blinding or pooling will be done later
5451 // during phi lowering assignments 5534 // during phi lowering assignments
5452 BoolFlagSaver B(RandomizationPoolingPaused, true); 5535 BoolFlagSaver B(RandomizationPoolingPaused, true);
5453 PhiLowering::prelowerPhis32Bit<TargetX86Base<Machine>>( 5536 PhiLowering::prelowerPhis32Bit<TargetX86Base<Machine>>(
(...skipping 460 matching lines...) Expand 10 before | Expand all | Expand 10 after
5914 _movp(Reg, Src); 5997 _movp(Reg, Src);
5915 } else { 5998 } else {
5916 _mov(Reg, Src); 5999 _mov(Reg, Src);
5917 } 6000 }
5918 return Reg; 6001 return Reg;
5919 } 6002 }
5920 6003
5921 template <class Machine> 6004 template <class Machine>
5922 Operand *TargetX86Base<Machine>::legalize(Operand *From, LegalMask Allowed, 6005 Operand *TargetX86Base<Machine>::legalize(Operand *From, LegalMask Allowed,
5923 int32_t RegNum) { 6006 int32_t RegNum) {
5924 Type Ty = From->getType(); 6007 const bool UseNonsfi = Func->getContext()->getFlags().getUseNonsfi();
6008 const Type Ty = From->getType();
5925 // Assert that a physical register is allowed. To date, all calls to 6009 // Assert that a physical register is allowed. To date, all calls to
5926 // legalize() allow a physical register. If a physical register needs to be 6010 // legalize() allow a physical register. If a physical register needs to be
5927 // explicitly disallowed, then new code will need to be written to force a 6011 // explicitly disallowed, then new code will need to be written to force a
5928 // spill. 6012 // spill.
5929 assert(Allowed & Legal_Reg); 6013 assert(Allowed & Legal_Reg);
5930 // If we're asking for a specific physical register, make sure we're not 6014 // If we're asking for a specific physical register, make sure we're not
5931 // allowing any other operand kinds. (This could be future work, e.g. allow 6015 // allowing any other operand kinds. (This could be future work, e.g. allow
5932 // the shl shift amount to be either an immediate or in ecx.) 6016 // the shl shift amount to be either an immediate or in ecx.)
5933 assert(RegNum == Variable::NoRegister || Allowed == Legal_Reg); 6017 assert(RegNum == Variable::NoRegister || Allowed == Legal_Reg);
5934 6018
(...skipping 13 matching lines...) Expand all
5948 } 6032 }
5949 } 6033 }
5950 } 6034 }
5951 } 6035 }
5952 6036
5953 if (auto *Mem = llvm::dyn_cast<typename Traits::X86OperandMem>(From)) { 6037 if (auto *Mem = llvm::dyn_cast<typename Traits::X86OperandMem>(From)) {
5954 // Before doing anything with a Mem operand, we need to ensure that the 6038 // Before doing anything with a Mem operand, we need to ensure that the
5955 // Base and Index components are in physical registers. 6039 // Base and Index components are in physical registers.
5956 Variable *Base = Mem->getBase(); 6040 Variable *Base = Mem->getBase();
5957 Variable *Index = Mem->getIndex(); 6041 Variable *Index = Mem->getIndex();
6042 Constant *Offset = Mem->getOffset();
5958 Variable *RegBase = nullptr; 6043 Variable *RegBase = nullptr;
5959 Variable *RegIndex = nullptr; 6044 Variable *RegIndex = nullptr;
5960 if (Base) { 6045 if (Base) {
5961 RegBase = llvm::cast<Variable>( 6046 RegBase = llvm::cast<Variable>(
5962 legalize(Base, Legal_Reg | Legal_Rematerializable)); 6047 legalize(Base, Legal_Reg | Legal_Rematerializable));
5963 } 6048 }
5964 if (Index) { 6049 if (Index) {
5965 RegIndex = llvm::cast<Variable>( 6050 RegIndex = llvm::cast<Variable>(
5966 legalize(Index, Legal_Reg | Legal_Rematerializable)); 6051 legalize(Index, Legal_Reg | Legal_Rematerializable));
5967 } 6052 }
6053 // For Non-SFI mode, if the Offset field is a ConstantRelocatable, we
6054 // replace either Base or Index with a legalized GotVar. At emission time,
6055 // the ConstantRelocatable will be emitted with the @GOTOFF relocation.
6056 bool NeedPIC = false;
6057 if (UseNonsfi && !Mem->getIsPIC() && Offset &&
6058 llvm::isa<ConstantRelocatable>(Offset)) {
6059 assert(!(Allowed & Legal_AddrAbs));
6060 NeedPIC = true;
6061 if (RegBase == nullptr) {
6062 RegBase = legalizeToReg(GotVar);
6063 } else if (RegIndex == nullptr) {
6064 RegIndex = legalizeToReg(GotVar);
6065 } else {
6066 llvm::report_fatal_error(
6067 "Either Base or Index must be unused in Non-SFI mode");
6068 }
6069 }
5968 if (Base != RegBase || Index != RegIndex) { 6070 if (Base != RegBase || Index != RegIndex) {
5969 Mem = Traits::X86OperandMem::create(Func, Ty, RegBase, Mem->getOffset(), 6071 Mem = Traits::X86OperandMem::create(Func, Ty, RegBase, Offset, RegIndex,
5970 RegIndex, Mem->getShift(), 6072 Mem->getShift(),
5971 Mem->getSegmentRegister()); 6073 Mem->getSegmentRegister(), NeedPIC);
5972 } 6074 }
5973 6075
5974 // For all Memory Operands, we do randomization/pooling here 6076 // For all Memory Operands, we do randomization/pooling here
5975 From = randomizeOrPoolImmediate(Mem); 6077 From = randomizeOrPoolImmediate(Mem);
5976 6078
5977 if (!(Allowed & Legal_Mem)) { 6079 if (!(Allowed & Legal_Mem)) {
5978 From = copyToReg(From, RegNum); 6080 From = copyToReg(From, RegNum);
5979 } 6081 }
5980 return From; 6082 return From;
5981 } 6083 }
6084
5982 if (auto *Const = llvm::dyn_cast<Constant>(From)) { 6085 if (auto *Const = llvm::dyn_cast<Constant>(From)) {
5983 if (llvm::isa<ConstantUndef>(Const)) { 6086 if (llvm::isa<ConstantUndef>(Const)) {
5984 From = legalizeUndef(Const, RegNum); 6087 From = legalizeUndef(Const, RegNum);
5985 if (isVectorType(Ty)) 6088 if (isVectorType(Ty))
5986 return From; 6089 return From;
5987 Const = llvm::cast<Constant>(From); 6090 Const = llvm::cast<Constant>(From);
5988 } 6091 }
5989 // There should be no constants of vector type (other than undef). 6092 // There should be no constants of vector type (other than undef).
5990 assert(!isVectorType(Ty)); 6093 assert(!isVectorType(Ty));
5991 6094
(...skipping 10 matching lines...) Expand all
6002 6105
6003 // If the operand is an 32 bit constant integer, we should check whether we 6106 // If the operand is an 32 bit constant integer, we should check whether we
6004 // need to randomize it or pool it. 6107 // need to randomize it or pool it.
6005 if (auto *C = llvm::dyn_cast<ConstantInteger32>(Const)) { 6108 if (auto *C = llvm::dyn_cast<ConstantInteger32>(Const)) {
6006 Operand *NewConst = randomizeOrPoolImmediate(C, RegNum); 6109 Operand *NewConst = randomizeOrPoolImmediate(C, RegNum);
6007 if (NewConst != Const) { 6110 if (NewConst != Const) {
6008 return NewConst; 6111 return NewConst;
6009 } 6112 }
6010 } 6113 }
6011 6114
6115 // If the operand is a ConstantRelocatable, and Legal_AddrAbs is not
6116 // specified, and UseNonsfi is indicated, we need to add GotVar.
6117 if (auto *CR = llvm::dyn_cast<ConstantRelocatable>(Const)) {
6118 if (UseNonsfi && !(Allowed & Legal_AddrAbs)) {
6119 assert(Ty == IceType_i32);
6120 Variable *RegBase = legalizeToReg(GotVar);
6121 Variable *NewVar = makeReg(Ty, RegNum);
6122 auto *Mem = Traits::X86OperandMem::create(Func, Ty, RegBase, CR);
6123 Mem->setIsPIC();
6124 _lea(NewVar, Mem);
6125 From = NewVar;
6126 }
6127 }
6128
6012 // Convert a scalar floating point constant into an explicit memory 6129 // Convert a scalar floating point constant into an explicit memory
6013 // operand. 6130 // operand.
6014 if (isScalarFloatingType(Ty)) { 6131 if (isScalarFloatingType(Ty)) {
6015 if (auto *ConstFloat = llvm::dyn_cast<ConstantFloat>(Const)) { 6132 if (auto *ConstFloat = llvm::dyn_cast<ConstantFloat>(Const)) {
6016 if (Utils::isPositiveZero(ConstFloat->getValue())) 6133 if (Utils::isPositiveZero(ConstFloat->getValue()))
6017 return makeZeroedRegister(Ty, RegNum); 6134 return makeZeroedRegister(Ty, RegNum);
6018 } else if (auto *ConstDouble = llvm::dyn_cast<ConstantDouble>(Const)) { 6135 } else if (auto *ConstDouble = llvm::dyn_cast<ConstantDouble>(Const)) {
6019 if (Utils::isPositiveZero(ConstDouble->getValue())) 6136 if (Utils::isPositiveZero(ConstDouble->getValue()))
6020 return makeZeroedRegister(Ty, RegNum); 6137 return makeZeroedRegister(Ty, RegNum);
6021 } 6138 }
6022 Variable *Base = nullptr; 6139 Variable *Base = UseNonsfi ? legalizeToReg(GotVar) : nullptr;
6023 std::string Buffer; 6140 std::string Buffer;
6024 llvm::raw_string_ostream StrBuf(Buffer); 6141 llvm::raw_string_ostream StrBuf(Buffer);
6025 llvm::cast<Constant>(From)->emitPoolLabel(StrBuf, Ctx); 6142 llvm::cast<Constant>(From)->emitPoolLabel(StrBuf, Ctx);
6026 llvm::cast<Constant>(From)->setShouldBePooled(true); 6143 llvm::cast<Constant>(From)->setShouldBePooled(true);
6027 Constant *Offset = Ctx->getConstantSym(0, StrBuf.str(), true); 6144 Constant *Offset = Ctx->getConstantSym(0, StrBuf.str(), true);
6028 From = Traits::X86OperandMem::create(Func, Ty, Base, Offset); 6145 auto *Mem = Traits::X86OperandMem::create(Func, Ty, Base, Offset);
6146 if (UseNonsfi)
6147 Mem->setIsPIC();
6148 From = Mem;
6029 } 6149 }
6030 bool NeedsReg = false; 6150 bool NeedsReg = false;
6031 if (!(Allowed & Legal_Imm) && !isScalarFloatingType(Ty)) 6151 if (!(Allowed & Legal_Imm) && !isScalarFloatingType(Ty))
6032 // Immediate specifically not allowed 6152 // Immediate specifically not allowed
6033 NeedsReg = true; 6153 NeedsReg = true;
6034 if (!(Allowed & Legal_Mem) && isScalarFloatingType(Ty)) 6154 if (!(Allowed & Legal_Mem) && isScalarFloatingType(Ty))
6035 // On x86, FP constants are lowered to mem operands. 6155 // On x86, FP constants are lowered to mem operands.
6036 NeedsReg = true; 6156 NeedsReg = true;
6037 if (NeedsReg) { 6157 if (NeedsReg) {
6038 From = copyToReg(From, RegNum); 6158 From = copyToReg(From, RegNum);
6039 } 6159 }
6040 return From; 6160 return From;
6041 } 6161 }
6162
6042 if (auto *Var = llvm::dyn_cast<Variable>(From)) { 6163 if (auto *Var = llvm::dyn_cast<Variable>(From)) {
6043 // Check if the variable is guaranteed a physical register. This can happen 6164 // Check if the variable is guaranteed a physical register. This can happen
6044 // either when the variable is pre-colored or when it is assigned infinite 6165 // either when the variable is pre-colored or when it is assigned infinite
6045 // weight. 6166 // weight.
6046 bool MustHaveRegister = (Var->hasReg() || Var->mustHaveReg()); 6167 bool MustHaveRegister = (Var->hasReg() || Var->mustHaveReg());
6047 bool MustRematerialize = 6168 bool MustRematerialize =
6048 (Var->isRematerializable() && !(Allowed & Legal_Rematerializable)); 6169 (Var->isRematerializable() && !(Allowed & Legal_Rematerializable));
6049 // We need a new physical register for the operand if: 6170 // We need a new physical register for the operand if:
6050 // - Mem is not allowed and Var isn't guaranteed a physical register, or 6171 // - Mem is not allowed and Var isn't guaranteed a physical register, or
6051 // - RegNum is required and Var->getRegNum() doesn't match, or 6172 // - RegNum is required and Var->getRegNum() doesn't match, or
6052 // - Var is a rematerializable variable and rematerializable pass-through is 6173 // - Var is a rematerializable variable and rematerializable pass-through is
6053 // not allowed (in which case we need an lea instruction). 6174 // not allowed (in which case we need an lea instruction).
6054 if (MustRematerialize) { 6175 if (MustRematerialize) {
6055 assert(Ty == IceType_i32); 6176 assert(Ty == IceType_i32);
6056 Variable *NewVar = makeReg(Ty, RegNum); 6177 Variable *NewVar = makeReg(Ty, RegNum);
6057 // Since Var is rematerializable, the offset will be added when the lea is 6178 // Since Var is rematerializable, the offset will be added when the lea is
6058 // emitted. 6179 // emitted.
6059 constexpr Constant *NoOffset = nullptr; 6180 constexpr Constant *NoOffset = nullptr;
6060 auto *Mem = Traits::X86OperandMem::create(Func, Ty, Var, NoOffset); 6181 auto *Mem = Traits::X86OperandMem::create(Func, Ty, Var, NoOffset);
6061 _lea(NewVar, Mem); 6182 _lea(NewVar, Mem);
6062 From = NewVar; 6183 From = NewVar;
6063 } else if ((!(Allowed & Legal_Mem) && !MustHaveRegister) || 6184 } else if ((!(Allowed & Legal_Mem) && !MustHaveRegister) ||
6064 (RegNum != Variable::NoRegister && RegNum != Var->getRegNum()) || 6185 (RegNum != Variable::NoRegister && RegNum != Var->getRegNum()) ||
6065 MustRematerialize) { 6186 MustRematerialize) {
6066 From = copyToReg(From, RegNum); 6187 From = copyToReg(From, RegNum);
6067 } 6188 }
6068 return From; 6189 return From;
6069 } 6190 }
6070 llvm_unreachable("Unhandled operand kind in legalize()"); 6191
6192 llvm::report_fatal_error("Unhandled operand kind in legalize()");
6071 return From; 6193 return From;
6072 } 6194 }
6073 6195
6074 /// Provide a trivial wrapper to legalize() for this common usage. 6196 /// Provide a trivial wrapper to legalize() for this common usage.
6075 template <class Machine> 6197 template <class Machine>
6076 Variable *TargetX86Base<Machine>::legalizeToReg(Operand *From, int32_t RegNum) { 6198 Variable *TargetX86Base<Machine>::legalizeToReg(Operand *From, int32_t RegNum) {
6077 return llvm::cast<Variable>(legalize(From, Legal_Reg, RegNum)); 6199 return llvm::cast<Variable>(legalize(From, Legal_Reg, RegNum));
6078 } 6200 }
6079 6201
6080 /// Legalize undef values to concrete values. 6202 /// Legalize undef values to concrete values.
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after
6128 // transformation. 6250 // transformation.
6129 if (!Mem) { 6251 if (!Mem) {
6130 auto *Base = llvm::dyn_cast<Variable>(Opnd); 6252 auto *Base = llvm::dyn_cast<Variable>(Opnd);
6131 auto *Offset = llvm::dyn_cast<Constant>(Opnd); 6253 auto *Offset = llvm::dyn_cast<Constant>(Opnd);
6132 assert(Base || Offset); 6254 assert(Base || Offset);
6133 if (Offset) { 6255 if (Offset) {
6134 // During memory operand building, we do not blind or pool the constant 6256 // During memory operand building, we do not blind or pool the constant
6135 // offset, we will work on the whole memory operand later as one entity 6257 // offset, we will work on the whole memory operand later as one entity
6136 // later, this save one instruction. By turning blinding and pooling off, 6258 // later, this save one instruction. By turning blinding and pooling off,
6137 // we guarantee legalize(Offset) will return a Constant*. 6259 // we guarantee legalize(Offset) will return a Constant*.
6138 { 6260 if (!llvm::isa<ConstantRelocatable>(Offset)) {
6139 BoolFlagSaver B(RandomizationPoolingPaused, true); 6261 BoolFlagSaver B(RandomizationPoolingPaused, true);
6140 6262
6141 Offset = llvm::cast<Constant>(legalize(Offset)); 6263 Offset = llvm::cast<Constant>(legalize(Offset));
6142 } 6264 }
6143 6265
6144 assert(llvm::isa<ConstantInteger32>(Offset) || 6266 assert(llvm::isa<ConstantInteger32>(Offset) ||
6145 llvm::isa<ConstantRelocatable>(Offset)); 6267 llvm::isa<ConstantRelocatable>(Offset));
6146 } 6268 }
6269 // Not completely sure whether it's OK to leave IsPIC unset when creating
6270 // the mem operand. If DoLegalize is true, it will definitely be applied
6271 // during the legalize() call, but perhaps not during the
6272 // randomizeOrPoolImmediate() call. In any case, the emit routines will
6273 // assert that PIC legalization has been applied.
6147 Mem = Traits::X86OperandMem::create(Func, Ty, Base, Offset); 6274 Mem = Traits::X86OperandMem::create(Func, Ty, Base, Offset);
6148 } 6275 }
6149 // Do legalization, which contains randomization/pooling or do 6276 // Do legalization, which contains randomization/pooling or do
6150 // randomization/pooling. 6277 // randomization/pooling.
6151 return llvm::cast<typename Traits::X86OperandMem>( 6278 return llvm::cast<typename Traits::X86OperandMem>(
6152 DoLegalize ? legalize(Mem) : randomizeOrPoolImmediate(Mem)); 6279 DoLegalize ? legalize(Mem) : randomizeOrPoolImmediate(Mem));
6153 } 6280 }
6154 6281
6155 template <class Machine> 6282 template <class Machine>
6156 Variable *TargetX86Base<Machine>::makeReg(Type Type, int32_t RegNum) { 6283 Variable *TargetX86Base<Machine>::makeReg(Type Type, int32_t RegNum) {
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after
6204 const llvm::SmallBitVector &ExcludeRegisters, uint64_t Salt) const { 6331 const llvm::SmallBitVector &ExcludeRegisters, uint64_t Salt) const {
6205 Traits::makeRandomRegisterPermutation(Ctx, Func, Permutation, 6332 Traits::makeRandomRegisterPermutation(Ctx, Func, Permutation,
6206 ExcludeRegisters, Salt); 6333 ExcludeRegisters, Salt);
6207 } 6334 }
6208 6335
6209 template <class Machine> 6336 template <class Machine>
6210 void TargetX86Base<Machine>::emit(const ConstantInteger32 *C) const { 6337 void TargetX86Base<Machine>::emit(const ConstantInteger32 *C) const {
6211 if (!BuildDefs::dump()) 6338 if (!BuildDefs::dump())
6212 return; 6339 return;
6213 Ostream &Str = Ctx->getStrEmit(); 6340 Ostream &Str = Ctx->getStrEmit();
6214 Str << getConstantPrefix() << C->getValue(); 6341 Str << "$" << C->getValue();
6215 } 6342 }
6216 6343
6217 template <class Machine> 6344 template <class Machine>
6218 void TargetX86Base<Machine>::emit(const ConstantInteger64 *C) const { 6345 void TargetX86Base<Machine>::emit(const ConstantInteger64 *C) const {
6219 if (!Traits::Is64Bit) { 6346 if (!Traits::Is64Bit) {
6220 llvm::report_fatal_error("Not expecting to emit 64-bit integers"); 6347 llvm::report_fatal_error("Not expecting to emit 64-bit integers");
6221 } else { 6348 } else {
6222 if (!BuildDefs::dump()) 6349 if (!BuildDefs::dump())
6223 return; 6350 return;
6224 Ostream &Str = Ctx->getStrEmit(); 6351 Ostream &Str = Ctx->getStrEmit();
6225 Str << getConstantPrefix() << C->getValue(); 6352 Str << "$" << C->getValue();
6226 } 6353 }
6227 } 6354 }
6228 6355
6229 template <class Machine> 6356 template <class Machine>
6230 void TargetX86Base<Machine>::emit(const ConstantFloat *C) const { 6357 void TargetX86Base<Machine>::emit(const ConstantFloat *C) const {
6231 if (!BuildDefs::dump()) 6358 if (!BuildDefs::dump())
6232 return; 6359 return;
6233 Ostream &Str = Ctx->getStrEmit(); 6360 Ostream &Str = Ctx->getStrEmit();
6234 C->emitPoolLabel(Str, Ctx); 6361 C->emitPoolLabel(Str, Ctx);
6235 } 6362 }
6236 6363
6237 template <class Machine> 6364 template <class Machine>
6238 void TargetX86Base<Machine>::emit(const ConstantDouble *C) const { 6365 void TargetX86Base<Machine>::emit(const ConstantDouble *C) const {
6239 if (!BuildDefs::dump()) 6366 if (!BuildDefs::dump())
6240 return; 6367 return;
6241 Ostream &Str = Ctx->getStrEmit(); 6368 Ostream &Str = Ctx->getStrEmit();
6242 C->emitPoolLabel(Str, Ctx); 6369 C->emitPoolLabel(Str, Ctx);
6243 } 6370 }
6244 6371
6245 template <class Machine> 6372 template <class Machine>
6246 void TargetX86Base<Machine>::emit(const ConstantUndef *) const { 6373 void TargetX86Base<Machine>::emit(const ConstantUndef *) const {
6247 llvm::report_fatal_error("undef value encountered by emitter."); 6374 llvm::report_fatal_error("undef value encountered by emitter.");
6248 } 6375 }
6249 6376
6377 template <class Machine>
6378 void TargetX86Base<Machine>::emit(const ConstantRelocatable *C) const {
6379 if (!BuildDefs::dump())
6380 return;
6381 assert(!Ctx->getFlags().getUseNonsfi());
6382 Ostream &Str = Ctx->getStrEmit();
6383 Str << "$";
6384 emitWithoutPrefix(C);
6385 }
6386
6250 /// Randomize or pool an Immediate. 6387 /// Randomize or pool an Immediate.
6251 template <class Machine> 6388 template <class Machine>
6252 Operand *TargetX86Base<Machine>::randomizeOrPoolImmediate(Constant *Immediate, 6389 Operand *TargetX86Base<Machine>::randomizeOrPoolImmediate(Constant *Immediate,
6253 int32_t RegNum) { 6390 int32_t RegNum) {
6254 assert(llvm::isa<ConstantInteger32>(Immediate) || 6391 assert(llvm::isa<ConstantInteger32>(Immediate) ||
6255 llvm::isa<ConstantRelocatable>(Immediate)); 6392 llvm::isa<ConstantRelocatable>(Immediate));
6256 if (Ctx->getFlags().getRandomizeAndPoolImmediatesOption() == RPI_None || 6393 if (Ctx->getFlags().getRandomizeAndPoolImmediatesOption() == RPI_None ||
6257 RandomizationPoolingPaused == true) { 6394 RandomizationPoolingPaused == true) {
6258 // Immediates randomization/pooling off or paused 6395 // Immediates randomization/pooling off or paused
6259 return Immediate; 6396 return Immediate;
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after
6303 // assigned register as this assignment is that start of its use-def 6440 // assigned register as this assignment is that start of its use-def
6304 // chain. So we add RegNum argument here. 6441 // chain. So we add RegNum argument here.
6305 Variable *Reg = makeReg(Immediate->getType(), RegNum); 6442 Variable *Reg = makeReg(Immediate->getType(), RegNum);
6306 IceString Label; 6443 IceString Label;
6307 llvm::raw_string_ostream Label_stream(Label); 6444 llvm::raw_string_ostream Label_stream(Label);
6308 Immediate->emitPoolLabel(Label_stream, Ctx); 6445 Immediate->emitPoolLabel(Label_stream, Ctx);
6309 constexpr RelocOffsetT Offset = 0; 6446 constexpr RelocOffsetT Offset = 0;
6310 constexpr bool SuppressMangling = true; 6447 constexpr bool SuppressMangling = true;
6311 Constant *Symbol = 6448 Constant *Symbol =
6312 Ctx->getConstantSym(Offset, Label_stream.str(), SuppressMangling); 6449 Ctx->getConstantSym(Offset, Label_stream.str(), SuppressMangling);
6450 const bool UseNonsfi = Ctx->getFlags().getUseNonsfi();
6451 Variable *Base = UseNonsfi ? legalizeToReg(GotVar) : nullptr;
6313 typename Traits::X86OperandMem *MemOperand = 6452 typename Traits::X86OperandMem *MemOperand =
6314 Traits::X86OperandMem::create(Func, Immediate->getType(), nullptr, 6453 Traits::X86OperandMem::create(Func, Immediate->getType(), Base,
6315 Symbol); 6454 Symbol);
6455 if (UseNonsfi)
6456 MemOperand->setIsPIC();
6316 _mov(Reg, MemOperand); 6457 _mov(Reg, MemOperand);
6317 return Reg; 6458 return Reg;
6318 } 6459 }
6319 assert("Unsupported -randomize-pool-immediates option" && false); 6460 assert("Unsupported -randomize-pool-immediates option" && false);
6320 } 6461 }
6321 // the constant Immediate is not eligible for blinding/pooling 6462 // the constant Immediate is not eligible for blinding/pooling
6322 return Immediate; 6463 return Immediate;
6323 } 6464 }
6324 6465
6325 template <class Machine> 6466 template <class Machine>
(...skipping 74 matching lines...) Expand 10 before | Expand all | Expand 10 after
6400 return MemOperand; 6541 return MemOperand;
6401 Variable *RegTemp = makeReg(IceType_i32); 6542 Variable *RegTemp = makeReg(IceType_i32);
6402 IceString Label; 6543 IceString Label;
6403 llvm::raw_string_ostream Label_stream(Label); 6544 llvm::raw_string_ostream Label_stream(Label);
6404 MemOperand->getOffset()->emitPoolLabel(Label_stream, Ctx); 6545 MemOperand->getOffset()->emitPoolLabel(Label_stream, Ctx);
6405 MemOperand->getOffset()->setShouldBePooled(true); 6546 MemOperand->getOffset()->setShouldBePooled(true);
6406 constexpr RelocOffsetT SymOffset = 0; 6547 constexpr RelocOffsetT SymOffset = 0;
6407 constexpr bool SuppressMangling = true; 6548 constexpr bool SuppressMangling = true;
6408 Constant *Symbol = Ctx->getConstantSym(SymOffset, Label_stream.str(), 6549 Constant *Symbol = Ctx->getConstantSym(SymOffset, Label_stream.str(),
6409 SuppressMangling); 6550 SuppressMangling);
6551 const bool UseNonsfi = Ctx->getFlags().getUseNonsfi();
6552 Variable *Base = UseNonsfi ? legalizeToReg(GotVar) : nullptr;
6410 typename Traits::X86OperandMem *SymbolOperand = 6553 typename Traits::X86OperandMem *SymbolOperand =
6411 Traits::X86OperandMem::create( 6554 Traits::X86OperandMem::create(
6412 Func, MemOperand->getOffset()->getType(), nullptr, Symbol); 6555 Func, MemOperand->getOffset()->getType(), Base, Symbol);
6556 if (UseNonsfi)
6557 SymbolOperand->setIsPIC();
6413 _mov(RegTemp, SymbolOperand); 6558 _mov(RegTemp, SymbolOperand);
6414 // If we have a base variable here, we should add the lea instruction 6559 // If we have a base variable here, we should add the lea instruction
6415 // to add the value of the base variable to RegTemp. If there is no 6560 // to add the value of the base variable to RegTemp. If there is no
6416 // base variable, we won't need this lea instruction. 6561 // base variable, we won't need this lea instruction.
6417 if (MemOperand->getBase()) { 6562 if (MemOperand->getBase()) {
6418 typename Traits::X86OperandMem *CalculateOperand = 6563 typename Traits::X86OperandMem *CalculateOperand =
6419 Traits::X86OperandMem::create( 6564 Traits::X86OperandMem::create(
6420 Func, MemOperand->getType(), MemOperand->getBase(), nullptr, 6565 Func, MemOperand->getType(), MemOperand->getBase(), nullptr,
6421 RegTemp, 0, MemOperand->getSegmentRegister()); 6566 RegTemp, 0, MemOperand->getSegmentRegister());
6422 _lea(RegTemp, CalculateOperand); 6567 _lea(RegTemp, CalculateOperand);
(...skipping 10 matching lines...) Expand all
6433 } 6578 }
6434 // the offset is not eligible for blinding or pooling, return the original 6579 // the offset is not eligible for blinding or pooling, return the original
6435 // mem operand 6580 // mem operand
6436 return MemOperand; 6581 return MemOperand;
6437 } 6582 }
6438 6583
6439 } // end of namespace X86Internal 6584 } // end of namespace X86Internal
6440 } // end of namespace Ice 6585 } // end of namespace Ice
6441 6586
6442 #endif // SUBZERO_SRC_ICETARGETLOWERINGX86BASEIMPL_H 6587 #endif // SUBZERO_SRC_ICETARGETLOWERINGX86BASEIMPL_H
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698